code
stringlengths 13
1.2M
| order_type
stringclasses 1
value | original_example
dict | step_ids
listlengths 1
5
|
---|---|---|---|
import re
_camel_words = re.compile(r"([A-Z][a-z0-9_]+)")
def _camel_to_snake(s):
""" Convert CamelCase to snake_case.
"""
return "_".join(
[
i.lower() for i in _camel_words.split(s)[1::2]
]
)
|
normal
|
{
"blob_id": "6c9f9363a95ea7dc97ccb45d0922f0531c5cfec9",
"index": 6572,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef _camel_to_snake(s):\n \"\"\" Convert CamelCase to snake_case.\n \"\"\"\n return '_'.join([i.lower() for i in _camel_words.split(s)[1::2]])\n",
"step-3": "<mask token>\n_camel_words = re.compile('([A-Z][a-z0-9_]+)')\n\n\ndef _camel_to_snake(s):\n \"\"\" Convert CamelCase to snake_case.\n \"\"\"\n return '_'.join([i.lower() for i in _camel_words.split(s)[1::2]])\n",
"step-4": "import re\n_camel_words = re.compile('([A-Z][a-z0-9_]+)')\n\n\ndef _camel_to_snake(s):\n \"\"\" Convert CamelCase to snake_case.\n \"\"\"\n return '_'.join([i.lower() for i in _camel_words.split(s)[1::2]])\n",
"step-5": "import re\n\n\n_camel_words = re.compile(r\"([A-Z][a-z0-9_]+)\")\n\n\ndef _camel_to_snake(s):\n \"\"\" Convert CamelCase to snake_case.\n \"\"\"\n return \"_\".join(\n [\n i.lower() for i in _camel_words.split(s)[1::2]\n ]\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#anand python problem 2:29
#Write a function array to create an 2-dimensional array. The function should take both dimensions as arguments. Value of each element can be initialized to None:
#
def array_imp(row,col):
res=[[None]*col for i in range(row) ]
return res
if __name__=='__main__':
outs=array_imp(2,3)
print outs
|
normal
|
{
"blob_id": "b5835b676eb8ac814086f7482f172f48e2ad5a0a",
"index": 8189,
"step-1": "#anand python problem 2:29\n#Write a function array to create an 2-dimensional array. The function should take both dimensions as arguments. Value of each element can be initialized to None:\n#\n\ndef array_imp(row,col):\n\tres=[[None]*col for i in range(row) ]\n\treturn res\n\n\n\n\nif __name__=='__main__':\n\touts=array_imp(2,3)\n\tprint outs\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 2.1.4 on 2019-01-11 11:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('devisa', '0021_auto_20190110_1256'),
]
operations = [
migrations.RemoveField(
model_name='entidade',
name='bairro',
),
migrations.RemoveField(
model_name='entidade',
name='ent_cep',
),
migrations.RemoveField(
model_name='entidade',
name='ent_cnes',
),
migrations.RemoveField(
model_name='entidade',
name='ent_complemento',
),
migrations.RemoveField(
model_name='entidade',
name='ent_dt_expedicao',
),
migrations.RemoveField(
model_name='entidade',
name='ent_dt_inicio_func',
),
migrations.RemoveField(
model_name='entidade',
name='ent_email',
),
migrations.RemoveField(
model_name='entidade',
name='ent_endereco',
),
migrations.RemoveField(
model_name='entidade',
name='ent_especializacao',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fantasia',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fax',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fone',
),
migrations.RemoveField(
model_name='entidade',
name='ent_insc_estadual',
),
migrations.RemoveField(
model_name='entidade',
name='ent_insc_municipal',
),
migrations.RemoveField(
model_name='entidade',
name='ent_numero',
),
migrations.RemoveField(
model_name='entidade',
name='ent_obj_contrato_social',
),
migrations.RemoveField(
model_name='entidade',
name='ent_observacoes',
),
migrations.RemoveField(
model_name='entidade',
name='ent_orgao_exp',
),
migrations.RemoveField(
model_name='entidade',
name='ent_pasta_num',
),
migrations.RemoveField(
model_name='entidade',
name='ent_registro_conselho',
),
migrations.RemoveField(
model_name='entidade',
name='ent_rg',
),
migrations.RemoveField(
model_name='entidade',
name='escolaridade',
),
migrations.RemoveField(
model_name='entidade',
name='formacao_profissional',
),
migrations.RemoveField(
model_name='entidade',
name='municipio',
),
migrations.RemoveField(
model_name='entidade',
name='natureza_juridica_dependencia',
),
]
|
normal
|
{
"blob_id": "34f79fa3de68b53f19220697815e5bae5270d056",
"index": 9274,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('devisa', '0021_auto_20190110_1256')]\n operations = [migrations.RemoveField(model_name='entidade', name=\n 'bairro'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cep'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cnes'), migrations.RemoveField(model_name='entidade', name=\n 'ent_complemento'), migrations.RemoveField(model_name='entidade',\n name='ent_dt_expedicao'), migrations.RemoveField(model_name=\n 'entidade', name='ent_dt_inicio_func'), migrations.RemoveField(\n model_name='entidade', name='ent_email'), migrations.RemoveField(\n model_name='entidade', name='ent_endereco'), migrations.RemoveField\n (model_name='entidade', name='ent_especializacao'), migrations.\n RemoveField(model_name='entidade', name='ent_fantasia'), migrations\n .RemoveField(model_name='entidade', name='ent_fax'), migrations.\n RemoveField(model_name='entidade', name='ent_fone'), migrations.\n RemoveField(model_name='entidade', name='ent_insc_estadual'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_insc_municipal'), migrations.RemoveField(model_name='entidade',\n name='ent_numero'), migrations.RemoveField(model_name='entidade',\n name='ent_obj_contrato_social'), migrations.RemoveField(model_name=\n 'entidade', name='ent_observacoes'), migrations.RemoveField(\n model_name='entidade', name='ent_orgao_exp'), migrations.\n RemoveField(model_name='entidade', name='ent_pasta_num'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_registro_conselho'), migrations.RemoveField(model_name=\n 'entidade', name='ent_rg'), migrations.RemoveField(model_name=\n 'entidade', name='escolaridade'), migrations.RemoveField(model_name\n ='entidade', name='formacao_profissional'), migrations.RemoveField(\n model_name='entidade', name='municipio'), migrations.RemoveField(\n model_name='entidade', name='natureza_juridica_dependencia')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('devisa', '0021_auto_20190110_1256')]\n operations = [migrations.RemoveField(model_name='entidade', name=\n 'bairro'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cep'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cnes'), migrations.RemoveField(model_name='entidade', name=\n 'ent_complemento'), migrations.RemoveField(model_name='entidade',\n name='ent_dt_expedicao'), migrations.RemoveField(model_name=\n 'entidade', name='ent_dt_inicio_func'), migrations.RemoveField(\n model_name='entidade', name='ent_email'), migrations.RemoveField(\n model_name='entidade', name='ent_endereco'), migrations.RemoveField\n (model_name='entidade', name='ent_especializacao'), migrations.\n RemoveField(model_name='entidade', name='ent_fantasia'), migrations\n .RemoveField(model_name='entidade', name='ent_fax'), migrations.\n RemoveField(model_name='entidade', name='ent_fone'), migrations.\n RemoveField(model_name='entidade', name='ent_insc_estadual'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_insc_municipal'), migrations.RemoveField(model_name='entidade',\n name='ent_numero'), migrations.RemoveField(model_name='entidade',\n name='ent_obj_contrato_social'), migrations.RemoveField(model_name=\n 'entidade', name='ent_observacoes'), migrations.RemoveField(\n model_name='entidade', name='ent_orgao_exp'), migrations.\n RemoveField(model_name='entidade', name='ent_pasta_num'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_registro_conselho'), migrations.RemoveField(model_name=\n 'entidade', name='ent_rg'), migrations.RemoveField(model_name=\n 'entidade', name='escolaridade'), migrations.RemoveField(model_name\n ='entidade', name='formacao_profissional'), migrations.RemoveField(\n model_name='entidade', name='municipio'), migrations.RemoveField(\n model_name='entidade', name='natureza_juridica_dependencia')]\n",
"step-5": "# Generated by Django 2.1.4 on 2019-01-11 11:58\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('devisa', '0021_auto_20190110_1256'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='entidade',\n name='bairro',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_cep',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_cnes',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_complemento',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_dt_expedicao',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_dt_inicio_func',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_email',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_endereco',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_especializacao',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_fantasia',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_fax',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_fone',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_insc_estadual',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_insc_municipal',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_numero',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_obj_contrato_social',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_observacoes',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_orgao_exp',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_pasta_num',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_registro_conselho',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_rg',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='escolaridade',\n ),\n 
migrations.RemoveField(\n model_name='entidade',\n name='formacao_profissional',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='municipio',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='natureza_juridica_dependencia',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def is_balanced(tree_root):
# Determine if the tree is superbalanced
if tree_root is None:
return True
nodeQ = [(tree_root, 0)]
depths = []
while len(nodeQ):
last_node, depth = nodeQ.pop()
if( not last_node.left ) and (not last_node.right ):
if depth not in depths:
depths.append(depth)
if ((len(depths) > 1) and (max(depths) - min(depths) > 1)):
return False
else:
if(last_node.left):
nodeQ.append((last_node.left, depth + 1))
if(last_node.right):
nodeQ.append((last_node.right, depth + 1))
return True
# store node pointer and depth as tuples
# pop together and store in variables node, depth
# append node.right, node.left
# put in while loop until list is empty
|
normal
|
{
"blob_id": "833c8234d829dfa1937392f0ad4952aeffa4e26d",
"index": 1150,
"step-1": "<mask token>\n",
"step-2": "def is_balanced(tree_root):\n if tree_root is None:\n return True\n nodeQ = [(tree_root, 0)]\n depths = []\n while len(nodeQ):\n last_node, depth = nodeQ.pop()\n if not last_node.left and not last_node.right:\n if depth not in depths:\n depths.append(depth)\n if len(depths) > 1 and max(depths) - min(depths) > 1:\n return False\n else:\n if last_node.left:\n nodeQ.append((last_node.left, depth + 1))\n if last_node.right:\n nodeQ.append((last_node.right, depth + 1))\n return True\n",
"step-3": "def is_balanced(tree_root):\n # Determine if the tree is superbalanced\n \n if tree_root is None:\n return True\n \n nodeQ = [(tree_root, 0)]\n depths = []\n \n while len(nodeQ):\n \n last_node, depth = nodeQ.pop()\n \n if( not last_node.left ) and (not last_node.right ):\n if depth not in depths:\n depths.append(depth)\n \n if ((len(depths) > 1) and (max(depths) - min(depths) > 1)):\n return False\n else:\n \n if(last_node.left):\n nodeQ.append((last_node.left, depth + 1))\n if(last_node.right):\n nodeQ.append((last_node.right, depth + 1))\n \n return True\n \n \n# store node pointer and depth as tuples\n# pop together and store in variables node, depth\n# append node.right, node.left\n# put in while loop until list is empty\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
a = [5, 4, 3, 2, 1]
a = [1, 2, 3, 7, 5, 6, 4, 8, 9]
import time
sorted = True
for i in range(0, len(a)):
print('main')
sorted = True
for j in range(1, len(a) - i):
if a[j] < a[j - 1]:
a[j], a[j - 1] = a[j - 1], a[j]
print('inner')
print(a)
time.sleep(1)
sorted = False
if sorted:
break
print(a)
|
normal
|
{
"blob_id": "30fbe52a5e3fb184a998fce43d716cffdaf0d2dc",
"index": 1790,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, len(a)):\n print('main')\n sorted = True\n for j in range(1, len(a) - i):\n if a[j] < a[j - 1]:\n a[j], a[j - 1] = a[j - 1], a[j]\n print('inner')\n print(a)\n time.sleep(1)\n sorted = False\n if sorted:\n break\nprint(a)\n",
"step-3": "a = [5, 4, 3, 2, 1]\na = [1, 2, 3, 7, 5, 6, 4, 8, 9]\n<mask token>\nsorted = True\nfor i in range(0, len(a)):\n print('main')\n sorted = True\n for j in range(1, len(a) - i):\n if a[j] < a[j - 1]:\n a[j], a[j - 1] = a[j - 1], a[j]\n print('inner')\n print(a)\n time.sleep(1)\n sorted = False\n if sorted:\n break\nprint(a)\n",
"step-4": "a = [5, 4, 3, 2, 1]\na = [1, 2, 3, 7, 5, 6, 4, 8, 9]\nimport time\nsorted = True\nfor i in range(0, len(a)):\n print('main')\n sorted = True\n for j in range(1, len(a) - i):\n if a[j] < a[j - 1]:\n a[j], a[j - 1] = a[j - 1], a[j]\n print('inner')\n print(a)\n time.sleep(1)\n sorted = False\n if sorted:\n break\nprint(a)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import cv2
from matplotlib import pyplot as plt
from matplotlib import cm
import imageio
# # Backpack values
# fx = 7190.247 # lense focal length
# baseline = 174.945 # distance in mm between the two cameras (values from middlebury)
# units = 0.001 # depth units
# doffs=342.523 # x-difference of principal points, following https://vision.middlebury.edu/stereo/data/scenes2014/#description
# texture_threshold = 2000 # 10 by default
# Classroom values
doffs=113.186
baseline=237.604
fx = 3920.793
doffs=113.186
disparities=0
block=23
# # Backpack images
# imgL = cv2.imread('images/im0_left.png', cv2.IMREAD_GRAYSCALE)
# imgR = cv2.imread('images/im0_right.png', cv2.IMREAD_GRAYSCALE)
# Classroom images
imgL = cv2.imread('images/Classroom1-perfect/im0.png', cv2.IMREAD_GRAYSCALE)
imgR = cv2.imread('images/Classroom1-perfect/im1.png', cv2.IMREAD_GRAYSCALE)
plt.imshow(imgL, cmap="gray")
plt.axis('off')
plt.show()
sbm = cv2.StereoBM_create(numDisparities=disparities,blockSize=block)
# sbm.setTextureThreshold(texture_threshold)
# calculate disparities
disparity = sbm.compute(imgL, imgR)
print(disparity)
# show disparity
plt.imshow(disparity)
plt.axis('off')
plt.show()
depth = np.zeros(shape=imgL.shape).astype(float)
depth[disparity > 0] = (fx * baseline) / (doffs + disparity[disparity > 0])
plt.imshow(depth)
plt.show()
# convert from pfm file equation?
pfm = imageio.imread('images/Classroom1-perfect/disp0.pfm')
pfm = np.asarray(pfm)
plt.imshow(pfm)
plt.show()
depth = np.zeros(shape=imgL.shape).astype(float)
depth[pfm > 0] = (fx * baseline) / (doffs + pfm[pfm > 0])
#print(depth)
plt.imshow(depth)
plt.axis('off')
plt.show()
|
normal
|
{
"blob_id": "14761cc2593556f58a7dc4e499db71456d7c7048",
"index": 3237,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.imshow(imgL, cmap='gray')\nplt.axis('off')\nplt.show()\n<mask token>\nprint(disparity)\nplt.imshow(disparity)\nplt.axis('off')\nplt.show()\n<mask token>\nplt.imshow(depth)\nplt.show()\n<mask token>\nplt.imshow(pfm)\nplt.show()\n<mask token>\nplt.imshow(depth)\nplt.axis('off')\nplt.show()\n",
"step-3": "<mask token>\ndoffs = 113.186\nbaseline = 237.604\nfx = 3920.793\ndoffs = 113.186\ndisparities = 0\nblock = 23\nimgL = cv2.imread('images/Classroom1-perfect/im0.png', cv2.IMREAD_GRAYSCALE)\nimgR = cv2.imread('images/Classroom1-perfect/im1.png', cv2.IMREAD_GRAYSCALE)\nplt.imshow(imgL, cmap='gray')\nplt.axis('off')\nplt.show()\nsbm = cv2.StereoBM_create(numDisparities=disparities, blockSize=block)\ndisparity = sbm.compute(imgL, imgR)\nprint(disparity)\nplt.imshow(disparity)\nplt.axis('off')\nplt.show()\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[disparity > 0] = fx * baseline / (doffs + disparity[disparity > 0])\nplt.imshow(depth)\nplt.show()\npfm = imageio.imread('images/Classroom1-perfect/disp0.pfm')\npfm = np.asarray(pfm)\nplt.imshow(pfm)\nplt.show()\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[pfm > 0] = fx * baseline / (doffs + pfm[pfm > 0])\nplt.imshow(depth)\nplt.axis('off')\nplt.show()\n",
"step-4": "import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\nimport imageio\ndoffs = 113.186\nbaseline = 237.604\nfx = 3920.793\ndoffs = 113.186\ndisparities = 0\nblock = 23\nimgL = cv2.imread('images/Classroom1-perfect/im0.png', cv2.IMREAD_GRAYSCALE)\nimgR = cv2.imread('images/Classroom1-perfect/im1.png', cv2.IMREAD_GRAYSCALE)\nplt.imshow(imgL, cmap='gray')\nplt.axis('off')\nplt.show()\nsbm = cv2.StereoBM_create(numDisparities=disparities, blockSize=block)\ndisparity = sbm.compute(imgL, imgR)\nprint(disparity)\nplt.imshow(disparity)\nplt.axis('off')\nplt.show()\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[disparity > 0] = fx * baseline / (doffs + disparity[disparity > 0])\nplt.imshow(depth)\nplt.show()\npfm = imageio.imread('images/Classroom1-perfect/disp0.pfm')\npfm = np.asarray(pfm)\nplt.imshow(pfm)\nplt.show()\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[pfm > 0] = fx * baseline / (doffs + pfm[pfm > 0])\nplt.imshow(depth)\nplt.axis('off')\nplt.show()\n",
"step-5": "import numpy as np\nimport cv2 \nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\nimport imageio\n\n# # Backpack values\n# fx = 7190.247 # lense focal length\n# baseline = 174.945 # distance in mm between the two cameras (values from middlebury)\n# units = 0.001 # depth units\n# doffs=342.523 # x-difference of principal points, following https://vision.middlebury.edu/stereo/data/scenes2014/#description\n\n# texture_threshold = 2000 # 10 by default\n\n# Classroom values\ndoffs=113.186\nbaseline=237.604\nfx = 3920.793\ndoffs=113.186\n\ndisparities=0\nblock=23\n\n# # Backpack images\n# imgL = cv2.imread('images/im0_left.png', cv2.IMREAD_GRAYSCALE)\n# imgR = cv2.imread('images/im0_right.png', cv2.IMREAD_GRAYSCALE)\n\n# Classroom images\nimgL = cv2.imread('images/Classroom1-perfect/im0.png', cv2.IMREAD_GRAYSCALE)\nimgR = cv2.imread('images/Classroom1-perfect/im1.png', cv2.IMREAD_GRAYSCALE)\n\nplt.imshow(imgL, cmap=\"gray\")\nplt.axis('off')\nplt.show()\n\nsbm = cv2.StereoBM_create(numDisparities=disparities,blockSize=block)\n# sbm.setTextureThreshold(texture_threshold)\n\n\n# calculate disparities\ndisparity = sbm.compute(imgL, imgR)\nprint(disparity)\n# show disparity\nplt.imshow(disparity)\nplt.axis('off')\nplt.show()\n\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[disparity > 0] = (fx * baseline) / (doffs + disparity[disparity > 0])\n\nplt.imshow(depth)\nplt.show()\n\n\n# convert from pfm file equation?\npfm = imageio.imread('images/Classroom1-perfect/disp0.pfm')\npfm = np.asarray(pfm)\nplt.imshow(pfm)\nplt.show()\n\ndepth = np.zeros(shape=imgL.shape).astype(float)\ndepth[pfm > 0] = (fx * baseline) / (doffs + pfm[pfm > 0])\n#print(depth)\n\nplt.imshow(depth)\nplt.axis('off')\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
import weather_forecast
from weather_forecast import forecast
from googlesearch import search
from youtube_search import YoutubeSearch
import yfinance as yf
import smtplib as bot
import imaplib as imap
import email
import time
from GoogleNews import GoogleNews
import json
t = time.localtime()
current_time = time.strftime("%H:%M:%S", t)
from datetime import date
import random
today = date.today()
d1 = today.strftime("%Y-%m-%d")
def game():
for i in range(1000):
request = input('Auto-Bot at your service. Please state your request. ')
if request == 'google':
query = input('Search: ')
print(search(query, num_results = 3))
elif request == 'stocks':
ticker = input('Ticker Symbol: ')
realticker = yf.Ticker(ticker)
print(realticker.history(period= '1m'))
elif request == 'weather':
place = input('City: ')
weather = weather_forecast.forecast(place=place, time=current_time, date=d1)
elif request == 'email':
to = input('Email address: ')
content = input('What do you want to say? ')
address = '[email protected]'
password = 'AutoBot1'
server = 'imap.gmail.com'
s = bot.SMTP(host= 'smtp.gmail.com', port= 587)
s.starttls()
s.login(address, password)
s.ehlo()
s.sendmail(address, to ,content)
{}
elif request == 'song':
song = input('Song name: ')
results = YoutubeSearch(song, max_results=1).to_dict()
dict = results[0].values()
newdict = list(dict)
url = newdict[7]
print(f'https://www.youtube.com{url}')
elif request == 'news':
news = input('Search news: ')
gn = GoogleNews()
top = gn.search(news)
newnews = gn.results()
dict = list(newnews[0].values())
dicttwo = list(newnews[1].values())
dictthree = list(newnews[2].values())
dictfour = list(newnews[3].values())
dictfive = list(newnews[4].values())
title1 = dict[0]
title2 = dicttwo[0]
title3 = dictthree[0]
title4 = dictfour[0]
title5 = dictfive[0]
src1 = dict[1]
src2 = dicttwo[1]
src3 = dictthree[1]
src4 = dictfour[1]
src5 = dictfive[1]
cap1 = dict[4]
cap2 = dicttwo[4]
cap3 = dictthree[4]
cap4 = dictfour[4]
cap5 = dictfive[4]
url1 = dict[5]
url2 = dicttwo[5]
url3 = dictthree[5]
url4 = dictfour[5]
url5 = dictfive[5]
print(f'Title: {title1}')
print(f'Source: {src1}')
print(f'Caption: {cap1}')
print(f'Url: {url1}')
print(f'Title: {title2}')
print(f'Source: {src2}')
print(f'Caption: {cap2}')
print(f'Url: {url2}')
print(f'Title: {title3}')
print(f'Source: {src3}')
print(f'Caption: {cap3}')
print(f'Url: {url3}')
print(f'Title: {title4}')
print(f'Source: {src4}')
print(f'Caption: {cap4}')
print(f'Url: {url4}')
print(f'Title: {title5}')
print(f'Source: {src5}')
print(f'Caption: {cap5}')
print(f'Url: {url5}')
elif request == 'math':
def add(x, y):
return x + y
# This function subtracts two numbers
def subtract(x, y):
return x - y
# This function multiplies two numbers
def multiply(x, y):
return x * y
# This function divides two numbers
def divide(x, y):
return x / y
while True:
# Take input from the user
choice = input("Enter choice( + / - / * / / ): ")
# Check if choice is one of the four options
if choice in ('+', '-', '*', '/'):
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
if choice == '+':
print(num1, "+", num2, "=", add(num1, num2))
elif choice == '-':
print(num1, "-", num2, "=", subtract(num1, num2))
elif choice == '*':
print(num1, "*", num2, "=", multiply(num1, num2))
elif choice == '/':
print(num1, "/", num2, "=", divide(num1, num2))
break
else:
print("Invalid Input")
elif request == 'game':
type = input('Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors ')
if type == '1':
unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
theBoard = {'7': ' ', '8': ' ', '9': ' ',
'4': ' ', '5': ' ', '6': ' ',
'1': ' ', '2': ' ', '3': ' '}
board_keys = []
for key in theBoard:
board_keys.append(key)
''' We will have to print the updated board after every move in the game and
thus we will make a function in which we'll define the printBoard function
so that we can easily print the board everytime by calling this function. '''
def printBoard(board):
print(board['7'] + '|' + board['8'] + '|' + board['9'])
print('-+-+-')
print(board['4'] + '|' + board['5'] + '|' + board['6'])
print('-+-+-')
print(board['1'] + '|' + board['2'] + '|' + board['3'])
# Now we'll write the main function which has all the gameplay functionality.
def tictactoe():
turn = 'X'
count = 0
for i in range(10):
printBoard(theBoard)
print("It's your turn," + turn + ".Move to which place?")
if turn == 'O':
choice = random.randint(1,9)
choice = unused_keys[choice]
if theBoard[f'{choice}'] == ' ':
theBoard[choice] = turn
unused_keys.remove(choice)
count += 1
elif turn == 'X':
move = input()
if theBoard[move] == ' ':
theBoard[move] = turn
unused_keys.remove(move)
count += 1
else:
print("That place is already filled.\nMove to which place?")
continue
# Now we will check if player X or O has won,for every move after 5 moves.
if count >= 5:
if theBoard['7'] == theBoard['8'] == theBoard['9'] != ' ': # across the top
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['4'] == theBoard['5'] == theBoard['6'] != ' ': # across the middle
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['1'] == theBoard['2'] == theBoard['3'] != ' ': # across the bottom
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['1'] == theBoard['4'] == theBoard['7'] != ' ': # down the left side
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['2'] == theBoard['5'] == theBoard['8'] != ' ': # down the middle
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['3'] == theBoard['6'] == theBoard['9'] != ' ': # down the right side
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['7'] == theBoard['5'] == theBoard['3'] != ' ': # diagonal
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['1'] == theBoard['5'] == theBoard['9'] != ' ': # diagonal
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
# If neither X nor O wins and the board is full, we'll declare the result as 'tie'.
if count == 9:
print("\nGame Over.\n")
print("It's a Tie!!")
# Now we have to change the player after every move.
if turn == 'X':
turn = 'O'
else:
turn = 'X'
tictactoe()
elif type == '2':
print("Winning Rules of the Rock paper scissor game as follows: \n"
+ "Rock vs paper->paper wins \n"
+ "Rock vs scissor->Rock wins \n"
+ "paper vs scissor->scissor wins \n")
print("Enter choice \n 1. Rock \n 2. paper \n 3. scissor \n")
choice = int(input("User turn: "))
# OR is the short-circuit operator
# if any one of the condition is true
# then it return True value
# looping until user enter invalid input
while choice > 3 or choice < 1:
choice = int(input("enter valid input: "))
# initialize value of choice_name variable
# corresponding to the choice value
if choice == 1:
choice_name = 'Rock'
elif choice == 2:
choice_name = 'paper'
else:
choice_name = 'scissor'
# print user choice
print("user choice is: " + choice_name)
print("\nNow its computer turn.......")
# Computer chooses randomly any number
# among 1 , 2 and 3. Using randint method
# of random module
comp_choice = random.randint(1, 3)
# looping until comp_choice value
# is equal to the choice value
while comp_choice == choice:
comp_choice = random.randint(1, 3)
# initialize value of comp_choice_name
# variable corresponding to the choice value
if comp_choice == 1:
comp_choice_name = 'Rock'
elif comp_choice == 2:
comp_choice_name = 'paper'
else:
comp_choice_name = 'scissor'
print("Computer choice is: " + comp_choice_name)
print(choice_name + " V/s " + comp_choice_name)
# condition for winning
if ((choice == 1 and comp_choice == 2) or
(choice == 2 and comp_choice == 1)):
print("paper wins => ", end="")
result = "paper"
elif ((choice == 1 and comp_choice == 3) or
(choice == 3 and comp_choice == 1)):
print("Rock wins =>", end="")
result = "Rock"
else:
print("scissor wins =>", end="")
result = "scissor"
# Printing either user or computer wins
if result == choice_name:
print("<== User wins ==>")
else:
print("<== Computer wins ==>")
'''
mail = imap.IMAP4_SSL(server)
mail.login(address, password)
mail.select('inbox')
status, data = mail.search(None, 'ALL')
ids = []
for block in data:
ids += block.split()
for i in ids:
status, data = mail.fetch(i, '(RFC822)')
for response_part in data:
if isinstance(response_part, tuple):
message = email.message_from_bytes(response_part[1])
mail_from = message['from']
mail_subject = message['subject']
if message.is_multipart():
mail_content = ''
for part in message.get_payload():
if part.get_content_type() == 'text/plain':
mail_content += part.get_payload()
else:
mail_content = message.get_payload()
print(mail_from)
s.quit()
'''
game()
|
normal
|
{
"blob_id": "60354f25f55136d4e873d118cfe048cf08c06e39",
"index": 1587,
"step-1": "<mask token>\n\n\ndef game():\n for i in range(1000):\n request = input('Auto-Bot at your service. Please state your request. '\n )\n if request == 'google':\n query = input('Search: ')\n print(search(query, num_results=3))\n elif request == 'stocks':\n ticker = input('Ticker Symbol: ')\n realticker = yf.Ticker(ticker)\n print(realticker.history(period='1m'))\n elif request == 'weather':\n place = input('City: ')\n weather = weather_forecast.forecast(place=place, time=\n current_time, date=d1)\n elif request == 'email':\n to = input('Email address: ')\n content = input('What do you want to say? ')\n address = '[email protected]'\n password = 'AutoBot1'\n server = 'imap.gmail.com'\n s = bot.SMTP(host='smtp.gmail.com', port=587)\n s.starttls()\n s.login(address, password)\n s.ehlo()\n s.sendmail(address, to, content)\n {}\n elif request == 'song':\n song = input('Song name: ')\n results = YoutubeSearch(song, max_results=1).to_dict()\n dict = results[0].values()\n newdict = list(dict)\n url = newdict[7]\n print(f'https://www.youtube.com{url}')\n elif request == 'news':\n news = input('Search news: ')\n gn = GoogleNews()\n top = gn.search(news)\n newnews = gn.results()\n dict = list(newnews[0].values())\n dicttwo = list(newnews[1].values())\n dictthree = list(newnews[2].values())\n dictfour = list(newnews[3].values())\n dictfive = list(newnews[4].values())\n title1 = dict[0]\n title2 = dicttwo[0]\n title3 = dictthree[0]\n title4 = dictfour[0]\n title5 = dictfive[0]\n src1 = dict[1]\n src2 = dicttwo[1]\n src3 = dictthree[1]\n src4 = dictfour[1]\n src5 = dictfive[1]\n cap1 = dict[4]\n cap2 = dicttwo[4]\n cap3 = dictthree[4]\n cap4 = dictfour[4]\n cap5 = dictfive[4]\n url1 = dict[5]\n url2 = dicttwo[5]\n url3 = dictthree[5]\n url4 = dictfour[5]\n url5 = dictfive[5]\n print(f'Title: {title1}')\n print(f'Source: {src1}')\n print(f'Caption: {cap1}')\n print(f'Url: {url1}')\n print(f'Title: {title2}')\n print(f'Source: {src2}')\n print(f'Caption: {cap2}')\n 
print(f'Url: {url2}')\n print(f'Title: {title3}')\n print(f'Source: {src3}')\n print(f'Caption: {cap3}')\n print(f'Url: {url3}')\n print(f'Title: {title4}')\n print(f'Source: {src4}')\n print(f'Caption: {cap4}')\n print(f'Url: {url4}')\n print(f'Title: {title5}')\n print(f'Source: {src5}')\n print(f'Caption: {cap5}')\n print(f'Url: {url5}')\n elif request == 'math':\n\n def add(x, y):\n return x + y\n\n def subtract(x, y):\n return x - y\n\n def multiply(x, y):\n return x * y\n\n def divide(x, y):\n return x / y\n while True:\n choice = input('Enter choice( + / - / * / / ): ')\n if choice in ('+', '-', '*', '/'):\n num1 = float(input('Enter first number: '))\n num2 = float(input('Enter second number: '))\n if choice == '+':\n print(num1, '+', num2, '=', add(num1, num2))\n elif choice == '-':\n print(num1, '-', num2, '=', subtract(num1, num2))\n elif choice == '*':\n print(num1, '*', num2, '=', multiply(num1, num2))\n elif choice == '/':\n print(num1, '/', num2, '=', divide(num1, num2))\n break\n else:\n print('Invalid Input')\n elif request == 'game':\n type = input(\n 'Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors '\n )\n if type == '1':\n unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n theBoard = {'7': ' ', '8': ' ', '9': ' ', '4': ' ', '5':\n ' ', '6': ' ', '1': ' ', '2': ' ', '3': ' '}\n board_keys = []\n for key in theBoard:\n board_keys.append(key)\n \"\"\" We will have to print the updated board after every move in the game and \n thus we will make a function in which we'll define the printBoard function\n so that we can easily print the board everytime by calling this function. 
\"\"\"\n\n def printBoard(board):\n print(board['7'] + '|' + board['8'] + '|' + board['9'])\n print('-+-+-')\n print(board['4'] + '|' + board['5'] + '|' + board['6'])\n print('-+-+-')\n print(board['1'] + '|' + board['2'] + '|' + board['3'])\n\n def tictactoe():\n turn = 'X'\n count = 0\n for i in range(10):\n printBoard(theBoard)\n print(\"It's your turn,\" + turn +\n '.Move to which place?')\n if turn == 'O':\n choice = random.randint(1, 9)\n choice = unused_keys[choice]\n if theBoard[f'{choice}'] == ' ':\n theBoard[choice] = turn\n unused_keys.remove(choice)\n count += 1\n elif turn == 'X':\n move = input()\n if theBoard[move] == ' ':\n theBoard[move] = turn\n unused_keys.remove(move)\n count += 1\n else:\n print(\n 'That place is already filled.\\nMove to which place?'\n )\n continue\n if count >= 5:\n if theBoard['7'] == theBoard['8'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['4'] == theBoard['5'] == theBoard['6'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['1'] == theBoard['2'] == theBoard['3'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['1'] == theBoard['4'] == theBoard['7'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['2'] == theBoard['5'] == theBoard['8'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['3'] == theBoard['6'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['7'] == theBoard['5'] == theBoard['3'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. 
****')\n break\n elif theBoard['1'] == theBoard['5'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n if count == 9:\n print('\\nGame Over.\\n')\n print(\"It's a Tie!!\")\n if turn == 'X':\n turn = 'O'\n else:\n turn = 'X'\n tictactoe()\n elif type == '2':\n print(\n 'Winning Rules of the Rock paper scissor game as follows: \\n'\n + \"\"\"Rock vs paper->paper wins \n\"\"\" +\n 'Rock vs scissor->Rock wins \\n' +\n 'paper vs scissor->scissor wins \\n')\n print('Enter choice \\n 1. Rock \\n 2. paper \\n 3. scissor \\n')\n choice = int(input('User turn: '))\n while choice > 3 or choice < 1:\n choice = int(input('enter valid input: '))\n if choice == 1:\n choice_name = 'Rock'\n elif choice == 2:\n choice_name = 'paper'\n else:\n choice_name = 'scissor'\n print('user choice is: ' + choice_name)\n print('\\nNow its computer turn.......')\n comp_choice = random.randint(1, 3)\n while comp_choice == choice:\n comp_choice = random.randint(1, 3)\n if comp_choice == 1:\n comp_choice_name = 'Rock'\n elif comp_choice == 2:\n comp_choice_name = 'paper'\n else:\n comp_choice_name = 'scissor'\n print('Computer choice is: ' + comp_choice_name)\n print(choice_name + ' V/s ' + comp_choice_name)\n if (choice == 1 and comp_choice == 2 or choice == 2 and \n comp_choice == 1):\n print('paper wins => ', end='')\n result = 'paper'\n elif choice == 1 and comp_choice == 3 or choice == 3 and comp_choice == 1:\n print('Rock wins =>', end='')\n result = 'Rock'\n else:\n print('scissor wins =>', end='')\n result = 'scissor'\n if result == choice_name:\n print('<== User wins ==>')\n else:\n print('<== Computer wins ==>')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef game():\n for i in range(1000):\n request = input('Auto-Bot at your service. Please state your request. '\n )\n if request == 'google':\n query = input('Search: ')\n print(search(query, num_results=3))\n elif request == 'stocks':\n ticker = input('Ticker Symbol: ')\n realticker = yf.Ticker(ticker)\n print(realticker.history(period='1m'))\n elif request == 'weather':\n place = input('City: ')\n weather = weather_forecast.forecast(place=place, time=\n current_time, date=d1)\n elif request == 'email':\n to = input('Email address: ')\n content = input('What do you want to say? ')\n address = '[email protected]'\n password = 'AutoBot1'\n server = 'imap.gmail.com'\n s = bot.SMTP(host='smtp.gmail.com', port=587)\n s.starttls()\n s.login(address, password)\n s.ehlo()\n s.sendmail(address, to, content)\n {}\n elif request == 'song':\n song = input('Song name: ')\n results = YoutubeSearch(song, max_results=1).to_dict()\n dict = results[0].values()\n newdict = list(dict)\n url = newdict[7]\n print(f'https://www.youtube.com{url}')\n elif request == 'news':\n news = input('Search news: ')\n gn = GoogleNews()\n top = gn.search(news)\n newnews = gn.results()\n dict = list(newnews[0].values())\n dicttwo = list(newnews[1].values())\n dictthree = list(newnews[2].values())\n dictfour = list(newnews[3].values())\n dictfive = list(newnews[4].values())\n title1 = dict[0]\n title2 = dicttwo[0]\n title3 = dictthree[0]\n title4 = dictfour[0]\n title5 = dictfive[0]\n src1 = dict[1]\n src2 = dicttwo[1]\n src3 = dictthree[1]\n src4 = dictfour[1]\n src5 = dictfive[1]\n cap1 = dict[4]\n cap2 = dicttwo[4]\n cap3 = dictthree[4]\n cap4 = dictfour[4]\n cap5 = dictfive[4]\n url1 = dict[5]\n url2 = dicttwo[5]\n url3 = dictthree[5]\n url4 = dictfour[5]\n url5 = dictfive[5]\n print(f'Title: {title1}')\n print(f'Source: {src1}')\n print(f'Caption: {cap1}')\n print(f'Url: {url1}')\n print(f'Title: {title2}')\n print(f'Source: {src2}')\n print(f'Caption: {cap2}')\n 
print(f'Url: {url2}')\n print(f'Title: {title3}')\n print(f'Source: {src3}')\n print(f'Caption: {cap3}')\n print(f'Url: {url3}')\n print(f'Title: {title4}')\n print(f'Source: {src4}')\n print(f'Caption: {cap4}')\n print(f'Url: {url4}')\n print(f'Title: {title5}')\n print(f'Source: {src5}')\n print(f'Caption: {cap5}')\n print(f'Url: {url5}')\n elif request == 'math':\n\n def add(x, y):\n return x + y\n\n def subtract(x, y):\n return x - y\n\n def multiply(x, y):\n return x * y\n\n def divide(x, y):\n return x / y\n while True:\n choice = input('Enter choice( + / - / * / / ): ')\n if choice in ('+', '-', '*', '/'):\n num1 = float(input('Enter first number: '))\n num2 = float(input('Enter second number: '))\n if choice == '+':\n print(num1, '+', num2, '=', add(num1, num2))\n elif choice == '-':\n print(num1, '-', num2, '=', subtract(num1, num2))\n elif choice == '*':\n print(num1, '*', num2, '=', multiply(num1, num2))\n elif choice == '/':\n print(num1, '/', num2, '=', divide(num1, num2))\n break\n else:\n print('Invalid Input')\n elif request == 'game':\n type = input(\n 'Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors '\n )\n if type == '1':\n unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n theBoard = {'7': ' ', '8': ' ', '9': ' ', '4': ' ', '5':\n ' ', '6': ' ', '1': ' ', '2': ' ', '3': ' '}\n board_keys = []\n for key in theBoard:\n board_keys.append(key)\n \"\"\" We will have to print the updated board after every move in the game and \n thus we will make a function in which we'll define the printBoard function\n so that we can easily print the board everytime by calling this function. 
\"\"\"\n\n def printBoard(board):\n print(board['7'] + '|' + board['8'] + '|' + board['9'])\n print('-+-+-')\n print(board['4'] + '|' + board['5'] + '|' + board['6'])\n print('-+-+-')\n print(board['1'] + '|' + board['2'] + '|' + board['3'])\n\n def tictactoe():\n turn = 'X'\n count = 0\n for i in range(10):\n printBoard(theBoard)\n print(\"It's your turn,\" + turn +\n '.Move to which place?')\n if turn == 'O':\n choice = random.randint(1, 9)\n choice = unused_keys[choice]\n if theBoard[f'{choice}'] == ' ':\n theBoard[choice] = turn\n unused_keys.remove(choice)\n count += 1\n elif turn == 'X':\n move = input()\n if theBoard[move] == ' ':\n theBoard[move] = turn\n unused_keys.remove(move)\n count += 1\n else:\n print(\n 'That place is already filled.\\nMove to which place?'\n )\n continue\n if count >= 5:\n if theBoard['7'] == theBoard['8'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['4'] == theBoard['5'] == theBoard['6'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['1'] == theBoard['2'] == theBoard['3'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['1'] == theBoard['4'] == theBoard['7'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['2'] == theBoard['5'] == theBoard['8'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['3'] == theBoard['6'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['7'] == theBoard['5'] == theBoard['3'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. 
****')\n break\n elif theBoard['1'] == theBoard['5'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n if count == 9:\n print('\\nGame Over.\\n')\n print(\"It's a Tie!!\")\n if turn == 'X':\n turn = 'O'\n else:\n turn = 'X'\n tictactoe()\n elif type == '2':\n print(\n 'Winning Rules of the Rock paper scissor game as follows: \\n'\n + \"\"\"Rock vs paper->paper wins \n\"\"\" +\n 'Rock vs scissor->Rock wins \\n' +\n 'paper vs scissor->scissor wins \\n')\n print('Enter choice \\n 1. Rock \\n 2. paper \\n 3. scissor \\n')\n choice = int(input('User turn: '))\n while choice > 3 or choice < 1:\n choice = int(input('enter valid input: '))\n if choice == 1:\n choice_name = 'Rock'\n elif choice == 2:\n choice_name = 'paper'\n else:\n choice_name = 'scissor'\n print('user choice is: ' + choice_name)\n print('\\nNow its computer turn.......')\n comp_choice = random.randint(1, 3)\n while comp_choice == choice:\n comp_choice = random.randint(1, 3)\n if comp_choice == 1:\n comp_choice_name = 'Rock'\n elif comp_choice == 2:\n comp_choice_name = 'paper'\n else:\n comp_choice_name = 'scissor'\n print('Computer choice is: ' + comp_choice_name)\n print(choice_name + ' V/s ' + comp_choice_name)\n if (choice == 1 and comp_choice == 2 or choice == 2 and \n comp_choice == 1):\n print('paper wins => ', end='')\n result = 'paper'\n elif choice == 1 and comp_choice == 3 or choice == 3 and comp_choice == 1:\n print('Rock wins =>', end='')\n result = 'Rock'\n else:\n print('scissor wins =>', end='')\n result = 'scissor'\n if result == choice_name:\n print('<== User wins ==>')\n else:\n print('<== Computer wins ==>')\n\n\n<mask token>\ngame()\n",
"step-3": "<mask token>\nt = time.localtime()\ncurrent_time = time.strftime('%H:%M:%S', t)\n<mask token>\ntoday = date.today()\nd1 = today.strftime('%Y-%m-%d')\n\n\ndef game():\n for i in range(1000):\n request = input('Auto-Bot at your service. Please state your request. '\n )\n if request == 'google':\n query = input('Search: ')\n print(search(query, num_results=3))\n elif request == 'stocks':\n ticker = input('Ticker Symbol: ')\n realticker = yf.Ticker(ticker)\n print(realticker.history(period='1m'))\n elif request == 'weather':\n place = input('City: ')\n weather = weather_forecast.forecast(place=place, time=\n current_time, date=d1)\n elif request == 'email':\n to = input('Email address: ')\n content = input('What do you want to say? ')\n address = '[email protected]'\n password = 'AutoBot1'\n server = 'imap.gmail.com'\n s = bot.SMTP(host='smtp.gmail.com', port=587)\n s.starttls()\n s.login(address, password)\n s.ehlo()\n s.sendmail(address, to, content)\n {}\n elif request == 'song':\n song = input('Song name: ')\n results = YoutubeSearch(song, max_results=1).to_dict()\n dict = results[0].values()\n newdict = list(dict)\n url = newdict[7]\n print(f'https://www.youtube.com{url}')\n elif request == 'news':\n news = input('Search news: ')\n gn = GoogleNews()\n top = gn.search(news)\n newnews = gn.results()\n dict = list(newnews[0].values())\n dicttwo = list(newnews[1].values())\n dictthree = list(newnews[2].values())\n dictfour = list(newnews[3].values())\n dictfive = list(newnews[4].values())\n title1 = dict[0]\n title2 = dicttwo[0]\n title3 = dictthree[0]\n title4 = dictfour[0]\n title5 = dictfive[0]\n src1 = dict[1]\n src2 = dicttwo[1]\n src3 = dictthree[1]\n src4 = dictfour[1]\n src5 = dictfive[1]\n cap1 = dict[4]\n cap2 = dicttwo[4]\n cap3 = dictthree[4]\n cap4 = dictfour[4]\n cap5 = dictfive[4]\n url1 = dict[5]\n url2 = dicttwo[5]\n url3 = dictthree[5]\n url4 = dictfour[5]\n url5 = dictfive[5]\n print(f'Title: {title1}')\n print(f'Source: {src1}')\n 
print(f'Caption: {cap1}')\n print(f'Url: {url1}')\n print(f'Title: {title2}')\n print(f'Source: {src2}')\n print(f'Caption: {cap2}')\n print(f'Url: {url2}')\n print(f'Title: {title3}')\n print(f'Source: {src3}')\n print(f'Caption: {cap3}')\n print(f'Url: {url3}')\n print(f'Title: {title4}')\n print(f'Source: {src4}')\n print(f'Caption: {cap4}')\n print(f'Url: {url4}')\n print(f'Title: {title5}')\n print(f'Source: {src5}')\n print(f'Caption: {cap5}')\n print(f'Url: {url5}')\n elif request == 'math':\n\n def add(x, y):\n return x + y\n\n def subtract(x, y):\n return x - y\n\n def multiply(x, y):\n return x * y\n\n def divide(x, y):\n return x / y\n while True:\n choice = input('Enter choice( + / - / * / / ): ')\n if choice in ('+', '-', '*', '/'):\n num1 = float(input('Enter first number: '))\n num2 = float(input('Enter second number: '))\n if choice == '+':\n print(num1, '+', num2, '=', add(num1, num2))\n elif choice == '-':\n print(num1, '-', num2, '=', subtract(num1, num2))\n elif choice == '*':\n print(num1, '*', num2, '=', multiply(num1, num2))\n elif choice == '/':\n print(num1, '/', num2, '=', divide(num1, num2))\n break\n else:\n print('Invalid Input')\n elif request == 'game':\n type = input(\n 'Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors '\n )\n if type == '1':\n unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n theBoard = {'7': ' ', '8': ' ', '9': ' ', '4': ' ', '5':\n ' ', '6': ' ', '1': ' ', '2': ' ', '3': ' '}\n board_keys = []\n for key in theBoard:\n board_keys.append(key)\n \"\"\" We will have to print the updated board after every move in the game and \n thus we will make a function in which we'll define the printBoard function\n so that we can easily print the board everytime by calling this function. 
\"\"\"\n\n def printBoard(board):\n print(board['7'] + '|' + board['8'] + '|' + board['9'])\n print('-+-+-')\n print(board['4'] + '|' + board['5'] + '|' + board['6'])\n print('-+-+-')\n print(board['1'] + '|' + board['2'] + '|' + board['3'])\n\n def tictactoe():\n turn = 'X'\n count = 0\n for i in range(10):\n printBoard(theBoard)\n print(\"It's your turn,\" + turn +\n '.Move to which place?')\n if turn == 'O':\n choice = random.randint(1, 9)\n choice = unused_keys[choice]\n if theBoard[f'{choice}'] == ' ':\n theBoard[choice] = turn\n unused_keys.remove(choice)\n count += 1\n elif turn == 'X':\n move = input()\n if theBoard[move] == ' ':\n theBoard[move] = turn\n unused_keys.remove(move)\n count += 1\n else:\n print(\n 'That place is already filled.\\nMove to which place?'\n )\n continue\n if count >= 5:\n if theBoard['7'] == theBoard['8'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['4'] == theBoard['5'] == theBoard['6'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['1'] == theBoard['2'] == theBoard['3'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['1'] == theBoard['4'] == theBoard['7'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['2'] == theBoard['5'] == theBoard['8'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['3'] == theBoard['6'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['7'] == theBoard['5'] == theBoard['3'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. 
****')\n break\n elif theBoard['1'] == theBoard['5'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n if count == 9:\n print('\\nGame Over.\\n')\n print(\"It's a Tie!!\")\n if turn == 'X':\n turn = 'O'\n else:\n turn = 'X'\n tictactoe()\n elif type == '2':\n print(\n 'Winning Rules of the Rock paper scissor game as follows: \\n'\n + \"\"\"Rock vs paper->paper wins \n\"\"\" +\n 'Rock vs scissor->Rock wins \\n' +\n 'paper vs scissor->scissor wins \\n')\n print('Enter choice \\n 1. Rock \\n 2. paper \\n 3. scissor \\n')\n choice = int(input('User turn: '))\n while choice > 3 or choice < 1:\n choice = int(input('enter valid input: '))\n if choice == 1:\n choice_name = 'Rock'\n elif choice == 2:\n choice_name = 'paper'\n else:\n choice_name = 'scissor'\n print('user choice is: ' + choice_name)\n print('\\nNow its computer turn.......')\n comp_choice = random.randint(1, 3)\n while comp_choice == choice:\n comp_choice = random.randint(1, 3)\n if comp_choice == 1:\n comp_choice_name = 'Rock'\n elif comp_choice == 2:\n comp_choice_name = 'paper'\n else:\n comp_choice_name = 'scissor'\n print('Computer choice is: ' + comp_choice_name)\n print(choice_name + ' V/s ' + comp_choice_name)\n if (choice == 1 and comp_choice == 2 or choice == 2 and \n comp_choice == 1):\n print('paper wins => ', end='')\n result = 'paper'\n elif choice == 1 and comp_choice == 3 or choice == 3 and comp_choice == 1:\n print('Rock wins =>', end='')\n result = 'Rock'\n else:\n print('scissor wins =>', end='')\n result = 'scissor'\n if result == choice_name:\n print('<== User wins ==>')\n else:\n print('<== Computer wins ==>')\n\n\n<mask token>\ngame()\n",
"step-4": "import weather_forecast\nfrom weather_forecast import forecast\nfrom googlesearch import search\nfrom youtube_search import YoutubeSearch\nimport yfinance as yf\nimport smtplib as bot\nimport imaplib as imap\nimport email\nimport time\nfrom GoogleNews import GoogleNews\nimport json\nt = time.localtime()\ncurrent_time = time.strftime('%H:%M:%S', t)\nfrom datetime import date\nimport random\ntoday = date.today()\nd1 = today.strftime('%Y-%m-%d')\n\n\ndef game():\n for i in range(1000):\n request = input('Auto-Bot at your service. Please state your request. '\n )\n if request == 'google':\n query = input('Search: ')\n print(search(query, num_results=3))\n elif request == 'stocks':\n ticker = input('Ticker Symbol: ')\n realticker = yf.Ticker(ticker)\n print(realticker.history(period='1m'))\n elif request == 'weather':\n place = input('City: ')\n weather = weather_forecast.forecast(place=place, time=\n current_time, date=d1)\n elif request == 'email':\n to = input('Email address: ')\n content = input('What do you want to say? 
')\n address = '[email protected]'\n password = 'AutoBot1'\n server = 'imap.gmail.com'\n s = bot.SMTP(host='smtp.gmail.com', port=587)\n s.starttls()\n s.login(address, password)\n s.ehlo()\n s.sendmail(address, to, content)\n {}\n elif request == 'song':\n song = input('Song name: ')\n results = YoutubeSearch(song, max_results=1).to_dict()\n dict = results[0].values()\n newdict = list(dict)\n url = newdict[7]\n print(f'https://www.youtube.com{url}')\n elif request == 'news':\n news = input('Search news: ')\n gn = GoogleNews()\n top = gn.search(news)\n newnews = gn.results()\n dict = list(newnews[0].values())\n dicttwo = list(newnews[1].values())\n dictthree = list(newnews[2].values())\n dictfour = list(newnews[3].values())\n dictfive = list(newnews[4].values())\n title1 = dict[0]\n title2 = dicttwo[0]\n title3 = dictthree[0]\n title4 = dictfour[0]\n title5 = dictfive[0]\n src1 = dict[1]\n src2 = dicttwo[1]\n src3 = dictthree[1]\n src4 = dictfour[1]\n src5 = dictfive[1]\n cap1 = dict[4]\n cap2 = dicttwo[4]\n cap3 = dictthree[4]\n cap4 = dictfour[4]\n cap5 = dictfive[4]\n url1 = dict[5]\n url2 = dicttwo[5]\n url3 = dictthree[5]\n url4 = dictfour[5]\n url5 = dictfive[5]\n print(f'Title: {title1}')\n print(f'Source: {src1}')\n print(f'Caption: {cap1}')\n print(f'Url: {url1}')\n print(f'Title: {title2}')\n print(f'Source: {src2}')\n print(f'Caption: {cap2}')\n print(f'Url: {url2}')\n print(f'Title: {title3}')\n print(f'Source: {src3}')\n print(f'Caption: {cap3}')\n print(f'Url: {url3}')\n print(f'Title: {title4}')\n print(f'Source: {src4}')\n print(f'Caption: {cap4}')\n print(f'Url: {url4}')\n print(f'Title: {title5}')\n print(f'Source: {src5}')\n print(f'Caption: {cap5}')\n print(f'Url: {url5}')\n elif request == 'math':\n\n def add(x, y):\n return x + y\n\n def subtract(x, y):\n return x - y\n\n def multiply(x, y):\n return x * y\n\n def divide(x, y):\n return x / y\n while True:\n choice = input('Enter choice( + / - / * / / ): ')\n if choice in ('+', '-', '*', 
'/'):\n num1 = float(input('Enter first number: '))\n num2 = float(input('Enter second number: '))\n if choice == '+':\n print(num1, '+', num2, '=', add(num1, num2))\n elif choice == '-':\n print(num1, '-', num2, '=', subtract(num1, num2))\n elif choice == '*':\n print(num1, '*', num2, '=', multiply(num1, num2))\n elif choice == '/':\n print(num1, '/', num2, '=', divide(num1, num2))\n break\n else:\n print('Invalid Input')\n elif request == 'game':\n type = input(\n 'Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors '\n )\n if type == '1':\n unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n theBoard = {'7': ' ', '8': ' ', '9': ' ', '4': ' ', '5':\n ' ', '6': ' ', '1': ' ', '2': ' ', '3': ' '}\n board_keys = []\n for key in theBoard:\n board_keys.append(key)\n \"\"\" We will have to print the updated board after every move in the game and \n thus we will make a function in which we'll define the printBoard function\n so that we can easily print the board everytime by calling this function. \"\"\"\n\n def printBoard(board):\n print(board['7'] + '|' + board['8'] + '|' + board['9'])\n print('-+-+-')\n print(board['4'] + '|' + board['5'] + '|' + board['6'])\n print('-+-+-')\n print(board['1'] + '|' + board['2'] + '|' + board['3'])\n\n def tictactoe():\n turn = 'X'\n count = 0\n for i in range(10):\n printBoard(theBoard)\n print(\"It's your turn,\" + turn +\n '.Move to which place?')\n if turn == 'O':\n choice = random.randint(1, 9)\n choice = unused_keys[choice]\n if theBoard[f'{choice}'] == ' ':\n theBoard[choice] = turn\n unused_keys.remove(choice)\n count += 1\n elif turn == 'X':\n move = input()\n if theBoard[move] == ' ':\n theBoard[move] = turn\n unused_keys.remove(move)\n count += 1\n else:\n print(\n 'That place is already filled.\\nMove to which place?'\n )\n continue\n if count >= 5:\n if theBoard['7'] == theBoard['8'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. 
****')\n break\n elif theBoard['4'] == theBoard['5'] == theBoard['6'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['1'] == theBoard['2'] == theBoard['3'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['1'] == theBoard['4'] == theBoard['7'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['2'] == theBoard['5'] == theBoard['8'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['3'] == theBoard['6'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['7'] == theBoard['5'] == theBoard['3'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n elif theBoard['1'] == theBoard['5'] == theBoard['9'\n ] != ' ':\n printBoard(theBoard)\n print('\\nGame Over.\\n')\n print(' **** ' + turn + ' won. ****')\n break\n if count == 9:\n print('\\nGame Over.\\n')\n print(\"It's a Tie!!\")\n if turn == 'X':\n turn = 'O'\n else:\n turn = 'X'\n tictactoe()\n elif type == '2':\n print(\n 'Winning Rules of the Rock paper scissor game as follows: \\n'\n + \"\"\"Rock vs paper->paper wins \n\"\"\" +\n 'Rock vs scissor->Rock wins \\n' +\n 'paper vs scissor->scissor wins \\n')\n print('Enter choice \\n 1. Rock \\n 2. paper \\n 3. 
scissor \\n')\n choice = int(input('User turn: '))\n while choice > 3 or choice < 1:\n choice = int(input('enter valid input: '))\n if choice == 1:\n choice_name = 'Rock'\n elif choice == 2:\n choice_name = 'paper'\n else:\n choice_name = 'scissor'\n print('user choice is: ' + choice_name)\n print('\\nNow its computer turn.......')\n comp_choice = random.randint(1, 3)\n while comp_choice == choice:\n comp_choice = random.randint(1, 3)\n if comp_choice == 1:\n comp_choice_name = 'Rock'\n elif comp_choice == 2:\n comp_choice_name = 'paper'\n else:\n comp_choice_name = 'scissor'\n print('Computer choice is: ' + comp_choice_name)\n print(choice_name + ' V/s ' + comp_choice_name)\n if (choice == 1 and comp_choice == 2 or choice == 2 and \n comp_choice == 1):\n print('paper wins => ', end='')\n result = 'paper'\n elif choice == 1 and comp_choice == 3 or choice == 3 and comp_choice == 1:\n print('Rock wins =>', end='')\n result = 'Rock'\n else:\n print('scissor wins =>', end='')\n result = 'scissor'\n if result == choice_name:\n print('<== User wins ==>')\n else:\n print('<== Computer wins ==>')\n\n\n<mask token>\ngame()\n",
"step-5": "# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\nimport weather_forecast\nfrom weather_forecast import forecast\n\nfrom googlesearch import search\nfrom youtube_search import YoutubeSearch\n\nimport yfinance as yf\nimport smtplib as bot\nimport imaplib as imap\nimport email\n\nimport time\nfrom GoogleNews import GoogleNews\nimport json\n\nt = time.localtime()\ncurrent_time = time.strftime(\"%H:%M:%S\", t)\nfrom datetime import date\nimport random\n\ntoday = date.today()\n\nd1 = today.strftime(\"%Y-%m-%d\")\n\n\n\ndef game():\n\n for i in range(1000):\n\n request = input('Auto-Bot at your service. Please state your request. ')\n\n if request == 'google':\n query = input('Search: ')\n print(search(query, num_results = 3))\n\n\n elif request == 'stocks':\n ticker = input('Ticker Symbol: ')\n realticker = yf.Ticker(ticker)\n print(realticker.history(period= '1m'))\n\n elif request == 'weather':\n place = input('City: ')\n weather = weather_forecast.forecast(place=place, time=current_time, date=d1)\n \n\n\n elif request == 'email':\n to = input('Email address: ')\n content = input('What do you want to say? 
')\n\n\n\n\n\n\n address = '[email protected]'\n password = 'AutoBot1'\n server = 'imap.gmail.com'\n\n\n\n s = bot.SMTP(host= 'smtp.gmail.com', port= 587)\n s.starttls()\n s.login(address, password)\n s.ehlo()\n\n\n s.sendmail(address, to ,content)\n {}\n elif request == 'song':\n song = input('Song name: ')\n results = YoutubeSearch(song, max_results=1).to_dict()\n dict = results[0].values()\n newdict = list(dict)\n\n url = newdict[7]\n\n\n\n print(f'https://www.youtube.com{url}')\n\n elif request == 'news':\n\n news = input('Search news: ')\n gn = GoogleNews()\n top = gn.search(news)\n newnews = gn.results()\n\n dict = list(newnews[0].values())\n dicttwo = list(newnews[1].values())\n dictthree = list(newnews[2].values())\n dictfour = list(newnews[3].values())\n dictfive = list(newnews[4].values())\n\n\n\n title1 = dict[0]\n title2 = dicttwo[0]\n title3 = dictthree[0]\n title4 = dictfour[0]\n title5 = dictfive[0]\n\n src1 = dict[1]\n src2 = dicttwo[1]\n src3 = dictthree[1]\n src4 = dictfour[1]\n src5 = dictfive[1]\n\n cap1 = dict[4]\n cap2 = dicttwo[4]\n cap3 = dictthree[4]\n cap4 = dictfour[4]\n cap5 = dictfive[4]\n\n url1 = dict[5]\n url2 = dicttwo[5]\n url3 = dictthree[5]\n url4 = dictfour[5]\n url5 = dictfive[5]\n\n print(f'Title: {title1}')\n print(f'Source: {src1}')\n print(f'Caption: {cap1}')\n print(f'Url: {url1}')\n\n print(f'Title: {title2}')\n print(f'Source: {src2}')\n print(f'Caption: {cap2}')\n print(f'Url: {url2}')\n\n print(f'Title: {title3}')\n print(f'Source: {src3}')\n print(f'Caption: {cap3}')\n print(f'Url: {url3}')\n\n print(f'Title: {title4}')\n print(f'Source: {src4}')\n print(f'Caption: {cap4}')\n print(f'Url: {url4}')\n\n print(f'Title: {title5}')\n print(f'Source: {src5}')\n print(f'Caption: {cap5}')\n print(f'Url: {url5}')\n\n\n\n\n\n\n\n\n elif request == 'math':\n\n def add(x, y):\n return x + y\n\n # This function subtracts two numbers\n def subtract(x, y):\n return x - y\n\n # This function multiplies two numbers\n def multiply(x, 
y):\n return x * y\n\n # This function divides two numbers\n def divide(x, y):\n return x / y\n\n\n\n while True:\n # Take input from the user\n choice = input(\"Enter choice( + / - / * / / ): \")\n\n # Check if choice is one of the four options\n if choice in ('+', '-', '*', '/'):\n num1 = float(input(\"Enter first number: \"))\n num2 = float(input(\"Enter second number: \"))\n\n if choice == '+':\n print(num1, \"+\", num2, \"=\", add(num1, num2))\n\n elif choice == '-':\n print(num1, \"-\", num2, \"=\", subtract(num1, num2))\n\n elif choice == '*':\n print(num1, \"*\", num2, \"=\", multiply(num1, num2))\n\n elif choice == '/':\n print(num1, \"/\", num2, \"=\", divide(num1, num2))\n break\n else:\n print(\"Invalid Input\")\n\n elif request == 'game':\n\n type = input('Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors ')\n\n if type == '1':\n unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n theBoard = {'7': ' ', '8': ' ', '9': ' ',\n '4': ' ', '5': ' ', '6': ' ',\n '1': ' ', '2': ' ', '3': ' '}\n\n board_keys = []\n\n for key in theBoard:\n board_keys.append(key)\n\n ''' We will have to print the updated board after every move in the game and \n thus we will make a function in which we'll define the printBoard function\n so that we can easily print the board everytime by calling this function. 
'''\n\n def printBoard(board):\n print(board['7'] + '|' + board['8'] + '|' + board['9'])\n print('-+-+-')\n print(board['4'] + '|' + board['5'] + '|' + board['6'])\n print('-+-+-')\n print(board['1'] + '|' + board['2'] + '|' + board['3'])\n\n # Now we'll write the main function which has all the gameplay functionality.\n def tictactoe():\n\n turn = 'X'\n count = 0\n\n for i in range(10):\n printBoard(theBoard)\n print(\"It's your turn,\" + turn + \".Move to which place?\")\n\n if turn == 'O':\n choice = random.randint(1,9)\n choice = unused_keys[choice]\n\n\n\n if theBoard[f'{choice}'] == ' ':\n theBoard[choice] = turn\n unused_keys.remove(choice)\n count += 1\n\n\n\n\n\n\n elif turn == 'X':\n move = input()\n\n if theBoard[move] == ' ':\n theBoard[move] = turn\n unused_keys.remove(move)\n count += 1\n else:\n print(\"That place is already filled.\\nMove to which place?\")\n continue\n\n # Now we will check if player X or O has won,for every move after 5 moves.\n if count >= 5:\n if theBoard['7'] == theBoard['8'] == theBoard['9'] != ' ': # across the top\n printBoard(theBoard)\n print(\"\\nGame Over.\\n\")\n print(\" **** \" + turn + \" won. ****\")\n break\n elif theBoard['4'] == theBoard['5'] == theBoard['6'] != ' ': # across the middle\n printBoard(theBoard)\n print(\"\\nGame Over.\\n\")\n print(\" **** \" + turn + \" won. ****\")\n break\n elif theBoard['1'] == theBoard['2'] == theBoard['3'] != ' ': # across the bottom\n printBoard(theBoard)\n print(\"\\nGame Over.\\n\")\n print(\" **** \" + turn + \" won. ****\")\n break\n elif theBoard['1'] == theBoard['4'] == theBoard['7'] != ' ': # down the left side\n printBoard(theBoard)\n print(\"\\nGame Over.\\n\")\n print(\" **** \" + turn + \" won. ****\")\n break\n elif theBoard['2'] == theBoard['5'] == theBoard['8'] != ' ': # down the middle\n printBoard(theBoard)\n print(\"\\nGame Over.\\n\")\n print(\" **** \" + turn + \" won. 
****\")\n break\n elif theBoard['3'] == theBoard['6'] == theBoard['9'] != ' ': # down the right side\n printBoard(theBoard)\n print(\"\\nGame Over.\\n\")\n print(\" **** \" + turn + \" won. ****\")\n break\n elif theBoard['7'] == theBoard['5'] == theBoard['3'] != ' ': # diagonal\n printBoard(theBoard)\n print(\"\\nGame Over.\\n\")\n print(\" **** \" + turn + \" won. ****\")\n break\n elif theBoard['1'] == theBoard['5'] == theBoard['9'] != ' ': # diagonal\n printBoard(theBoard)\n print(\"\\nGame Over.\\n\")\n print(\" **** \" + turn + \" won. ****\")\n break\n\n # If neither X nor O wins and the board is full, we'll declare the result as 'tie'.\n if count == 9:\n print(\"\\nGame Over.\\n\")\n print(\"It's a Tie!!\")\n\n # Now we have to change the player after every move.\n if turn == 'X':\n turn = 'O'\n else:\n turn = 'X'\n\n tictactoe()\n\n\n\n\n elif type == '2':\n print(\"Winning Rules of the Rock paper scissor game as follows: \\n\"\n + \"Rock vs paper->paper wins \\n\"\n + \"Rock vs scissor->Rock wins \\n\"\n + \"paper vs scissor->scissor wins \\n\")\n\n\n print(\"Enter choice \\n 1. Rock \\n 2. paper \\n 3. scissor \\n\")\n\n\n choice = int(input(\"User turn: \"))\n\n # OR is the short-circuit operator\n # if any one of the condition is true\n # then it return True value\n\n # looping until user enter invalid input\n while choice > 3 or choice < 1:\n choice = int(input(\"enter valid input: \"))\n\n # initialize value of choice_name variable\n # corresponding to the choice value\n if choice == 1:\n choice_name = 'Rock'\n elif choice == 2:\n choice_name = 'paper'\n else:\n choice_name = 'scissor'\n\n # print user choice\n print(\"user choice is: \" + choice_name)\n print(\"\\nNow its computer turn.......\")\n\n # Computer chooses randomly any number\n # among 1 , 2 and 3. 
Using randint method\n # of random module\n comp_choice = random.randint(1, 3)\n\n # looping until comp_choice value\n # is equal to the choice value\n while comp_choice == choice:\n comp_choice = random.randint(1, 3)\n\n # initialize value of comp_choice_name\n # variable corresponding to the choice value\n if comp_choice == 1:\n comp_choice_name = 'Rock'\n elif comp_choice == 2:\n comp_choice_name = 'paper'\n else:\n comp_choice_name = 'scissor'\n\n print(\"Computer choice is: \" + comp_choice_name)\n\n print(choice_name + \" V/s \" + comp_choice_name)\n\n # condition for winning\n if ((choice == 1 and comp_choice == 2) or\n (choice == 2 and comp_choice == 1)):\n print(\"paper wins => \", end=\"\")\n result = \"paper\"\n\n elif ((choice == 1 and comp_choice == 3) or\n (choice == 3 and comp_choice == 1)):\n print(\"Rock wins =>\", end=\"\")\n result = \"Rock\"\n else:\n print(\"scissor wins =>\", end=\"\")\n result = \"scissor\"\n\n # Printing either user or computer wins\n if result == choice_name:\n print(\"<== User wins ==>\")\n else:\n print(\"<== Computer wins ==>\")\n\n\n'''\nmail = imap.IMAP4_SSL(server)\nmail.login(address, password)\n\nmail.select('inbox')\n\nstatus, data = mail.search(None, 'ALL')\n\nids = []\nfor block in data:\n ids += block.split()\nfor i in ids:\n status, data = mail.fetch(i, '(RFC822)')\n\n for response_part in data:\n if isinstance(response_part, tuple):\n message = email.message_from_bytes(response_part[1])\n mail_from = message['from']\n mail_subject = message['subject']\n\n if message.is_multipart():\n mail_content = ''\n for part in message.get_payload():\n if part.get_content_type() == 'text/plain':\n mail_content += part.get_payload()\n else:\n mail_content = message.get_payload()\n\n\nprint(mail_from)\ns.quit()\n\n'''\ngame()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Lists are similar to tuples,
# except that they allow the data to be modified after creation.
miLista = ['cadena', 21, 2.8, 'nuevo dato', 25]
print (miLista)
miLista[2] = 3.8 # the third element is now 3.8
print(miLista)
miLista.append('NuevoDato')
print(miLista)
|
normal
|
{
"blob_id": "27ec06d084bf819383801be0351c04e7d1fc1752",
"index": 5176,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(miLista)\n<mask token>\nprint(miLista)\nmiLista.append('NuevoDato')\nprint(miLista)\n",
"step-3": "miLista = ['cadena', 21, 2.8, 'nuevo dato', 25]\nprint(miLista)\nmiLista[2] = 3.8\nprint(miLista)\nmiLista.append('NuevoDato')\nprint(miLista)\n",
"step-4": "#Las listas son similares a las tuplas\n# con la diferencia de que permiten modificar los datos una vez creados\nmiLista = ['cadena', 21, 2.8, 'nuevo dato', 25]\nprint (miLista)\nmiLista[2] = 3.8 #el tercer elemento ahora es 3.8\nprint(miLista)\nmiLista.append('NuevoDato')\nprint(miLista)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Uses python3
import sys
from operator import attrgetter
from collections import namedtuple
Segment = namedtuple('Segment', 'start end')
def optimal_points(segments):
    """Return a minimum set of points so every segment contains at least one.

    Classic greedy: scan segments ordered by right endpoint; whenever the
    current segment is not already covered by the last chosen point, its
    right endpoint is the rightmost point that can still cover it, so pick
    that.  Runs in O(n log n).

    Note: the previous version also performed a redundant linear
    `point not in points` membership test; chosen points are strictly
    increasing, so duplicates can never occur.
    """
    points = []
    for seg in sorted(segments, key=attrgetter('end')):
        # A new point is needed only if this segment starts after the
        # most recently chosen point.
        if not points or seg.start > points[-1]:
            points.append(seg.end)
    return points
if __name__ == '__main__':
    # Read all of stdin at once: the first integer is the segment count,
    # followed by start/end pairs.
    input = sys.stdin.read()
    #input = input()
    n, *data = map(int, input.split())
    # Pair consecutive integers (even index = start, odd index = end)
    # into Segment records.
    segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[1::2])))
    points = optimal_points(segments)
    # Output format: number of points, then the points themselves.
    print(len(points))
    print(*points)
|
normal
|
{
"blob_id": "c007dc2416d3f7c883c44dea5471927ea6f816d6",
"index": 3973,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef optimal_points(segments):\n segments = sorted(segments, key=attrgetter('end'), reverse=True)\n points = []\n while len(segments) > 0:\n segement = segments.pop()\n point = segement.end\n while len(segments) > 0 and point >= segments[-1].start:\n segments.pop()\n if point not in points:\n points.append(point)\n return points\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *data = map(int, input.split())\n segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[\n 1::2])))\n points = optimal_points(segments)\n print(len(points))\n print(*points)\n",
"step-3": "<mask token>\nSegment = namedtuple('Segment', 'start end')\n\n\ndef optimal_points(segments):\n segments = sorted(segments, key=attrgetter('end'), reverse=True)\n points = []\n while len(segments) > 0:\n segement = segments.pop()\n point = segement.end\n while len(segments) > 0 and point >= segments[-1].start:\n segments.pop()\n if point not in points:\n points.append(point)\n return points\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *data = map(int, input.split())\n segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[\n 1::2])))\n points = optimal_points(segments)\n print(len(points))\n print(*points)\n",
"step-4": "import sys\nfrom operator import attrgetter\nfrom collections import namedtuple\nSegment = namedtuple('Segment', 'start end')\n\n\ndef optimal_points(segments):\n segments = sorted(segments, key=attrgetter('end'), reverse=True)\n points = []\n while len(segments) > 0:\n segement = segments.pop()\n point = segement.end\n while len(segments) > 0 and point >= segments[-1].start:\n segments.pop()\n if point not in points:\n points.append(point)\n return points\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *data = map(int, input.split())\n segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[\n 1::2])))\n points = optimal_points(segments)\n print(len(points))\n print(*points)\n",
"step-5": "# Uses python3\nimport sys\nfrom operator import attrgetter\nfrom collections import namedtuple\n\nSegment = namedtuple('Segment', 'start end')\n\n\ndef optimal_points(segments):\n segments = sorted(segments, key=attrgetter('end'), reverse=True)\n points = []\n\n #write your code here\n while len(segments) > 0:\n segement = segments.pop()\n point = segement.end\n while len(segments) > 0 and point >= segments[-1].start:\n segments.pop()\n if point not in points:\n points.append(point)\n\n return points\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n #input = input()\n n, *data = map(int, input.split())\n segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[1::2])))\n points = optimal_points(segments)\n print(len(points))\n print(*points)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import graphene
import f1hub.drivers.schema
import f1hub.results.schema
import f1hub.constructors.schema
import f1hub.races.schema
import f1hub.status.schema
import f1hub.circuits.schema
import f1hub.constructorresults.schema
import f1hub.constructorstandings.schema
import f1hub.driverstandings.schema
import f1hub.laptimes.schema
import f1hub.pitstops.schema
import f1hub.qualifying.schema
import f1hub.seasons.schema
class Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.schema.Query, f1hub.circuits.schema.Query,\
    f1hub.constructorresults.schema.Query, f1hub.constructorstandings.schema.Query, f1hub.driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.pitstops.schema.Query,\
    f1hub.qualifying.schema.Query, f1hub.seasons.schema.Query, graphene.ObjectType):
    """Root GraphQL query type.

    Combines every per-app Query class (drivers, results, constructors,
    races, status, circuits, standings, lap times, pit stops, qualifying,
    seasons) via multiple inheritance so all of their fields are exposed
    on a single schema root.
    """
    pass
# Module-level schema object; presumably referenced by the GraphQL
# view/URL configuration — confirm against project settings.
schema = graphene.Schema(query=Query)
|
normal
|
{
"blob_id": "05e4bcc7323b908a7b45d766ada463ce172e25c4",
"index": 378,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.\n constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.\n schema.Query, f1hub.circuits.schema.Query, f1hub.constructorresults.\n schema.Query, f1hub.constructorstandings.schema.Query, f1hub.\n driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.\n pitstops.schema.Query, f1hub.qualifying.schema.Query, f1hub.seasons.\n schema.Query, graphene.ObjectType):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.\n constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.\n schema.Query, f1hub.circuits.schema.Query, f1hub.constructorresults.\n schema.Query, f1hub.constructorstandings.schema.Query, f1hub.\n driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.\n pitstops.schema.Query, f1hub.qualifying.schema.Query, f1hub.seasons.\n schema.Query, graphene.ObjectType):\n pass\n\n\nschema = graphene.Schema(query=Query)\n",
"step-4": "import graphene\nimport f1hub.drivers.schema\nimport f1hub.results.schema\nimport f1hub.constructors.schema\nimport f1hub.races.schema\nimport f1hub.status.schema\nimport f1hub.circuits.schema\nimport f1hub.constructorresults.schema\nimport f1hub.constructorstandings.schema\nimport f1hub.driverstandings.schema\nimport f1hub.laptimes.schema\nimport f1hub.pitstops.schema\nimport f1hub.qualifying.schema\nimport f1hub.seasons.schema\n\n\nclass Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.\n constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.\n schema.Query, f1hub.circuits.schema.Query, f1hub.constructorresults.\n schema.Query, f1hub.constructorstandings.schema.Query, f1hub.\n driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.\n pitstops.schema.Query, f1hub.qualifying.schema.Query, f1hub.seasons.\n schema.Query, graphene.ObjectType):\n pass\n\n\nschema = graphene.Schema(query=Query)\n",
"step-5": "import graphene\n\nimport f1hub.drivers.schema\nimport f1hub.results.schema\nimport f1hub.constructors.schema\nimport f1hub.races.schema\nimport f1hub.status.schema\nimport f1hub.circuits.schema\nimport f1hub.constructorresults.schema\nimport f1hub.constructorstandings.schema\nimport f1hub.driverstandings.schema\nimport f1hub.laptimes.schema\nimport f1hub.pitstops.schema\nimport f1hub.qualifying.schema\nimport f1hub.seasons.schema\n\n\nclass Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.schema.Query, f1hub.circuits.schema.Query,\\\n f1hub.constructorresults.schema.Query, f1hub.constructorstandings.schema.Query, f1hub.driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.pitstops.schema.Query,\\\n f1hub.qualifying.schema.Query, f1hub.seasons.schema.Query, graphene.ObjectType):\n pass\n\n\nschema = graphene.Schema(query=Query)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from turtle import *
import time
import random
colormode(255)
class Ball(Turtle):
    """A bouncing ball: a circle-shaped turtle with a velocity and radius.

    Attributes:
        dx, dy: per-step velocity components.
        r: radius in pixels (the default "circle" shape is ~20 px, hence
           the ``r / 10`` stretch factor).
    """

    def __init__(self, x, y, dx, dy, r):
        Turtle.__init__(self)
        self.pu()  # pen up: moving must not draw a trail
        self.goto(x, y)
        self.dx = dx
        self.dy = dy
        self.r = r
        self.shape("circle")
        self.shapesize(r / 10)
        # Random RGB fill color (requires colormode(255) at module level).
        # Distinct names so the radius parameter `r` is no longer shadowed
        # by the red component, as it was in the original code.
        red = random.randint(0, 255)
        green = random.randint(0, 255)
        blue = random.randint(0, 255)
        self.color(red, green, blue)

    def move(self, screen_width, screen_hight):  # 'hight' kept: callers may pass by keyword
        """Advance one step, reversing velocity on any wall crossing.

        Walls sit at +/- half of the passed dimensions; the step is taken
        first and the velocity flipped afterwards, so the ball bounces on
        the frame after it crosses an edge.
        """
        current_x = self.xcor()
        new_x = current_x + self.dx
        current_y = self.ycor()
        new_y = current_y + self.dy
        right_side_ball = new_x + self.r
        left_side_ball = new_x - self.r
        bottom_ball = new_y - self.r
        upper_ball_side = new_y + self.r
        self.goto(new_x, new_y)
        if bottom_ball < -screen_hight / 2 or upper_ball_side > screen_hight / 2:
            self.dy *= -1
        if left_side_ball < -screen_width / 2 or right_side_ball > screen_width / 2:
            self.dx *= -1
tracer(0)  # disable automatic screen updates; nothing re-enables them here
ht()       # hide the default turtle cursor

RUNNING = True
SLEEP = 0.0077
# NOTE(review): these are already half the canvas size, and Ball.move()
# halves them again when testing walls — so balls appear to bounce within
# the central half of the window in each dimension. Possibly intentional;
# confirm before changing.
SCREEN_WIDTH = getcanvas().winfo_width() / 2
SCREEN_HEIGHT = getcanvas().winfo_height() / 2

MY_BALL = (0, 0, 0.5, -0.4, 30)
NUMBER_OF_BALLS = 5
MINIMUM_BALL_RADIUS = 10
MAXIMUM_BALL_RADIUS = 100
MINIMUM_BALL_DX = -5
MAXIMUM_BALL_DX = 5
MINIMUM_BALL_DY = -5
MAXIMUM_BALL_DY = 5

BALLS = []

for i in range(NUMBER_OF_BALLS):
    # Spawn fully inside the bounds so a ball never starts overlapping a wall.
    x = random.randint(int(-SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS), int(SCREEN_WIDTH / 2 - MAXIMUM_BALL_RADIUS))
    # BUG FIX: the y bounds are floats (SCREEN_HEIGHT is winfo_height()/2),
    # and random.randint() rejects non-integer arguments on modern Python;
    # cast to int exactly as the x line above already did.
    y = random.randint(int(-SCREEN_HEIGHT / 2 + MAXIMUM_BALL_RADIUS), int(SCREEN_HEIGHT / 2 - MAXIMUM_BALL_RADIUS))
    dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)
    dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)
    r = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)
    # Re-roll zero velocities so every ball moves on both axes.
    while dx == 0:
        dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)
    while dy == 0:
        dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)
    new_ball = Ball(x, y, dx, dy, r)
    BALLS.append(new_ball)
def move_all_balls(BALLS):
    # Advance each ball one frame; wall bouncing is handled inside Ball.move().
    for ball in BALLS:
        ball.move(SCREEN_WIDTH, SCREEN_HEIGHT)
#move_all_balls(BALLS)
# Enter the Tk event loop so the window stays open. Nothing in the visible
# code schedules move_all_balls (the call above is commented out), so the
# balls are drawn once but not animated.
mainloop()
|
normal
|
{
"blob_id": "17cd6746e58a7f33bc239c1420d51c6810ed02d8",
"index": 3575,
"step-1": "<mask token>\n\n\nclass Ball(Turtle):\n\n def __init__(self, x, y, dx, dy, r):\n Turtle.__init__(self)\n self.pu()\n self.goto(x, y)\n self.dx = dx\n self.dy = dy\n self.r = r\n self.shape('circle')\n self.shapesize(r / 10)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n self.color(r, g, b)\n\n def move(self, screen_width, screen_hight):\n current_x = self.xcor()\n new_x = current_x + self.dx\n current_y = self.ycor()\n new_y = current_y + self.dy\n right_side_ball = new_x + self.r\n left_side_ball = new_x - self.r\n bottom_ball = new_y - self.r\n upper_ball_side = new_y + self.r\n self.goto(new_x, new_y)\n if (bottom_ball < -screen_hight / 2 or upper_ball_side > \n screen_hight / 2):\n self.dy *= -1\n if (left_side_ball < -screen_width / 2 or right_side_ball > \n screen_width / 2):\n self.dx *= -1\n\n\n<mask token>\n\n\ndef move_all_balls(BALLS):\n for index in range(len(BALLS)):\n BALLS[index].move(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n\n<mask token>\n",
"step-2": "<mask token>\ncolormode(255)\n\n\nclass Ball(Turtle):\n\n def __init__(self, x, y, dx, dy, r):\n Turtle.__init__(self)\n self.pu()\n self.goto(x, y)\n self.dx = dx\n self.dy = dy\n self.r = r\n self.shape('circle')\n self.shapesize(r / 10)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n self.color(r, g, b)\n\n def move(self, screen_width, screen_hight):\n current_x = self.xcor()\n new_x = current_x + self.dx\n current_y = self.ycor()\n new_y = current_y + self.dy\n right_side_ball = new_x + self.r\n left_side_ball = new_x - self.r\n bottom_ball = new_y - self.r\n upper_ball_side = new_y + self.r\n self.goto(new_x, new_y)\n if (bottom_ball < -screen_hight / 2 or upper_ball_side > \n screen_hight / 2):\n self.dy *= -1\n if (left_side_ball < -screen_width / 2 or right_side_ball > \n screen_width / 2):\n self.dx *= -1\n\n\ntracer(0)\nht()\n<mask token>\nfor i in range(NUMBER_OF_BALLS):\n x = random.randint(int(-SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS), int(\n SCREEN_WIDTH / 2 - MAXIMUM_BALL_RADIUS))\n y = random.randint(-SCREEN_HEIGHT / 2 + MAXIMUM_BALL_RADIUS, \n SCREEN_HEIGHT / 2 - MAXIMUM_BALL_RADIUS)\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n r = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)\n while dx == 0:\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n while dy == 0:\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n new_ball = Ball(x, y, dx, dy, r)\n BALLS.append(new_ball)\n\n\ndef move_all_balls(BALLS):\n for index in range(len(BALLS)):\n BALLS[index].move(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n\nmainloop()\n",
"step-3": "<mask token>\ncolormode(255)\n\n\nclass Ball(Turtle):\n\n def __init__(self, x, y, dx, dy, r):\n Turtle.__init__(self)\n self.pu()\n self.goto(x, y)\n self.dx = dx\n self.dy = dy\n self.r = r\n self.shape('circle')\n self.shapesize(r / 10)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n self.color(r, g, b)\n\n def move(self, screen_width, screen_hight):\n current_x = self.xcor()\n new_x = current_x + self.dx\n current_y = self.ycor()\n new_y = current_y + self.dy\n right_side_ball = new_x + self.r\n left_side_ball = new_x - self.r\n bottom_ball = new_y - self.r\n upper_ball_side = new_y + self.r\n self.goto(new_x, new_y)\n if (bottom_ball < -screen_hight / 2 or upper_ball_side > \n screen_hight / 2):\n self.dy *= -1\n if (left_side_ball < -screen_width / 2 or right_side_ball > \n screen_width / 2):\n self.dx *= -1\n\n\ntracer(0)\nht()\nRUNNING = True\nSLEEP = 0.0077\nSCREEN_WIDTH = getcanvas().winfo_width() / 2\nSCREEN_HEIGHT = getcanvas().winfo_height() / 2\nMY_BALL = 0, 0, 0.5, -0.4, 30\nNUMBER_OF_BALLS = 5\nMINIMUM_BALL_RADIUS = 10\nMAXIMUM_BALL_RADIUS = 100\nMINIMUM_BALL_DX = -5\nMAXIMUM_BALL_DX = 5\nMINIMUM_BALL_DY = -5\nMAXIMUM_BALL_DY = 5\nBALLS = []\nfor i in range(NUMBER_OF_BALLS):\n x = random.randint(int(-SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS), int(\n SCREEN_WIDTH / 2 - MAXIMUM_BALL_RADIUS))\n y = random.randint(-SCREEN_HEIGHT / 2 + MAXIMUM_BALL_RADIUS, \n SCREEN_HEIGHT / 2 - MAXIMUM_BALL_RADIUS)\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n r = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)\n while dx == 0:\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n while dy == 0:\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n new_ball = Ball(x, y, dx, dy, r)\n BALLS.append(new_ball)\n\n\ndef move_all_balls(BALLS):\n for index in range(len(BALLS)):\n BALLS[index].move(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n\nmainloop()\n",
"step-4": "from turtle import *\nimport time\nimport random\ncolormode(255)\n\n\nclass Ball(Turtle):\n\n def __init__(self, x, y, dx, dy, r):\n Turtle.__init__(self)\n self.pu()\n self.goto(x, y)\n self.dx = dx\n self.dy = dy\n self.r = r\n self.shape('circle')\n self.shapesize(r / 10)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n self.color(r, g, b)\n\n def move(self, screen_width, screen_hight):\n current_x = self.xcor()\n new_x = current_x + self.dx\n current_y = self.ycor()\n new_y = current_y + self.dy\n right_side_ball = new_x + self.r\n left_side_ball = new_x - self.r\n bottom_ball = new_y - self.r\n upper_ball_side = new_y + self.r\n self.goto(new_x, new_y)\n if (bottom_ball < -screen_hight / 2 or upper_ball_side > \n screen_hight / 2):\n self.dy *= -1\n if (left_side_ball < -screen_width / 2 or right_side_ball > \n screen_width / 2):\n self.dx *= -1\n\n\ntracer(0)\nht()\nRUNNING = True\nSLEEP = 0.0077\nSCREEN_WIDTH = getcanvas().winfo_width() / 2\nSCREEN_HEIGHT = getcanvas().winfo_height() / 2\nMY_BALL = 0, 0, 0.5, -0.4, 30\nNUMBER_OF_BALLS = 5\nMINIMUM_BALL_RADIUS = 10\nMAXIMUM_BALL_RADIUS = 100\nMINIMUM_BALL_DX = -5\nMAXIMUM_BALL_DX = 5\nMINIMUM_BALL_DY = -5\nMAXIMUM_BALL_DY = 5\nBALLS = []\nfor i in range(NUMBER_OF_BALLS):\n x = random.randint(int(-SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS), int(\n SCREEN_WIDTH / 2 - MAXIMUM_BALL_RADIUS))\n y = random.randint(-SCREEN_HEIGHT / 2 + MAXIMUM_BALL_RADIUS, \n SCREEN_HEIGHT / 2 - MAXIMUM_BALL_RADIUS)\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n r = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)\n while dx == 0:\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n while dy == 0:\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n new_ball = Ball(x, y, dx, dy, r)\n BALLS.append(new_ball)\n\n\ndef move_all_balls(BALLS):\n for index in range(len(BALLS)):\n 
BALLS[index].move(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n\nmainloop()\n",
"step-5": "from turtle import *\nimport time\nimport random\ncolormode(255)\n\nclass Ball(Turtle):\n def __init__(self, x,y,dx,dy,r):\n Turtle.__init__(self)\n self.pu()\n self.goto(x,y)\n self.dx = dx\n self.dy = dy\n self.r = r\n self.shape(\"circle\")\n self.shapesize(r/10)\n r = random.randint(0,255)\n g = random.randint(0,255)\n b = random.randint(0,255)\n self.color(r,g,b)\n def move(self,screen_width, screen_hight):\n current_x = self.xcor()\n new_x = current_x + self.dx\n current_y = self.ycor()\n new_y = current_y + self.dy\n right_side_ball = new_x + self.r\n left_side_ball = new_x - self.r\n bottom_ball = new_y - self.r\n upper_ball_side = new_y + self.r\n self.goto(new_x, new_y)\n if bottom_ball < -screen_hight/2 or upper_ball_side > screen_hight/2:\n self.dy *= -1\n if left_side_ball < -screen_width/2 or right_side_ball > screen_width/2:\n self.dx *= -1\n\ntracer(0)\nht()\nRUNNING = True\nSLEEP = 0.0077\nSCREEN_WIDTH = getcanvas().winfo_width()/2\nSCREEN_HEIGHT = getcanvas().winfo_height()/2\n\nMY_BALL = (0,0,0.5,-0.4,30)\nNUMBER_OF_BALLS = 5\nMINIMUM_BALL_RADIUS = 10\nMAXIMUM_BALL_RADIUS = 100\nMINIMUM_BALL_DX = -5\nMAXIMUM_BALL_DX = 5\nMINIMUM_BALL_DY = -5\nMAXIMUM_BALL_DY = 5\n\nBALLS = []\n\nfor i in range(NUMBER_OF_BALLS):\n x = random.randint(int(- SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS) , int(SCREEN_WIDTH/2 - MAXIMUM_BALL_RADIUS))\n y = random.randint(-SCREEN_HEIGHT/2 + MAXIMUM_BALL_RADIUS , SCREEN_HEIGHT/2 - MAXIMUM_BALL_RADIUS)\n dy = random.randint(MINIMUM_BALL_DY , MAXIMUM_BALL_DY)\n dx = random.randint(MINIMUM_BALL_DX , MAXIMUM_BALL_DX)\n r = random.randint(MINIMUM_BALL_RADIUS , MAXIMUM_BALL_RADIUS)\n while dx == 0:\n dx = random.randint(MINIMUM_BALL_DX , MAXIMUM_BALL_DX)\n while dy == 0:\n dy = random.randint(MINIMUM_BALL_DY , MAXIMUM_BALL_DY)\n new_ball = Ball(x,y,dx,dy,r)\n BALLS.append(new_ball)\n\ndef move_all_balls(BALLS):\n for index in range(len(BALLS)):\n BALLS[index].move(SCREEN_WIDTH , 
SCREEN_HEIGHT)\n\n#move_all_balls(BALLS)\n\nmainloop()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
'''
Given []int, the most competitive subsequence is
a sublist of nums.
So we calculate a score, score is ∀ x ∈ nums, score += x_n - x_n-1
You can remove as many elements are you need to.
What is the mostCompetitive subsequence that you can come up with?
[1,3,5]
[1,3,4] ← More competitive
[1,2,5] ← More competitive
[1,3,4]
This is true b/c we evaluate on the first point where the two differ.
1) We care about creating lists that contain as small of numbers as
possible. The numbers don't need to be in order, they just need to be
small.
We care about all numbers, s.t. we can create a subsequence of k or more
behind them.
Get all possible sub-sequences, with length k or more. If more than k,
iterate through how we can remove the largest elements.
We should also keep track of the smallest number that corresponds to a valid
sequence?
I'm leaning towards a brute force method.
1) Find all sequences of length k. Store the most competitive.
So we should write a function that compares two sequences to see which is more
competitive.
Do one run, with subsequence == k.
Then try to beat that run.
Keep track of what the 'winning' subsequence is, and
iterate through possible values.
So two iterations.
[2,4,3,3,5,4,9,6] | k = 4
( )
ans = 2,4,3,3
[2,4,3,3,5,4,9,6] | k = 4
( )
2,4,3,3
^
idx = 0
Once we have 'beaten' it, out of the remaining
elements, remove the max element until length of
sublist is workable.
[2, 3, 3, ]
1) Write isMoreCompetitive
2) First pass → get most competitive with sliding window len = k
3) Second + pass. If we make a change/'win', re-run again. If re-run and
no change, we are done.
'''
'''
To Review:
def mostCompetitive(self, nums, k):
to_remove = len(nums) - k
stack = []
for x in nums:
while stack and x < stack[-1] and to_remove:
to_remove -= 1
stack.pop()
stack.append(x)
for _ in range(to_remove):
stack.pop()
return stack
'''
class Solution:
    """Find the most competitive (lexicographically smallest) length-k subsequence."""

    # is a more competitive than b?
    def isMoreCompetitive(self, a, b):
        """Return True iff equal-length sequence `a` beats `b`.

        Sequences are compared at the first index where they differ; the
        smaller value wins.  Equal sequences return False.  On a length
        mismatch, prints an error and returns the string "Error"
        (preserved from the original behaviour).
        """
        if len(a) != len(b):
            print("Error, len()'s do not match'")
            return "Error"

        for i in range(len(a)):
            if a[i] == b[i]:
                continue
            elif a[i] < b[i]:
                return True
            else:
                return False

        return False

    def refined(self, nums, i, a, ans):
        """Legacy exhaustive search, kept verbatim for backward compatibility.

        Recursively enumerates every subsequence of length len(ans) and
        returns (True, best) when one beats `ans`, else (False, None).
        Exponential time; no longer called by mostCompetitive().
        """
        if i >= len(nums):
            if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:
                return False, None

            elif len(a) != len(ans):
                return False, None

            else:
                return True, a

        elif i < len(nums) and len(ans) - len(a) <= len(nums) - i:
            boolA, respA = self.refined(nums, i + 1, a + [nums[i]], ans)
            boolB, respB = self.refined(nums, i + 1, a, ans)

            if boolA == True and boolB == True:
                if self.isMoreCompetitive(respA, respB):
                    return True, respA
                else:
                    return True, respB

            elif boolA == True:
                return boolA, respA

            elif boolB == True:
                return True, respB

            else:
                return False, None

        else:
            return False, None

    def mostCompetitive(self, nums, k):
        """Return the most competitive subsequence of `nums` with length `k`.

        Monotonic stack, O(n): pop a larger tail element whenever a smaller
        one arrives and removals remain.  This replaces the original
        sliding-window pass plus exponential `refined` search; the optimal
        subsequence is unique, so the result is unchanged.

        Prints and returns False when len(nums) < k (original behaviour).
        """
        if len(nums) < k:
            print("length mismatch @ init")
            return False

        to_remove = len(nums) - k
        stack = []
        for x in nums:
            # Dropping a larger tail element in favour of x always yields a
            # lexicographically smaller result, while we can afford it.
            while stack and to_remove and x < stack[-1]:
                stack.pop()
                to_remove -= 1
            stack.append(x)

        # Any unused removals come off the end: the surviving tail is
        # non-decreasing, so its last elements are the largest.
        for _ in range(to_remove):
            stack.pop()

        return stack
if __name__ == '__main__':
    s = Solution()
    # Manual smoke tests; the first two should print [2, 6] and [2, 3, 3, 4].
    print(s.mostCompetitive([3,5,2,6], 2))
    print(s.mostCompetitive([2,4,3,3,5,4,9,6], 4))
    print(s.mostCompetitive([84,10,71,23,66,61,62,64,34,41,80,25,91,43,4,75,65,13,37,41,46,90,55,8,85,61,95,71], 24))
    print(s.mostCompetitive([2,4,3,3,5,4,9,6], 4))
# NOTE(review): the two statements below are leftover test data — a bare
# list literal and a bare int expression with no observable effect.
[11,52,57,91,47,95,86,46,87,47,70,56,54,61,89,44,3,73,1,7,87,48,17,25,49,54,6,72,97,62,16,11,47,34,68,58,14,36,46,65,2,15]
18
|
normal
|
{
"blob_id": "f8b04f374e1c55d4985be793939f0ff9393c29e0",
"index": 2571,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n def refined(self, nums, i, a, ans):\n if i >= len(nums):\n if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:\n return False, None\n elif len(a) != len(ans):\n return False, None\n else:\n return True, a\n elif i < len(nums) and len(ans) - len(a) <= len(nums) - i:\n boolA, respA = self.refined(nums, i + 1, a + [nums[i]], ans)\n boolB, respB = self.refined(nums, i + 1, a, ans)\n if boolA == True and boolB == True:\n if self.isMoreCompetitive(respA, respB):\n return True, respA\n else:\n return True, respB\n elif boolA == True:\n return boolA, respA\n elif boolB == True:\n return True, respB\n else:\n return False, None\n else:\n return False, None\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def isMoreCompetitive(self, a, b):\n if len(a) != len(b):\n print(\"Error, len()'s do not match'\")\n return 'Error'\n for i in range(len(a)):\n if a[i] == b[i]:\n continue\n elif a[i] < b[i]:\n return True\n else:\n return False\n return False\n\n def refined(self, nums, i, a, ans):\n if i >= len(nums):\n if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:\n return False, None\n elif len(a) != len(ans):\n return False, None\n else:\n return True, a\n elif i < len(nums) and len(ans) - len(a) <= len(nums) - i:\n boolA, respA = self.refined(nums, i + 1, a + [nums[i]], ans)\n boolB, respB = self.refined(nums, i + 1, a, ans)\n if boolA == True and boolB == True:\n if self.isMoreCompetitive(respA, respB):\n return True, respA\n else:\n return True, respB\n elif boolA == True:\n return boolA, respA\n elif boolB == True:\n return True, respB\n else:\n return False, None\n else:\n return False, None\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def isMoreCompetitive(self, a, b):\n if len(a) != len(b):\n print(\"Error, len()'s do not match'\")\n return 'Error'\n for i in range(len(a)):\n if a[i] == b[i]:\n continue\n elif a[i] < b[i]:\n return True\n else:\n return False\n return False\n\n def refined(self, nums, i, a, ans):\n if i >= len(nums):\n if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:\n return False, None\n elif len(a) != len(ans):\n return False, None\n else:\n return True, a\n elif i < len(nums) and len(ans) - len(a) <= len(nums) - i:\n boolA, respA = self.refined(nums, i + 1, a + [nums[i]], ans)\n boolB, respB = self.refined(nums, i + 1, a, ans)\n if boolA == True and boolB == True:\n if self.isMoreCompetitive(respA, respB):\n return True, respA\n else:\n return True, respB\n elif boolA == True:\n return boolA, respA\n elif boolB == True:\n return True, respB\n else:\n return False, None\n else:\n return False, None\n\n def mostCompetitive(self, nums, k):\n if len(nums) < k:\n print('length mismatch @ init')\n return False\n ans = list(nums[0:k])\n tmp = list(nums[0:k])\n i = k\n while i < len(nums):\n del tmp[0]\n tmp.append(nums[i])\n if self.isMoreCompetitive(tmp, ans):\n ans = list(tmp)\n i += 1\n shouldContinue = True\n idx = 0\n foundAnswer, updateAns = self.refined(nums, 0, [], ans)\n if foundAnswer == True:\n return updateAns\n return ans\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def isMoreCompetitive(self, a, b):\n if len(a) != len(b):\n print(\"Error, len()'s do not match'\")\n return 'Error'\n for i in range(len(a)):\n if a[i] == b[i]:\n continue\n elif a[i] < b[i]:\n return True\n else:\n return False\n return False\n\n def refined(self, nums, i, a, ans):\n if i >= len(nums):\n if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:\n return False, None\n elif len(a) != len(ans):\n return False, None\n else:\n return True, a\n elif i < len(nums) and len(ans) - len(a) <= len(nums) - i:\n boolA, respA = self.refined(nums, i + 1, a + [nums[i]], ans)\n boolB, respB = self.refined(nums, i + 1, a, ans)\n if boolA == True and boolB == True:\n if self.isMoreCompetitive(respA, respB):\n return True, respA\n else:\n return True, respB\n elif boolA == True:\n return boolA, respA\n elif boolB == True:\n return True, respB\n else:\n return False, None\n else:\n return False, None\n\n def mostCompetitive(self, nums, k):\n if len(nums) < k:\n print('length mismatch @ init')\n return False\n ans = list(nums[0:k])\n tmp = list(nums[0:k])\n i = k\n while i < len(nums):\n del tmp[0]\n tmp.append(nums[i])\n if self.isMoreCompetitive(tmp, ans):\n ans = list(tmp)\n i += 1\n shouldContinue = True\n idx = 0\n foundAnswer, updateAns = self.refined(nums, 0, [], ans)\n if foundAnswer == True:\n return updateAns\n return ans\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.mostCompetitive([3, 5, 2, 6], 2))\n print(s.mostCompetitive([2, 4, 3, 3, 5, 4, 9, 6], 4))\n print(s.mostCompetitive([84, 10, 71, 23, 66, 61, 62, 64, 34, 41, 80, 25,\n 91, 43, 4, 75, 65, 13, 37, 41, 46, 90, 55, 8, 85, 61, 95, 71], 24))\n print(s.mostCompetitive([2, 4, 3, 3, 5, 4, 9, 6], 4))\n [11, 52, 57, 91, 47, 95, 86, 46, 87, 47, 70, 56, 54, 61, 89, 44, 3, 73,\n 1, 7, 87, 48, 17, 25, 49, 54, 6, 72, 97, 62, 16, 11, 47, 34, 68, 58,\n 14, 36, 46, 65, 2, 15]\n18\n",
"step-5": "'''\nGiven []int, most mostCompetitive subsequence is\na sublist of nums.\n\nSo we calculate a score, score is ∀ x ∈ nums, score += x_n - x_n-1\n\nYou can remove as many elements are you need to.\n\nWhat is the mostCompetitive subsequence that you can come up with?\n\n[1,3,5]\n[1,3,4] ← More competitive\n\n[1,2,5] ← More competitive\n[1,3,4]\n\nThis is true b/c we evaluate on the first point where the two differ.\n\n1) We care about creating lists that contain as small of numbers as\npossible. The numbers don't need to be in order, they just need to be\nsmall.\n\nWe care about all numbers, s.t. we can create a subsequence of k or more\nbehind them.\n\nGet all possible sub-sequences, with length k or more. If more than k,\niterate through how we can remove the largest elements.\n\nWe should also keep track of the smallest number that corresponds to a valid\nsequence?\n\nI'm leaning towards a brute force method.\n\n1) Find all sequences of length k. Store the most competitive.\n\n\nSo we should write a function that compares two sequences to see which is more\ncompetitive.\n\nDo one run, with subsequence == k.\nThen try to beat that run.\n\nKeep track of what the 'winning' subsequence is, and\niterate through possible values.\n\nSo two iterations.\n\n[2,4,3,3,5,4,9,6] | k = 4\n ( )\n\nans = 2,4,3,3\n\n\n\n[2,4,3,3,5,4,9,6] | k = 4\n( )\n\n2,4,3,3\n ^\n\nidx = 0\n\nOnce we have 'beaten' it, out of the remaining\nelements, remove the max element until length of\nsublist is workable.\n\n\n[2, 3, 3, ]\n\n1) Write isMoreCompetitive\n2) First pass → get most competitive with sliding window len = k\n3) Second + pass. If we make a change/'win', re-run again. 
If re-run and\n no change, we are done.\n\n'''\n\n'''\nTo Review:\n\ndef mostCompetitive(self, nums, k):\n to_remove = len(nums) - k\n stack = []\n\n for x in nums:\n while stack and x < stack[-1] and to_remove:\n to_remove -= 1\n stack.pop()\n stack.append(x)\n\n for _ in range(to_remove):\n stack.pop()\n\n return stack\n'''\n\n\n\nclass Solution:\n\n # is a more competitive than b?\n def isMoreCompetitive(self, a, b):\n if len(a) != len(b):\n print(\"Error, len()'s do not match'\")\n return \"Error\"\n\n for i in range(len(a)):\n if a[i] == b[i]:\n continue\n elif a[i] < b[i]:\n return True\n else:\n return False\n\n return False\n\n def refined(self, nums, i, a, ans):\n if i >= len(nums):\n if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:\n return False, None\n\n elif len(a) != len(ans):\n return False, None\n\n else:\n return True, a\n\n elif i < len(nums) and len(ans)-len(a) <= len(nums)-i :\n boolA, respA = self.refined(nums, i+1, a+[nums[i]], ans)\n boolB, respB = self.refined(nums, i+1, a, ans)\n\n if boolA == True and boolB == True:\n if self.isMoreCompetitive(respA, respB):\n return True, respA\n else:\n return True, respB\n\n elif boolA == True:\n return boolA, respA\n\n elif boolB == True:\n return True, respB\n\n else:\n return False, None\n\n else:\n return False, None\n\n\n\n def mostCompetitive(self, nums, k):\n\n if len(nums) < k :\n print(\"length mismatch @ init\")\n return False\n\n ans = list(nums[0:k])\n tmp = list(nums[0:k])\n i = k\n\n # Initial pass\n while i < len(nums):\n # print(tmp)\n del tmp[0]\n # print(tmp)\n tmp.append(nums[i])\n # print(tmp)\n if self.isMoreCompetitive(tmp, ans):\n ans = list(tmp)\n i += 1\n # print(\"ans: {}, tmp:{}\".format(ans, tmp))\n # print(\"\")\n\n # Pass 2\n shouldContinue = True\n idx = 0\n\n foundAnswer, updateAns = self.refined(nums, 0, [], ans)\n\n if foundAnswer == True:\n return updateAns\n\n return ans\n\n\n\n\nif __name__ == '__main__':\n s = Solution()\n\n 
print(s.mostCompetitive([3,5,2,6], 2))\n print(s.mostCompetitive([2,4,3,3,5,4,9,6], 4))\n print(s.mostCompetitive([84,10,71,23,66,61,62,64,34,41,80,25,91,43,4,75,65,13,37,41,46,90,55,8,85,61,95,71], 24))\n print(s.mostCompetitive([2,4,3,3,5,4,9,6], 4))\n\n\n [11,52,57,91,47,95,86,46,87,47,70,56,54,61,89,44,3,73,1,7,87,48,17,25,49,54,6,72,97,62,16,11,47,34,68,58,14,36,46,65,2,15]\n18\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
import torch
from collections import OrderedDict
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import image as mplimg
from torch.nn.functional import upsample
import networks.deeplab_resnet as resnet
from mypath import Path
from dataloaders import helpers as helpers
from maskRCNN.maskrcnn_benchmark.config import cfg
from maskRCNN.demo.predictor_person import COCODemo
from skimage import io
PAD_SIZE = 10
def maskRCNN_model(config_file="/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
                   device="cpu", min_image_size=800, confidence_threshold=0.9):
    """Build the Mask R-CNN COCO demo predictor.

    The previously hard-coded machine-specific config path and tuning
    constants are now parameters with backward-compatible defaults, so
    existing ``maskRCNN_model()`` call sites behave exactly as before.

    Parameters
    ----------
    config_file : str
        Path to the maskrcnn-benchmark YAML config file.
    device : str
        Value for the ``MODEL.DEVICE`` override (default ``"cpu"``).
    min_image_size : int
        Minimum image side length fed to the detector.
    confidence_threshold : float
        Detections scoring below this are discarded by COCODemo.

    Returns
    -------
    COCODemo
        A ready-to-use predictor instance.
    """
    # Load the base config, then apply manual overrides on top of it.
    cfg.merge_from_file(config_file)
    cfg.merge_from_list(["MODEL.DEVICE", device])
    return COCODemo(
        cfg,
        min_image_size=min_image_size,
        confidence_threshold=confidence_threshold,
    )
def get_maskRCNN_predictions(model, image_path):
    """Run the Mask R-CNN demo predictor on the image stored at *image_path*.

    Returns the ``(predictions, bbox, masks, heatmap)`` tuple produced by
    ``model.run_on_opencv_image``.
    """
    loaded = io.imread(image_path)
    preds, boxes, segm_masks, heat = model.run_on_opencv_image(loaded)
    return preds, boxes, segm_masks, heat
# Inference configuration.
modelName = 'dextr_pascal-sbd'  # checkpoint name under Path.models_dir()
pad = 50       # padding (px) around the extreme-point bounding box when cropping
thres = 0.8    # probability threshold for binarising the predicted mask
gpu_id = 0
device = torch.device("cpu")
#device = torch.device("cuda:"+str(gpu_id) if torch.cuda.is_available() else "cpu")

# Create the network and load the weights
net = resnet.resnet101(1, nInputChannels=4, classifier='psp')
print("Initializing weights from: {}".format(os.path.join(Path.models_dir(), modelName + '.pth')))
# map_location keeps all tensors on CPU regardless of where they were saved.
state_dict_checkpoint = torch.load(os.path.join(Path.models_dir(), modelName + '.pth'),
                                   map_location=lambda storage, loc: storage)
# Remove the prefix .module from the model when it is trained using DataParallel
if 'module.' in list(state_dict_checkpoint.keys())[0]:
    new_state_dict = OrderedDict()
    for k, v in state_dict_checkpoint.items():
        name = k[7:]  # remove `module.` from multi-gpu training
        new_state_dict[name] = v
else:
    new_state_dict = state_dict_checkpoint
net.load_state_dict(new_state_dict)
net.eval()  # inference mode: disables dropout, freezes batch-norm statistics
net.to(device)
# NOTE: interactive input is disabled -- extreme points now come from the
# Mask R-CNN mask rather than manual clicks; the plt-based clicking UI
# below is kept commented out for reference.
#plt.ion()
#plt.axis('off')
#plt.imshow(image)
#plt.title('Click the four extreme points of the objects\nHit enter when done (do not close the window)')
#results = []
def get_extreme_points(BBox, pad_size=None):
    """Derive four extreme points (top, left, right, bottom) from a bounding box.

    Parameters
    ----------
    BBox : sequence
        ``BBox[0]`` holds ``(x_min, y_min, x_max, y_max)``.
    pad_size : int, optional
        Padding in pixels applied outward from the box edges; defaults to
        the module-level ``PAD_SIZE``.

    Returns
    -------
    numpy.ndarray
        Shape ``(4, 2)``, dtype int; rows are top, left, right, bottom.
    """
    if pad_size is None:
        pad_size = PAD_SIZE
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent.
    x_min = int(BBox[0][0])
    y_min = int(BBox[0][1])
    x_max = int(BBox[0][2])
    y_max = int(BBox[0][3])

    # Top/bottom sit at the horizontal midpoint; left/right sit at 95% of
    # the box height (towards the bottom) -- a person-segmentation heuristic
    # matching the rest of this script. All four are pushed outward by pad_size.
    top = np.array([x_min + (x_max - x_min) * 0.5, y_min - pad_size])
    bottom = np.array([x_min + (x_max - x_min) * 0.5, y_max + pad_size])
    left = np.array([x_min - pad_size, y_min + (y_max - y_min) * 0.95])
    right = np.array([x_max + pad_size, y_min + (y_max - y_min) * 0.95])

    return np.array([top, left, right, bottom]).astype(int)
def get_EP_by_mask(mask, pad_size=None):
    """Compute the extreme points (top, left, right, bottom) of a binary mask.

    Parameters
    ----------
    mask : numpy.ndarray
        Binary mask; squeezed to 2-D before the nonzero scan.
    pad_size : int, optional
        Pixels added below the bottom point only (matches the original
        behaviour); defaults to the module-level ``PAD_SIZE``.

    Returns
    -------
    numpy.ndarray
        Shape ``(4, 2)``, dtype int; rows are top, left, right, bottom
        as ``(x, y)`` pairs.
    """
    if pad_size is None:
        pad_size = PAD_SIZE
    mask = mask.squeeze()
    ys, xs = np.nonzero(mask)
    left = [np.min(xs), ys[np.argmin(xs)]]
    right = [np.max(xs), ys[np.argmax(xs)]]
    top = [xs[np.argmin(ys)], np.min(ys)]
    # Only the bottom point is padded downward (asymmetric on purpose).
    bottom = [xs[np.argmax(ys)], np.max(ys) + pad_size]

    # np.int was removed in NumPy 1.24; the builtin int is the equivalent.
    return np.array([top, left, right, bottom]).astype(int)
# Run DEXTR on every image under ./ims/: seed the four extreme points with
# the person mask predicted by Mask R-CNN, segment, and save overlays to
# ./output/.
with torch.no_grad():
    model = maskRCNN_model()
    os.makedirs("./output", exist_ok=True)  # avoid imsave failing on a missing dir
    for path, dirs, files in os.walk("./ims/"):
        for filename in files:
            image_path = os.path.join(path, filename)
            image = np.array(Image.open(image_path))

            # Person mask from Mask R-CNN -> four extreme points for DEXTR.
            _, _, mask, _ = get_maskRCNN_predictions(model, image_path)
            extreme_points_ori = get_EP_by_mask(mask)

            # Crop image to the bounding box around the extreme points and
            # resize to the 512x512 input expected by the network.
            bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=pad, zero_pad=True)
            crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)
            resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(np.float32)

            # Extreme-point heat map (the network's 4th input channel),
            # expressed in crop coordinates and normalised to 0..255.
            extreme_points = extreme_points_ori - [np.min(extreme_points_ori[:, 0]),
                                                   np.min(extreme_points_ori[:, 1])] + [pad, pad]
            # np.int was removed in NumPy 1.24; the builtin int is equivalent.
            extreme_points = (512 * extreme_points * [1 / crop_image.shape[1], 1 / crop_image.shape[0]]).astype(int)
            extreme_heatmap = helpers.make_gt(resize_image, extreme_points, sigma=10)
            extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)

            # Concatenate RGB + heat map and convert to an NCHW tensor.
            input_dextr = np.concatenate((resize_image, extreme_heatmap[:, :, np.newaxis]), axis=2)
            inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.newaxis, ...])

            # Forward pass; upsample the logits back to 512x512.
            inputs = inputs.to(device)
            outputs = net.forward(inputs)
            outputs = upsample(outputs, size=(512, 512), mode='bilinear', align_corners=True)
            outputs = outputs.to(torch.device('cpu'))

            # Sigmoid -> probability map -> paste back into full-image
            # coordinates and threshold into a binary mask.
            pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))
            pred = 1 / (1 + np.exp(-pred))
            pred = np.squeeze(pred)
            result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[:2], zero_pad=True, relax=pad) > thres

            # Save the mask overlaid on the (0..1-scaled) original image.
            out_img = helpers.overlay_masks(image / 255, result)
            mplimg.imsave("./output/output_" + filename, out_img)
|
normal
|
{
"blob_id": "2c8b8e9767ac8400fb6390e0851d9df10df7cd8c",
"index": 8729,
"step-1": "<mask token>\n\n\ndef maskRCNN_model():\n config_file = (\n '/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml'\n )\n cfg.merge_from_file(config_file)\n cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])\n coco_demo = COCODemo(cfg, min_image_size=800, confidence_threshold=0.9)\n return coco_demo\n\n\ndef get_maskRCNN_predictions(model, image_path):\n image = io.imread(image_path)\n predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)\n return predictions, bbox, masks, heatmap\n\n\n<mask token>\n\n\ndef get_extreme_points(BBox):\n x_min = np.int(BBox[0][0])\n y_min = np.int(BBox[0][1])\n x_max = np.int(BBox[0][2])\n y_max = np.int(BBox[0][3])\n top = np.array([x_min + (x_max - x_min) * 0.5, y_min - PAD_SIZE])\n bottom = np.array([x_min + (x_max - x_min) * 0.5, y_max + PAD_SIZE])\n left = np.array([x_min - PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n right = np.array([x_max + PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n extreme_points = np.array([top, left, right, bottom]).astype(np.int)\n return extreme_points\n\n\ndef get_EP_by_mask(mask):\n mask = mask.squeeze()\n idx = np.nonzero(mask)\n left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]\n right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]\n top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]\n bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0]) + PAD_SIZE]\n points = [top, left, right, bottom]\n points = np.array(points).astype(np.int)\n return points\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef maskRCNN_model():\n config_file = (\n '/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml'\n )\n cfg.merge_from_file(config_file)\n cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])\n coco_demo = COCODemo(cfg, min_image_size=800, confidence_threshold=0.9)\n return coco_demo\n\n\ndef get_maskRCNN_predictions(model, image_path):\n image = io.imread(image_path)\n predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)\n return predictions, bbox, masks, heatmap\n\n\n<mask token>\nprint('Initializing weights from: {}'.format(os.path.join(Path.models_dir(),\n modelName + '.pth')))\n<mask token>\nif 'module.' in list(state_dict_checkpoint.keys())[0]:\n new_state_dict = OrderedDict()\n for k, v in state_dict_checkpoint.items():\n name = k[7:]\n new_state_dict[name] = v\nelse:\n new_state_dict = state_dict_checkpoint\nnet.load_state_dict(new_state_dict)\nnet.eval()\nnet.to(device)\n\n\ndef get_extreme_points(BBox):\n x_min = np.int(BBox[0][0])\n y_min = np.int(BBox[0][1])\n x_max = np.int(BBox[0][2])\n y_max = np.int(BBox[0][3])\n top = np.array([x_min + (x_max - x_min) * 0.5, y_min - PAD_SIZE])\n bottom = np.array([x_min + (x_max - x_min) * 0.5, y_max + PAD_SIZE])\n left = np.array([x_min - PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n right = np.array([x_max + PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n extreme_points = np.array([top, left, right, bottom]).astype(np.int)\n return extreme_points\n\n\ndef get_EP_by_mask(mask):\n mask = mask.squeeze()\n idx = np.nonzero(mask)\n left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]\n right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]\n top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]\n bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0]) + PAD_SIZE]\n points = [top, left, right, bottom]\n points = np.array(points).astype(np.int)\n return points\n\n\nwith torch.no_grad():\n model = maskRCNN_model()\n for path, dirs, files in 
os.walk('./ims/'):\n for filename in files:\n image_path = path + '/' + filename\n image = np.array(Image.open(image_path))\n _, _, mask, _ = get_maskRCNN_predictions(model, image_path)\n extreme_points_ori = get_EP_by_mask(mask)\n bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=\n pad, zero_pad=True)\n crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)\n resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(\n np.float32)\n extreme_points = extreme_points_ori - [np.min(\n extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])\n ] + [pad, pad]\n extreme_points = (512 * extreme_points * [1 / crop_image.shape[\n 1], 1 / crop_image.shape[0]]).astype(np.int)\n extreme_heatmap = helpers.make_gt(resize_image, extreme_points,\n sigma=10)\n extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)\n input_dextr = np.concatenate((resize_image, extreme_heatmap[:,\n :, np.newaxis]), axis=2)\n inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.\n newaxis, ...])\n inputs = inputs.to(device)\n outputs = net.forward(inputs)\n outputs = upsample(outputs, size=(512, 512), mode='bilinear',\n align_corners=True)\n outputs = outputs.to(torch.device('cpu'))\n pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))\n pred = 1 / (1 + np.exp(-pred))\n pred = np.squeeze(pred)\n result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[\n :2], zero_pad=True, relax=pad) > thres\n results = result\n out_img = helpers.overlay_masks(image / 255, results)\n mplimg.imsave('./output/output_' + filename, out_img)\n",
"step-3": "<mask token>\nPAD_SIZE = 10\n\n\ndef maskRCNN_model():\n config_file = (\n '/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml'\n )\n cfg.merge_from_file(config_file)\n cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])\n coco_demo = COCODemo(cfg, min_image_size=800, confidence_threshold=0.9)\n return coco_demo\n\n\ndef get_maskRCNN_predictions(model, image_path):\n image = io.imread(image_path)\n predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)\n return predictions, bbox, masks, heatmap\n\n\nmodelName = 'dextr_pascal-sbd'\npad = 50\nthres = 0.8\ngpu_id = 0\ndevice = torch.device('cpu')\nnet = resnet.resnet101(1, nInputChannels=4, classifier='psp')\nprint('Initializing weights from: {}'.format(os.path.join(Path.models_dir(),\n modelName + '.pth')))\nstate_dict_checkpoint = torch.load(os.path.join(Path.models_dir(), \n modelName + '.pth'), map_location=lambda storage, loc: storage)\nif 'module.' in list(state_dict_checkpoint.keys())[0]:\n new_state_dict = OrderedDict()\n for k, v in state_dict_checkpoint.items():\n name = k[7:]\n new_state_dict[name] = v\nelse:\n new_state_dict = state_dict_checkpoint\nnet.load_state_dict(new_state_dict)\nnet.eval()\nnet.to(device)\n\n\ndef get_extreme_points(BBox):\n x_min = np.int(BBox[0][0])\n y_min = np.int(BBox[0][1])\n x_max = np.int(BBox[0][2])\n y_max = np.int(BBox[0][3])\n top = np.array([x_min + (x_max - x_min) * 0.5, y_min - PAD_SIZE])\n bottom = np.array([x_min + (x_max - x_min) * 0.5, y_max + PAD_SIZE])\n left = np.array([x_min - PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n right = np.array([x_max + PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n extreme_points = np.array([top, left, right, bottom]).astype(np.int)\n return extreme_points\n\n\ndef get_EP_by_mask(mask):\n mask = mask.squeeze()\n idx = np.nonzero(mask)\n left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]\n right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]\n top = 
[idx[1][np.argmin(idx[0])], np.min(idx[0])]\n bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0]) + PAD_SIZE]\n points = [top, left, right, bottom]\n points = np.array(points).astype(np.int)\n return points\n\n\nwith torch.no_grad():\n model = maskRCNN_model()\n for path, dirs, files in os.walk('./ims/'):\n for filename in files:\n image_path = path + '/' + filename\n image = np.array(Image.open(image_path))\n _, _, mask, _ = get_maskRCNN_predictions(model, image_path)\n extreme_points_ori = get_EP_by_mask(mask)\n bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=\n pad, zero_pad=True)\n crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)\n resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(\n np.float32)\n extreme_points = extreme_points_ori - [np.min(\n extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])\n ] + [pad, pad]\n extreme_points = (512 * extreme_points * [1 / crop_image.shape[\n 1], 1 / crop_image.shape[0]]).astype(np.int)\n extreme_heatmap = helpers.make_gt(resize_image, extreme_points,\n sigma=10)\n extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)\n input_dextr = np.concatenate((resize_image, extreme_heatmap[:,\n :, np.newaxis]), axis=2)\n inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.\n newaxis, ...])\n inputs = inputs.to(device)\n outputs = net.forward(inputs)\n outputs = upsample(outputs, size=(512, 512), mode='bilinear',\n align_corners=True)\n outputs = outputs.to(torch.device('cpu'))\n pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))\n pred = 1 / (1 + np.exp(-pred))\n pred = np.squeeze(pred)\n result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[\n :2], zero_pad=True, relax=pad) > thres\n results = result\n out_img = helpers.overlay_masks(image / 255, results)\n mplimg.imsave('./output/output_' + filename, out_img)\n",
"step-4": "import os\nimport torch\nfrom collections import OrderedDict\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import image as mplimg\nfrom torch.nn.functional import upsample\nimport networks.deeplab_resnet as resnet\nfrom mypath import Path\nfrom dataloaders import helpers as helpers\nfrom maskRCNN.maskrcnn_benchmark.config import cfg\nfrom maskRCNN.demo.predictor_person import COCODemo\nfrom skimage import io\nPAD_SIZE = 10\n\n\ndef maskRCNN_model():\n config_file = (\n '/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml'\n )\n cfg.merge_from_file(config_file)\n cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])\n coco_demo = COCODemo(cfg, min_image_size=800, confidence_threshold=0.9)\n return coco_demo\n\n\ndef get_maskRCNN_predictions(model, image_path):\n image = io.imread(image_path)\n predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)\n return predictions, bbox, masks, heatmap\n\n\nmodelName = 'dextr_pascal-sbd'\npad = 50\nthres = 0.8\ngpu_id = 0\ndevice = torch.device('cpu')\nnet = resnet.resnet101(1, nInputChannels=4, classifier='psp')\nprint('Initializing weights from: {}'.format(os.path.join(Path.models_dir(),\n modelName + '.pth')))\nstate_dict_checkpoint = torch.load(os.path.join(Path.models_dir(), \n modelName + '.pth'), map_location=lambda storage, loc: storage)\nif 'module.' 
in list(state_dict_checkpoint.keys())[0]:\n new_state_dict = OrderedDict()\n for k, v in state_dict_checkpoint.items():\n name = k[7:]\n new_state_dict[name] = v\nelse:\n new_state_dict = state_dict_checkpoint\nnet.load_state_dict(new_state_dict)\nnet.eval()\nnet.to(device)\n\n\ndef get_extreme_points(BBox):\n x_min = np.int(BBox[0][0])\n y_min = np.int(BBox[0][1])\n x_max = np.int(BBox[0][2])\n y_max = np.int(BBox[0][3])\n top = np.array([x_min + (x_max - x_min) * 0.5, y_min - PAD_SIZE])\n bottom = np.array([x_min + (x_max - x_min) * 0.5, y_max + PAD_SIZE])\n left = np.array([x_min - PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n right = np.array([x_max + PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n extreme_points = np.array([top, left, right, bottom]).astype(np.int)\n return extreme_points\n\n\ndef get_EP_by_mask(mask):\n mask = mask.squeeze()\n idx = np.nonzero(mask)\n left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]\n right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]\n top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]\n bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0]) + PAD_SIZE]\n points = [top, left, right, bottom]\n points = np.array(points).astype(np.int)\n return points\n\n\nwith torch.no_grad():\n model = maskRCNN_model()\n for path, dirs, files in os.walk('./ims/'):\n for filename in files:\n image_path = path + '/' + filename\n image = np.array(Image.open(image_path))\n _, _, mask, _ = get_maskRCNN_predictions(model, image_path)\n extreme_points_ori = get_EP_by_mask(mask)\n bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=\n pad, zero_pad=True)\n crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)\n resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(\n np.float32)\n extreme_points = extreme_points_ori - [np.min(\n extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])\n ] + [pad, pad]\n extreme_points = (512 * extreme_points * [1 / crop_image.shape[\n 1], 1 / crop_image.shape[0]]).astype(np.int)\n 
extreme_heatmap = helpers.make_gt(resize_image, extreme_points,\n sigma=10)\n extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)\n input_dextr = np.concatenate((resize_image, extreme_heatmap[:,\n :, np.newaxis]), axis=2)\n inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.\n newaxis, ...])\n inputs = inputs.to(device)\n outputs = net.forward(inputs)\n outputs = upsample(outputs, size=(512, 512), mode='bilinear',\n align_corners=True)\n outputs = outputs.to(torch.device('cpu'))\n pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))\n pred = 1 / (1 + np.exp(-pred))\n pred = np.squeeze(pred)\n result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[\n :2], zero_pad=True, relax=pad) > thres\n results = result\n out_img = helpers.overlay_masks(image / 255, results)\n mplimg.imsave('./output/output_' + filename, out_img)\n",
"step-5": "import os\nimport torch\nfrom collections import OrderedDict\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import image as mplimg\n\nfrom torch.nn.functional import upsample\n\nimport networks.deeplab_resnet as resnet\nfrom mypath import Path\nfrom dataloaders import helpers as helpers\n\nfrom maskRCNN.maskrcnn_benchmark.config import cfg\nfrom maskRCNN.demo.predictor_person import COCODemo\nfrom skimage import io\n\nPAD_SIZE = 10\n\n\ndef maskRCNN_model():\n config_file = \"/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml\"\n\n # update the config options with the config file\n cfg.merge_from_file(config_file)\n # manual override some options\n cfg.merge_from_list([\"MODEL.DEVICE\", \"cpu\"])\n\n coco_demo = COCODemo(\n cfg,\n min_image_size=800,\n confidence_threshold=0.9,\n )\n\n return coco_demo\n\ndef get_maskRCNN_predictions(model, image_path):\n image = io.imread(image_path)\n predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)\n\n return predictions, bbox, masks, heatmap\n\n\n\nmodelName = 'dextr_pascal-sbd'\npad = 50\nthres = 0.8\ngpu_id = 0\ndevice = torch.device(\"cpu\")\n#device = torch.device(\"cuda:\"+str(gpu_id) if torch.cuda.is_available() else \"cpu\")\n\n# Create the network and load the weights\nnet = resnet.resnet101(1, nInputChannels=4, classifier='psp')\nprint(\"Initializing weights from: {}\".format(os.path.join(Path.models_dir(), modelName + '.pth')))\nstate_dict_checkpoint = torch.load(os.path.join(Path.models_dir(), modelName + '.pth'),\n map_location=lambda storage, loc: storage)\n# Remove the prefix .module from the model when it is trained using DataParallel\nif 'module.' 
in list(state_dict_checkpoint.keys())[0]:\n new_state_dict = OrderedDict()\n for k, v in state_dict_checkpoint.items():\n name = k[7:] # remove `module.` from multi-gpu training\n new_state_dict[name] = v\nelse:\n new_state_dict = state_dict_checkpoint\nnet.load_state_dict(new_state_dict)\nnet.eval()\nnet.to(device)\n\n# Read image and click the points\n#plt.ion()\n#plt.axis('off')\n#plt.imshow(image)\n#plt.title('Click the four extreme points of the objects\\nHit enter when done (do not close the window)')\n\n#results = []\n\ndef get_extreme_points(BBox):\n x_min = np.int(BBox[0][0])\n y_min = np.int(BBox[0][1])\n x_max = np.int(BBox[0][2])\n y_max = np.int(BBox[0][3])\n\n # Mid point\n #top = np.array([(x_max-x_min)/2, y_min])\n #bottom = np.array([(x_max-x_min)/2, y_max])\n #left = np.array([x_min, (y_max-y_min)/2])\n #right = np.array([x_max, (y_max-y_min)/2])\n\n # Original\n #top = np.array([x_min, y_min])\n #bottom = np.array([x_max, y_max])\n #left = np.array([x_min, y_max])\n #right = np.array([x_max, y_min])\n\n # Customized\n top = np.array([x_min+(x_max-x_min)*0.5, y_min-PAD_SIZE])\n bottom = np.array([x_min+(x_max-x_min)*0.5, y_max+PAD_SIZE])\n left = np.array([x_min-PAD_SIZE, y_min+(y_max-y_min)*0.95])\n right = np.array([x_max+PAD_SIZE, y_min+(y_max-y_min)*0.95])\n\n extreme_points = np.array([top, left, right, bottom]).astype(np.int)\n\n return extreme_points\n\n\ndef get_EP_by_mask(mask):\n mask = mask.squeeze()\n idx = np.nonzero(mask)\n\n left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]\n right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]\n top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]\n bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0])+PAD_SIZE]\n\n points = [top, left, right, bottom]\n points = np.array(points).astype(np.int)\n\n return points\n\n\n\nwith torch.no_grad():\n model = maskRCNN_model()\n for path, dirs, files in os.walk(\"./ims/\"):\n for filename in files:\n #extreme_points_ori = np.array(plt.ginput(4, 
timeout=0)).astype(np.int)\n #extreme_points_ori = np.array(bbox).astype(np.int)\n image_path = path + \"/\" + filename\n image = np.array(Image.open(image_path))\n\n # Get the mask for person from maskRCNN and compute the extreme points using the mask\n _, _, mask, _ = get_maskRCNN_predictions(model, image_path)\n extreme_points_ori = get_EP_by_mask(mask)\n\n #extreme_points_ori = get_extreme_points(BBox)\n #extreme_points_ori = np.array([[205,60],[3,450],[275,475],[560,470]]).astype(np.int)\n \n\n # Crop image to the bounding box from the extreme points and resize\n bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=pad, zero_pad=True)\n crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)\n resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(np.float32)\n\n # Generate extreme point heat map normalized to image values\n extreme_points = extreme_points_ori - [np.min(extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])] + [pad,\n pad]\n extreme_points = (512 * extreme_points * [1 / crop_image.shape[1], 1 / crop_image.shape[0]]).astype(np.int)\n extreme_heatmap = helpers.make_gt(resize_image, extreme_points, sigma=10)\n extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)\n\n # Concatenate inputs and convert to tensor\n input_dextr = np.concatenate((resize_image, extreme_heatmap[:, :, np.newaxis]), axis=2)\n inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.newaxis, ...])\n\n # Run a forward pass\n inputs = inputs.to(device)\n outputs = net.forward(inputs)\n outputs = upsample(outputs, size=(512, 512), mode='bilinear', align_corners=True)\n outputs = outputs.to(torch.device('cpu'))\n\n pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))\n pred = 1 / (1 + np.exp(-pred))\n pred = np.squeeze(pred)\n result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[:2], zero_pad=True, relax=pad) > thres\n\n #results.append(result)\n results = result\n\n # Plot the results\n 
#plt.imshow(//helpers.overlay_masks(image / 255, results))\n #plt.plot(extreme_points_ori[:, 0], extreme_points_ori[:, 1], 'gx')\n out_img = helpers.overlay_masks(image / 255, results)\n mplimg.imsave(\"./output/output_\" + filename, out_img)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Generated by Django 3.1.6 on 2021-02-15 12:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the ``alias`` app: creates the Book and Alias tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('name', models.CharField(max_length=250)),
                # 'slug' is the primary key, so Django adds no automatic
                # 'id' column for Book.
                ('slug', models.SlugField(max_length=25, primary_key=True, serialize=False, unique=True)),
                ('author', models.CharField(max_length=250)),
                # NOTE(review): 'was_buplished' looks like a typo for
                # 'was_published', but the name is baked into the schema;
                # renaming requires a follow-up migration.
                ('was_buplished', models.DateField()),
            ],
        ),
        migrations.CreateModel(
            name='Alias',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('alias', models.CharField(max_length=250)),
                ('start', models.DateTimeField()),
                # NOTE(review): default=None without null=True makes this a
                # NOT NULL column -- saving without an explicit 'end' will
                # fail at the database level; confirm intent.
                ('end', models.DateTimeField(default=None)),
                # PROTECT blocks deleting a Book that still has aliases.
                ('target', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='alias.book')),
            ],
        ),
    ]
|
normal
|
{
"blob_id": "6239cb08509b8e84a88db95479af05845876d9b6",
"index": 1502,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Book', fields=[('name',\n models.CharField(max_length=250)), ('slug', models.SlugField(\n max_length=25, primary_key=True, serialize=False, unique=True)), (\n 'author', models.CharField(max_length=250)), ('was_buplished',\n models.DateField())]), migrations.CreateModel(name='Alias', fields=\n [('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('alias', models.CharField(\n max_length=250)), ('start', models.DateTimeField()), ('end', models\n .DateTimeField(default=None)), ('target', models.ForeignKey(\n on_delete=django.db.models.deletion.PROTECT, to='alias.book'))])]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Book', fields=[('name',\n models.CharField(max_length=250)), ('slug', models.SlugField(\n max_length=25, primary_key=True, serialize=False, unique=True)), (\n 'author', models.CharField(max_length=250)), ('was_buplished',\n models.DateField())]), migrations.CreateModel(name='Alias', fields=\n [('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('alias', models.CharField(\n max_length=250)), ('start', models.DateTimeField()), ('end', models\n .DateTimeField(default=None)), ('target', models.ForeignKey(\n on_delete=django.db.models.deletion.PROTECT, to='alias.book'))])]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-02-15 12:13\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Book',\n fields=[\n ('name', models.CharField(max_length=250)),\n ('slug', models.SlugField(max_length=25, primary_key=True, serialize=False, unique=True)),\n ('author', models.CharField(max_length=250)),\n ('was_buplished', models.DateField()),\n ],\n ),\n migrations.CreateModel(\n name='Alias',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('alias', models.CharField(max_length=250)),\n ('start', models.DateTimeField()),\n ('end', models.DateTimeField(default=None)),\n ('target', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='alias.book')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
# This file defines __unicode__ instead of __str__ so that Chinese text
# displays correctly in the Django admin without raising encoding errors.
# See: http://blog.csdn.net/jiangnanandi/article/details/3574007
# Alternative solution: http://blog.sina.com.cn/s/blog_63cf1c510101an74.html
class FatherMenu(models.Model):
    # Top-level (first-tier) navigation menu entry.
    title = models.CharField(u"菜单名", max_length=20)  # menu display name
    slug = models.CharField(u"链接", max_length=100, db_index=True)  # link URL/path
    son = models.BooleanField("子菜单?", default=False)  # True when this entry has sub-menus
    class Meta:
        verbose_name = u"一级菜单"
        verbose_name_plural = u"一级菜单"
    def __unicode__(self):
        # __unicode__ (not __str__): Python 2 style, keeps Chinese text safe in the admin.
        return self.title
class SonMenu(models.Model):
    # Second-tier menu entry, attached to a FatherMenu.
    title = models.CharField(u"菜单名", max_length=20)  # menu display name
    slug = models.CharField(u"链接", max_length=100, db_index=True)  # link URL/path
    father = models.ForeignKey(
        'seclab.FatherMenu', blank=True, null=True, verbose_name=u"父菜单")  # parent menu (optional)
    class Meta:
        verbose_name = u"二级菜单"
        verbose_name_plural = u"二级菜单"
    def __unicode__(self):
        return self.title
class Img(models.Model):
    # Image entry (presumably a site banner/gallery item — confirm against templates).
    tag = models.CharField(u"类型", max_length=20)  # category/type label
    tagId = models.IntegerField(u"序号")  # ordering index within the tag
    intro = models.CharField(u"描述", max_length=100)  # short description
    title = models.CharField(u"标题", max_length=100)  # display title
    slug = models.CharField(u"链接", max_length=100, db_index=True)  # image link URL/path
    class Meta:
        verbose_name = u"图片"
        verbose_name_plural = u"图片"
    def __unicode__(self):
        # Identified by its link rather than its title.
        return self.slug
class Article(models.Model):
    # A site article/news post.
    tag = models.CharField(u"类型", max_length=20)  # category/type label
    title = models.CharField(u"标题", max_length=100)
    content = models.TextField(u"内容", default=u'', blank=True)  # article body; may be empty
    author = models.CharField(u"作者", max_length=100)
    pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)  # set automatically on creation
    home_display = models.BooleanField(u"首页显示", default=False)  # whether to feature on the home page
    class Meta:
        verbose_name = u"文章"
        verbose_name_plural = u"文章"
    def __unicode__(self):
        return self.title
|
normal
|
{
"blob_id": "49b007b723b9c43fb79d5dffa2546c856faf4937",
"index": 8625,
"step-1": "<mask token>\n\n\nclass SonMenu(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = u'二级菜单'\n verbose_name_plural = u'二级菜单'\n <mask token>\n\n\nclass Img(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n tagId = models.IntegerField(u'序号')\n intro = models.CharField(u'描述', max_length=100)\n title = models.CharField(u'标题', max_length=100)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n\n\n class Meta:\n verbose_name = u'图片'\n verbose_name_plural = u'图片'\n\n def __unicode__(self):\n return self.slug\n\n\nclass Article(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n title = models.CharField(u'标题', max_length=100)\n content = models.TextField(u'内容', default=u'', blank=True)\n author = models.CharField(u'作者', max_length=100)\n pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)\n home_display = models.BooleanField(u'首页显示', default=False)\n\n\n class Meta:\n verbose_name = u'文章'\n verbose_name_plural = u'文章'\n\n def __unicode__(self):\n return self.title\n",
"step-2": "<mask token>\n\n\nclass SonMenu(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = u'二级菜单'\n verbose_name_plural = u'二级菜单'\n\n def __unicode__(self):\n return self.title\n\n\nclass Img(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n tagId = models.IntegerField(u'序号')\n intro = models.CharField(u'描述', max_length=100)\n title = models.CharField(u'标题', max_length=100)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n\n\n class Meta:\n verbose_name = u'图片'\n verbose_name_plural = u'图片'\n\n def __unicode__(self):\n return self.slug\n\n\nclass Article(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n title = models.CharField(u'标题', max_length=100)\n content = models.TextField(u'内容', default=u'', blank=True)\n author = models.CharField(u'作者', max_length=100)\n pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)\n home_display = models.BooleanField(u'首页显示', default=False)\n\n\n class Meta:\n verbose_name = u'文章'\n verbose_name_plural = u'文章'\n\n def __unicode__(self):\n return self.title\n",
"step-3": "<mask token>\n\n\nclass FatherMenu(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = u'一级菜单'\n verbose_name_plural = u'一级菜单'\n <mask token>\n\n\nclass SonMenu(models.Model):\n title = models.CharField(u'菜单名', max_length=20)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n father = models.ForeignKey('seclab.FatherMenu', blank=True, null=True,\n verbose_name=u'父菜单')\n\n\n class Meta:\n verbose_name = u'二级菜单'\n verbose_name_plural = u'二级菜单'\n\n def __unicode__(self):\n return self.title\n\n\nclass Img(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n tagId = models.IntegerField(u'序号')\n intro = models.CharField(u'描述', max_length=100)\n title = models.CharField(u'标题', max_length=100)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n\n\n class Meta:\n verbose_name = u'图片'\n verbose_name_plural = u'图片'\n\n def __unicode__(self):\n return self.slug\n\n\nclass Article(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n title = models.CharField(u'标题', max_length=100)\n content = models.TextField(u'内容', default=u'', blank=True)\n author = models.CharField(u'作者', max_length=100)\n pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)\n home_display = models.BooleanField(u'首页显示', default=False)\n\n\n class Meta:\n verbose_name = u'文章'\n verbose_name_plural = u'文章'\n\n def __unicode__(self):\n return self.title\n",
"step-4": "<mask token>\n\n\nclass FatherMenu(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = u'一级菜单'\n verbose_name_plural = u'一级菜单'\n\n def __unicode__(self):\n return self.title\n\n\nclass SonMenu(models.Model):\n title = models.CharField(u'菜单名', max_length=20)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n father = models.ForeignKey('seclab.FatherMenu', blank=True, null=True,\n verbose_name=u'父菜单')\n\n\n class Meta:\n verbose_name = u'二级菜单'\n verbose_name_plural = u'二级菜单'\n\n def __unicode__(self):\n return self.title\n\n\nclass Img(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n tagId = models.IntegerField(u'序号')\n intro = models.CharField(u'描述', max_length=100)\n title = models.CharField(u'标题', max_length=100)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n\n\n class Meta:\n verbose_name = u'图片'\n verbose_name_plural = u'图片'\n\n def __unicode__(self):\n return self.slug\n\n\nclass Article(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n title = models.CharField(u'标题', max_length=100)\n content = models.TextField(u'内容', default=u'', blank=True)\n author = models.CharField(u'作者', max_length=100)\n pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)\n home_display = models.BooleanField(u'首页显示', default=False)\n\n\n class Meta:\n verbose_name = u'文章'\n verbose_name_plural = u'文章'\n\n def __unicode__(self):\n return self.title\n",
"step-5": "# _*_ coding:utf-8 _*_\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.core.urlresolvers import reverse \n# Create your models here.\n\n\n# 本文件中,用__unicode__代替了__str__,以免在admin界面中显示中文而引发错误。\n# 参考:http://blog.csdn.net/jiangnanandi/article/details/3574007\n# 或者另一个解决方案:http://blog.sina.com.cn/s/blog_63cf1c510101an74.html\n\n\nclass FatherMenu(models.Model):\n\n title = models.CharField(u\"菜单名\", max_length=20)\n slug = models.CharField(u\"链接\", max_length=100, db_index=True)\n son = models.BooleanField(\"子菜单?\", default=False)\n\n class Meta:\n verbose_name = u\"一级菜单\"\n verbose_name_plural = u\"一级菜单\"\n\n def __unicode__(self):\n return self.title\n\n\nclass SonMenu(models.Model):\n\n title = models.CharField(u\"菜单名\", max_length=20)\n slug = models.CharField(u\"链接\", max_length=100, db_index=True)\n father = models.ForeignKey(\n 'seclab.FatherMenu', blank=True, null=True, verbose_name=u\"父菜单\")\n\n class Meta:\n verbose_name = u\"二级菜单\"\n verbose_name_plural = u\"二级菜单\"\n\n def __unicode__(self):\n return self.title\n\n\nclass Img(models.Model):\n tag = models.CharField(u\"类型\", max_length=20)\n tagId = models.IntegerField(u\"序号\")\n intro = models.CharField(u\"描述\", max_length=100)\n title = models.CharField(u\"标题\", max_length=100)\n slug = models.CharField(u\"链接\", max_length=100, db_index=True)\n\n class Meta:\n verbose_name = u\"图片\"\n verbose_name_plural = u\"图片\"\n\n def __unicode__(self):\n return self.slug\n\n\nclass Article(models.Model):\n tag = models.CharField(u\"类型\", max_length=20)\n title = models.CharField(u\"标题\", max_length=100)\n content = models.TextField(u\"内容\", default=u'', blank=True)\n author = models.CharField(u\"作者\", max_length=100)\n pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)\n home_display = models.BooleanField(u\"首页显示\", default=False)\n\n class Meta:\n verbose_name = u\"文章\"\n verbose_name_plural = u\"文章\"\n\n def __unicode__(self):\n return self.title\n",
"step-ids": [
7,
8,
10,
11,
14
]
}
|
[
7,
8,
10,
11,
14
] |
import csv
import json
import re
import itertools
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from networkx.algorithms import community
import snap
import numpy
# Module-level state shared by the CSV-parsing code and the analysis
# functions below.  Kept global (rather than passed around) so parsing,
# cleaning, and graph construction can all update the same maps.
csv.field_size_limit(100000000)
curr_actor_id = 1                # next id to assign to a newly seen actor
all_actors = dict()              # actor name -> Actor
all_actors_id_map = dict()       # actor id -> Actor
all_actors_frequencies = dict()  # Actor -> number of film appearances
edges = set()                    # (actor id, actor id) pairs that co-starred
weights = dict()                 # edge tuple -> number of shared movies
movies = list()                  # every Movie parsed from credits.csv
movies_dict = dict()             # movie id -> Movie
edges_last_60_20 = set()         # edges from movies 20-60 years old (link prediction)
comm = list()                    # communities read back from disk
PG = nx.Graph()                  # link-prediction graph
class Actor:
    """A performer with a unique numeric id and a set of film credits."""

    def __init__(self, name: str, id: int):
        self.name = name
        self.id = id
        self.filmography = set()

    def getName(self):
        """Return the actor's display name."""
        return self.name

    def getId(self):
        """Return the actor's unique numeric id."""
        return self.id

    def getFilms(self):
        """Return the (mutable) set of movies this actor appeared in."""
        return self.filmography

    def updateFilms(self, film: int):
        """Record one more movie credit for this actor."""
        self.filmography.add(film)
class Movie:
    """A film with an id, title, release year, and its cast.

    The original class defined two methods named ``updateActors`` (one for a
    single actor, one for a set); in Python the second definition silently
    shadows the first, so the single-actor overload was dead code.  They are
    merged here into one method that accepts either form.
    """

    def __init__(self, id: int):
        self.actors = set()  # cast members
        self.name = ""
        self.id = id
        self.year = 0        # release year; stays 0 until setDate() is called

    def getName(self):
        return self.name

    def getActors(self):
        return self.actors

    def getId(self):
        return self.id

    def getDate(self):
        return self.year

    def updateActors(self, actors_to_add):
        """Add a single actor or an iterable of actors to the cast."""
        try:
            for actor in actors_to_add:
                self.actors.add(actor)
        except TypeError:  # not iterable -> treat as one actor
            self.actors.add(actors_to_add)

    def setDate(self, i: int):
        """Set the release year."""
        self.year = i
# Parse credits.csv: one row per movie, with the cast serialized in the
# first column (the 'crew' column is dropped).  Builds the module-level
# movies / movies_dict / all_actors / all_actors_id_map /
# all_actors_frequencies structures.
reader = pd.read_csv('credits.csv', header = 0)
crewless = reader.drop('crew', axis = 1)
cleanup = re.compile('[^a-zA-Z\s]')

row = crewless.iterrows()

for x in range(len(reader.index)):
    cur_row = next(row)
    data = cur_row[1][0]      # serialized cast list (quasi-JSON text)
    movie_id = cur_row[1][1]  # movie id
    actors = set()

    # Create an instance of a Movie for each row.
    movie = Movie(int(movie_id))
    movies.append(movie)
    movies_dict[movie_id] = movie

    # The cast column is not strict JSON; split around the 'name' keys and
    # take the text up to the following 'order' key as the actor's name.
    split_around_names = data.split('name')

    for y in range(1, len(split_around_names)):
        actorName = str(split_around_names[y].split('order')[0])
        actorName = cleanup.sub(' ', actorName)
        actorName = actorName.strip()
        if actorName not in all_actors.keys():
            a = Actor(actorName, curr_actor_id)
            curr_actor_id += 1
            a.updateFilms(movie)
            actors.add(a)
            all_actors[actorName] = a
            all_actors_frequencies[a] = 1
            # BUG FIX: key the id map by the actor's own id.  The original
            # used the already-incremented counter, so every id -> Actor
            # lookup was off by one.
            all_actors_id_map[a.getId()] = a
        else:
            existing = all_actors[actorName]
            existing.updateFilms(movie)
            # BUG FIX: increment the frequency of the *existing* actor; the
            # original bumped whichever actor 'a' happened to last point to.
            all_actors_frequencies[existing] += 1
            actors.add(existing)
    # Update the set of actors per movie.
    movie.updateActors(actors)
# Parse movies_metadata.csv to attach a release year to each movie found in
# the credits file.  Only movies with a usable date end up in
# cleaned_movies_1, which the cleaning/graph code consumes.
reader = pd.read_csv('movies_metadata.csv', header = 0)
reader.drop(reader.columns.difference(['id', 'release_date']), axis=1, inplace=True)
row = reader.iterrows()

cleaned_actors = set()    # actors surviving clean()
cleaned_movies_1 = set()  # movies that have a release year
cleaned_movies = set()    # movies surviving clean_movies()

for x in range(len(reader.index)):
    cur_row = next(row)
    movie_id = cur_row[1][0]
    date = cur_row[1][1]
    # movies_metadata.csv contains malformed rows (missing dates parsed as
    # NaN, non-numeric ids); skip those instead of crashing.  The original
    # sliced date[:4] directly, which raises on a float NaN.
    try:
        movie_id = int(movie_id)
        year_int = int(str(date)[:4])
    except (TypeError, ValueError):
        continue
    if movie_id in movies_dict.keys():
        movies_dict[movie_id].setDate(year_int)
        cleaned_movies_1.add(movies_dict[movie_id])
def clean(threshold: int):
    """Keep only actors with more than `threshold` film credits.

    Survivors are collected into the module-level `cleaned_actors` set;
    everyone else is removed from the cast of each of their movies.
    """
    for actor in all_actors.values():
        if len(actor.getFilms()) > threshold:
            cleaned_actors.add(actor)
            continue
        for film in actor.getFilms():
            cast = film.getActors()
            if actor in cast:
                cast.remove(actor)
def clean_movies(threshold: int):
    """Keep only movies released within `threshold` years of 2017.

    Recent movies go into the module-level `cleaned_movies` set; older ones
    are removed from every cast member's filmography.
    """
    for film in cleaned_movies_1:
        if 2017 - film.getDate() <= threshold:
            cleaned_movies.add(film)
            continue
        for member in film.getActors():
            member.getFilms().remove(film)
def createGraph():
    """Build and return the co-appearance graph G; also populate PG.

    Nodes are actor ids; an edge joins two actors who share at least one
    movie.  Edge multiplicities are accumulated in the module-level
    `weights` dict.  Edges arising from movies released 20-60 years before
    2017 are additionally recorded in the module-level graph PG, which is
    used later for link prediction.
    """
    counter = 0  # number of distinct pairs seen (diagnostic only, never read)
    G = nx.Graph()
    PG_actors = set()

    # Fill graph with one node per surviving actor.
    for actor in cleaned_actors:
        G.add_node(actor.getId())

    # Generate edges; weight = number of movies a pair appears in together.
    for movie in cleaned_movies:
        actorIds = set()
        for actor in movie.getActors():
            actorIds.add(actor.getId())
        combinations = itertools.combinations(actorIds, 2)
        for comb in combinations:
            reverse = comb[::-1]  # undirected: (a, b) and (b, a) are the same edge
            if (comb not in edges) and (reverse not in edges):
                counter+=1
                # First sighting of this pair; movies 20-60 years old also
                # feed the link-prediction graph.
                if (2017 - movie.getDate() < 60 and 2017 - movie.getDate() > 20):
                    if (comb not in edges_last_60_20) and (reverse not in edges_last_60_20):
                        edges_last_60_20.add(comb)
                edges.add(comb)
                weights[comb] = 1
            else:
                # Pair already known (in either orientation): bump its weight.
                if comb in edges:
                    weights[comb] = weights[comb] + 1
                elif reverse in edges:
                    weights[reverse] = weights[reverse] + 1
    G.add_edges_from(edges)
    for x in edges_last_60_20:
        if x[0] not in PG_actors:
            PG_actors.add(x[0])
        if x[1] not in PG_actors:
            PG_actors.add(x[1])
    PG.add_nodes_from(PG_actors)
    PG.add_edges_from(edges_last_60_20)
    return G
def centrality_analysis():
    """Write the highest-centrality actors for three measures to disk.

    NOTE(review): despite the `top_10` names, only the single highest score
    is taken (sorted_values[0]); the ids collected are all nodes tied at
    that maximum, not the top ten -- confirm intent.
    """
    types = [nx.eigenvector_centrality, nx.harmonic_centrality, nx.degree_centrality]

    for x in types:

        # based upon cleaning values chosen, choose a directory to store results to.
        file = open('./centrality/40_10/centrality_results_'+x.__name__+'.txt', 'w')
        nodes = x(graph)  # node id -> centrality score
        top_10 = list()
        top_10_ids = list()

        sorted_values = list(nodes.values())
        sorted_values.sort()
        sorted_values.reverse()

        top_10 = sorted_values[0]  # single highest score (see NOTE above)
        # print(sorted_values)

        # for y in top_10:
        # NOTE(review): this loop variable x shadows the centrality function
        # bound by the outer loop.
        for x in nodes.keys():
            if nodes[x] == top_10:
                top_10_ids.append(x)

        file.write(str(len(top_10_ids)) + '\n')
        for x in top_10_ids:
            for y in cleaned_actors:
                if x == y.getId():
                    print(y.getName())
                    #file.write(y.getName() + '\n')
        file.close()
def community_analysis():
    """Run Girvan-Newman community detection until >= 10 communities exist.

    NOTE(review): f.write sits outside the while loop, so only the final
    partition is written to disk (earlier iterations are only printed) --
    confirm whether each iteration should be logged instead.
    """
    f = open('./community/communities_outputs.txt', 'w')
    communities_generator = nx.community.girvan_newman(graph)
    communities = next(communities_generator)
    size = len(communities)
    while size < 10:
        print(communities)
        communities = next(communities_generator)
        size = len(communities)
    f.write('community iteration: size = {}, {} \n'.format(size, communities))
def link_pred():
    """Jaccard-coefficient link prediction on PG (20-60 year-old movies).

    Candidate pairs are actors at distance exactly 2 in PG.  For several k,
    the top-k predictions are written to disk along with precision@k, where
    a prediction counts as a hit when the pair is an edge of the full graph.
    """
    # All pairs within two hops; keep exactly-distance-2 pairs as candidates.
    splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2))
    friends_PG = list()
    for x in splPG.keys():
        for y in splPG[x].keys():
            if splPG[x][y] == 2:
                l = list()
                l.append(x)
                l.append(y)
                friends_PG.append(l)
    predictions = nx.jaccard_coefficient(PG, friends_PG)
    results = list()
    for x in predictions:
        results.append(x)
    # Highest Jaccard coefficient first.
    results.sort(key=lambda x: x[2])
    results.reverse()

    k_vals = [10,20,50,100]
    for k in k_vals:
        f = open('./link_pred/link_prediction_values_jaccard' + str(k) + '.txt', 'w')
        count = 0
        while (count < k):
            print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[count][0]].getName(), all_actors_id_map[results[count][1]].getName(), results[count][2]))
            f.write('({}, {}),jaccard: {}\n'.format(all_actors_id_map[results[count][0]].getName(),all_actors_id_map[results[count][1]].getName(),results[count][2]))
            count+=1
        top_k = list()
        precision_at_k = 0
        for x in range(k):
            top_k.append(results[x])
        count = 0
        # A predicted pair is a hit when it already co-stars in the full graph.
        # NOTE(review): only the (a, b) orientation is tested against `edges`;
        # the reversed tuple would be missed -- confirm orientations match.
        for val in top_k:
            tup = (val[0], val[1])
            if tup in edges:
                count += 1
        precision_at_k = count / k
        print('precision @ K{}: {}\n'.format(k, precision_at_k))
        f.write('precision @ K{}: {}'.format(k, precision_at_k))
    # NOTE(review): f.close() is outside the for loop, so only the last
    # file handle is closed explicitly.
    f.close()
def convert_id_actor():
    """Convert the saved community output from actor ids to actor names.

    Reads './community_/communities_outputs.txt' (one comma-separated list
    of ids per line), maps each id through `all_actors_id_map`, and writes
    the named communities (largest-iteration first) to
    './actorname_communities.txt'.

    Cleanup vs. the original: ``items[i].strip('\\n')`` discarded its result
    (a no-op -- int() happened to tolerate the newline), and the ints were
    copied element-by-element into a second list; both collapsed into one
    comprehension.  The bare ``except:`` is narrowed to KeyError.
    """
    file = open('./community_/communities_outputs.txt')
    for row in file:
        comm.append([int(item.strip()) for item in row.split(', ')])
    file.close()
    file = open('./actorname_communities.txt', 'w')
    for x in range(len(comm)):
        for y in range(len(comm[x])):
            try:
                comm[x][y] = all_actors_id_map[comm[x][y]].getName()
            except KeyError:  # id not in the map
                comm[x][y] = 'None'
    comm.reverse()
    for x in range(len(comm)):
        print("Community #{}: {}".format(x, comm[x]))
        file.write("Community #{}: {}\n".format(x, comm[x]))
        file.flush()
    file.close()
# Restrict the data set -- movies released within 60 years of 2017, actors
# with more than 30 credits -- then build the co-appearance graphs.
clean_movies(60)
clean(30)
graph = createGraph()
print(nx.info(graph))
print(nx.info(PG))
# To perform the analysis, uncomment the respective function(s); additionally, uncomment #convert_id_actor() for community_analysis.
# centrality_analysis()
# community_analysis()
# convert_id_actor()
# link_pred()
|
normal
|
{
"blob_id": "0934163fc6461e30a73c06e74b3a5e983ed2fa02",
"index": 4211,
"step-1": "<mask token>\n\n\nclass Movie:\n\n def __init__(self, id: int):\n self.actors = set()\n self.name = ''\n self.id = id\n self.year = 0\n\n def getName(self):\n return self.name\n\n def getActors(self):\n return self.actors\n\n def getId(self):\n return self.id\n\n def getDate(self):\n return self.year\n <mask token>\n\n def updateActors(self, actors_to_add: set()):\n for x in actors_to_add:\n self.actors.add(x)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Actor:\n\n def __init__(self, name: str, id: int):\n self.filmography = set()\n self.name = name\n self.id = id\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Movie:\n\n def __init__(self, id: int):\n self.actors = set()\n self.name = ''\n self.id = id\n self.year = 0\n\n def getName(self):\n return self.name\n\n def getActors(self):\n return self.actors\n\n def getId(self):\n return self.id\n\n def getDate(self):\n return self.year\n\n def updateActors(self, actor: Actor):\n self.actors.add(actor)\n\n def updateActors(self, actors_to_add: set()):\n for x in actors_to_add:\n self.actors.add(x)\n\n def setDate(self, i: int):\n self.year = i\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Actor:\n\n def __init__(self, name: str, id: int):\n self.filmography = set()\n self.name = name\n self.id = id\n\n def getFilms(self):\n return self.filmography\n\n def getName(self):\n return self.name\n\n def getId(self):\n return self.id\n\n def updateFilms(self, film: int):\n self.filmography.add(film)\n\n\nclass Movie:\n\n def __init__(self, id: int):\n self.actors = set()\n self.name = ''\n self.id = id\n self.year = 0\n\n def getName(self):\n return self.name\n\n def getActors(self):\n return self.actors\n\n def getId(self):\n return self.id\n\n def getDate(self):\n return self.year\n\n def updateActors(self, actor: Actor):\n self.actors.add(actor)\n\n def updateActors(self, actors_to_add: set()):\n for x in actors_to_add:\n self.actors.add(x)\n\n def setDate(self, i: int):\n self.year = i\n\n\n<mask token>\n\n\ndef community_analysis():\n f = open('./community/communities_outputs.txt', 'w')\n communities_generator = nx.community.girvan_newman(graph)\n communities = next(communities_generator)\n size = len(communities)\n while size < 10:\n print(communities)\n communities = next(communities_generator)\n size = len(communities)\n f.write('community iteration: size = {}, {} \\n'.format(size,\n communities))\n\n\ndef link_pred():\n splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2))\n friends_PG = list()\n for x in splPG.keys():\n for y in splPG[x].keys():\n if splPG[x][y] == 2:\n l = list()\n l.append(x)\n l.append(y)\n friends_PG.append(l)\n predictions = nx.jaccard_coefficient(PG, friends_PG)\n results = list()\n for x in predictions:\n results.append(x)\n results.sort(key=lambda x: x[2])\n results.reverse()\n k_vals = [10, 20, 50, 100]\n for k in k_vals:\n f = open('./link_pred/link_prediction_values_jaccard' + str(k) +\n '.txt', 'w')\n count = 0\n while count < k:\n print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[\n count][0]].getName(), all_actors_id_map[results[count][1]].\n getName(), 
results[count][2]))\n f.write('({}, {}),jaccard: {}\\n'.format(all_actors_id_map[\n results[count][0]].getName(), all_actors_id_map[results[\n count][1]].getName(), results[count][2]))\n count += 1\n top_k = list()\n precision_at_k = 0\n for x in range(k):\n top_k.append(results[x])\n count = 0\n for val in top_k:\n tup = val[0], val[1]\n if tup in edges:\n count += 1\n precision_at_k = count / k\n print('precision @ K{}: {}\\n'.format(k, precision_at_k))\n f.write('precision @ K{}: {}'.format(k, precision_at_k))\n f.close()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Actor:\n\n def __init__(self, name: str, id: int):\n self.filmography = set()\n self.name = name\n self.id = id\n\n def getFilms(self):\n return self.filmography\n\n def getName(self):\n return self.name\n\n def getId(self):\n return self.id\n\n def updateFilms(self, film: int):\n self.filmography.add(film)\n\n\nclass Movie:\n\n def __init__(self, id: int):\n self.actors = set()\n self.name = ''\n self.id = id\n self.year = 0\n\n def getName(self):\n return self.name\n\n def getActors(self):\n return self.actors\n\n def getId(self):\n return self.id\n\n def getDate(self):\n return self.year\n\n def updateActors(self, actor: Actor):\n self.actors.add(actor)\n\n def updateActors(self, actors_to_add: set()):\n for x in actors_to_add:\n self.actors.add(x)\n\n def setDate(self, i: int):\n self.year = i\n\n\n<mask token>\n\n\ndef clean(threshold: int):\n for actorName in all_actors.keys():\n if len(all_actors[actorName].getFilms()) > threshold:\n cleaned_actors.add(all_actors[actorName])\n else:\n for movie in all_actors[actorName].getFilms():\n if all_actors[actorName] in movie.getActors():\n movie.getActors().remove(all_actors[actorName])\n\n\ndef clean_movies(threshold: int):\n for movie in cleaned_movies_1:\n if 2017 - movie.getDate() <= threshold:\n cleaned_movies.add(movie)\n else:\n for actor in movie.getActors():\n s = actor.getFilms()\n s.remove(movie)\n\n\ndef createGraph():\n counter = 0\n G = nx.Graph()\n PG_actors = set()\n for actor in cleaned_actors:\n G.add_node(actor.getId())\n for movie in cleaned_movies:\n actorIds = set()\n for actor in movie.getActors():\n actorIds.add(actor.getId())\n combinations = itertools.combinations(actorIds, 2)\n for comb in combinations:\n reverse = comb[::-1]\n if comb not in edges and reverse not in edges:\n counter += 1\n if 2017 - movie.getDate() < 60 and 2017 - movie.getDate() > 20:\n if (comb not in edges_last_60_20 and reverse not in\n edges_last_60_20):\n 
edges_last_60_20.add(comb)\n edges.add(comb)\n weights[comb] = 1\n elif comb in edges:\n weights[comb] = weights[comb] + 1\n elif reverse in edges:\n weights[reverse] = weights[reverse] + 1\n G.add_edges_from(edges)\n for x in edges_last_60_20:\n if x[0] not in PG_actors:\n PG_actors.add(x[0])\n if x[1] not in PG_actors:\n PG_actors.add(x[1])\n PG.add_nodes_from(PG_actors)\n PG.add_edges_from(edges_last_60_20)\n return G\n\n\ndef centrality_analysis():\n types = [nx.eigenvector_centrality, nx.harmonic_centrality, nx.\n degree_centrality]\n for x in types:\n file = open('./centrality/40_10/centrality_results_' + x.__name__ +\n '.txt', 'w')\n nodes = x(graph)\n top_10 = list()\n top_10_ids = list()\n sorted_values = list(nodes.values())\n sorted_values.sort()\n sorted_values.reverse()\n top_10 = sorted_values[0]\n for x in nodes.keys():\n if nodes[x] == top_10:\n top_10_ids.append(x)\n file.write(str(len(top_10_ids)) + '\\n')\n for x in top_10_ids:\n for y in cleaned_actors:\n if x == y.getId():\n print(y.getName())\n file.close()\n\n\ndef community_analysis():\n f = open('./community/communities_outputs.txt', 'w')\n communities_generator = nx.community.girvan_newman(graph)\n communities = next(communities_generator)\n size = len(communities)\n while size < 10:\n print(communities)\n communities = next(communities_generator)\n size = len(communities)\n f.write('community iteration: size = {}, {} \\n'.format(size,\n communities))\n\n\ndef link_pred():\n splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2))\n friends_PG = list()\n for x in splPG.keys():\n for y in splPG[x].keys():\n if splPG[x][y] == 2:\n l = list()\n l.append(x)\n l.append(y)\n friends_PG.append(l)\n predictions = nx.jaccard_coefficient(PG, friends_PG)\n results = list()\n for x in predictions:\n results.append(x)\n results.sort(key=lambda x: x[2])\n results.reverse()\n k_vals = [10, 20, 50, 100]\n for k in k_vals:\n f = open('./link_pred/link_prediction_values_jaccard' + str(k) +\n '.txt', 
'w')\n count = 0\n while count < k:\n print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[\n count][0]].getName(), all_actors_id_map[results[count][1]].\n getName(), results[count][2]))\n f.write('({}, {}),jaccard: {}\\n'.format(all_actors_id_map[\n results[count][0]].getName(), all_actors_id_map[results[\n count][1]].getName(), results[count][2]))\n count += 1\n top_k = list()\n precision_at_k = 0\n for x in range(k):\n top_k.append(results[x])\n count = 0\n for val in top_k:\n tup = val[0], val[1]\n if tup in edges:\n count += 1\n precision_at_k = count / k\n print('precision @ K{}: {}\\n'.format(k, precision_at_k))\n f.write('precision @ K{}: {}'.format(k, precision_at_k))\n f.close()\n\n\ndef convert_id_actor():\n file = open('./community_/communities_outputs.txt')\n for row in file:\n items = row.split(', ')\n i = 0\n while i < len(items):\n items[i].strip('\\n')\n items[i] = int(items[i])\n i += 1\n i = 0\n this_row = list()\n i = 0\n while i < len(items):\n this_row.append(items[i])\n i += 1\n comm.append(this_row)\n file.close()\n file = open('./actorname_communities.txt', 'w')\n for x in range(len(comm)):\n for y in range(len(comm[x])):\n try:\n comm[x][y] = all_actors_id_map[comm[x][y]].getName()\n except:\n comm[x][y] = 'None'\n comm.reverse()\n for x in range(len(comm)):\n print('Community #{}: {}'.format(x, comm[x]))\n file.write('Community #{}: {}\\n'.format(x, comm[x]))\n file.flush()\n file.close()\n\n\n<mask token>\n",
"step-5": "import csv\nimport json\nimport re\nimport itertools\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom networkx.algorithms import community\nimport snap\nimport numpy\n\n# setting up data structures to map actor IDs to objects in order to increase run time.\ncsv.field_size_limit(100000000)\ncurr_actor_id = 1\nall_actors = dict()\nall_actors_id_map = dict()\nall_actors_frequencies = dict()\nedges = set()\nweights = dict()\nmovies = list()\nmovies_dict = dict()\nedges_last_60_20 = set()\ncomm = list()\nPG = nx.Graph()\n\nclass Actor:\n\n def __init__(self, name: str, id:int):\n self.filmography = set()\n self.name = name\n self.id = id\n def getFilms(self):\n return self.filmography\n\n def getName(self):\n return self.name\n\n def getId(self):\n return self.id\n\n def updateFilms(self, film:int):\n self.filmography.add(film)\n\n\nclass Movie:\n\n def __init__(self, id: int):\n self.actors = set()\n self.name = \"\"\n self.id = id\n self.year = 0\n\n def getName(self):\n return self.name\n\n def getActors(self):\n return self.actors\n\n def getId(self):\n return self.id\n\n def getDate(self):\n return self.year\n\n def updateActors(self, actor:Actor):\n self.actors.add(actor)\n\n def updateActors(self, actors_to_add:set()):\n for x in actors_to_add:\n self.actors.add(x)\n\n def setDate(self, i: int):\n self.year = i\n\n#parsing data from csv and dropping crew column\nreader = pd.read_csv('credits.csv', header = 0)\ncrewless = reader.drop('crew', axis = 1)\ncleanup = re.compile('[^a-zA-Z\\s]')\n\n#skip the header row\nrow = crewless.iterrows()\n\n#loop through each row\nfor x in range(len(reader.index)):\n cur_row = next(row)\n data = cur_row[1][0]\n id = cur_row[1][1]\n actors = set()\n\n #create an instance of a Movie for each row\n movie = Movie(int(id))\n movies.append(movie)\n movies_dict[id] = movie\n\n #split the string around each name\n split_around_names = data.split('name')\n\n #parse actors, and create an 
instance of Actor for each actor in each movie\n for y in range(1, len(split_around_names)):\n #Cleaning up characters and spaces around the actor's name\n actorName = str(split_around_names[y].split('order')[0])\n actorName = cleanup.sub(' ', actorName)\n actorName = actorName.strip()\n #Create the Actor and update his/her filmography\n if actorName not in all_actors.keys():\n a = Actor(actorName, curr_actor_id)\n curr_actor_id += 1\n a.updateFilms(movie)\n actors.add(a)\n all_actors[actorName] = a\n all_actors_frequencies[a] = 1\n all_actors_id_map[curr_actor_id] = a\n else:\n all_actors[actorName].updateFilms(movie)\n all_actors_frequencies[a] += 1\n actors.add(all_actors[actorName])\n #Update the set of actors per movie\n movie.updateActors(actors)\n\nreader = pd.read_csv('movies_metadata.csv', header = 0)\nreader.drop(reader.columns.difference(['id', 'release_date']), 1, inplace=True)\nrow = reader.iterrows()\n\ncleaned_actors = set()\ncleaned_movies_1 = set()\ncleaned_movies = set()\n\n# adding ids to movies from movie files\nfor x in range(len(reader.index)):\n cur_row = next(row)\n id = cur_row[1][0]\n date = cur_row[1][1]\n id = int(id)\n year = date[:4]\n year_int = int(year)\n if id in movies_dict.keys():\n movies_dict[id].setDate(year_int)\n cleaned_movies_1.add(movies_dict[id])\n\n\ndef clean(threshold: int):\n for actorName in all_actors.keys():\n if len(all_actors[actorName].getFilms()) > threshold:\n cleaned_actors.add(all_actors[actorName])\n else:\n for movie in all_actors[actorName].getFilms():\n if all_actors[actorName] in movie.getActors():\n movie.getActors().remove(all_actors[actorName])\n\n\ndef clean_movies(threshold: int):\n for movie in cleaned_movies_1:\n if 2017 - movie.getDate() <= threshold:\n cleaned_movies.add(movie)\n else:\n for actor in movie.getActors():\n s = actor.getFilms()\n s.remove(movie)\n\n\ndef createGraph():\n counter = 0\n G = nx.Graph()\n PG_actors = set()\n\n #fill graph with nodes\n for actor in cleaned_actors:\n 
G.add_node(actor.getId())\n\n #generate a list of edges and weights based on frequencie of combination appearances\n for movie in cleaned_movies:\n actorIds = set()\n for actor in movie.getActors():\n actorIds.add(actor.getId())\n combinations = itertools.combinations(actorIds, 2)\n for comb in combinations:\n reverse = comb[::-1]\n if (comb not in edges) and (reverse not in edges):\n counter+=1\n if (2017 - movie.getDate() < 60 and 2017 - movie.getDate() > 20):\n if (comb not in edges_last_60_20) and (reverse not in edges_last_60_20):\n edges_last_60_20.add(comb)\n edges.add(comb)\n weights[comb] = 1\n else:\n if comb in edges:\n weights[comb] = weights[comb] + 1\n elif reverse in edges:\n weights[reverse] = weights[reverse] + 1\n G.add_edges_from(edges)\n for x in edges_last_60_20:\n if x[0] not in PG_actors:\n PG_actors.add(x[0])\n if x[1] not in PG_actors:\n PG_actors.add(x[1])\n PG.add_nodes_from(PG_actors)\n PG.add_edges_from(edges_last_60_20)\n return G\n\n\ndef centrality_analysis():\n types = [nx.eigenvector_centrality, nx.harmonic_centrality, nx.degree_centrality]\n\n for x in types:\n\n # based upon cleaning values chosen, choose a directory to store results to.\n file = open('./centrality/40_10/centrality_results_'+x.__name__+'.txt', 'w')\n nodes = x(graph)\n top_10 = list()\n top_10_ids = list()\n\n sorted_values = list(nodes.values())\n sorted_values.sort()\n sorted_values.reverse()\n\n top_10 = sorted_values[0]\n # print(sorted_values)\n\n # for y in top_10:\n for x in nodes.keys():\n if nodes[x] == top_10:\n top_10_ids.append(x)\n\n file.write(str(len(top_10_ids)) + '\\n')\n for x in top_10_ids:\n for y in cleaned_actors:\n if x == y.getId():\n print(y.getName())\n #file.write(y.getName() + '\\n')\n file.close()\n\n\ndef community_analysis():\n f = open('./community/communities_outputs.txt', 'w')\n communities_generator = nx.community.girvan_newman(graph)\n communities = next(communities_generator)\n size = len(communities)\n while size < 10:\n 
print(communities)\n communities = next(communities_generator)\n size = len(communities)\n f.write('community iteration: size = {}, {} \\n'.format(size, communities))\n\n\ndef link_pred():\n splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2))\n friends_PG = list()\n for x in splPG.keys():\n for y in splPG[x].keys():\n if splPG[x][y] == 2:\n l = list()\n l.append(x)\n l.append(y)\n friends_PG.append(l)\n predictions = nx.jaccard_coefficient(PG, friends_PG)\n results = list()\n for x in predictions:\n results.append(x)\n results.sort(key=lambda x: x[2])\n results.reverse()\n\n k_vals = [10,20,50,100]\n for k in k_vals:\n f = open('./link_pred/link_prediction_values_jaccard' + str(k) + '.txt', 'w')\n count = 0\n while (count < k):\n print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[count][0]].getName(), all_actors_id_map[results[count][1]].getName(), results[count][2]))\n f.write('({}, {}),jaccard: {}\\n'.format(all_actors_id_map[results[count][0]].getName(),all_actors_id_map[results[count][1]].getName(),results[count][2]))\n count+=1\n top_k = list()\n precision_at_k = 0\n for x in range(k):\n top_k.append(results[x])\n count = 0\n for val in top_k:\n tup = (val[0], val[1])\n if tup in edges:\n count += 1\n precision_at_k = count / k\n print('precision @ K{}: {}\\n'.format(k, precision_at_k))\n f.write('precision @ K{}: {}'.format(k, precision_at_k))\n f.close()\n\n#Convert community results from IDs to Actor name\ndef convert_id_actor():\n file = open('./community_/communities_outputs.txt')\n for row in file:\n items = row.split(', ')\n i = 0\n while i < len(items):\n items[i].strip('\\n')\n items[i] = int(items[i])\n i+=1\n i = 0\n this_row = list()\n i= 0\n while i < len(items):\n this_row.append(items[i])\n i+=1\n comm.append(this_row)\n file.close()\n file = open('./actorname_communities.txt', 'w')\n for x in range(len(comm)):\n for y in range(len(comm[x])):\n try:\n comm[x][y] = all_actors_id_map[comm[x][y]].getName()\n except:\n 
comm[x][y] = 'None'\n comm.reverse()\n for x in range(len(comm)):\n print(\"Community #{}: {}\".format(x, comm[x]))\n file.write(\"Community #{}: {}\\n\".format(x, comm[x]))\n file.flush()\n file.close()\n\n\nclean_movies(60)\nclean(30)\n\ngraph = createGraph()\nprint(nx.info(graph))\nprint(nx.info(PG))\n\n\n# To perform the analysis, uncomment the respective function(s); additionally, uncomment #convert_id_actor() for community_analysis.\n# centrality_analysis()\n# community_analysis()\n# convert_id_actor()\n# link_pred()\n",
"step-ids": [
7,
11,
17,
22,
26
]
}
|
[
7,
11,
17,
22,
26
] |
import unittest
import sys
from tests.jep_pipe import jep_pipe
from tests.jep_pipe import build_java_process_cmd
import jep
@unittest.skipIf(sys.platform.startswith("win"), "subprocess complications on Windows")
class TestSharedModules(unittest.TestCase):
    """Drives Java-side test programs that exercise Jep's shared-modules support."""

    def setUp(self):
        pass

    def _run_java(self, java_class):
        # Every test here delegates to an external Java process; jep_pipe
        # streams its output and fails the test on a non-clean run.
        jep_pipe(build_java_process_cmd(java_class))

    def test_shared_modules(self):
        """Run the Java-side jep.test.TestSharedModules program."""
        self._run_java('jep.test.TestSharedModules')

    @unittest.skipUnless(jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')
    def test_numpy_prod_succeeds(self):
        """Run the Java-side jep.test.numpy.TestNumpyProdShared program."""
        self._run_java('jep.test.numpy.TestNumpyProdShared')

    @unittest.skipUnless(jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')
    def test_numpy_array_to_string(self):
        """Run the Java-side jep.test.numpy.TestNumpyArrayToString program."""
        self._run_java('jep.test.numpy.TestNumpyArrayToString')
|
normal
|
{
"blob_id": "39bc90f34cccebe9a8b1475e396caa1c14f6b2df",
"index": 9004,
"step-1": "<mask token>\n\n\[email protected](sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\[email protected](sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n <mask token>\n",
"step-3": "<mask token>\n\n\[email protected](sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_array_to_string(self):\n jep_pipe(build_java_process_cmd(\n 'jep.test.numpy.TestNumpyArrayToString'))\n",
"step-4": "import unittest\nimport sys\nfrom tests.jep_pipe import jep_pipe\nfrom tests.jep_pipe import build_java_process_cmd\nimport jep\n\n\[email protected](sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_array_to_string(self):\n jep_pipe(build_java_process_cmd(\n 'jep.test.numpy.TestNumpyArrayToString'))\n",
"step-5": "import unittest\nimport sys\nfrom tests.jep_pipe import jep_pipe\nfrom tests.jep_pipe import build_java_process_cmd\nimport jep\n\n\[email protected](sys.platform.startswith(\"win\"), \"subprocess complications on Windows\")\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')\n def test_numpy_array_to_string(self):\n jep_pipe(build_java_process_cmd(\n 'jep.test.numpy.TestNumpyArrayToString'))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
This module provides functions to make WCSAxes work in SunPy.
"""
import matplotlib.pyplot as plt
from packaging.version import Version
import astropy.units as u
from astropy import __version__ as astropy_version
from astropy.visualization import wcsaxes
from sunpy.coordinates import HeliographicCarrington, HeliographicStonyhurst
# Public API of this module.
__all__ = ["is_wcsaxes", "gca_wcs", "get_world_transform",
           "default_wcs_grid", "wcsaxes_heliographic_overlay"]
def is_wcsaxes(axes):
    """
    Check whether ``axes`` is an `~astropy.visualization.wcsaxes.WCSAxes`.

    Parameters
    ----------
    axes : `matplotlib.axes`
        The axes instance to inspect.

    Returns
    -------
    `bool`
        `True` when ``axes`` is a WCSAxes instance, `False` otherwise.
    """
    return isinstance(axes, wcsaxes.WCSAxes)
def gca_wcs(wcs, fig=None, slices=None):
    """
    Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`
    if ``fig`` has no axes.

    Parameters
    ----------
    wcs : `astropy.wcs.WCS`
        A `~astropy.wcs.WCS` object used to create a new axes.
    fig : `matplotlib.figure.Figure`
        The figure in which to check for the axes. If ``None``, the current
        figure is used (or a new one created if there are no current figures).
    slices : `tuple`
        ``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe
        which two dimensions of the `~astropy.wcs.WCS` object are being plotted.
        This slices the multidimensional wcs object in the way it needs to be sliced.

    Returns
    -------
    `matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`
        The current axes, or a new one if created.
    """
    if not fig:
        fig = plt.gcf()

    # An empty list of axes is falsy; no need to take len() first.
    if not fig.get_axes():
        # NOTE(review): plt.axes()/plt.gca() operate on the *current* figure,
        # which may differ from ``fig`` when a figure is passed explicitly.
        # Confirm callers always pass the current figure (or None) before
        # relying on the returned axes belonging to ``fig``.
        ax = plt.axes(projection=wcs, slices=slices)
    else:
        ax = plt.gca()

    return ax
def get_world_transform(axes):
    """
    Return the transformation to world coordinates for ``axes``.

    For a `~astropy.visualization.wcsaxes.WCSAxes` instance this is the
    axes' ``'world'`` transform; for any other axes the matplotlib data
    transform is returned, on the assumption that the data coordinates
    already are world coordinates.

    Parameters
    ----------
    axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`
        The axes to get the transform from.

    Returns
    -------
    `~matplotlib.transforms.CompositeGenericTransform`
        The transformation object.
    """
    if is_wcsaxes(axes):
        return axes.get_transform('world')
    return axes.transData
def default_wcs_grid(axes):
    """
    Draw SunPy's default world-coordinate grid styling on ``axes``.

    Parameters
    ----------
    axes : `~astropy.visualization.wcsaxes.WCSAxes`
        The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world
        coordinate grid on.
    """
    # A faint dotted white grid is the default look for solar images.
    grid_style = {'color': 'white', 'alpha': 0.6,
                  'linestyle': 'dotted', 'linewidth': 0.5}
    axes.coords.grid(**grid_style)
@u.quantity_input
def wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg = 10*u.deg, annotate=True,
                                 obstime=None, rsun=None, observer=None, system='stonyhurst',
                                 **kwargs):
    """
    Create a heliographic overlay using
    `~astropy.visualization.wcsaxes.WCSAxes`.

    Will draw a grid and label the top axes.

    Parameters
    ----------
    axes : `~astropy.visualization.wcsaxes.WCSAxes`
        The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.
    grid_spacing: `~astropy.units.Quantity`
        Spacing for longitude and latitude grid in degrees.
    annotate : `bool`
        Passing `False` disables the axes labels and the ticks on the top and right axes.
    obstime : `~astropy.time.Time`
        The ``obstime`` to use for the grid coordinate frame.
    rsun : `~astropy.units.Quantity`
        The ``rsun`` to use for the grid coordinate frame.
    observer : `~astropy.coordinates.SkyCoord`
        The ``observer`` to use for the grid coordinate frame. Only used for
        Carrington coordinates.
    system : str
        Coordinate system for the grid. Must be 'stonyhurst' or 'carrington'.
        If 'carrington', the ``observer`` keyword argument must be specified.
    kwargs :
        Additional keyword arguments are passed to
        :meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.

    Returns
    -------
    `~astropy.visualization.wcsaxes.WCSAxes`
        The overlay object.

    Notes
    -----
    Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.
    """
    # Unpack spacing: a scalar Quantity applies to both axes, a length-2
    # Quantity gives (longitude, latitude) spacing separately.
    if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:
        lon_space = lat_space = grid_spacing
    elif grid_spacing.size == 2:
        lon_space, lat_space = grid_spacing
    else:
        raise ValueError("grid_spacing must be a Quantity of length one or two.")

    if system == 'stonyhurst':
        overlay = axes.get_coords_overlay(HeliographicStonyhurst(
            obstime=obstime, rsun=rsun))
    elif system == 'carrington':
        # Carrington longitude is observer-dependent, hence the extra argument.
        overlay = axes.get_coords_overlay(HeliographicCarrington(
            obstime=obstime, observer=observer, rsun=rsun))
    else:
        raise ValueError(f"system must be 'stonyhurst' or 'carrington' (got '{system}')")

    # Set the native coordinates to be bottom and left only so they don't share
    # axes with the overlay ('bl' = bottom/left, 'tr' = top/right position codes).
    c1, c2 = axes.coords
    c1.set_ticks_position('bl')
    c2.set_ticks_position('bl')

    lon = overlay[0]
    lat = overlay[1]

    # TODO: Remove when we depend on astropy 5.3
    # astropy >= 5.3 expects coord_wrap as an angular Quantity; older
    # versions take a bare number interpreted as degrees.
    if Version(astropy_version) >= Version("5.3.dev"):
        lon.coord_wrap = 180 * u.deg
    else:
        lon.coord_wrap = 180
    lon.set_major_formatter('dd')

    if annotate:
        lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)
        lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)
        lon.set_ticks_position('tr')
        lat.set_ticks_position('tr')
    else:
        lat.set_ticks_visible(False)
        lon.set_ticks_visible(False)
        lat.set_ticklabel_visible(False)
        lon.set_ticklabel_visible(False)

    # Caller-supplied kwargs override the default grid styling.
    grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}
    grid_kw.update(kwargs)

    # Don't plot white ticks by default (only if explicitly asked)
    tick_color = grid_kw['color'] if 'color' in kwargs else 'k'
    lon.set_ticks(spacing=lon_space, color=tick_color)
    lat.set_ticks(spacing=lat_space, color=tick_color)

    overlay.grid(**grid_kw)

    if axes.title:
        # Nudge the title upwards to make room for the labels added on the
        # top axis when annotate=True.
        x, y = axes.title.get_position()
        axes.title.set_position([x, y + 0.08])

    return overlay
|
normal
|
{
"blob_id": "be1ef0aa3868985bf198781ee827bd447588df15",
"index": 606,
"step-1": "<mask token>\n\n\ndef is_wcsaxes(axes):\n \"\"\"\n Tests a `matplotlib.axes.Axes` object to see if it is an instance of\n `~astropy.visualization.wcsaxes.WCSAxes`.\n\n Parameters\n ----------\n axes : `matplotlib.axes`\n Axes to test.\n\n Returns\n -------\n `bool`\n Result of the test.\n \"\"\"\n return isinstance(axes, wcsaxes.WCSAxes)\n\n\ndef gca_wcs(wcs, fig=None, slices=None):\n \"\"\"\n Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`\n if ``fig`` has no axes.\n\n Parameters\n ----------\n wcs : `astropy.wcs.WCS`\n A `~astropy.wcs.WCS` object used to create a new axes.\n fig : `matplotlib.figure.Figure`\n The figure in which to check for the axes. If ``None``, the current\n figure is used (or a new one created if there are no current figures).\n slices : `tuple`\n ``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe\n which two dimensions of the `~astropy.wcs.WCS` object are being plotted.\n This slices the multidimensional wcs object in the way it needs to be sliced.\n\n Returns\n -------\n `matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`\n The current axes, or a new one if created.\n \"\"\"\n if not fig:\n fig = plt.gcf()\n if not len(fig.get_axes()):\n ax = plt.axes(projection=wcs, slices=slices)\n else:\n ax = plt.gca()\n return ax\n\n\n<mask token>\n\n\ndef default_wcs_grid(axes):\n \"\"\"\n Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid\n formatting.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world\n coordinate grid on.\n \"\"\"\n axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',\n linewidth=0.5)\n\n\[email protected]_input\ndef wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg=10 * u.deg,\n annotate=True, obstime=None, rsun=None, observer=None, system=\n 'stonyhurst', **kwargs):\n \"\"\"\n Create a heliographic overlay using\n 
`~astropy.visualization.wcsaxes.WCSAxes`.\n\n Will draw a grid and label the top axes.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.\n grid_spacing: `~astropy.units.Quantity`\n Spacing for longitude and latitude grid in degrees.\n annotate : `bool`\n Passing `False` disables the axes labels and the ticks on the top and right axes.\n obstime : `~astropy.time.Time`\n The ``obstime`` to use for the grid coordinate frame.\n rsun : `~astropy.units.Quantity`\n The ``rsun`` to use for the grid coordinate frame.\n observer : `~astropy.coordinates.SkyCoord`\n The ``observer`` to use for the grid coordinate frame. Only used for\n Carrington coordinates.\n system : str\n Coordinate system for the grid. Must be 'stonyhurst' or 'carrington'.\n If 'carrington', the ``observer`` keyword argument must be specified.\n kwargs :\n Additional keyword arguments are passed to\n :meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.\n\n Returns\n -------\n `~astropy.visualization.wcsaxes.WCSAxes`\n The overlay object.\n\n Notes\n -----\n Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.\n \"\"\"\n if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:\n lon_space = lat_space = grid_spacing\n elif grid_spacing.size == 2:\n lon_space, lat_space = grid_spacing\n else:\n raise ValueError(\n 'grid_spacing must be a Quantity of length one or two.')\n if system == 'stonyhurst':\n overlay = axes.get_coords_overlay(HeliographicStonyhurst(obstime=\n obstime, rsun=rsun))\n elif system == 'carrington':\n overlay = axes.get_coords_overlay(HeliographicCarrington(obstime=\n obstime, observer=observer, rsun=rsun))\n else:\n raise ValueError(\n f\"system must be 'stonyhurst' or 'carrington' (got '{system}')\")\n c1, c2 = axes.coords\n c1.set_ticks_position('bl')\n c2.set_ticks_position('bl')\n lon = overlay[0]\n lat = overlay[1]\n if 
Version(astropy_version) >= Version('5.3.dev'):\n lon.coord_wrap = 180 * u.deg\n else:\n lon.coord_wrap = 180\n lon.set_major_formatter('dd')\n if annotate:\n lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)\n lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)\n lon.set_ticks_position('tr')\n lat.set_ticks_position('tr')\n else:\n lat.set_ticks_visible(False)\n lon.set_ticks_visible(False)\n lat.set_ticklabel_visible(False)\n lon.set_ticklabel_visible(False)\n grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}\n grid_kw.update(kwargs)\n tick_color = grid_kw['color'] if 'color' in kwargs else 'k'\n lon.set_ticks(spacing=lon_space, color=tick_color)\n lat.set_ticks(spacing=lat_space, color=tick_color)\n overlay.grid(**grid_kw)\n if axes.title:\n x, y = axes.title.get_position()\n axes.title.set_position([x, y + 0.08])\n return overlay\n",
"step-2": "<mask token>\n\n\ndef is_wcsaxes(axes):\n \"\"\"\n Tests a `matplotlib.axes.Axes` object to see if it is an instance of\n `~astropy.visualization.wcsaxes.WCSAxes`.\n\n Parameters\n ----------\n axes : `matplotlib.axes`\n Axes to test.\n\n Returns\n -------\n `bool`\n Result of the test.\n \"\"\"\n return isinstance(axes, wcsaxes.WCSAxes)\n\n\ndef gca_wcs(wcs, fig=None, slices=None):\n \"\"\"\n Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`\n if ``fig`` has no axes.\n\n Parameters\n ----------\n wcs : `astropy.wcs.WCS`\n A `~astropy.wcs.WCS` object used to create a new axes.\n fig : `matplotlib.figure.Figure`\n The figure in which to check for the axes. If ``None``, the current\n figure is used (or a new one created if there are no current figures).\n slices : `tuple`\n ``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe\n which two dimensions of the `~astropy.wcs.WCS` object are being plotted.\n This slices the multidimensional wcs object in the way it needs to be sliced.\n\n Returns\n -------\n `matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`\n The current axes, or a new one if created.\n \"\"\"\n if not fig:\n fig = plt.gcf()\n if not len(fig.get_axes()):\n ax = plt.axes(projection=wcs, slices=slices)\n else:\n ax = plt.gca()\n return ax\n\n\ndef get_world_transform(axes):\n \"\"\"\n Get the transformation to world coordinates.\n\n If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this\n returns the transform to the \"world\" coordinates, otherwise it returns\n the transform to the matplotlib data coordinates, which are assumed to be in\n world coordinates.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`\n The axes to get the transform from.\n\n Returns\n -------\n `~matplotlib.transforms.CompositeGenericTransform`\n The transformation object.\n \"\"\"\n if is_wcsaxes(axes):\n transform = 
axes.get_transform('world')\n else:\n transform = axes.transData\n return transform\n\n\ndef default_wcs_grid(axes):\n \"\"\"\n Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid\n formatting.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world\n coordinate grid on.\n \"\"\"\n axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',\n linewidth=0.5)\n\n\[email protected]_input\ndef wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg=10 * u.deg,\n annotate=True, obstime=None, rsun=None, observer=None, system=\n 'stonyhurst', **kwargs):\n \"\"\"\n Create a heliographic overlay using\n `~astropy.visualization.wcsaxes.WCSAxes`.\n\n Will draw a grid and label the top axes.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.\n grid_spacing: `~astropy.units.Quantity`\n Spacing for longitude and latitude grid in degrees.\n annotate : `bool`\n Passing `False` disables the axes labels and the ticks on the top and right axes.\n obstime : `~astropy.time.Time`\n The ``obstime`` to use for the grid coordinate frame.\n rsun : `~astropy.units.Quantity`\n The ``rsun`` to use for the grid coordinate frame.\n observer : `~astropy.coordinates.SkyCoord`\n The ``observer`` to use for the grid coordinate frame. Only used for\n Carrington coordinates.\n system : str\n Coordinate system for the grid. 
Must be 'stonyhurst' or 'carrington'.\n If 'carrington', the ``observer`` keyword argument must be specified.\n kwargs :\n Additional keyword arguments are passed to\n :meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.\n\n Returns\n -------\n `~astropy.visualization.wcsaxes.WCSAxes`\n The overlay object.\n\n Notes\n -----\n Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.\n \"\"\"\n if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:\n lon_space = lat_space = grid_spacing\n elif grid_spacing.size == 2:\n lon_space, lat_space = grid_spacing\n else:\n raise ValueError(\n 'grid_spacing must be a Quantity of length one or two.')\n if system == 'stonyhurst':\n overlay = axes.get_coords_overlay(HeliographicStonyhurst(obstime=\n obstime, rsun=rsun))\n elif system == 'carrington':\n overlay = axes.get_coords_overlay(HeliographicCarrington(obstime=\n obstime, observer=observer, rsun=rsun))\n else:\n raise ValueError(\n f\"system must be 'stonyhurst' or 'carrington' (got '{system}')\")\n c1, c2 = axes.coords\n c1.set_ticks_position('bl')\n c2.set_ticks_position('bl')\n lon = overlay[0]\n lat = overlay[1]\n if Version(astropy_version) >= Version('5.3.dev'):\n lon.coord_wrap = 180 * u.deg\n else:\n lon.coord_wrap = 180\n lon.set_major_formatter('dd')\n if annotate:\n lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)\n lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)\n lon.set_ticks_position('tr')\n lat.set_ticks_position('tr')\n else:\n lat.set_ticks_visible(False)\n lon.set_ticks_visible(False)\n lat.set_ticklabel_visible(False)\n lon.set_ticklabel_visible(False)\n grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}\n grid_kw.update(kwargs)\n tick_color = grid_kw['color'] if 'color' in kwargs else 'k'\n lon.set_ticks(spacing=lon_space, color=tick_color)\n lat.set_ticks(spacing=lat_space, color=tick_color)\n overlay.grid(**grid_kw)\n if axes.title:\n x, y = 
axes.title.get_position()\n axes.title.set_position([x, y + 0.08])\n return overlay\n",
"step-3": "<mask token>\n__all__ = ['is_wcsaxes', 'gca_wcs', 'get_world_transform',\n 'default_wcs_grid', 'wcsaxes_heliographic_overlay']\n\n\ndef is_wcsaxes(axes):\n \"\"\"\n Tests a `matplotlib.axes.Axes` object to see if it is an instance of\n `~astropy.visualization.wcsaxes.WCSAxes`.\n\n Parameters\n ----------\n axes : `matplotlib.axes`\n Axes to test.\n\n Returns\n -------\n `bool`\n Result of the test.\n \"\"\"\n return isinstance(axes, wcsaxes.WCSAxes)\n\n\ndef gca_wcs(wcs, fig=None, slices=None):\n \"\"\"\n Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`\n if ``fig`` has no axes.\n\n Parameters\n ----------\n wcs : `astropy.wcs.WCS`\n A `~astropy.wcs.WCS` object used to create a new axes.\n fig : `matplotlib.figure.Figure`\n The figure in which to check for the axes. If ``None``, the current\n figure is used (or a new one created if there are no current figures).\n slices : `tuple`\n ``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe\n which two dimensions of the `~astropy.wcs.WCS` object are being plotted.\n This slices the multidimensional wcs object in the way it needs to be sliced.\n\n Returns\n -------\n `matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`\n The current axes, or a new one if created.\n \"\"\"\n if not fig:\n fig = plt.gcf()\n if not len(fig.get_axes()):\n ax = plt.axes(projection=wcs, slices=slices)\n else:\n ax = plt.gca()\n return ax\n\n\ndef get_world_transform(axes):\n \"\"\"\n Get the transformation to world coordinates.\n\n If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this\n returns the transform to the \"world\" coordinates, otherwise it returns\n the transform to the matplotlib data coordinates, which are assumed to be in\n world coordinates.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`\n The axes to get the transform from.\n\n Returns\n -------\n 
`~matplotlib.transforms.CompositeGenericTransform`\n The transformation object.\n \"\"\"\n if is_wcsaxes(axes):\n transform = axes.get_transform('world')\n else:\n transform = axes.transData\n return transform\n\n\ndef default_wcs_grid(axes):\n \"\"\"\n Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid\n formatting.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world\n coordinate grid on.\n \"\"\"\n axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',\n linewidth=0.5)\n\n\[email protected]_input\ndef wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg=10 * u.deg,\n annotate=True, obstime=None, rsun=None, observer=None, system=\n 'stonyhurst', **kwargs):\n \"\"\"\n Create a heliographic overlay using\n `~astropy.visualization.wcsaxes.WCSAxes`.\n\n Will draw a grid and label the top axes.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.\n grid_spacing: `~astropy.units.Quantity`\n Spacing for longitude and latitude grid in degrees.\n annotate : `bool`\n Passing `False` disables the axes labels and the ticks on the top and right axes.\n obstime : `~astropy.time.Time`\n The ``obstime`` to use for the grid coordinate frame.\n rsun : `~astropy.units.Quantity`\n The ``rsun`` to use for the grid coordinate frame.\n observer : `~astropy.coordinates.SkyCoord`\n The ``observer`` to use for the grid coordinate frame. Only used for\n Carrington coordinates.\n system : str\n Coordinate system for the grid. 
Must be 'stonyhurst' or 'carrington'.\n If 'carrington', the ``observer`` keyword argument must be specified.\n kwargs :\n Additional keyword arguments are passed to\n :meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.\n\n Returns\n -------\n `~astropy.visualization.wcsaxes.WCSAxes`\n The overlay object.\n\n Notes\n -----\n Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.\n \"\"\"\n if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:\n lon_space = lat_space = grid_spacing\n elif grid_spacing.size == 2:\n lon_space, lat_space = grid_spacing\n else:\n raise ValueError(\n 'grid_spacing must be a Quantity of length one or two.')\n if system == 'stonyhurst':\n overlay = axes.get_coords_overlay(HeliographicStonyhurst(obstime=\n obstime, rsun=rsun))\n elif system == 'carrington':\n overlay = axes.get_coords_overlay(HeliographicCarrington(obstime=\n obstime, observer=observer, rsun=rsun))\n else:\n raise ValueError(\n f\"system must be 'stonyhurst' or 'carrington' (got '{system}')\")\n c1, c2 = axes.coords\n c1.set_ticks_position('bl')\n c2.set_ticks_position('bl')\n lon = overlay[0]\n lat = overlay[1]\n if Version(astropy_version) >= Version('5.3.dev'):\n lon.coord_wrap = 180 * u.deg\n else:\n lon.coord_wrap = 180\n lon.set_major_formatter('dd')\n if annotate:\n lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)\n lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)\n lon.set_ticks_position('tr')\n lat.set_ticks_position('tr')\n else:\n lat.set_ticks_visible(False)\n lon.set_ticks_visible(False)\n lat.set_ticklabel_visible(False)\n lon.set_ticklabel_visible(False)\n grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}\n grid_kw.update(kwargs)\n tick_color = grid_kw['color'] if 'color' in kwargs else 'k'\n lon.set_ticks(spacing=lon_space, color=tick_color)\n lat.set_ticks(spacing=lat_space, color=tick_color)\n overlay.grid(**grid_kw)\n if axes.title:\n x, y = 
axes.title.get_position()\n axes.title.set_position([x, y + 0.08])\n return overlay\n",
"step-4": "<mask token>\nimport matplotlib.pyplot as plt\nfrom packaging.version import Version\nimport astropy.units as u\nfrom astropy import __version__ as astropy_version\nfrom astropy.visualization import wcsaxes\nfrom sunpy.coordinates import HeliographicCarrington, HeliographicStonyhurst\n__all__ = ['is_wcsaxes', 'gca_wcs', 'get_world_transform',\n 'default_wcs_grid', 'wcsaxes_heliographic_overlay']\n\n\ndef is_wcsaxes(axes):\n \"\"\"\n Tests a `matplotlib.axes.Axes` object to see if it is an instance of\n `~astropy.visualization.wcsaxes.WCSAxes`.\n\n Parameters\n ----------\n axes : `matplotlib.axes`\n Axes to test.\n\n Returns\n -------\n `bool`\n Result of the test.\n \"\"\"\n return isinstance(axes, wcsaxes.WCSAxes)\n\n\ndef gca_wcs(wcs, fig=None, slices=None):\n \"\"\"\n Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`\n if ``fig`` has no axes.\n\n Parameters\n ----------\n wcs : `astropy.wcs.WCS`\n A `~astropy.wcs.WCS` object used to create a new axes.\n fig : `matplotlib.figure.Figure`\n The figure in which to check for the axes. 
If ``None``, the current\n figure is used (or a new one created if there are no current figures).\n slices : `tuple`\n ``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe\n which two dimensions of the `~astropy.wcs.WCS` object are being plotted.\n This slices the multidimensional wcs object in the way it needs to be sliced.\n\n Returns\n -------\n `matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`\n The current axes, or a new one if created.\n \"\"\"\n if not fig:\n fig = plt.gcf()\n if not len(fig.get_axes()):\n ax = plt.axes(projection=wcs, slices=slices)\n else:\n ax = plt.gca()\n return ax\n\n\ndef get_world_transform(axes):\n \"\"\"\n Get the transformation to world coordinates.\n\n If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this\n returns the transform to the \"world\" coordinates, otherwise it returns\n the transform to the matplotlib data coordinates, which are assumed to be in\n world coordinates.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`\n The axes to get the transform from.\n\n Returns\n -------\n `~matplotlib.transforms.CompositeGenericTransform`\n The transformation object.\n \"\"\"\n if is_wcsaxes(axes):\n transform = axes.get_transform('world')\n else:\n transform = axes.transData\n return transform\n\n\ndef default_wcs_grid(axes):\n \"\"\"\n Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid\n formatting.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world\n coordinate grid on.\n \"\"\"\n axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',\n linewidth=0.5)\n\n\[email protected]_input\ndef wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg=10 * u.deg,\n annotate=True, obstime=None, rsun=None, observer=None, system=\n 'stonyhurst', **kwargs):\n \"\"\"\n Create a heliographic overlay using\n 
`~astropy.visualization.wcsaxes.WCSAxes`.\n\n Will draw a grid and label the top axes.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.\n grid_spacing: `~astropy.units.Quantity`\n Spacing for longitude and latitude grid in degrees.\n annotate : `bool`\n Passing `False` disables the axes labels and the ticks on the top and right axes.\n obstime : `~astropy.time.Time`\n The ``obstime`` to use for the grid coordinate frame.\n rsun : `~astropy.units.Quantity`\n The ``rsun`` to use for the grid coordinate frame.\n observer : `~astropy.coordinates.SkyCoord`\n The ``observer`` to use for the grid coordinate frame. Only used for\n Carrington coordinates.\n system : str\n Coordinate system for the grid. Must be 'stonyhurst' or 'carrington'.\n If 'carrington', the ``observer`` keyword argument must be specified.\n kwargs :\n Additional keyword arguments are passed to\n :meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.\n\n Returns\n -------\n `~astropy.visualization.wcsaxes.WCSAxes`\n The overlay object.\n\n Notes\n -----\n Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.\n \"\"\"\n if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:\n lon_space = lat_space = grid_spacing\n elif grid_spacing.size == 2:\n lon_space, lat_space = grid_spacing\n else:\n raise ValueError(\n 'grid_spacing must be a Quantity of length one or two.')\n if system == 'stonyhurst':\n overlay = axes.get_coords_overlay(HeliographicStonyhurst(obstime=\n obstime, rsun=rsun))\n elif system == 'carrington':\n overlay = axes.get_coords_overlay(HeliographicCarrington(obstime=\n obstime, observer=observer, rsun=rsun))\n else:\n raise ValueError(\n f\"system must be 'stonyhurst' or 'carrington' (got '{system}')\")\n c1, c2 = axes.coords\n c1.set_ticks_position('bl')\n c2.set_ticks_position('bl')\n lon = overlay[0]\n lat = overlay[1]\n if 
Version(astropy_version) >= Version('5.3.dev'):\n lon.coord_wrap = 180 * u.deg\n else:\n lon.coord_wrap = 180\n lon.set_major_formatter('dd')\n if annotate:\n lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)\n lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)\n lon.set_ticks_position('tr')\n lat.set_ticks_position('tr')\n else:\n lat.set_ticks_visible(False)\n lon.set_ticks_visible(False)\n lat.set_ticklabel_visible(False)\n lon.set_ticklabel_visible(False)\n grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}\n grid_kw.update(kwargs)\n tick_color = grid_kw['color'] if 'color' in kwargs else 'k'\n lon.set_ticks(spacing=lon_space, color=tick_color)\n lat.set_ticks(spacing=lat_space, color=tick_color)\n overlay.grid(**grid_kw)\n if axes.title:\n x, y = axes.title.get_position()\n axes.title.set_position([x, y + 0.08])\n return overlay\n",
"step-5": "\"\"\"\nThis module provides functions to make WCSAxes work in SunPy.\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom packaging.version import Version\n\nimport astropy.units as u\nfrom astropy import __version__ as astropy_version\nfrom astropy.visualization import wcsaxes\n\nfrom sunpy.coordinates import HeliographicCarrington, HeliographicStonyhurst\n\n__all__ = [\"is_wcsaxes\", \"gca_wcs\", \"get_world_transform\",\n \"default_wcs_grid\", \"wcsaxes_heliographic_overlay\"]\n\n\ndef is_wcsaxes(axes):\n \"\"\"\n Tests a `matplotlib.axes.Axes` object to see if it is an instance of\n `~astropy.visualization.wcsaxes.WCSAxes`.\n\n Parameters\n ----------\n axes : `matplotlib.axes`\n Axes to test.\n\n Returns\n -------\n `bool`\n Result of the test.\n \"\"\"\n return isinstance(axes, wcsaxes.WCSAxes)\n\n\ndef gca_wcs(wcs, fig=None, slices=None):\n \"\"\"\n Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`\n if ``fig`` has no axes.\n\n Parameters\n ----------\n wcs : `astropy.wcs.WCS`\n A `~astropy.wcs.WCS` object used to create a new axes.\n fig : `matplotlib.figure.Figure`\n The figure in which to check for the axes. 
If ``None``, the current\n figure is used (or a new one created if there are no current figures).\n slices : `tuple`\n ``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe\n which two dimensions of the `~astropy.wcs.WCS` object are being plotted.\n This slices the multidimensional wcs object in the way it needs to be sliced.\n\n Returns\n -------\n `matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`\n The current axes, or a new one if created.\n \"\"\"\n if not fig:\n fig = plt.gcf()\n\n if not len(fig.get_axes()):\n ax = plt.axes(projection=wcs, slices=slices)\n else:\n ax = plt.gca()\n\n return ax\n\n\ndef get_world_transform(axes):\n \"\"\"\n Get the transformation to world coordinates.\n\n If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this\n returns the transform to the \"world\" coordinates, otherwise it returns\n the transform to the matplotlib data coordinates, which are assumed to be in\n world coordinates.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`\n The axes to get the transform from.\n\n Returns\n -------\n `~matplotlib.transforms.CompositeGenericTransform`\n The transformation object.\n \"\"\"\n if is_wcsaxes(axes):\n transform = axes.get_transform('world')\n else:\n transform = axes.transData\n\n return transform\n\n\ndef default_wcs_grid(axes):\n \"\"\"\n Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid\n formatting.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world\n coordinate grid on.\n \"\"\"\n axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',\n linewidth=0.5)\n\n\[email protected]_input\ndef wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg = 10*u.deg, annotate=True,\n obstime=None, rsun=None, observer=None, system='stonyhurst',\n **kwargs):\n \"\"\"\n Create a heliographic overlay using\n 
`~astropy.visualization.wcsaxes.WCSAxes`.\n\n Will draw a grid and label the top axes.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.\n grid_spacing: `~astropy.units.Quantity`\n Spacing for longitude and latitude grid in degrees.\n annotate : `bool`\n Passing `False` disables the axes labels and the ticks on the top and right axes.\n obstime : `~astropy.time.Time`\n The ``obstime`` to use for the grid coordinate frame.\n rsun : `~astropy.units.Quantity`\n The ``rsun`` to use for the grid coordinate frame.\n observer : `~astropy.coordinates.SkyCoord`\n The ``observer`` to use for the grid coordinate frame. Only used for\n Carrington coordinates.\n system : str\n Coordinate system for the grid. Must be 'stonyhurst' or 'carrington'.\n If 'carrington', the ``observer`` keyword argument must be specified.\n kwargs :\n Additional keyword arguments are passed to\n :meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.\n\n Returns\n -------\n `~astropy.visualization.wcsaxes.WCSAxes`\n The overlay object.\n\n Notes\n -----\n Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.\n \"\"\"\n # Unpack spacing\n if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:\n lon_space = lat_space = grid_spacing\n elif grid_spacing.size == 2:\n lon_space, lat_space = grid_spacing\n else:\n raise ValueError(\"grid_spacing must be a Quantity of length one or two.\")\n\n if system == 'stonyhurst':\n overlay = axes.get_coords_overlay(HeliographicStonyhurst(\n obstime=obstime, rsun=rsun))\n elif system == 'carrington':\n overlay = axes.get_coords_overlay(HeliographicCarrington(\n obstime=obstime, observer=observer, rsun=rsun))\n else:\n raise ValueError(f\"system must be 'stonyhurst' or 'carrington' (got '{system}')\")\n\n # Set the native coordinates to be bottom and left only so they don't share\n # axes with the overlay.\n 
c1, c2 = axes.coords\n c1.set_ticks_position('bl')\n c2.set_ticks_position('bl')\n\n lon = overlay[0]\n lat = overlay[1]\n\n # TODO: Remove when we depend on astropy 5.3\n if Version(astropy_version) >= Version(\"5.3.dev\"):\n lon.coord_wrap = 180 * u.deg\n else:\n lon.coord_wrap = 180\n lon.set_major_formatter('dd')\n\n if annotate:\n lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)\n lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)\n lon.set_ticks_position('tr')\n lat.set_ticks_position('tr')\n else:\n lat.set_ticks_visible(False)\n lon.set_ticks_visible(False)\n lat.set_ticklabel_visible(False)\n lon.set_ticklabel_visible(False)\n\n grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}\n grid_kw.update(kwargs)\n\n # Don't plot white ticks by default (only if explicitly asked)\n tick_color = grid_kw['color'] if 'color' in kwargs else 'k'\n lon.set_ticks(spacing=lon_space, color=tick_color)\n lat.set_ticks(spacing=lat_space, color=tick_color)\n\n overlay.grid(**grid_kw)\n\n if axes.title:\n x, y = axes.title.get_position()\n axes.title.set_position([x, y + 0.08])\n\n return overlay\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#!/usr/bin/python
import xml.dom.minidom
import os
import matplotlib.pyplot as plt
import cPickle as p
import numpy as np
def modifyXML(name, numCar):
    """Parse the Pascal-VOC-style annotation file *name* and append the
    pixel area of every 'Car' bounding box to the list *numCar*.

    Parameters
    ----------
    name : str
        Path to an XML annotation file containing ``<object>`` elements
        with ``<name>``, ``<xmin>``, ``<ymin>``, ``<xmax>``, ``<ymax>``.
    numCar : list
        Output accumulator; mutated in place with float areas computed as
        ``(ymax - ymin) * (xmax - xmin)``.
    """
    dom_tree = xml.dom.minidom.parse(name)
    # Renamed loop variable: the original shadowed the builtin ``object``.
    for obj in dom_tree.getElementsByTagName('object'):
        label = obj.getElementsByTagName('name')[0].childNodes[0].nodeValue
        if label != 'Car':
            continue
        # getElementsByTagName searches recursively, so coordinates nested
        # inside a <bndbox> child are still found from the <object> node.
        xmin = float(obj.getElementsByTagName('xmin')[0].childNodes[0].nodeValue)
        ymin = float(obj.getElementsByTagName('ymin')[0].childNodes[0].nodeValue)
        xmax = float(obj.getElementsByTagName('xmax')[0].childNodes[0].nodeValue)
        ymax = float(obj.getElementsByTagName('ymax')[0].childNodes[0].nodeValue)
        # Bug fix: the original mixed tabs and spaces here, leaving this
        # append at an ambiguous indentation level (a TabError under
        # Python 3).  It is now unambiguously inside the 'Car' branch.
        numCar.append((ymax - ymin) * (xmax - xmin))
dir=os.getcwd()+'/xml'
file=os.listdir(dir)
numCar=[]
for filename in file:
#print filename
if filename[0]!='.':
modifyXML('xml/'+filename,numCar)
num_bins=40
size=len(numCar)
print 'size of numCar = %d'%size
f=open('boxArea.pkl','w')
p.dump(numCar,f)
f.close()
i=0
for x in numCar:
if x>40000:
i=i+1
print 'num of car bigger than %d is %d'%(40000,i)
'''
plt.hist(numCar, num_bins,facecolor='blue', alpha=0.5)
plt.show()
'''
|
normal
|
{
"blob_id": "1c13a9ca3617dc6f1a1f1aa8249cce37062a449b",
"index": 8243,
"step-1": "#!/usr/bin/python\nimport xml.dom.minidom\nimport os\nimport matplotlib.pyplot as plt\nimport cPickle as p\nimport numpy as np\n\ndef modifyXML(name,numCar):\n\tDOMTree = xml.dom.minidom.parse(name)\n\tobjects=DOMTree.getElementsByTagName('object')\n\tfor object in objects:\n\t\tif object.getElementsByTagName('name')[0].childNodes[0].nodeValue =='Car':\n\t\t\txmin=float(object.getElementsByTagName('xmin')[0].childNodes[0].nodeValue)\n\t\t\tymin=float(object.getElementsByTagName('ymin')[0].childNodes[0].nodeValue)\n\t\t\txmax=float(object.getElementsByTagName('xmax')[0].childNodes[0].nodeValue)\n\t\t\tymax=float(object.getElementsByTagName('ymax')[0].childNodes[0].nodeValue)\n numCar.append((ymax-ymin)*(xmax-xmin))\n\ndir=os.getcwd()+'/xml'\nfile=os.listdir(dir)\nnumCar=[]\nfor filename in file:\n\t#print filename\n\tif filename[0]!='.':\n\t\tmodifyXML('xml/'+filename,numCar)\nnum_bins=40\nsize=len(numCar)\nprint 'size of numCar = %d'%size\n\nf=open('boxArea.pkl','w')\np.dump(numCar,f)\nf.close()\n\ni=0\nfor x in numCar:\n if x>40000:\n i=i+1\nprint 'num of car bigger than %d is %d'%(40000,i)\n'''\nplt.hist(numCar, num_bins,facecolor='blue', alpha=0.5)\nplt.show()\n'''\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import json
import requests
from fin import myBuilder, myParser
import time
def open_config():
    """Load the JSON configuration from ``fin/config.json``.

    Returns the parsed config dict when the file exists; otherwise returns
    the literal string ``'no config found'``.  NOTE(review): callers that
    then call ``.get`` on the result will fail in the missing-file case --
    preserved behaviour.
    """
    config_path = 'fin/config.json'
    if not os.path.isfile(config_path):
        return 'no config found'
    print('config found')
    with open(config_path) as handle:
        return json.load(handle)
# Module-level configuration, loaded once at import time.
# NOTE(review): if fin/config.json is missing, open_config() returns the
# string 'no config found' and the .get() calls below raise
# AttributeError -- confirm this is the intended fail-fast behaviour.
conf = open_config()
logfile = conf.get('game_path')
database_path = conf.get('database_path')
application_id = str(conf.get('application_id'))
url = str(conf.get('url'))
def get_local_date():
    """Read the local game log and return its ``dateTime`` field.

    Returns the value of the top-level ``dateTime`` key in the JSON file at
    the module-level ``logfile`` path, or ``None`` (after printing a
    diagnostic) when the file is missing, unreadable, or malformed.
    """
    try:
        with open(logfile) as log_handle:
            # The ``with`` block closes the file; the explicit close() the
            # original carried inside the block was redundant and dropped.
            log_json = json.load(log_handle)
        local_date_time = log_json['dateTime']
        print('LocalDateTime:', local_date_time)
        return local_date_time
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; all other failures keep the old
        # best-effort behaviour.
        print('no logfile found')
def get_remote_date():
    """Fetch the remote date marker from the module-level ``url``.

    Returns the decoded JSON payload on success, otherwise ``None`` after
    printing a diagnostic.
    """
    try:
        response = requests.get(url)
        answer = response.json()
        if answer is not None:
            print('RemoteDate:', answer)
            return answer
        print('no remote date found')
    except Exception:
        # Narrowed from a bare ``except:`` so interpreter-exit signals are
        # not swallowed; network/JSON failures are still reported and
        # tolerated as before.
        print('no remote connection found')
def build_exportData(LocalDate):
    """Build and return the export payload for *LocalDate* via ``myBuilder``."""
    print('exportData:')
    return myBuilder.build_export(LocalDate)
def post_Result(Result):
    """POST *Result* as JSON to the module-level ``url``.

    Prints the decoded response body on success; any failure (connection
    error, bad response body) is reported and swallowed.  Returns ``None``.
    """
    try:
        res = requests.post(url, json=Result)
        if res.ok:
            print(res.json())
    except Exception:
        # Narrowed from a bare ``except:`` so interpreter-exit signals are
        # not swallowed.
        print('error POST request')
def compare_dates():
    """Run one synchronisation pass between the local log and the remote.

    Compares the stringified remote and local dates; when they differ,
    re-parses the local data (``myParser.main_update``), builds an export
    payload for the local date, POSTs it, and pauses 10 seconds.
    """
    RemoteDate = str(get_remote_date())
    LocalDate = str(get_local_date())
    if LocalDate == RemoteDate:
        print('dates match')
        return
    print('no match')
    print('LocalDate:', LocalDate)
    print('RemoteDate:', RemoteDate)
    try:
        print(myParser.main_update())
        Result = build_exportData(LocalDate)
        post_Result(Result)
        time.sleep(10)
    except Exception:
        # Narrowed from a bare ``except:`` so Ctrl-C can still stop the
        # polling loop; parse/build failures are reported and skipped.
        print('error parsing')
def loop():
    """Poll forever: run one synchronisation pass, then sleep 5 seconds."""
    while True:
        compare_dates()
        time.sleep(5)
# def main():

# Start the polling loop immediately at import/run time.
# NOTE(review): there is no ``if __name__ == '__main__'`` guard, so merely
# importing this module blocks forever in loop() -- confirm intended.
loop()
|
normal
|
{
"blob_id": "e690587c9b056f8d5a1be6dd062a2aa32e215f50",
"index": 2328,
"step-1": "<mask token>\n\n\ndef open_config():\n if os.path.isfile('fin/config.json') != True:\n return 'no config found'\n else:\n print('config found')\n with open('fin/config.json') as conf:\n conf = json.load(conf)\n return conf\n\n\n<mask token>\n\n\ndef get_local_date():\n try:\n with open(logfile) as Log:\n LogJSON = json.load(Log)\n Log.close()\n LocalDateTime = LogJSON['dateTime']\n print('LocalDateTime:', LocalDateTime)\n return LocalDateTime\n except:\n print('no logfile found')\n\n\n<mask token>\n\n\ndef build_exportData(LocalDate):\n print('exportData:')\n exportData = myBuilder.build_export(LocalDate)\n return exportData\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef open_config():\n if os.path.isfile('fin/config.json') != True:\n return 'no config found'\n else:\n print('config found')\n with open('fin/config.json') as conf:\n conf = json.load(conf)\n return conf\n\n\n<mask token>\n\n\ndef get_local_date():\n try:\n with open(logfile) as Log:\n LogJSON = json.load(Log)\n Log.close()\n LocalDateTime = LogJSON['dateTime']\n print('LocalDateTime:', LocalDateTime)\n return LocalDateTime\n except:\n print('no logfile found')\n\n\n<mask token>\n\n\ndef build_exportData(LocalDate):\n print('exportData:')\n exportData = myBuilder.build_export(LocalDate)\n return exportData\n\n\ndef post_Result(Result):\n try:\n res = requests.post(url, json=Result)\n if res.ok:\n print(res.json())\n except:\n print('error POST request')\n\n\ndef compare_dates():\n RemoteDate = str(get_remote_date())\n LocalDate = str(get_local_date())\n if LocalDate == RemoteDate:\n print('dates match')\n else:\n print('no match')\n print('LocalDate:', LocalDate)\n print('RemoteDate:', RemoteDate)\n try:\n print(myParser.main_update())\n Result = build_exportData(LocalDate)\n post_Result(Result)\n time.sleep(10)\n except:\n print('error parsing')\n\n\ndef loop():\n while True:\n compare_dates()\n time.sleep(5)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef open_config():\n if os.path.isfile('fin/config.json') != True:\n return 'no config found'\n else:\n print('config found')\n with open('fin/config.json') as conf:\n conf = json.load(conf)\n return conf\n\n\n<mask token>\n\n\ndef get_local_date():\n try:\n with open(logfile) as Log:\n LogJSON = json.load(Log)\n Log.close()\n LocalDateTime = LogJSON['dateTime']\n print('LocalDateTime:', LocalDateTime)\n return LocalDateTime\n except:\n print('no logfile found')\n\n\ndef get_remote_date():\n try:\n r = requests.get(url)\n answer = r.json()\n if answer is not None:\n print('RemoteDate:', answer)\n return answer\n else:\n print('no remote date found')\n except:\n print('no remote connection found')\n\n\ndef build_exportData(LocalDate):\n print('exportData:')\n exportData = myBuilder.build_export(LocalDate)\n return exportData\n\n\ndef post_Result(Result):\n try:\n res = requests.post(url, json=Result)\n if res.ok:\n print(res.json())\n except:\n print('error POST request')\n\n\ndef compare_dates():\n RemoteDate = str(get_remote_date())\n LocalDate = str(get_local_date())\n if LocalDate == RemoteDate:\n print('dates match')\n else:\n print('no match')\n print('LocalDate:', LocalDate)\n print('RemoteDate:', RemoteDate)\n try:\n print(myParser.main_update())\n Result = build_exportData(LocalDate)\n post_Result(Result)\n time.sleep(10)\n except:\n print('error parsing')\n\n\ndef loop():\n while True:\n compare_dates()\n time.sleep(5)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef open_config():\n if os.path.isfile('fin/config.json') != True:\n return 'no config found'\n else:\n print('config found')\n with open('fin/config.json') as conf:\n conf = json.load(conf)\n return conf\n\n\nconf = open_config()\nlogfile = conf.get('game_path')\ndatabase_path = conf.get('database_path')\napplication_id = str(conf.get('application_id'))\nurl = str(conf.get('url'))\n\n\ndef get_local_date():\n try:\n with open(logfile) as Log:\n LogJSON = json.load(Log)\n Log.close()\n LocalDateTime = LogJSON['dateTime']\n print('LocalDateTime:', LocalDateTime)\n return LocalDateTime\n except:\n print('no logfile found')\n\n\ndef get_remote_date():\n try:\n r = requests.get(url)\n answer = r.json()\n if answer is not None:\n print('RemoteDate:', answer)\n return answer\n else:\n print('no remote date found')\n except:\n print('no remote connection found')\n\n\ndef build_exportData(LocalDate):\n print('exportData:')\n exportData = myBuilder.build_export(LocalDate)\n return exportData\n\n\ndef post_Result(Result):\n try:\n res = requests.post(url, json=Result)\n if res.ok:\n print(res.json())\n except:\n print('error POST request')\n\n\ndef compare_dates():\n RemoteDate = str(get_remote_date())\n LocalDate = str(get_local_date())\n if LocalDate == RemoteDate:\n print('dates match')\n else:\n print('no match')\n print('LocalDate:', LocalDate)\n print('RemoteDate:', RemoteDate)\n try:\n print(myParser.main_update())\n Result = build_exportData(LocalDate)\n post_Result(Result)\n time.sleep(10)\n except:\n print('error parsing')\n\n\ndef loop():\n while True:\n compare_dates()\n time.sleep(5)\n\n\nloop()\n",
"step-5": "import os\nimport json\nimport requests\nfrom fin import myBuilder, myParser\nimport time\n\n\ndef open_config():\n\tif os.path.isfile('fin/config.json') != True:\n\t\treturn ('no config found')\n\telse:\n\t\tprint('config found')\n\n\twith open('fin/config.json') as conf:\n\t\tconf = json.load(conf)\n\t\treturn conf\n\n\nconf = open_config()\nlogfile = conf.get('game_path')\ndatabase_path = conf.get('database_path')\napplication_id = str(conf.get('application_id'))\nurl = str(conf.get('url'))\n\n\ndef get_local_date():\n\ttry:\n\t\twith open(logfile) as Log:\n\t\t\tLogJSON = json.load(Log)\n\t\t\tLog.close()\n\t\t\tLocalDateTime = LogJSON['dateTime']\n\t\t\tprint('LocalDateTime:', LocalDateTime)\n\t\t\treturn LocalDateTime\n\texcept:\n\t\tprint('no logfile found')\n\n\ndef get_remote_date():\n\ttry:\n\t\tr = requests.get(url)\n\t\tanswer = r.json()\n\t\tif answer is not None:\n\t\t\tprint('RemoteDate:', answer)\n\t\t\treturn answer\n\t\telse:\n\t\t\tprint('no remote date found')\n\texcept:\n\t\tprint('no remote connection found')\n\n\ndef build_exportData(LocalDate):\n\tprint('exportData:')\n\texportData = myBuilder.build_export(LocalDate)\n\treturn (exportData)\n\n\ndef post_Result(Result):\n\ttry:\n\t\tres = requests.post(url, json=Result)\n\t\tif res.ok:\n\t\t\tprint(res.json())\n\texcept:\n\t\tprint('error POST request')\n\n\ndef compare_dates():\n\tRemoteDate = str(get_remote_date())\n\tLocalDate = str(get_local_date())\n\n\tif LocalDate == RemoteDate:\n\t\tprint('dates match')\n\telse:\n\t\tprint('no match')\n\t\tprint('LocalDate:', LocalDate)\n\t\tprint('RemoteDate:', RemoteDate)\n\n\t\ttry:\n\t\t\tprint(myParser.main_update())\n\t\t\tResult = build_exportData(LocalDate)\n\t\t\tpost_Result(Result)\n\t\t\ttime.sleep(10)\n\t\texcept:\n\t\t\tprint('error parsing')\n\n\ndef loop():\n\twhile True:\n\t\tcompare_dates()\n\t\ttime.sleep(5)\n\n\n# def main():\n\nloop()\n",
"step-ids": [
3,
6,
7,
9,
11
]
}
|
[
3,
6,
7,
9,
11
] |
from __future__ import unicode_literals
from functools import partial
from django.contrib.auth import get_user_model
from .default_settings import settings
from . import signals
class AuditMiddleware(object):
    """
    Middleware that attaches the requesting user (and remote address/host)
    to ModelChange objects via the ``audit_presave`` signal.  Independent of
    request logging and usable separately.
    """

    def process_request(self, request, *args, **kwargs):
        """Connect a per-request ``audit_presave`` handler carrying request metadata."""
        if not settings.CHANGE_LOGGING:
            return

        user = getattr(request, 'user', None)
        if user and not user.is_authenticated():
            user = None

        # Collect the extra kwargs the signal handler will attach to changes.
        extra = {}
        if user and isinstance(user, get_user_model()):
            extra['user'] = user
        remote_addr = request.META.get('REMOTE_ADDR')
        if remote_addr:
            extra['remote_addr'] = remote_addr
        remote_host = request.META.get('REMOTE_HOST')
        if remote_host:
            extra['remote_host'] = remote_host

        # Keep a strong reference on the request; its lifetime is sane here.
        request._handler_func = partial(self.pre_action_handler, update_kwargs=extra)
        signals.audit_presave.connect(
            request._handler_func,
            dispatch_uid=(settings.DISPATCH_UID, request),
        )

    def process_response(self, request, response):
        """Disconnect this request's handler.

        Runs even when change logging is disabled, in case it was toggled
        off after the signal was connected.
        """
        signals.audit_presave.disconnect(dispatch_uid=(settings.DISPATCH_UID, request))
        return response

    def pre_action_handler(self, sender, model_instance, audit_meta,
                           update_kwargs=None, **kwargs):
        """Merge the request-derived kwargs into the audit metadata, when auditing is on."""
        if not audit_meta:
            return
        if getattr(audit_meta, 'audit') and update_kwargs is not None:
            audit_meta.update_additional_kwargs(update_kwargs)
|
normal
|
{
"blob_id": "0e03a3b3401075384e580bc2bb8af1a106f1d238",
"index": 2141,
"step-1": "<mask token>\n\n\nclass AuditMiddleware(object):\n <mask token>\n <mask token>\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AuditMiddleware(object):\n <mask token>\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n user = getattr(request, 'user', None)\n if user and not user.is_authenticated():\n user = None\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n request._handler_func = partial(self.pre_action_handler,\n update_kwargs=update_kwargs)\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(\n settings.DISPATCH_UID, request))\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta,\n update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit'\n ) and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-3": "<mask token>\n\n\nclass AuditMiddleware(object):\n \"\"\"\n middleware to add the user from requests to ModelChange objects.\n This is independent of request logging and can be used separately.\n \"\"\"\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n user = getattr(request, 'user', None)\n if user and not user.is_authenticated():\n user = None\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n request._handler_func = partial(self.pre_action_handler,\n update_kwargs=update_kwargs)\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(\n settings.DISPATCH_UID, request))\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta,\n update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit'\n ) and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-4": "from __future__ import unicode_literals\nfrom functools import partial\nfrom django.contrib.auth import get_user_model\nfrom .default_settings import settings\nfrom . import signals\n\n\nclass AuditMiddleware(object):\n \"\"\"\n middleware to add the user from requests to ModelChange objects.\n This is independent of request logging and can be used separately.\n \"\"\"\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n user = getattr(request, 'user', None)\n if user and not user.is_authenticated():\n user = None\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n request._handler_func = partial(self.pre_action_handler,\n update_kwargs=update_kwargs)\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(\n settings.DISPATCH_UID, request))\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta,\n update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit'\n ) and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-5": "from __future__ import unicode_literals\n\nfrom functools import partial\nfrom django.contrib.auth import get_user_model\n\nfrom .default_settings import settings\nfrom . import signals\n\n\nclass AuditMiddleware(object):\n \"\"\"\n middleware to add the user from requests to ModelChange objects.\n This is independent of request logging and can be used separately.\n \"\"\"\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n\n user = getattr(request, 'user', None)\n\n if user and not user.is_authenticated():\n user = None\n\n # build kwargs to pass to the signal handler\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n\n # keep the strong ref on the request, its a sane lifetime\n request._handler_func = partial(self.pre_action_handler, update_kwargs=update_kwargs)\n\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(settings.DISPATCH_UID, request,),)\n\n def process_response(self, request, response):\n # disconnect signals for this request\n # runs even if change logging is disabled in case it was disabled after the signal was created\n signals.audit_presave.disconnect(dispatch_uid=(settings.DISPATCH_UID, request,))\n\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta, update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit') and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# Created by Yuexiong Ding
# Date: 2018/9/4
# Description:
|
normal
|
{
"blob_id": "ddb139fa3fbfa1218459e3865150465a44a03bea",
"index": 6306,
"step-1": "# Created by Yuexiong Ding\n# Date: 2018/9/4\n# Description: \n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
'''
Created on May 17, 2016
@author: Shauryadeep Chaudhuri
'''
import json
import tornado
from engine import Constants as c
from engine.ResultGenerator import ResultGenerator
from ..ServerLogger import ServerLogger
class GetFromURL(tornado.web.RequestHandler):
    """
    Handler that fetches the data requested (index, schema, entry, query)
    from the URL and responds with the engine's result.
    """

    def initialize(self):
        """Attach the shared server logger to this handler instance."""
        self.logger = ServerLogger().getLogger()

    def get(self, index=None, schema=None, entry=None, query=None):
        """Serve GET: translate the URL path pieces into an internal query
        dict, run it through the engine, and write back the result (or the
        error message on failure)."""
        # NOTE(review): the ``query`` URL argument is immediately shadowed
        # by the dict below and therefore never consulted -- preserved
        # as-is; confirm whether it was meant to be used.
        query = {}
        generator = ResultGenerator()
        query[c.OPERATION] = c.GET
        if index:
            query[c.INDEX] = index
        if schema:
            query[c.SCHEMA] = schema
        if entry:
            query[c.ENTRY] = entry

        self.logger.debug('Internal Query Generated' + str(query))

        try:
            result = str(generator.processQuery(json.dumps(query)))
            self.logger.info('Result fetched:' + result)
            self.write(result)
        except Exception as e:
            self.logger.error('Error', exc_info=True)
            self.write('Error: ' + str(e))
|
normal
|
{
"blob_id": "5a13c7e3be8a0b5f3baf7106a938fc97f078c5bc",
"index": 7335,
"step-1": "<mask token>\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n <mask token>\n <mask token>\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n",
"step-2": "<mask token>\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n <mask token>\n\n def initialize(self):\n self.logger = ServerLogger().getLogger()\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n",
"step-3": "<mask token>\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n \"\"\"\n This class fetches the data requested like index,schema,entry,query from the url and responds with the result\n \"\"\"\n\n def initialize(self):\n self.logger = ServerLogger().getLogger()\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n",
"step-4": "<mask token>\nimport json\nimport tornado\nfrom engine import Constants as c\nfrom engine.ResultGenerator import ResultGenerator\nfrom ..ServerLogger import ServerLogger\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n \"\"\"\n This class fetches the data requested like index,schema,entry,query from the url and responds with the result\n \"\"\"\n\n def initialize(self):\n self.logger = ServerLogger().getLogger()\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n",
"step-5": "'''\r\nCreated on May 17, 2016\r\n\r\n@author: Shauryadeep Chaudhuri\r\n'''\r\n\r\nimport json\r\n\r\nimport tornado\r\n\r\nfrom engine import Constants as c\r\nfrom engine.ResultGenerator import ResultGenerator\r\nfrom ..ServerLogger import ServerLogger\r\n\r\n\r\nclass GetFromURL(tornado.web.RequestHandler):\r\n '''\r\n This class fetches the data requested like index,schema,entry,query from the url and responds with the result\r\n '''\r\n def initialize(self):\r\n self.logger = ServerLogger().getLogger()\r\n \r\n def get(self, index=None, schema=None, entry=None, query=None):\r\n\r\n query = dict()\r\n\r\n resultGenerator = ResultGenerator()\r\n\r\n query[c.OPERATION] = c.GET\r\n\r\n if index:\r\n query[c.INDEX] = index\r\n if schema:\r\n query[c.SCHEMA] = schema\r\n if entry:\r\n query[c.ENTRY] = entry\r\n \r\n self.logger.debug(\"Internal Query Generated\"+str(query))\r\n \r\n try:\r\n result = str(resultGenerator.processQuery(json.dumps(query)))\r\n \r\n self.logger.info(\"Result fetched:\" + result)\r\n \r\n self.write(result)\r\n except Exception as e:\r\n self.logger.error('Error', exc_info=True)\r\n \r\n self.write(\"Error: \" + str(e))\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from aec.apps.vocabulary.serializers import DictionarySerializer
from aec.apps.vocabulary.models import Word
from aec.apps.library.serializers import LibrarySerializer
from aec.apps.library.models import Library
class Command(BaseCommand):
    """Management command: load a vocabulary CSV into the Word/Library tables.

    Requires ``--level``, ``--lesson`` and ``--file``; the file is resolved
    under ``<BASE_DIR>/data/``.  Each CSV row becomes a ``Word`` linked to
    the ``Library`` for the given level/lesson (created on demand).
    """
    args = ''
    help = 'load vocabulary from csv_file'

    def __init__(self, *args, **kwargs):
        # Slot for the parsed CLI options so print_info can consult them.
        super(Command, self).__init__(*args, **kwargs)
        self.input_options = None

    def add_arguments(self, parser):
        """Register the command-line options."""
        parser.add_argument(
            '-p', '--print',
            default=False,
            action='store_true',
            dest='print',
            help='Print info.'
        )
        parser.add_argument(
            '-f', '--file',
            dest='file',
            help='File for load to db.'
        )
        parser.add_argument(
            '--level',
            dest='level',
            help='Level for data.'
        )
        parser.add_argument(
            '--lesson',
            dest='lesson',
            help='Lesson for data.'
        )

    def print_info(self, template='', context=None):
        """Format and print *template* with *context*, only when --print was given."""
        if self.input_options['print']:
            context = context or {}
            # Python 2 print statement, as used throughout this command.
            print str(template).format(**context)

    def handle(self, *args, **options):
        """Validate options, resolve the CSV file, and import its rows."""

        self.input_options = options

        # All three options are mandatory; fail fast with a clear message.
        if not options['level']:
            raise CommandError("Option `--level=...` must be specified.")

        if not options['lesson']:
            raise CommandError("Option `--lesson=...` must be specified.")

        if not options['file']:
            raise CommandError("Option `--file=...` must be specified.")

        file_path = os.path.join(settings.BASE_DIR,
                                 'data/{f}'.format(f=options['file']))

        if not os.path.isfile(file_path):
            raise CommandError("File does not exist at the specified path.")

        # Fetch (or lazily create) the Library row for this level/lesson.
        try:
            library = Library.objects.get(level=options['level'],
                                          lesson=options['lesson'])
        except ObjectDoesNotExist:
            library_serializer = LibrarySerializer(data=options)
            if library_serializer.is_valid():
                library_serializer.save()
                library = Library.objects.get(pk=library_serializer.data['id'])
            else:
                raise CommandError(library_serializer.errors)

        with open(file_path) as dict_file:
            # NOTE(review): assumes the CSV has at least 'english' and
            # 'translate' columns -- confirm against the data files.
            csv_data = csv.DictReader(dict_file)
            for row in csv_data:
                row['english'] = row['english'].lower()
                self.print_info('***\n{english}', row)
                try:
                    # Existing word: just link it to this library as well.
                    vocabulary = Word.objects.get(english=row['english'])
                    self.print_info('{english} - lexicon already exist', row)
                    vocabulary.library.add(library)
                    vocabulary.save()
                except ObjectDoesNotExist:
                    # New word: create it through the serializer.
                    # Python 2 bytes -> unicode for the translation column.
                    row['translate'] = row['translate'].decode('utf-8')
                    row['library'] = [library.id, ]
                    vocabulary_serializer = DictionarySerializer(data=row)
                    if vocabulary_serializer.is_valid():
                        vocabulary_serializer.save()
                    else:
                        # Invalid row: report and continue with the next one.
                        self.print_info('error - {error}', dict(
                            word=row['english'],
                            error=vocabulary_serializer.errors))
|
normal
|
{
"blob_id": "7d4d5ca14c3e1479059f77c6a7f8dcfad599443b",
"index": 4729,
"step-1": "import os\nimport csv\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom aec.apps.vocabulary.serializers import DictionarySerializer\nfrom aec.apps.vocabulary.models import Word\nfrom aec.apps.library.serializers import LibrarySerializer\nfrom aec.apps.library.models import Library\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'load vocabulary from csv_file'\n\n def __init__(self, *args, **kwargs):\n super(Command, self).__init__(*args, **kwargs)\n self.input_options = None\n\n def add_arguments(self, parser):\n parser.add_argument(\n '-p', '--print',\n default=False,\n action='store_true',\n dest='print',\n help='Print info.'\n )\n parser.add_argument(\n '-f', '--file',\n dest='file',\n help='File for load to db.'\n )\n parser.add_argument(\n '--level',\n dest='level',\n help='Level for data.'\n )\n parser.add_argument(\n '--lesson',\n dest='lesson',\n help='Lesson for data.'\n )\n\n def print_info(self, template='', context=None):\n if self.input_options['print']:\n context = context or {}\n print str(template).format(**context)\n\n def handle(self, *args, **options):\n\n self.input_options = options\n\n if not options['level']:\n raise CommandError(\"Option `--level=...` must be specified.\")\n\n if not options['lesson']:\n raise CommandError(\"Option `--lesson=...` must be specified.\")\n\n if not options['file']:\n raise CommandError(\"Option `--file=...` must be specified.\")\n\n file_path = os.path.join(settings.BASE_DIR,\n 'data/{f}'.format(f=options['file']))\n\n if not os.path.isfile(file_path):\n raise CommandError(\"File does not exist at the specified path.\")\n\n try:\n library = Library.objects.get(level=options['level'],\n lesson=options['lesson'])\n except ObjectDoesNotExist:\n library_serializer = LibrarySerializer(data=options)\n if library_serializer.is_valid():\n library_serializer.save()\n library = 
Library.objects.get(pk=library_serializer.data['id'])\n else:\n raise CommandError(library_serializer.errors)\n\n with open(file_path) as dict_file:\n csv_data = csv.DictReader(dict_file)\n for row in csv_data:\n row['english'] = row['english'].lower()\n self.print_info('***\\n{english}', row)\n try:\n vocabulary = Word.objects.get(english=row['english'])\n self.print_info('{english} - lexicon already exist', row)\n vocabulary.library.add(library)\n vocabulary.save()\n except ObjectDoesNotExist:\n row['translate'] = row['translate'].decode('utf-8')\n row['library'] = [library.id, ]\n vocabulary_serializer = DictionarySerializer(data=row)\n if vocabulary_serializer.is_valid():\n vocabulary_serializer.save()\n else:\n self.print_info('error - {error}', dict(\n word=row['english'],\n error=vocabulary_serializer.errors))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import numpy,math,random
from scipy.io.wavfile import write
notes=[('c',32.7),('c#',34.65),('d',36.71),('d#',38.89),('e',41.2),('f',43.65),
('f#',46.25),('g',49),('g#',51.91),('a',55),('a#',58.27),('b',61.47)]
#notes={'c':32.7,'c#':34.65,'d':36.71,'d#':38.89,'e':41.2,'f':43.65,'f#':46.25,
# 'g':49,'g#':51.91,'a':55,'a#':58.27,'b':61.47}
tempo=80
beatLen=1/(tempo/60)
noteTypes={'q':1,'h':2,'dh':3,'w':4,'e':.5,'s':.25,}
def make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16):
bytelist = []
TwoPiDivSamplerate = 2*math.pi/samplerate
increment = TwoPiDivSamplerate * freq
incadd = phase*increment
count=0
mid=None
for i in range(int(samplerate*time)):
if incadd > (2**(bitspersample - 1) - 1):
incadd = (2**(bitspersample - 1) - 1) - (incadd - (2**(bitspersample - 1) - 1))
elif incadd < -(2**(bitspersample - 1) - 1):
incadd = -(2**(bitspersample - 1) - 1) + (-(2**(bitspersample - 1) - 1) - incadd)
f=math.e**(-((i-int(samplerate*time)/2)**2)/(2*(int(samplerate*time)/4)**2))
bytelist.append(int(round(f*amp*(2**(bitspersample - 1) - 1)*math.sin(incadd))))
incadd += increment
return bytelist
data = []
for octave in range(2,4):
for note in notes:
f=note[1]
data+=make_wave(f*2**octave,.3)
scaled = numpy.int16(data/numpy.max(numpy.abs(data)) * 32767)
print(scaled)
write('test0.wav', 44100, scaled)
|
normal
|
{
"blob_id": "2ad1b44027b72499c1961f2d2b1c12c356c63d2b",
"index": 5350,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16\n ):\n bytelist = []\n TwoPiDivSamplerate = 2 * math.pi / samplerate\n increment = TwoPiDivSamplerate * freq\n incadd = phase * increment\n count = 0\n mid = None\n for i in range(int(samplerate * time)):\n if incadd > 2 ** (bitspersample - 1) - 1:\n incadd = 2 ** (bitspersample - 1) - 1 - (incadd - (2 ** (\n bitspersample - 1) - 1))\n elif incadd < -(2 ** (bitspersample - 1) - 1):\n incadd = -(2 ** (bitspersample - 1) - 1) + (-(2 ** (\n bitspersample - 1) - 1) - incadd)\n f = math.e ** (-(i - int(samplerate * time) / 2) ** 2 / (2 * (int(\n samplerate * time) / 4) ** 2))\n bytelist.append(int(round(f * amp * (2 ** (bitspersample - 1) - 1) *\n math.sin(incadd))))\n incadd += increment\n return bytelist\n\n\n<mask token>\nfor octave in range(2, 4):\n for note in notes:\n f = note[1]\n data += make_wave(f * 2 ** octave, 0.3)\n<mask token>\nprint(scaled)\nwrite('test0.wav', 44100, scaled)\n",
"step-3": "<mask token>\nnotes = [('c', 32.7), ('c#', 34.65), ('d', 36.71), ('d#', 38.89), ('e', \n 41.2), ('f', 43.65), ('f#', 46.25), ('g', 49), ('g#', 51.91), ('a', 55),\n ('a#', 58.27), ('b', 61.47)]\ntempo = 80\nbeatLen = 1 / (tempo / 60)\nnoteTypes = {'q': 1, 'h': 2, 'dh': 3, 'w': 4, 'e': 0.5, 's': 0.25}\n\n\ndef make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16\n ):\n bytelist = []\n TwoPiDivSamplerate = 2 * math.pi / samplerate\n increment = TwoPiDivSamplerate * freq\n incadd = phase * increment\n count = 0\n mid = None\n for i in range(int(samplerate * time)):\n if incadd > 2 ** (bitspersample - 1) - 1:\n incadd = 2 ** (bitspersample - 1) - 1 - (incadd - (2 ** (\n bitspersample - 1) - 1))\n elif incadd < -(2 ** (bitspersample - 1) - 1):\n incadd = -(2 ** (bitspersample - 1) - 1) + (-(2 ** (\n bitspersample - 1) - 1) - incadd)\n f = math.e ** (-(i - int(samplerate * time) / 2) ** 2 / (2 * (int(\n samplerate * time) / 4) ** 2))\n bytelist.append(int(round(f * amp * (2 ** (bitspersample - 1) - 1) *\n math.sin(incadd))))\n incadd += increment\n return bytelist\n\n\ndata = []\nfor octave in range(2, 4):\n for note in notes:\n f = note[1]\n data += make_wave(f * 2 ** octave, 0.3)\nscaled = numpy.int16(data / numpy.max(numpy.abs(data)) * 32767)\nprint(scaled)\nwrite('test0.wav', 44100, scaled)\n",
"step-4": "import numpy, math, random\nfrom scipy.io.wavfile import write\nnotes = [('c', 32.7), ('c#', 34.65), ('d', 36.71), ('d#', 38.89), ('e', \n 41.2), ('f', 43.65), ('f#', 46.25), ('g', 49), ('g#', 51.91), ('a', 55),\n ('a#', 58.27), ('b', 61.47)]\ntempo = 80\nbeatLen = 1 / (tempo / 60)\nnoteTypes = {'q': 1, 'h': 2, 'dh': 3, 'w': 4, 'e': 0.5, 's': 0.25}\n\n\ndef make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16\n ):\n bytelist = []\n TwoPiDivSamplerate = 2 * math.pi / samplerate\n increment = TwoPiDivSamplerate * freq\n incadd = phase * increment\n count = 0\n mid = None\n for i in range(int(samplerate * time)):\n if incadd > 2 ** (bitspersample - 1) - 1:\n incadd = 2 ** (bitspersample - 1) - 1 - (incadd - (2 ** (\n bitspersample - 1) - 1))\n elif incadd < -(2 ** (bitspersample - 1) - 1):\n incadd = -(2 ** (bitspersample - 1) - 1) + (-(2 ** (\n bitspersample - 1) - 1) - incadd)\n f = math.e ** (-(i - int(samplerate * time) / 2) ** 2 / (2 * (int(\n samplerate * time) / 4) ** 2))\n bytelist.append(int(round(f * amp * (2 ** (bitspersample - 1) - 1) *\n math.sin(incadd))))\n incadd += increment\n return bytelist\n\n\ndata = []\nfor octave in range(2, 4):\n for note in notes:\n f = note[1]\n data += make_wave(f * 2 ** octave, 0.3)\nscaled = numpy.int16(data / numpy.max(numpy.abs(data)) * 32767)\nprint(scaled)\nwrite('test0.wav', 44100, scaled)\n",
"step-5": "import numpy,math,random\nfrom scipy.io.wavfile import write\n\nnotes=[('c',32.7),('c#',34.65),('d',36.71),('d#',38.89),('e',41.2),('f',43.65),\n ('f#',46.25),('g',49),('g#',51.91),('a',55),('a#',58.27),('b',61.47)]\n#notes={'c':32.7,'c#':34.65,'d':36.71,'d#':38.89,'e':41.2,'f':43.65,'f#':46.25,\n # 'g':49,'g#':51.91,'a':55,'a#':58.27,'b':61.47}\ntempo=80\nbeatLen=1/(tempo/60)\nnoteTypes={'q':1,'h':2,'dh':3,'w':4,'e':.5,'s':.25,}\n\ndef make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16):\n bytelist = []\n TwoPiDivSamplerate = 2*math.pi/samplerate\n increment = TwoPiDivSamplerate * freq\n incadd = phase*increment\n count=0\n mid=None\n for i in range(int(samplerate*time)):\n if incadd > (2**(bitspersample - 1) - 1):\n incadd = (2**(bitspersample - 1) - 1) - (incadd - (2**(bitspersample - 1) - 1))\n elif incadd < -(2**(bitspersample - 1) - 1):\n incadd = -(2**(bitspersample - 1) - 1) + (-(2**(bitspersample - 1) - 1) - incadd)\n f=math.e**(-((i-int(samplerate*time)/2)**2)/(2*(int(samplerate*time)/4)**2))\n bytelist.append(int(round(f*amp*(2**(bitspersample - 1) - 1)*math.sin(incadd))))\n incadd += increment\n return bytelist\n\n\ndata = []\nfor octave in range(2,4):\n for note in notes:\n f=note[1]\n data+=make_wave(f*2**octave,.3)\n\n\n\n\n\nscaled = numpy.int16(data/numpy.max(numpy.abs(data)) * 32767)\nprint(scaled)\nwrite('test0.wav', 44100, scaled)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from multiprocessing import Process, Pipe
from time import sleep
from os import getpid
def ponger(pipe, response):
while True:
msg = pipe.recv()
print(f"{getpid()} receiving: {msg}")
sleep(1)
pipe.send(response)
if __name__ == '__main__':
ping_conn, pong_conn = Pipe()
Process(target=ponger, args=(ping_conn, 'ping')).start()
Process(target=ponger, args=(pong_conn, 'pong')).start()
ping_conn.send('ping')
|
normal
|
{
"blob_id": "aac9960dafc9e8d3a5670251fcc54eb8e34d4458",
"index": 9282,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef ponger(pipe, response):\n while True:\n msg = pipe.recv()\n print(f'{getpid()} receiving: {msg}')\n sleep(1)\n pipe.send(response)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef ponger(pipe, response):\n while True:\n msg = pipe.recv()\n print(f'{getpid()} receiving: {msg}')\n sleep(1)\n pipe.send(response)\n\n\nif __name__ == '__main__':\n ping_conn, pong_conn = Pipe()\n Process(target=ponger, args=(ping_conn, 'ping')).start()\n Process(target=ponger, args=(pong_conn, 'pong')).start()\n ping_conn.send('ping')\n",
"step-4": "from multiprocessing import Process, Pipe\nfrom time import sleep\nfrom os import getpid\n\n\ndef ponger(pipe, response):\n while True:\n msg = pipe.recv()\n print(f'{getpid()} receiving: {msg}')\n sleep(1)\n pipe.send(response)\n\n\nif __name__ == '__main__':\n ping_conn, pong_conn = Pipe()\n Process(target=ponger, args=(ping_conn, 'ping')).start()\n Process(target=ponger, args=(pong_conn, 'pong')).start()\n ping_conn.send('ping')\n",
"step-5": "from multiprocessing import Process, Pipe\nfrom time import sleep\nfrom os import getpid\n\n\ndef ponger(pipe, response):\n while True:\n msg = pipe.recv()\n print(f\"{getpid()} receiving: {msg}\")\n sleep(1)\n pipe.send(response)\n\n\nif __name__ == '__main__':\n ping_conn, pong_conn = Pipe()\n\n Process(target=ponger, args=(ping_conn, 'ping')).start()\n Process(target=ponger, args=(pong_conn, 'pong')).start()\n\n ping_conn.send('ping')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from . import utils
from . import objects
START = (0, 0)
STARTING_LIFE = 10
WHITE = (255, 255, 255)
class RoughLightGame:
def __init__(self, game_map, width, height, **kwargs):
self.map = game_map
self.width = width
self.height = height
self.objects = kwargs.get('objects', list())
self.start = kwargs.get('start', utils.Vector(0, 0))
# player initialization
self.player = kwargs.get('player', None)
if not self.player:
self.player = objects.Player(self.start, b'@', WHITE,
self.map, STARTING_LIFE, fov=20)
self.objects.append(self.player)
# Add room lables to map
count = 0
for room in self.map.rooms:
label = objects.Object(room.get_center(), chr(ord('a')+count), WHITE, True, False)
self.objects.append(label)
count += 1
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.location == location and object.blocks for object in self.objects)
def visible_objects(self):
res = []
for object in self.objects:
if object.visible and object.location in self.player.seen:
if self.map.in_area(self.width, self.height, object.location, self.player.location):
res.append(object)
return reversed(res)
def move_player(self, direction):
if not self.is_blocked(self.player.location + direction):
self.player.move(direction)
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.blocks and object.location == location for object in self.objects)
def get_area(self, width, height):
# Get the current area the player is in based on desired size and players location
return self.map.get_area(width, height, self.player.location)
|
normal
|
{
"blob_id": "5f089c3e67452fe6d14f96a70d792bc0d056b375",
"index": 9227,
"step-1": "<mask token>\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n <mask token>\n <mask token>\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n <mask token>\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-2": "<mask token>\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n <mask token>\n <mask token>\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.blocks and object.location == location for object in\n self.objects)\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-3": "<mask token>\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.location == location and object.blocks for object in\n self.objects)\n <mask token>\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.blocks and object.location == location for object in\n self.objects)\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-4": "<mask token>\nSTART = 0, 0\nSTARTING_LIFE = 10\nWHITE = 255, 255, 255\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.location == location and object.blocks for object in\n self.objects)\n\n def visible_objects(self):\n res = []\n for object in self.objects:\n if object.visible and object.location in self.player.seen:\n if self.map.in_area(self.width, self.height, object.\n location, self.player.location):\n res.append(object)\n return reversed(res)\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.blocks and object.location == location for object in\n self.objects)\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-5": "from . import utils\nfrom . import objects\n\nSTART = (0, 0)\nSTARTING_LIFE = 10\n\nWHITE = (255, 255, 255)\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n\n self.map = game_map\n self.width = width\n self.height = height\n\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n\n # player initialization\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE,\n self.map, STARTING_LIFE, fov=20)\n\n self.objects.append(self.player)\n\n # Add room lables to map\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a')+count), WHITE, True, False)\n self.objects.append(label)\n count += 1\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n\n return any(object.location == location and object.blocks for object in self.objects)\n\n\n def visible_objects(self):\n res = []\n for object in self.objects:\n if object.visible and object.location in self.player.seen:\n if self.map.in_area(self.width, self.height, object.location, self.player.location):\n res.append(object)\n return reversed(res)\n \n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n\n return any(object.blocks and object.location == location for object in self.objects)\n\n def get_area(self, width, height):\n # Get the current area the player is in based on desired size and players location\n return self.map.get_area(width, height, self.player.location)\n\n\n",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
import easyocr
import cv2
import json
import numpy as np
import os
import os.path
import glob
def convert(o):
if isinstance(o, np.generic): return o.item()
raise TypeError
readers = [
easyocr.Reader(['la', 'en', 'de', 'fr', 'es', 'cs', 'is'], gpu = False),
#easyocr.Reader(['ch_tra'], gpu = False),
#easyocr.Reader(['fa'], gpu = False),
#easyocr.Reader(['hi'], gpu = False),
#easyocr.Reader(['ja'], gpu = False),
#easyocr.Reader(['ko'], gpu = False),
#easyocr.Reader(['th'], gpu = False),
]
basedir = "keyframes/"
dirs = os.listdir(basedir)
for d in dirs:
outfile = 'ocr/' + d + '.json'
if os.path.isfile(outfile):
print("found " + outfile + ", skipping")
continue
files = glob.glob(basedir + d + "/*.png")
ocr = {}
for f in files:
i = f.split("_")[-2]
img = cv2.imread(f)
results = []
for reader in readers:
results = results + reader.readtext(img)
h = list(filter(lambda result : len(result) > 2 and len(result[1]) > 0 and result[2] >= 0.1, results))
if len(h) > 0:
ocr[i] = h
with open(outfile,'w') as f:
json.dump(ocr, f, indent=1, default=convert)
print(d)
|
normal
|
{
"blob_id": "7057b882ca1ce2c08e9ba7add5f115636b9b319e",
"index": 8745,
"step-1": "<mask token>\n\n\ndef convert(o):\n if isinstance(o, np.generic):\n return o.item()\n raise TypeError\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef convert(o):\n if isinstance(o, np.generic):\n return o.item()\n raise TypeError\n\n\n<mask token>\nfor d in dirs:\n outfile = 'ocr/' + d + '.json'\n if os.path.isfile(outfile):\n print('found ' + outfile + ', skipping')\n continue\n files = glob.glob(basedir + d + '/*.png')\n ocr = {}\n for f in files:\n i = f.split('_')[-2]\n img = cv2.imread(f)\n results = []\n for reader in readers:\n results = results + reader.readtext(img)\n h = list(filter(lambda result: len(result) > 2 and len(result[1]) >\n 0 and result[2] >= 0.1, results))\n if len(h) > 0:\n ocr[i] = h\n with open(outfile, 'w') as f:\n json.dump(ocr, f, indent=1, default=convert)\n print(d)\n",
"step-3": "<mask token>\n\n\ndef convert(o):\n if isinstance(o, np.generic):\n return o.item()\n raise TypeError\n\n\nreaders = [easyocr.Reader(['la', 'en', 'de', 'fr', 'es', 'cs', 'is'], gpu=\n False)]\nbasedir = 'keyframes/'\ndirs = os.listdir(basedir)\nfor d in dirs:\n outfile = 'ocr/' + d + '.json'\n if os.path.isfile(outfile):\n print('found ' + outfile + ', skipping')\n continue\n files = glob.glob(basedir + d + '/*.png')\n ocr = {}\n for f in files:\n i = f.split('_')[-2]\n img = cv2.imread(f)\n results = []\n for reader in readers:\n results = results + reader.readtext(img)\n h = list(filter(lambda result: len(result) > 2 and len(result[1]) >\n 0 and result[2] >= 0.1, results))\n if len(h) > 0:\n ocr[i] = h\n with open(outfile, 'w') as f:\n json.dump(ocr, f, indent=1, default=convert)\n print(d)\n",
"step-4": "import easyocr\nimport cv2\nimport json\nimport numpy as np\nimport os\nimport os.path\nimport glob\n\n\ndef convert(o):\n if isinstance(o, np.generic):\n return o.item()\n raise TypeError\n\n\nreaders = [easyocr.Reader(['la', 'en', 'de', 'fr', 'es', 'cs', 'is'], gpu=\n False)]\nbasedir = 'keyframes/'\ndirs = os.listdir(basedir)\nfor d in dirs:\n outfile = 'ocr/' + d + '.json'\n if os.path.isfile(outfile):\n print('found ' + outfile + ', skipping')\n continue\n files = glob.glob(basedir + d + '/*.png')\n ocr = {}\n for f in files:\n i = f.split('_')[-2]\n img = cv2.imread(f)\n results = []\n for reader in readers:\n results = results + reader.readtext(img)\n h = list(filter(lambda result: len(result) > 2 and len(result[1]) >\n 0 and result[2] >= 0.1, results))\n if len(h) > 0:\n ocr[i] = h\n with open(outfile, 'w') as f:\n json.dump(ocr, f, indent=1, default=convert)\n print(d)\n",
"step-5": "import easyocr\r\nimport cv2\r\nimport json\r\nimport numpy as np\r\nimport os\r\nimport os.path\r\nimport glob\r\n\r\ndef convert(o):\r\n if isinstance(o, np.generic): return o.item() \r\n raise TypeError\r\n\r\nreaders = [\r\n easyocr.Reader(['la', 'en', 'de', 'fr', 'es', 'cs', 'is'], gpu = False),\r\n #easyocr.Reader(['ch_tra'], gpu = False),\r\n #easyocr.Reader(['fa'], gpu = False),\r\n #easyocr.Reader(['hi'], gpu = False), \r\n #easyocr.Reader(['ja'], gpu = False), \r\n #easyocr.Reader(['ko'], gpu = False),\r\n #easyocr.Reader(['th'], gpu = False),\r\n]\r\n\r\nbasedir = \"keyframes/\"\r\n\r\ndirs = os.listdir(basedir)\r\n\r\n\r\nfor d in dirs:\r\n\r\n outfile = 'ocr/' + d + '.json'\r\n if os.path.isfile(outfile):\r\n print(\"found \" + outfile + \", skipping\")\r\n continue\r\n \r\n files = glob.glob(basedir + d + \"/*.png\")\r\n \r\n ocr = {}\r\n\r\n for f in files:\r\n i = f.split(\"_\")[-2]\r\n img = cv2.imread(f)\r\n \r\n results = []\r\n for reader in readers:\r\n results = results + reader.readtext(img)\r\n \r\n h = list(filter(lambda result : len(result) > 2 and len(result[1]) > 0 and result[2] >= 0.1, results))\r\n \r\n if len(h) > 0:\r\n ocr[i] = h\r\n \r\n with open(outfile,'w') as f: \r\n json.dump(ocr, f, indent=1, default=convert)\r\n \r\n print(d)\r\n \r\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from utils import *
import math
class State:
"This class represents the search state that will be used for ARA* search"
def __init__(self, x, y, theta, parent=None, parent_action=None, g=float('inf'), h=float('inf')):
self.x = x
self.y = y
self.theta = theta % (2*math.pi)
self.g = g
self.h = h
self.parent = parent
self.parent_action = parent_action
def __eq__(self, other):
if not isinstance(other, State):
return False
return (self.x == other.x) and (self.y == other.y) and (almostEqual(self.theta, other.theta))
def __hash__(self):
deg = round(math.degrees(self.theta))
return hash((self.x, self.y, deg))
def __lt__(self, other):
return self.g < other.g
def setG(self, g):
self.g = g
def setH(self, h):
self.h = h
def setParent(self, parent):
self.parent = parent
def setParentAction(self, parent_action):
self.parent_action = parent_action
|
normal
|
{
"blob_id": "c8f899958ce19e7e2bf1307a685e65873695f140",
"index": 9028,
"step-1": "<mask token>\n\n\nclass State:\n <mask token>\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n <mask token>\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-2": "<mask token>\n\n\nclass State:\n <mask token>\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return self.x == other.x and self.y == other.y and almostEqual(self\n .theta, other.theta)\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-3": "<mask token>\n\n\nclass State:\n \"\"\"This class represents the search state that will be used for ARA* search\"\"\"\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return self.x == other.x and self.y == other.y and almostEqual(self\n .theta, other.theta)\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-4": "from utils import *\nimport math\n\n\nclass State:\n \"\"\"This class represents the search state that will be used for ARA* search\"\"\"\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return self.x == other.x and self.y == other.y and almostEqual(self\n .theta, other.theta)\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-5": "from utils import *\nimport math\n\nclass State:\n \"This class represents the search state that will be used for ARA* search\"\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2*math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return (self.x == other.x) and (self.y == other.y) and (almostEqual(self.theta, other.theta))\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n def setH(self, h):\n self.h = h\n def setParent(self, parent):\n self.parent = parent\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
import sys
sys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')
from numpy import sin, linspace
x = linspace(0, 4, 101)
y = sin(x)
from numpy import sin, linspace
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')
plt.plot(x, y2)
plt.plot(x, y2, color = "#530000")
y1=x
plt.plot(x, y1, color = "#530000")
y2 = y1 - x*x*x/(1*2*3)
plt.plot(x, y2, color = "#530000")
plt.show()
|
normal
|
{
"blob_id": "1dcea61908753777604d99235407981e89c3b9d4",
"index": 4452,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')\n<mask token>\nplt.grid()\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')\nplt.plot(x, y2)\nplt.plot(x, y2, color='#530000')\n<mask token>\nplt.plot(x, y1, color='#530000')\n<mask token>\nplt.plot(x, y2, color='#530000')\nplt.show()\n",
"step-3": "<mask token>\nsys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')\n<mask token>\nx = linspace(0, 4, 101)\ny = sin(x)\n<mask token>\nplt.grid()\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')\nplt.plot(x, y2)\nplt.plot(x, y2, color='#530000')\ny1 = x\nplt.plot(x, y1, color='#530000')\ny2 = y1 - x * x * x / (1 * 2 * 3)\nplt.plot(x, y2, color='#530000')\nplt.show()\n",
"step-4": "import sys\nsys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')\nfrom numpy import sin, linspace\nx = linspace(0, 4, 101)\ny = sin(x)\nfrom numpy import sin, linspace\nplt.grid()\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')\nplt.plot(x, y2)\nplt.plot(x, y2, color='#530000')\ny1 = x\nplt.plot(x, y1, color='#530000')\ny2 = y1 - x * x * x / (1 * 2 * 3)\nplt.plot(x, y2, color='#530000')\nplt.show()\n",
"step-5": "import sys\nsys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')\n\nfrom numpy import sin, linspace\nx = linspace(0, 4, 101)\ny = sin(x)\n\nfrom numpy import sin, linspace\nplt.grid()\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')\nplt.plot(x, y2)\nplt.plot(x, y2, color = \"#530000\")\n\ny1=x\nplt.plot(x, y1, color = \"#530000\")\n\ny2 = y1 - x*x*x/(1*2*3)\nplt.plot(x, y2, color = \"#530000\")\n\nplt.show()\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
from test.test_unicode_file_functions import filenames
def writeUniquerecords(dirpath,filenames):
sourcepath=os.path.join(dirpath,filenames)
with open(sourcepath,'r') as fp:
lines= fp.readlines()
destination_lines=[]
for line in lines:
if line not in destination_lines:
destination_lines.append(line)
destinationfile='/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'
destipath=os.path.join(destinationfile,filenames)
with open(destipath, "w+")as destination:
destination.write("\n".join(destination_lines))
def Readandwrite():
for dirpath,dirnames,filenames in os.walk('/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/locators'):
print('Current Path',dirpath)
print('Current Folder names',dirnames)
print('Current Files names ',filenames)
for file in filenames:
writeUniquerecords(dirpath,file)
Readandwrite()
|
normal
|
{
"blob_id": "4ed730369cf065936569a8515de44042829c2143",
"index": 1201,
"step-1": "<mask token>\n\n\ndef writeUniquerecords(dirpath, filenames):\n sourcepath = os.path.join(dirpath, filenames)\n with open(sourcepath, 'r') as fp:\n lines = fp.readlines()\n destination_lines = []\n for line in lines:\n if line not in destination_lines:\n destination_lines.append(line)\n destinationfile = (\n '/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'\n )\n destipath = os.path.join(destinationfile, filenames)\n with open(destipath, 'w+') as destination:\n destination.write('\\n'.join(destination_lines))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef writeUniquerecords(dirpath, filenames):\n sourcepath = os.path.join(dirpath, filenames)\n with open(sourcepath, 'r') as fp:\n lines = fp.readlines()\n destination_lines = []\n for line in lines:\n if line not in destination_lines:\n destination_lines.append(line)\n destinationfile = (\n '/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'\n )\n destipath = os.path.join(destinationfile, filenames)\n with open(destipath, 'w+') as destination:\n destination.write('\\n'.join(destination_lines))\n\n\ndef Readandwrite():\n for dirpath, dirnames, filenames in os.walk(\n '/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/locators'\n ):\n print('Current Path', dirpath)\n print('Current Folder names', dirnames)\n print('Current Files names ', filenames)\n for file in filenames:\n writeUniquerecords(dirpath, file)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef writeUniquerecords(dirpath, filenames):\n sourcepath = os.path.join(dirpath, filenames)\n with open(sourcepath, 'r') as fp:\n lines = fp.readlines()\n destination_lines = []\n for line in lines:\n if line not in destination_lines:\n destination_lines.append(line)\n destinationfile = (\n '/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'\n )\n destipath = os.path.join(destinationfile, filenames)\n with open(destipath, 'w+') as destination:\n destination.write('\\n'.join(destination_lines))\n\n\ndef Readandwrite():\n for dirpath, dirnames, filenames in os.walk(\n '/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/locators'\n ):\n print('Current Path', dirpath)\n print('Current Folder names', dirnames)\n print('Current Files names ', filenames)\n for file in filenames:\n writeUniquerecords(dirpath, file)\n\n\nReadandwrite()\n",
"step-4": "import os\nfrom test.test_unicode_file_functions import filenames\n\n\ndef writeUniquerecords(dirpath, filenames):\n sourcepath = os.path.join(dirpath, filenames)\n with open(sourcepath, 'r') as fp:\n lines = fp.readlines()\n destination_lines = []\n for line in lines:\n if line not in destination_lines:\n destination_lines.append(line)\n destinationfile = (\n '/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'\n )\n destipath = os.path.join(destinationfile, filenames)\n with open(destipath, 'w+') as destination:\n destination.write('\\n'.join(destination_lines))\n\n\ndef Readandwrite():\n for dirpath, dirnames, filenames in os.walk(\n '/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/locators'\n ):\n print('Current Path', dirpath)\n print('Current Folder names', dirnames)\n print('Current Files names ', filenames)\n for file in filenames:\n writeUniquerecords(dirpath, file)\n\n\nReadandwrite()\n",
"step-5": "import os\nfrom test.test_unicode_file_functions import filenames\n\n\ndef writeUniquerecords(dirpath,filenames):\n sourcepath=os.path.join(dirpath,filenames)\n with open(sourcepath,'r') as fp:\n lines= fp.readlines()\n destination_lines=[]\n for line in lines:\n if line not in destination_lines:\n destination_lines.append(line)\n \n destinationfile='/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'\n destipath=os.path.join(destinationfile,filenames)\n with open(destipath, \"w+\")as destination:\n destination.write(\"\\n\".join(destination_lines)) \n\n\ndef Readandwrite():\n for dirpath,dirnames,filenames in os.walk('/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/locators'):\n print('Current Path',dirpath)\n print('Current Folder names',dirnames)\n print('Current Files names ',filenames)\n for file in filenames:\n writeUniquerecords(dirpath,file)\n \n \n\nReadandwrite()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
'''
一球从100米高度自由落下
每次落地后反跳回原高度的一半;再落下,求它在第10次落地时,共经过多少米?第10次反弹多高?
求两个东西, 1是经过了多少米, 2是反弹多高
1: 100 100+50+50 100+50+50+25+25
2: 100 100/2=50 50/2=25 25/2=2
'''
import math
start_height = 100
rebound_rate = 0.5
meter_list = [100]
def rebound(time):
m = start_height*(rebound_rate ** (time))
return m
'''
1.第一次落地, 经过了100米
2.第二次落地, 经过了100+50+50米
3.第三次落地, 经过了100+50+50+25+25米
'''
def get_all_meter(time):
for k in range(1, time):
meter = start_height + rebound(time-1)*2
meter_list.append(meter)
def main():
#print rebound(10)
print get_all_meter(11)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "f25d86e857970854b2239ce0ab5280132b89280e",
"index": 6900,
"step-1": "# -*- coding: utf-8 -*-\n'''\n 一球从100米高度自由落下\n 每次落地后反跳回原高度的一半;再落下,求它在第10次落地时,共经过多少米?第10次反弹多高?\n\n 求两个东西, 1是经过了多少米, 2是反弹多高\n 1: 100 100+50+50 100+50+50+25+25\n 2: 100 100/2=50 50/2=25 25/2=2\n'''\nimport math\n\nstart_height = 100\nrebound_rate = 0.5\nmeter_list = [100]\n\ndef rebound(time):\n m = start_height*(rebound_rate ** (time))\n return m\n\n'''\n 1.第一次落地, 经过了100米\n 2.第二次落地, 经过了100+50+50米\n 3.第三次落地, 经过了100+50+50+25+25米\n'''\ndef get_all_meter(time):\n for k in range(1, time):\n meter = start_height + rebound(time-1)*2\n meter_list.append(meter)\n\ndef main():\n #print rebound(10)\n print get_all_meter(11)\n\nif __name__ == '__main__':\n main()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import MySQLdb
from MySQLdb import escape_string as thwart
"""
"""
class DatabaseConnection:
def __init__(self, address, user, password, database):
self.address = address
self.user = user
self.password = password
self.database = database
"""
"""
def connect(self):
self.conn = MySQLdb.connect(host=self.address,
port=3306,
user=self.user,
passwd=self.password,
db=self.database)
c = self.conn.cursor()
return c, self.conn
def disconnect(self):
self.conn.close()
def addEmail(self, email, number):
try:
c, conn = self.connect()
c.execute("INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)", (thwart(email), thwart(number),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def removeEmail(self, email):
try:
c, conn = self.connect()
c.execute("DELETE from User WHERE email = (%s)", (thwart(email),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def updateSpamTable(self, mailID, repo):
try:
c, conn = self.connect()
no = c.execute("SELECT * FROM spammail WHERE idEmail = %s", (thwart(mailID),))
print(no)
if no == 0:
c.execute("INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)", (1, thwart(repo), thwart(mailID),))
else:
c.execute("SELECT numClicked FROM spammail WHERE idEmail = %s", (thwart(mailID),))
no = c.fetchone()[0]
print(no)
c.execute("UPDATE spammail SET numClicked = %s WHERE idEmail = %s", (no+1, thwart(mailID),))
conn.commit()
self.disconnect()
print("here")
return True
except:
return False
def getMostClicked(self):
try:
c, conn = self.connect()
c.execute("SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1")
data = c.fetchone()
print(data)
self.disconnect()
return [data[0], data[1], data[2]]
except:
return []
|
normal
|
{
"blob_id": "c6502d6b589fa75dfbd5946a1097e77fc0b472c4",
"index": 1126,
"step-1": "<mask token>\n\n\nclass DatabaseConnection:\n <mask token>\n <mask token>\n\n def connect(self):\n self.conn = MySQLdb.connect(host=self.address, port=3306, user=self\n .user, passwd=self.password, db=self.database)\n c = self.conn.cursor()\n return c, self.conn\n\n def disconnect(self):\n self.conn.close()\n\n def addEmail(self, email, number):\n try:\n c, conn = self.connect()\n c.execute(\n 'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',\n (thwart(email), thwart(number)))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def removeEmail(self, email):\n try:\n c, conn = self.connect()\n c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DatabaseConnection:\n <mask token>\n <mask token>\n\n def connect(self):\n self.conn = MySQLdb.connect(host=self.address, port=3306, user=self\n .user, passwd=self.password, db=self.database)\n c = self.conn.cursor()\n return c, self.conn\n\n def disconnect(self):\n self.conn.close()\n\n def addEmail(self, email, number):\n try:\n c, conn = self.connect()\n c.execute(\n 'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',\n (thwart(email), thwart(number)))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def removeEmail(self, email):\n try:\n c, conn = self.connect()\n c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n <mask token>\n\n def getMostClicked(self):\n try:\n c, conn = self.connect()\n c.execute(\n 'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'\n )\n data = c.fetchone()\n print(data)\n self.disconnect()\n return [data[0], data[1], data[2]]\n except:\n return []\n",
"step-3": "<mask token>\n\n\nclass DatabaseConnection:\n <mask token>\n <mask token>\n\n def connect(self):\n self.conn = MySQLdb.connect(host=self.address, port=3306, user=self\n .user, passwd=self.password, db=self.database)\n c = self.conn.cursor()\n return c, self.conn\n\n def disconnect(self):\n self.conn.close()\n\n def addEmail(self, email, number):\n try:\n c, conn = self.connect()\n c.execute(\n 'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',\n (thwart(email), thwart(number)))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def removeEmail(self, email):\n try:\n c, conn = self.connect()\n c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def updateSpamTable(self, mailID, repo):\n try:\n c, conn = self.connect()\n no = c.execute('SELECT * FROM spammail WHERE idEmail = %s', (\n thwart(mailID),))\n print(no)\n if no == 0:\n c.execute(\n 'INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)'\n , (1, thwart(repo), thwart(mailID)))\n else:\n c.execute('SELECT numClicked FROM spammail WHERE idEmail = %s',\n (thwart(mailID),))\n no = c.fetchone()[0]\n print(no)\n c.execute(\n 'UPDATE spammail SET numClicked = %s WHERE idEmail = %s',\n (no + 1, thwart(mailID)))\n conn.commit()\n self.disconnect()\n print('here')\n return True\n except:\n return False\n\n def getMostClicked(self):\n try:\n c, conn = self.connect()\n c.execute(\n 'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'\n )\n data = c.fetchone()\n print(data)\n self.disconnect()\n return [data[0], data[1], data[2]]\n except:\n return []\n",
"step-4": "<mask token>\n\n\nclass DatabaseConnection:\n\n def __init__(self, address, user, password, database):\n self.address = address\n self.user = user\n self.password = password\n self.database = database\n <mask token>\n\n def connect(self):\n self.conn = MySQLdb.connect(host=self.address, port=3306, user=self\n .user, passwd=self.password, db=self.database)\n c = self.conn.cursor()\n return c, self.conn\n\n def disconnect(self):\n self.conn.close()\n\n def addEmail(self, email, number):\n try:\n c, conn = self.connect()\n c.execute(\n 'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',\n (thwart(email), thwart(number)))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def removeEmail(self, email):\n try:\n c, conn = self.connect()\n c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def updateSpamTable(self, mailID, repo):\n try:\n c, conn = self.connect()\n no = c.execute('SELECT * FROM spammail WHERE idEmail = %s', (\n thwart(mailID),))\n print(no)\n if no == 0:\n c.execute(\n 'INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)'\n , (1, thwart(repo), thwart(mailID)))\n else:\n c.execute('SELECT numClicked FROM spammail WHERE idEmail = %s',\n (thwart(mailID),))\n no = c.fetchone()[0]\n print(no)\n c.execute(\n 'UPDATE spammail SET numClicked = %s WHERE idEmail = %s',\n (no + 1, thwart(mailID)))\n conn.commit()\n self.disconnect()\n print('here')\n return True\n except:\n return False\n\n def getMostClicked(self):\n try:\n c, conn = self.connect()\n c.execute(\n 'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'\n )\n data = c.fetchone()\n print(data)\n self.disconnect()\n return [data[0], data[1], data[2]]\n except:\n return []\n",
"step-5": "import MySQLdb\nfrom MySQLdb import escape_string as thwart\n\"\"\"\n\"\"\"\nclass DatabaseConnection:\n\n def __init__(self, address, user, password, database):\n self.address = address\n self.user = user\n self.password = password\n self.database = database\n\n \"\"\"\n \n \"\"\"\n def connect(self):\n self.conn = MySQLdb.connect(host=self.address,\n port=3306,\n user=self.user,\n passwd=self.password,\n db=self.database)\n\n c = self.conn.cursor()\n return c, self.conn\n\n def disconnect(self):\n self.conn.close()\n\n\n def addEmail(self, email, number):\n try:\n c, conn = self.connect()\n c.execute(\"INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)\", (thwart(email), thwart(number),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def removeEmail(self, email):\n try:\n c, conn = self.connect()\n c.execute(\"DELETE from User WHERE email = (%s)\", (thwart(email),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n\n def updateSpamTable(self, mailID, repo):\n try:\n c, conn = self.connect()\n no = c.execute(\"SELECT * FROM spammail WHERE idEmail = %s\", (thwart(mailID),))\n print(no)\n if no == 0:\n c.execute(\"INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)\", (1, thwart(repo), thwart(mailID),))\n else:\n c.execute(\"SELECT numClicked FROM spammail WHERE idEmail = %s\", (thwart(mailID),))\n no = c.fetchone()[0]\n print(no)\n c.execute(\"UPDATE spammail SET numClicked = %s WHERE idEmail = %s\", (no+1, thwart(mailID),))\n\n conn.commit()\n self.disconnect()\n print(\"here\")\n return True\n except:\n return False\n\n def getMostClicked(self):\n try:\n c, conn = self.connect()\n c.execute(\"SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1\")\n data = c.fetchone()\n print(data)\n self.disconnect()\n return [data[0], data[1], data[2]]\n except:\n return []\n\n",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
from manimlib.imports import *
class A_Scroller(Scene):
CONFIG={
"camera_config":{"background_color":"#FFFFFF"}
}
def construct(self):
text_1 = Text("3493", color="#DC3832")
text_2 = Text("3646", color="#221F20").shift(2*RIGHT)
text_3 = Text("4182", color="#2566AD").shift(4*RIGHT)
text_4 = Text("16417", color="#DC3832").shift(6*RIGHT)
text_5 = Text("18209", color="#221F20").shift(8*RIGHT)
text_6 = Text("18569", color="#2566AD").shift(10*RIGHT)
text_7 = Text("22229", color="#DC3832").shift(12*RIGHT)
text_8 = Text("24928", color="#221F20").shift(14*RIGHT)
text_9 = Text("26827", color="#2566AD").shift(16*RIGHT)
text_10 = Text("29779", color="#DC3832").shift(18*RIGHT)
line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6, text_7, text_8, text_9, text_10)
text_11 = Text("30898", color="#221F20").shift(DOWN)
text_12 = Text("31568", color="#2566AD").shift(2*RIGHT+DOWN)
text_13 = Text("32075", color="#DC3832").shift(4*RIGHT+DOWN)
text_14 = Text("32777", color="#221F20").shift(6*RIGHT+DOWN)
text_15 = Text("33959", color="#2566AD").shift(8*RIGHT+DOWN)
text_16 = Text("35450", color="#DC3832").shift(10*RIGHT+DOWN)
text_17 = Text("37680", color="#221F20").shift(12*RIGHT+DOWN)
text_18 = Text("38268", color="#2566AD").shift(14*RIGHT+DOWN)
text_19 = Text("38269", color="#DC3832").shift(16*RIGHT+DOWN)
text_20 = Text("38849", color="#221F20").shift(18*RIGHT+DOWN)
line_2 = VGroup(text_11, text_12, text_13, text_14, text_15, text_16, text_17, text_18, text_19, text_20)
text_21 = Text("44204", color="#2566AD").shift(2*DOWN)
text_22 = Text("44798", color="#DC3832").shift(2*RIGHT+2*DOWN)
text_23 = Text("44814", color="#221F20").shift(4*RIGHT+2*DOWN)
text_24 = Text("45084", color="#2566AD").shift(6*RIGHT+2*DOWN)
text_25 = Text("45252", color="#DC3832").shift(8*RIGHT+2*DOWN)
text_26 = Text("46041", color="#221F20").shift(10*RIGHT+2*DOWN)
text_27 = Text("46380", color="#2566AD").shift(12*RIGHT+2*DOWN)
text_28 = Text("47891", color="#DC3832").shift(14*RIGHT+2*DOWN)
text_29 = Text("51126", color="#221F20").shift(16*RIGHT+2*DOWN)
text_30 = Text("51599", color="#2566AD").shift(18*RIGHT+2*DOWN)
line_3 = VGroup(text_21, text_22, text_23, text_24, text_25, text_26, text_27, text_28, text_29, text_30)
all_numbers_1 = VGroup(line_1, line_2, line_3)
all_numbers_2 = all_numbers_1.copy()
all_numbers_1.move_to(2*UP).shift(20*RIGHT)
all_numbers_2.move_to(2*UP)
all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)
self.add(all_numbers)
self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10, rate_func=linear)
|
normal
|
{
"blob_id": "97c97f18d1b93dc54538a0df7badafd961fdcb9c",
"index": 3588,
"step-1": "<mask token>\n\n\nclass A_Scroller(Scene):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass A_Scroller(Scene):\n <mask token>\n\n def construct(self):\n text_1 = Text('3493', color='#DC3832')\n text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)\n text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)\n text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)\n text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)\n text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)\n text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)\n text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)\n text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)\n text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,\n text_7, text_8, text_9, text_10)\n text_11 = Text('30898', color='#221F20').shift(DOWN)\n text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)\n text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)\n text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)\n text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)\n text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)\n text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)\n text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)\n text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)\n text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,\n text_16, text_17, text_18, text_19, text_20)\n text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)\n text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)\n text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)\n text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)\n text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)\n text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * DOWN)\n text_27 = Text('46380', 
color='#2566AD').shift(12 * RIGHT + 2 * DOWN)\n text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)\n text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)\n text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,\n text_26, text_27, text_28, text_29, text_30)\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)\n all_numbers_2.move_to(2 * UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,\n rate_func=linear)\n",
"step-3": "<mask token>\n\n\nclass A_Scroller(Scene):\n CONFIG = {'camera_config': {'background_color': '#FFFFFF'}}\n\n def construct(self):\n text_1 = Text('3493', color='#DC3832')\n text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)\n text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)\n text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)\n text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)\n text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)\n text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)\n text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)\n text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)\n text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,\n text_7, text_8, text_9, text_10)\n text_11 = Text('30898', color='#221F20').shift(DOWN)\n text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)\n text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)\n text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)\n text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)\n text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)\n text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)\n text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)\n text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)\n text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,\n text_16, text_17, text_18, text_19, text_20)\n text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)\n text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)\n text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)\n text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)\n text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)\n text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * 
DOWN)\n text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)\n text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)\n text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)\n text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,\n text_26, text_27, text_28, text_29, text_30)\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)\n all_numbers_2.move_to(2 * UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,\n rate_func=linear)\n",
"step-4": "from manimlib.imports import *\n\n\nclass A_Scroller(Scene):\n CONFIG = {'camera_config': {'background_color': '#FFFFFF'}}\n\n def construct(self):\n text_1 = Text('3493', color='#DC3832')\n text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)\n text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)\n text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)\n text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)\n text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)\n text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)\n text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)\n text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)\n text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,\n text_7, text_8, text_9, text_10)\n text_11 = Text('30898', color='#221F20').shift(DOWN)\n text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)\n text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)\n text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)\n text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)\n text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)\n text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)\n text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)\n text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)\n text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,\n text_16, text_17, text_18, text_19, text_20)\n text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)\n text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)\n text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)\n text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)\n text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)\n text_26 = Text('46041', 
color='#221F20').shift(10 * RIGHT + 2 * DOWN)\n text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)\n text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)\n text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)\n text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,\n text_26, text_27, text_28, text_29, text_30)\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)\n all_numbers_2.move_to(2 * UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,\n rate_func=linear)\n",
"step-5": "from manimlib.imports import *\n\nclass A_Scroller(Scene):\n CONFIG={\n \"camera_config\":{\"background_color\":\"#FFFFFF\"}\n }\n def construct(self):\n text_1 = Text(\"3493\", color=\"#DC3832\")\n text_2 = Text(\"3646\", color=\"#221F20\").shift(2*RIGHT)\n text_3 = Text(\"4182\", color=\"#2566AD\").shift(4*RIGHT)\n text_4 = Text(\"16417\", color=\"#DC3832\").shift(6*RIGHT)\n text_5 = Text(\"18209\", color=\"#221F20\").shift(8*RIGHT)\n text_6 = Text(\"18569\", color=\"#2566AD\").shift(10*RIGHT)\n text_7 = Text(\"22229\", color=\"#DC3832\").shift(12*RIGHT)\n text_8 = Text(\"24928\", color=\"#221F20\").shift(14*RIGHT)\n text_9 = Text(\"26827\", color=\"#2566AD\").shift(16*RIGHT)\n text_10 = Text(\"29779\", color=\"#DC3832\").shift(18*RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6, text_7, text_8, text_9, text_10)\n\n text_11 = Text(\"30898\", color=\"#221F20\").shift(DOWN)\n text_12 = Text(\"31568\", color=\"#2566AD\").shift(2*RIGHT+DOWN)\n text_13 = Text(\"32075\", color=\"#DC3832\").shift(4*RIGHT+DOWN)\n text_14 = Text(\"32777\", color=\"#221F20\").shift(6*RIGHT+DOWN)\n text_15 = Text(\"33959\", color=\"#2566AD\").shift(8*RIGHT+DOWN)\n text_16 = Text(\"35450\", color=\"#DC3832\").shift(10*RIGHT+DOWN)\n text_17 = Text(\"37680\", color=\"#221F20\").shift(12*RIGHT+DOWN)\n text_18 = Text(\"38268\", color=\"#2566AD\").shift(14*RIGHT+DOWN)\n text_19 = Text(\"38269\", color=\"#DC3832\").shift(16*RIGHT+DOWN)\n text_20 = Text(\"38849\", color=\"#221F20\").shift(18*RIGHT+DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15, text_16, text_17, text_18, text_19, text_20)\n\n text_21 = Text(\"44204\", color=\"#2566AD\").shift(2*DOWN)\n text_22 = Text(\"44798\", color=\"#DC3832\").shift(2*RIGHT+2*DOWN)\n text_23 = Text(\"44814\", color=\"#221F20\").shift(4*RIGHT+2*DOWN)\n text_24 = Text(\"45084\", color=\"#2566AD\").shift(6*RIGHT+2*DOWN)\n text_25 = Text(\"45252\", color=\"#DC3832\").shift(8*RIGHT+2*DOWN)\n text_26 = 
Text(\"46041\", color=\"#221F20\").shift(10*RIGHT+2*DOWN)\n text_27 = Text(\"46380\", color=\"#2566AD\").shift(12*RIGHT+2*DOWN)\n text_28 = Text(\"47891\", color=\"#DC3832\").shift(14*RIGHT+2*DOWN)\n text_29 = Text(\"51126\", color=\"#221F20\").shift(16*RIGHT+2*DOWN)\n text_30 = Text(\"51599\", color=\"#2566AD\").shift(18*RIGHT+2*DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25, text_26, text_27, text_28, text_29, text_30)\n\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2*UP).shift(20*RIGHT)\n all_numbers_2.move_to(2*UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10, rate_func=linear)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#coding=utf-8
# Python 2 script: repeatedly fold a 0.2 mm sheet of paper and count how
# many folds are needed for the height s (metres) to exceed 8848 (the
# height of Mount Everest).
# NOTE(review): s accumulates the SUM of 0.2e-3 * 2**k over all folds,
# rather than tracking the thickness after the latest fold
# (0.2e-3 * 2**i); confirm which physical model is intended.
# NOTE(review): i is incremented once more after the final iteration, so
# the printed count is one past the fold index that crossed the
# threshold -- verify the off-by-one is intended.
i=1
s=0
while s<=8848:
	s=s+(2**i)*0.2*10**(-3)
	i=i+1
print '对折次数:',i
|
normal
|
{
"blob_id": "98a384392d0839ddf12f3374c05929bc5e32987b",
"index": 9242,
"step-1": "#coding=utf-8\ni=1\ns=0\nwhile s<=8848:\n\ts=s+(2**i)*0.2*10**(-3)\n\ti=i+1\nprint '对折次数:',i\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 23:54:17 2015
@author: rein
@license: MIT
@version: 0.1
"""
from __future__ import print_function
import numpy as np
import footballpy.processing.ragged_array as ra
""" Ranking dictionary necessary to determine the column number
of each player.
The type system depends on the type of the raw data.
Type A: Elaborate positioning scheme
Type B: Simple scheme
Type C: Amisco-scheme
"""
# Maps each tracking system's playing-position label to a numeric rank;
# lower rank = further left/back in the final position matrix (see the
# module note above).  Keyed first by scheme type ('A'/'B'/'C').
__position_ranking = {
    # Type A: elaborate positioning scheme (23 distinct labels)
    'A': {
        'TW':1, 'LV':2, 'IVL':3, 'IVZ':4, 'IVR':5, 'RV':6,
        'DML':7, 'DMZ':8, 'DMR':9,
        'LM':10, 'HL':11, 'MZ': 12, 'HR':13, 'RM':14,
        'OLM':15, 'ZO':16, 'ORM':17,
        'HST':18, 'LA':19, 'STL':20, 'STR':21, 'RA':22,
        'STZ':23
    },
    # Type B: simple single-letter scheme
    'B': {
        'G': 1, 'D': 2, 'M': 3, 'A': 4
    },
    # Type C: Amisco scheme (full-word labels)
    'C': {
        'goalie': 1, 'defenseman': 2, 'mid-fielder': 3,
        'forward': 4
    }
}
def sort_position_data(pos, type='A'):
    """Order player entries by their tactical position rank.

    The final position matrix must list players left-to-right and
    back-to-front, so the indexed ragged-array entries are sorted by the
    numeric rank assigned to each playing-position label.

    Args:
        pos: list of tuples; each tuple carries the position data and the
            playing-position label at index 2.
        type: key selecting the position-ranking scheme ('A' by default).
    Returns:
        A new list sorted by ascending position rank.
    """
    rank_table = __position_ranking[type]

    def _rank_of(entry):
        # entry[2] holds the playing-position label
        return rank_table[entry[2]]

    return sorted(pos, key=_rank_of)
def stitch_position_data(pos,ball,NO_PLAYERS=11):
    """Expands per-player ragged position data into one frame-aligned array.

    The ordering of the input list is preserved; callers that need players
    ordered by playing position must call sort_position_data first.

    Args:
        pos: position data list (indexed ragged array), one entry per player.
        ball: ball matrix for one half; column 0 holds the frame index.
        NO_PLAYERS: expected number of players per team, default = 11.
    Returns:
        output_fields: condensed 2D array with one row per ball frame.
    Raises:
        IndexError: if the ball frames are not one contiguous range.
    """
    # magic numbers
    _MISSING_ = -2.0**13    # sentinel marking absent samples during expansion
    _NO_DIM_ = 2 # x- and y-coordinates
    _POST_LOOK_ = 20    # NOTE(review): unused -- dead constant or unfinished feature?
    # end magic numbers
    
    frames = ball[:,0]
    min_frame = min(frames)
    max_frame = max(frames)
    no_frames = ball.shape[0]
    # The ball frames must form a gap-free range; otherwise the expanded
    # player arrays cannot be aligned frame-by-frame with the ball.
    if no_frames != (max_frame - min_frame + 1):
        raise IndexError("No of ball frames doesn't match")
    
    no_players_input = len(pos)    # NOTE(review): assigned but never used

    input_fields = ra.expand_indexed_ragged_array(pos, frames, 
            lambda x: x[1], _MISSING_)
    # NOTE(review): the cleaned array below is discarded -- condense is fed
    # the raw expansion instead. Confirm whether input_fields_clean should
    # be passed to condense_expanded_ragged_array (possible bug).
    input_fields_clean = ra.drop_expanded_ragged_entries(input_fields,NO_PLAYERS*_NO_DIM_,_MISSING_)
    output_fields = ra.condense_expanded_ragged_array(input_fields, missing_id = _MISSING_)
    
    return output_fields
def determine_playing_direction(goalie):
    """Infer a team's attacking direction from its goalie's mean x-position.

    Args:
        goalie: array of goalie x-y coordinates (x in column 0), in
            pitch-centred coordinates.
    Returns:
        'l2r' when the goalie's average x is negative (team plays left to
        right), otherwise 'r2l'.
    """
    mean_x = np.average(goalie[:, 0])
    if mean_x < 0:
        return 'l2r'
    return 'r2l'
def switch_playing_direction(position_coords):
    """Mirror all x-coordinates, flipping the playing direction in place.

    Coordinates are assumed to be centred on the pitch midpoint, with x
    values in the even columns and y values in the odd columns, so
    negating every even column swaps left-to-right for right-to-left.

    Args:
        position_coords: 2D array of interleaved x-y coordinates.
    Returns:
        None; the array is modified in place.
    """
    # negate every x column (even indices) in place
    position_coords[:, 0::2] = -position_coords[:, 0::2]
def rescale_playing_coords(position_coords, pitch_dim):
    """Shift the origin to the bottom-left corner and rescale to [0, 10].

    Input coordinates are centred on the pitch midpoint; after this call
    both axes run from 0 to 10 regardless of the physical pitch size.

    Args:
        position_coords: 2D array with x values in the even columns and
            y values in the odd columns; modified in place.
        pitch_dim: dict with the pitch 'length' and 'width'.
    Returns:
        None; the array is modified in place.
    """
    length = pitch_dim['length']
    width = pitch_dim['width']
    xs = position_coords[:, 0::2]   # view onto the x columns
    ys = position_coords[:, 1::2]   # view onto the y columns
    # translate so (0, 0) is the bottom-left corner, then normalise
    xs += length / 2
    xs *= 10.0 / length
    ys += width / 2
    ys *= 10.0 / width
def clamp_values(result, vmin=0.0, vmax=10.0):
    """Clip every position matrix in *result* to the range [vmin, vmax].

    Args:
        result: dict mapping entry names to lists of coordinate arrays.
        vmin: lower bound, default 0.0.
        vmax: upper bound, default 10.0.
    Returns:
        None; every array is clamped in place.
    """
    for matrices in result.values():
        for matrix in matrices:
            np.clip(matrix, vmin, vmax, out=matrix)
def run(pos_data,ball_data,match,ranking_type='A'):
    """Driver routine to run all processing steps.

    Sorts, stitches, orients and rescales the player position data for
    both halves and both teams, applies the matching transform to the
    ball data, and finally clamps everything to the [0, 10] pitch range.

    Args:
        pos_data: dict keyed by 'home'/'guest', then '1st'/'2nd', holding
            indexed ragged position arrays.
        ball_data: list with one ball matrix per half (frame, x, y, ...).
        match: match metadata; match['stadium'] supplies pitch dimensions.
        ranking_type: which position-ranking scheme to use (default 'A').
    Returns:
        dict with 'home', 'guest' and 'ball' keys, each a two-element list
        (first half, second half) of transformed coordinate arrays.
    """
    roles = ['home','guest']
    sections = ['1st','2nd']
    result = {'home':[0]*2, 'guest':[0]*2, 'ball':[0]*2}
    
    # switch for l2r switching mode: remembers which half (0 or 1) had the
    # home team playing left-to-right so the ball can be mirrored the same
    # way below.
    # NOTE(review): defaults to 0, so if no half is detected as 'l2r' the
    # first-half ball is mirrored anyway -- confirm this is intended.
    l2r_section = 0

    # processing player position data first 
    for sec in sections:
        home_direction = 'r2l'
        for role in roles:
            print('Processing: %s-%s...' % (role,sec))
            sorted_pos_data = sort_position_data(pos_data[role][sec], ranking_type)
            stitched_data = stitch_position_data(sorted_pos_data,ball_data[sec!='1st'])
            # Direction is determined from the home team's first (goalie)
            # columns only; 'home' is processed first, so the same flip is
            # then applied to the guest team for this half as well.
            if role == 'home':
                home_direction = determine_playing_direction(stitched_data[:,0:2])
            if home_direction == 'l2r':
                switch_playing_direction(stitched_data)
                l2r_section = 0 if sec=='1st' else 1
            rescale_playing_coords(stitched_data,match['stadium'])
            result[role][0 if sec=='1st' else 1] = stitched_data
            print('done')
    
    # processing ball data: mirror only the half where home played l2r,
    # then rescale both halves (columns 1:3 hold the ball's x-y).
    print('Processing ball...')
    switch_playing_direction(ball_data[l2r_section][:,1:3])
    for i in [0,1]:
        rescale_playing_coords(ball_data[i][:,1:3],match['stadium'])
    result['ball'][0] = ball_data[0][:,1:3]
    result['ball'][1] = ball_data[1][:,1:3]

    #correct value ranges.
    print('clamping values.')
    clamp_values(result)
    print('done.')
    return result
if __name__ == '__main__':
#teams, match, pos_data,ball_data
    # Ad-hoc debugging driver.
    # NOTE(review): pos_data, ball_data and match are not defined anywhere
    # in this module -- this block only works in an interactive session
    # where those names (see the commented list above) were loaded
    # beforehand. Running the file standalone raises NameError.
    section = '2nd'
    kk = pos_data['home'][section] 
    kks = sort_position_data(kk)
    bb = ball_data[section!='1st']
    ss = stitch_position_data(kks,bb)
    data_transformed = run(pos_data,ball_data,match)
|
normal
|
{
"blob_id": "81ae5bbc8e3e712ee4f54656bc28f385a0b4a29f",
"index": 6059,
"step-1": "<mask token>\n\n\ndef sort_position_data(pos, type='A'):\n \"\"\"Sorts the position data according to player positions.\n\n As the final matrix should contain the player according to their\n position starting from left to right from back to front the indexed\n ragged array list should be sorted such that the entries match\n this format.\n \n Args:\n pos: The list with tuples containing the position data and the\n playing position.\n type: The type of position rankings used by the tracking system. \n Type A is default.\n Returns: \n The sorted list.\n \"\"\"\n ranking_type = __position_ranking[type]\n return sorted(pos, key=lambda player: ranking_type[player[2]])\n\n\n<mask token>\n\n\ndef determine_playing_direction(goalie):\n \"\"\" Determines the teams' playing direction.\n \n Determines the playing direction using\n the average position of the goalie.\n\n Args:\n goalie: x-y position of goalie\n Returns:\n either 'l2r': left to right or 'r2l': right to left.\n \"\"\"\n return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'\n\n\n<mask token>\n\n\ndef rescale_playing_coords(position_coords, pitch_dim):\n \"\"\"Relocates the origin to left-bottom and rescales to [0,10] height/width.\n \n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords:\n pitch_dim:\n Returns:\n Nothing, the matrix coordinates are scaled in place.\n \"\"\"\n pitch_width = pitch_dim['width']\n pitch_length = pitch_dim['length']\n position_coords[:, 0::2] += pitch_length / 2\n position_coords[:, 1::2] += pitch_width / 2\n position_coords[:, 0::2] *= 10.0 / pitch_length\n position_coords[:, 1::2] *= 10.0 / pitch_width\n\n\ndef clamp_values(result, vmin=0.0, vmax=10.0):\n \"\"\"Clamps the position values to [0,10]\n\n Args:\n result:\n vmin: minimum value\n vmax = maximum value\n Returns:\n None. 
Matrix is clamped in place.\n \"\"\"\n for entry in result:\n for ht in result[entry]:\n ht[ht < vmin] = vmin\n ht[ht > vmax] = vmax\n\n\ndef run(pos_data, ball_data, match, ranking_type='A'):\n \"\"\"Driver routine to run all processing steps.\n \n Args:\n ranking_type: Specifies which postion_ranking system should be used.\n Returns:\n \"\"\"\n roles = ['home', 'guest']\n sections = ['1st', '2nd']\n result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}\n l2r_section = 0\n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' % (role, sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec],\n ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data, ball_data\n [sec != '1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[\n :, 0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec == '1st' else 1\n rescale_playing_coords(stitched_data, match['stadium'])\n result[role][0 if sec == '1st' else 1] = stitched_data\n print('done')\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:, 1:3])\n for i in [0, 1]:\n rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])\n result['ball'][0] = ball_data[0][:, 1:3]\n result['ball'][1] = ball_data[1][:, 1:3]\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sort_position_data(pos, type='A'):\n \"\"\"Sorts the position data according to player positions.\n\n As the final matrix should contain the player according to their\n position starting from left to right from back to front the indexed\n ragged array list should be sorted such that the entries match\n this format.\n \n Args:\n pos: The list with tuples containing the position data and the\n playing position.\n type: The type of position rankings used by the tracking system. \n Type A is default.\n Returns: \n The sorted list.\n \"\"\"\n ranking_type = __position_ranking[type]\n return sorted(pos, key=lambda player: ranking_type[player[2]])\n\n\ndef stitch_position_data(pos, ball, NO_PLAYERS=11):\n \"\"\"Puts position data into a single array.\n \n stitch_position_data does not change the ordering of the data and\n stitches the position data together as given. Therefore, if the playing\n position must be controlled sort_position_data must be called first.\n Args:\n pos: position data list (indexed ragged array)\n ball: list with two matrices (1st and 2nd half)\n NO_PLAYERS: default = 11\n Returns:\n output_fields: \n \"\"\"\n _MISSING_ = -2.0 ** 13\n _NO_DIM_ = 2\n _POST_LOOK_ = 20\n frames = ball[:, 0]\n min_frame = min(frames)\n max_frame = max(frames)\n no_frames = ball.shape[0]\n if no_frames != max_frame - min_frame + 1:\n raise IndexError(\"No of ball frames doesn't match\")\n no_players_input = len(pos)\n input_fields = ra.expand_indexed_ragged_array(pos, frames, lambda x: x[\n 1], _MISSING_)\n input_fields_clean = ra.drop_expanded_ragged_entries(input_fields, \n NO_PLAYERS * _NO_DIM_, _MISSING_)\n output_fields = ra.condense_expanded_ragged_array(input_fields,\n missing_id=_MISSING_)\n return output_fields\n\n\ndef determine_playing_direction(goalie):\n \"\"\" Determines the teams' playing direction.\n \n Determines the playing direction using\n the average position of the goalie.\n\n Args:\n goalie: x-y position of goalie\n 
Returns:\n either 'l2r': left to right or 'r2l': right to left.\n \"\"\"\n return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'\n\n\ndef switch_playing_direction(position_coords):\n \"\"\"Switches the position coordinates.\n \n Mirrors the position coordinates either from left to right or vice versa.\n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords: x-y position coordinates of the players.\n Returns:\n Nothing, the matrix coordinates are flipped in place.\n \"\"\"\n position_coords[:, 0::2] *= -1\n\n\ndef rescale_playing_coords(position_coords, pitch_dim):\n \"\"\"Relocates the origin to left-bottom and rescales to [0,10] height/width.\n \n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords:\n pitch_dim:\n Returns:\n Nothing, the matrix coordinates are scaled in place.\n \"\"\"\n pitch_width = pitch_dim['width']\n pitch_length = pitch_dim['length']\n position_coords[:, 0::2] += pitch_length / 2\n position_coords[:, 1::2] += pitch_width / 2\n position_coords[:, 0::2] *= 10.0 / pitch_length\n position_coords[:, 1::2] *= 10.0 / pitch_width\n\n\ndef clamp_values(result, vmin=0.0, vmax=10.0):\n \"\"\"Clamps the position values to [0,10]\n\n Args:\n result:\n vmin: minimum value\n vmax = maximum value\n Returns:\n None. 
Matrix is clamped in place.\n \"\"\"\n for entry in result:\n for ht in result[entry]:\n ht[ht < vmin] = vmin\n ht[ht > vmax] = vmax\n\n\ndef run(pos_data, ball_data, match, ranking_type='A'):\n \"\"\"Driver routine to run all processing steps.\n \n Args:\n ranking_type: Specifies which postion_ranking system should be used.\n Returns:\n \"\"\"\n roles = ['home', 'guest']\n sections = ['1st', '2nd']\n result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}\n l2r_section = 0\n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' % (role, sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec],\n ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data, ball_data\n [sec != '1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[\n :, 0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec == '1st' else 1\n rescale_playing_coords(stitched_data, match['stadium'])\n result[role][0 if sec == '1st' else 1] = stitched_data\n print('done')\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:, 1:3])\n for i in [0, 1]:\n rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])\n result['ball'][0] = ball_data[0][:, 1:3]\n result['ball'][1] = ball_data[1][:, 1:3]\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result\n\n\nif __name__ == '__main__':\n section = '2nd'\n kk = pos_data['home'][section]\n kks = sort_position_data(kk)\n bb = ball_data[section != '1st']\n ss = stitch_position_data(kks, bb)\n data_transformed = run(pos_data, ball_data, match)\n",
"step-3": "<mask token>\n__position_ranking = {'A': {'TW': 1, 'LV': 2, 'IVL': 3, 'IVZ': 4, 'IVR': 5,\n 'RV': 6, 'DML': 7, 'DMZ': 8, 'DMR': 9, 'LM': 10, 'HL': 11, 'MZ': 12,\n 'HR': 13, 'RM': 14, 'OLM': 15, 'ZO': 16, 'ORM': 17, 'HST': 18, 'LA': 19,\n 'STL': 20, 'STR': 21, 'RA': 22, 'STZ': 23}, 'B': {'G': 1, 'D': 2, 'M': \n 3, 'A': 4}, 'C': {'goalie': 1, 'defenseman': 2, 'mid-fielder': 3,\n 'forward': 4}}\n\n\ndef sort_position_data(pos, type='A'):\n \"\"\"Sorts the position data according to player positions.\n\n As the final matrix should contain the player according to their\n position starting from left to right from back to front the indexed\n ragged array list should be sorted such that the entries match\n this format.\n \n Args:\n pos: The list with tuples containing the position data and the\n playing position.\n type: The type of position rankings used by the tracking system. \n Type A is default.\n Returns: \n The sorted list.\n \"\"\"\n ranking_type = __position_ranking[type]\n return sorted(pos, key=lambda player: ranking_type[player[2]])\n\n\ndef stitch_position_data(pos, ball, NO_PLAYERS=11):\n \"\"\"Puts position data into a single array.\n \n stitch_position_data does not change the ordering of the data and\n stitches the position data together as given. 
Therefore, if the playing\n position must be controlled sort_position_data must be called first.\n Args:\n pos: position data list (indexed ragged array)\n ball: list with two matrices (1st and 2nd half)\n NO_PLAYERS: default = 11\n Returns:\n output_fields: \n \"\"\"\n _MISSING_ = -2.0 ** 13\n _NO_DIM_ = 2\n _POST_LOOK_ = 20\n frames = ball[:, 0]\n min_frame = min(frames)\n max_frame = max(frames)\n no_frames = ball.shape[0]\n if no_frames != max_frame - min_frame + 1:\n raise IndexError(\"No of ball frames doesn't match\")\n no_players_input = len(pos)\n input_fields = ra.expand_indexed_ragged_array(pos, frames, lambda x: x[\n 1], _MISSING_)\n input_fields_clean = ra.drop_expanded_ragged_entries(input_fields, \n NO_PLAYERS * _NO_DIM_, _MISSING_)\n output_fields = ra.condense_expanded_ragged_array(input_fields,\n missing_id=_MISSING_)\n return output_fields\n\n\ndef determine_playing_direction(goalie):\n \"\"\" Determines the teams' playing direction.\n \n Determines the playing direction using\n the average position of the goalie.\n\n Args:\n goalie: x-y position of goalie\n Returns:\n either 'l2r': left to right or 'r2l': right to left.\n \"\"\"\n return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'\n\n\ndef switch_playing_direction(position_coords):\n \"\"\"Switches the position coordinates.\n \n Mirrors the position coordinates either from left to right or vice versa.\n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords: x-y position coordinates of the players.\n Returns:\n Nothing, the matrix coordinates are flipped in place.\n \"\"\"\n position_coords[:, 0::2] *= -1\n\n\ndef rescale_playing_coords(position_coords, pitch_dim):\n \"\"\"Relocates the origin to left-bottom and rescales to [0,10] height/width.\n \n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n 
-----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords:\n pitch_dim:\n Returns:\n Nothing, the matrix coordinates are scaled in place.\n \"\"\"\n pitch_width = pitch_dim['width']\n pitch_length = pitch_dim['length']\n position_coords[:, 0::2] += pitch_length / 2\n position_coords[:, 1::2] += pitch_width / 2\n position_coords[:, 0::2] *= 10.0 / pitch_length\n position_coords[:, 1::2] *= 10.0 / pitch_width\n\n\ndef clamp_values(result, vmin=0.0, vmax=10.0):\n \"\"\"Clamps the position values to [0,10]\n\n Args:\n result:\n vmin: minimum value\n vmax = maximum value\n Returns:\n None. Matrix is clamped in place.\n \"\"\"\n for entry in result:\n for ht in result[entry]:\n ht[ht < vmin] = vmin\n ht[ht > vmax] = vmax\n\n\ndef run(pos_data, ball_data, match, ranking_type='A'):\n \"\"\"Driver routine to run all processing steps.\n \n Args:\n ranking_type: Specifies which postion_ranking system should be used.\n Returns:\n \"\"\"\n roles = ['home', 'guest']\n sections = ['1st', '2nd']\n result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}\n l2r_section = 0\n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' 
% (role, sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec],\n ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data, ball_data\n [sec != '1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[\n :, 0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec == '1st' else 1\n rescale_playing_coords(stitched_data, match['stadium'])\n result[role][0 if sec == '1st' else 1] = stitched_data\n print('done')\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:, 1:3])\n for i in [0, 1]:\n rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])\n result['ball'][0] = ball_data[0][:, 1:3]\n result['ball'][1] = ball_data[1][:, 1:3]\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result\n\n\nif __name__ == '__main__':\n section = '2nd'\n kk = pos_data['home'][section]\n kks = sort_position_data(kk)\n bb = ball_data[section != '1st']\n ss = stitch_position_data(kks, bb)\n data_transformed = run(pos_data, ball_data, match)\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nimport numpy as np\nimport footballpy.processing.ragged_array as ra\n<mask token>\n__position_ranking = {'A': {'TW': 1, 'LV': 2, 'IVL': 3, 'IVZ': 4, 'IVR': 5,\n 'RV': 6, 'DML': 7, 'DMZ': 8, 'DMR': 9, 'LM': 10, 'HL': 11, 'MZ': 12,\n 'HR': 13, 'RM': 14, 'OLM': 15, 'ZO': 16, 'ORM': 17, 'HST': 18, 'LA': 19,\n 'STL': 20, 'STR': 21, 'RA': 22, 'STZ': 23}, 'B': {'G': 1, 'D': 2, 'M': \n 3, 'A': 4}, 'C': {'goalie': 1, 'defenseman': 2, 'mid-fielder': 3,\n 'forward': 4}}\n\n\ndef sort_position_data(pos, type='A'):\n \"\"\"Sorts the position data according to player positions.\n\n As the final matrix should contain the player according to their\n position starting from left to right from back to front the indexed\n ragged array list should be sorted such that the entries match\n this format.\n \n Args:\n pos: The list with tuples containing the position data and the\n playing position.\n type: The type of position rankings used by the tracking system. \n Type A is default.\n Returns: \n The sorted list.\n \"\"\"\n ranking_type = __position_ranking[type]\n return sorted(pos, key=lambda player: ranking_type[player[2]])\n\n\ndef stitch_position_data(pos, ball, NO_PLAYERS=11):\n \"\"\"Puts position data into a single array.\n \n stitch_position_data does not change the ordering of the data and\n stitches the position data together as given. 
Therefore, if the playing\n position must be controlled sort_position_data must be called first.\n Args:\n pos: position data list (indexed ragged array)\n ball: list with two matrices (1st and 2nd half)\n NO_PLAYERS: default = 11\n Returns:\n output_fields: \n \"\"\"\n _MISSING_ = -2.0 ** 13\n _NO_DIM_ = 2\n _POST_LOOK_ = 20\n frames = ball[:, 0]\n min_frame = min(frames)\n max_frame = max(frames)\n no_frames = ball.shape[0]\n if no_frames != max_frame - min_frame + 1:\n raise IndexError(\"No of ball frames doesn't match\")\n no_players_input = len(pos)\n input_fields = ra.expand_indexed_ragged_array(pos, frames, lambda x: x[\n 1], _MISSING_)\n input_fields_clean = ra.drop_expanded_ragged_entries(input_fields, \n NO_PLAYERS * _NO_DIM_, _MISSING_)\n output_fields = ra.condense_expanded_ragged_array(input_fields,\n missing_id=_MISSING_)\n return output_fields\n\n\ndef determine_playing_direction(goalie):\n \"\"\" Determines the teams' playing direction.\n \n Determines the playing direction using\n the average position of the goalie.\n\n Args:\n goalie: x-y position of goalie\n Returns:\n either 'l2r': left to right or 'r2l': right to left.\n \"\"\"\n return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'\n\n\ndef switch_playing_direction(position_coords):\n \"\"\"Switches the position coordinates.\n \n Mirrors the position coordinates either from left to right or vice versa.\n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords: x-y position coordinates of the players.\n Returns:\n Nothing, the matrix coordinates are flipped in place.\n \"\"\"\n position_coords[:, 0::2] *= -1\n\n\ndef rescale_playing_coords(position_coords, pitch_dim):\n \"\"\"Relocates the origin to left-bottom and rescales to [0,10] height/width.\n \n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n 
-----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords:\n pitch_dim:\n Returns:\n Nothing, the matrix coordinates are scaled in place.\n \"\"\"\n pitch_width = pitch_dim['width']\n pitch_length = pitch_dim['length']\n position_coords[:, 0::2] += pitch_length / 2\n position_coords[:, 1::2] += pitch_width / 2\n position_coords[:, 0::2] *= 10.0 / pitch_length\n position_coords[:, 1::2] *= 10.0 / pitch_width\n\n\ndef clamp_values(result, vmin=0.0, vmax=10.0):\n \"\"\"Clamps the position values to [0,10]\n\n Args:\n result:\n vmin: minimum value\n vmax = maximum value\n Returns:\n None. Matrix is clamped in place.\n \"\"\"\n for entry in result:\n for ht in result[entry]:\n ht[ht < vmin] = vmin\n ht[ht > vmax] = vmax\n\n\ndef run(pos_data, ball_data, match, ranking_type='A'):\n \"\"\"Driver routine to run all processing steps.\n \n Args:\n ranking_type: Specifies which postion_ranking system should be used.\n Returns:\n \"\"\"\n roles = ['home', 'guest']\n sections = ['1st', '2nd']\n result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}\n l2r_section = 0\n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' 
% (role, sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec],\n ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data, ball_data\n [sec != '1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[\n :, 0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec == '1st' else 1\n rescale_playing_coords(stitched_data, match['stadium'])\n result[role][0 if sec == '1st' else 1] = stitched_data\n print('done')\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:, 1:3])\n for i in [0, 1]:\n rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])\n result['ball'][0] = ball_data[0][:, 1:3]\n result['ball'][1] = ball_data[1][:, 1:3]\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result\n\n\nif __name__ == '__main__':\n section = '2nd'\n kk = pos_data['home'][section]\n kks = sort_position_data(kk)\n bb = ball_data[section != '1st']\n ss = stitch_position_data(kks, bb)\n data_transformed = run(pos_data, ball_data, match)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 18 23:54:17 2015\n\n@author: rein\n@license: MIT\n@version: 0.1\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nimport footballpy.processing.ragged_array as ra\n\n\"\"\" Ranking dictionary necessary to determine the column number\n of each player.\n \n The type system depends on the type of the raw data.\n Type A: Elaborate positioning scheme\n Type B: Simple scheme\n Type C: Amisco-scheme\n\"\"\"\n__position_ranking = {\n 'A': {\n 'TW':1, 'LV':2, 'IVL':3, 'IVZ':4, 'IVR':5, 'RV':6,\n 'DML':7, 'DMZ':8, 'DMR':9,\n 'LM':10, 'HL':11, 'MZ': 12, 'HR':13, 'RM':14,\n 'OLM':15, 'ZO':16, 'ORM':17,\n 'HST':18, 'LA':19, 'STL':20, 'STR':21, 'RA':22,\n 'STZ':23\n },\n 'B': {\n 'G': 1, 'D': 2, 'M': 3, 'A': 4 \n },\n 'C': {\n 'goalie': 1, 'defenseman': 2, 'mid-fielder': 3, \n 'forward': 4\n }\n}\n\n\ndef sort_position_data(pos,type='A'):\n \"\"\"Sorts the position data according to player positions.\n\n As the final matrix should contain the player according to their\n position starting from left to right from back to front the indexed\n ragged array list should be sorted such that the entries match\n this format.\n \n Args:\n pos: The list with tuples containing the position data and the\n playing position.\n type: The type of position rankings used by the tracking system. \n Type A is default.\n Returns: \n The sorted list.\n \"\"\"\n ranking_type = __position_ranking[type]\n return sorted(pos,key=lambda player: ranking_type[player[2]])\n\n\ndef stitch_position_data(pos,ball,NO_PLAYERS=11):\n \"\"\"Puts position data into a single array.\n \n stitch_position_data does not change the ordering of the data and\n stitches the position data together as given. 
Therefore, if the playing\n position must be controlled sort_position_data must be called first.\n Args:\n pos: position data list (indexed ragged array)\n ball: list with two matrices (1st and 2nd half)\n NO_PLAYERS: default = 11\n Returns:\n output_fields: \n \"\"\"\n # magic numbers\n _MISSING_ = -2.0**13\n _NO_DIM_ = 2 # x- and y-coordinates\n _POST_LOOK_ = 20\n # end magic numbers\n \n frames = ball[:,0]\n min_frame = min(frames)\n max_frame = max(frames)\n no_frames = ball.shape[0]\n if no_frames != (max_frame - min_frame + 1):\n raise IndexError(\"No of ball frames doesn't match\")\n \n no_players_input = len(pos)\n\n input_fields = ra.expand_indexed_ragged_array(pos, frames, \n lambda x: x[1], _MISSING_)\n input_fields_clean = ra.drop_expanded_ragged_entries(input_fields,NO_PLAYERS*_NO_DIM_,_MISSING_)\n output_fields = ra.condense_expanded_ragged_array(input_fields, missing_id = _MISSING_)\n \n return output_fields\n\n\ndef determine_playing_direction(goalie):\n \"\"\" Determines the teams' playing direction.\n \n Determines the playing direction using\n the average position of the goalie.\n\n Args:\n goalie: x-y position of goalie\n Returns:\n either 'l2r': left to right or 'r2l': right to left.\n \"\"\"\n return 'l2r' if np.average(goalie[:,0]) < 0 else 'r2l'\n\n\ndef switch_playing_direction(position_coords):\n \"\"\"Switches the position coordinates.\n \n Mirrors the position coordinates either from left to right or vice versa.\n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords: x-y position coordinates of the players.\n Returns:\n Nothing, the matrix coordinates are flipped in place.\n \"\"\"\n # just mirrors the x-coordinate in place\n position_coords[:,0::2] *= -1\n\n\ndef rescale_playing_coords(position_coords,pitch_dim):\n \"\"\"Relocates the origin to left-bottom and rescales to [0,10] 
height/width.\n \n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords:\n pitch_dim:\n Returns:\n Nothing, the matrix coordinates are scaled in place.\n \"\"\"\n pitch_width = pitch_dim['width']\n pitch_length = pitch_dim['length']\n # translate to bottom-left corner\n position_coords[:,0::2] += pitch_length/2 # x-coordinates\n position_coords[:,1::2] += pitch_width/2 # y-coordinates\n # rescale to [0,10]\n position_coords[:,0::2] *= 10.0/pitch_length # x-coordinates\n position_coords[:,1::2] *= 10.0/pitch_width # y-coordinates\n\n\ndef clamp_values(result,vmin=0.0, vmax=10.0):\n \"\"\"Clamps the position values to [0,10]\n\n Args:\n result:\n vmin: minimum value\n vmax = maximum value\n Returns:\n None. Matrix is clamped in place.\n \"\"\"\n for entry in result:\n for ht in result[entry]:\n ht[ht<vmin] = vmin\n ht[ht>vmax] = vmax\n\n\ndef run(pos_data,ball_data,match,ranking_type='A'):\n \"\"\"Driver routine to run all processing steps.\n \n Args:\n ranking_type: Specifies which postion_ranking system should be used.\n Returns:\n \"\"\"\n roles = ['home','guest']\n sections = ['1st','2nd']\n result = {'home':[0]*2, 'guest':[0]*2, 'ball':[0]*2}\n \n # switch for l2r switching mode\n l2r_section = 0\n\n # processing player position data first \n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' 
% (role,sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec], ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data,ball_data[sec!='1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[:,0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec=='1st' else 1\n rescale_playing_coords(stitched_data,match['stadium'])\n result[role][0 if sec=='1st' else 1] = stitched_data\n print('done')\n \n # processing ball data\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:,1:3])\n for i in [0,1]:\n rescale_playing_coords(ball_data[i][:,1:3],match['stadium'])\n result['ball'][0] = ball_data[0][:,1:3]\n result['ball'][1] = ball_data[1][:,1:3]\n\n #correct value ranges.\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result\n \n \nif __name__ == '__main__':\n#teams, match, pos_data,ball_data\n section = '2nd'\n kk = pos_data['home'][section] \n kks = sort_position_data(kk)\n bb = ball_data[section!='1st']\n ss = stitch_position_data(kks,bb)\n data_transformed = run(pos_data,ball_data,match)\n",
"step-ids": [
5,
8,
9,
10,
11
]
}
|
[
5,
8,
9,
10,
11
] |
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
class SimpleSwitch(app_manager.RyuApp):
    """OpenFlow 1.0 L2 learning switch.

    Learns source-MAC -> ingress-port mappings per datapath, installs an
    exact-match flow once the destination port is known, and floods otherwise.
    """
    # Negotiate OpenFlow 1.0 only; the OFPMatch/OFPFlowMod fields used below
    # (dl_src/dl_dst as binary MACs) are 1.0-specific.
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch, self).__init__(*args, **kwargs)
        # dpid -> {mac address -> switch port}: learned forwarding table
        self.mac_to_port = {}

    def add_flow(self, datapath, in_port, dst, src, actions):
        """Install a flow matching (in_port, dl_src, dl_dst) with *actions*."""
        ofproto = datapath.ofproto

        match = datapath.ofproto_parser.OFPMatch(
            in_port=in_port,
            dl_dst=haddr_to_bin(dst), dl_src=haddr_to_bin(src))

        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=ofproto.OFP_DEFAULT_PRIORITY,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
        # Push the flow-mod down to the switch (was a TODO placeholder).
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Handle packet-in: learn the source MAC, then forward (or flood)."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocol(ethernet.ethernet)

        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        if eth.ethertype == ether_types.ETH_TYPE_IPV6:
            # ignore ipv6 packet
            return

        dst = eth.dst
        src = eth.src
        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in DPID:%s MAC_SRC:%s MAC_DST:%s IN_PORT:%s", dpid, src, dst, msg.in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = msg.in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        # Output action towards the chosen port (was a TODO placeholder;
        # `actions` was previously undefined, crashing every packet-in).
        actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            self.logger.info("add flow s:DPID:%s Match:[ MAC_SRC:%s MAC_DST:%s IN_PORT:%s ], Action:[OUT_PUT:%s] ", dpid, src, dst, msg.in_port, out_port)
            self.add_flow(datapath, msg.in_port, dst, src, actions)

        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data

        # Packet-out so the triggering packet itself is forwarded (was a TODO;
        # the stray class-level print used to run once at import time instead).
        out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath, buffer_id=msg.buffer_id, in_port=msg.in_port,
            actions=actions, data=data)
        datapath.send_msg(out)
        print("PACKET_OUT...")
|
normal
|
{
"blob_id": "86d032a3cd67118eb46073c996f1c9a391f8dfe0",
"index": 1608,
"step-1": "<mask token>\n\n\nclass SimpleSwitch(app_manager.RyuApp):\n <mask token>\n <mask token>\n <mask token>\n print('PACKET_OUT...')\n",
"step-2": "<mask token>\n\n\nclass SimpleSwitch(app_manager.RyuApp):\n\n def __init__(self, *args, **kwargs):\n super(SimpleSwitch, self).__init__(*args, **kwargs)\n self.mac_to_port = {}\n <mask token>\n\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def _packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n ofproto = datapath.ofproto\n pkt = packet.Packet(msg.data)\n eth = pkt.get_protocol(ethernet.ethernet)\n if eth.ethertype == ether_types.ETH_TYPE_LLDP:\n return\n if eth.ethertype == ether_types.ETH_TYPE_IPV6:\n return\n dst = eth.dst\n src = eth.src\n dpid = datapath.id\n self.mac_to_port.setdefault(dpid, {})\n self.logger.info('packet in DPID:%s MAC_SRC:%s MAC_DST:%s IN_PORT:%s',\n dpid, src, dst, msg.in_port)\n self.mac_to_port[dpid][src] = msg.in_port\n if dst in self.mac_to_port[dpid]:\n out_port = self.mac_to_port[dpid][dst]\n else:\n out_port = ofproto.OFPP_FLOOD\n if out_port != ofproto.OFPP_FLOOD:\n self.logger.info(\n 'add flow s:DPID:%s Match:[ MAC_SRC:%s MAC_DST:%s IN_PORT:%s ], Action:[OUT_PUT:%s] '\n , dpid, src, dst, msg.in_port, out_port)\n self.add_flow(datapath, msg.in_port, dst, src, actions)\n data = None\n if msg.buffer_id == ofproto.OFP_NO_BUFFER:\n data = msg.data\n print('PACKET_OUT...')\n",
"step-3": "<mask token>\n\n\nclass SimpleSwitch(app_manager.RyuApp):\n\n def __init__(self, *args, **kwargs):\n super(SimpleSwitch, self).__init__(*args, **kwargs)\n self.mac_to_port = {}\n\n def add_flow(self, datapath, in_port, dst, src, actions):\n ofproto = datapath.ofproto\n match = datapath.ofproto_parser.OFPMatch(in_port=in_port, dl_dst=\n haddr_to_bin(dst), dl_src=haddr_to_bin(src))\n mod = datapath.ofproto_parser.OFPFlowMod(datapath=datapath, match=\n match, cookie=0, command=ofproto.OFPFC_ADD, idle_timeout=0,\n hard_timeout=0, priority=ofproto.OFP_DEFAULT_PRIORITY, flags=\n ofproto.OFPFF_SEND_FLOW_REM, actions=actions)\n\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def _packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n ofproto = datapath.ofproto\n pkt = packet.Packet(msg.data)\n eth = pkt.get_protocol(ethernet.ethernet)\n if eth.ethertype == ether_types.ETH_TYPE_LLDP:\n return\n if eth.ethertype == ether_types.ETH_TYPE_IPV6:\n return\n dst = eth.dst\n src = eth.src\n dpid = datapath.id\n self.mac_to_port.setdefault(dpid, {})\n self.logger.info('packet in DPID:%s MAC_SRC:%s MAC_DST:%s IN_PORT:%s',\n dpid, src, dst, msg.in_port)\n self.mac_to_port[dpid][src] = msg.in_port\n if dst in self.mac_to_port[dpid]:\n out_port = self.mac_to_port[dpid][dst]\n else:\n out_port = ofproto.OFPP_FLOOD\n if out_port != ofproto.OFPP_FLOOD:\n self.logger.info(\n 'add flow s:DPID:%s Match:[ MAC_SRC:%s MAC_DST:%s IN_PORT:%s ], Action:[OUT_PUT:%s] '\n , dpid, src, dst, msg.in_port, out_port)\n self.add_flow(datapath, msg.in_port, dst, src, actions)\n data = None\n if msg.buffer_id == ofproto.OFP_NO_BUFFER:\n data = msg.data\n print('PACKET_OUT...')\n",
"step-4": "from ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import MAIN_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.ofproto import ofproto_v1_0\nfrom ryu.lib.mac import haddr_to_bin\nfrom ryu.lib.packet import packet\nfrom ryu.lib.packet import ethernet\nfrom ryu.lib.packet import ether_types\n\n\nclass SimpleSwitch(app_manager.RyuApp):\n\n def __init__(self, *args, **kwargs):\n super(SimpleSwitch, self).__init__(*args, **kwargs)\n self.mac_to_port = {}\n\n def add_flow(self, datapath, in_port, dst, src, actions):\n ofproto = datapath.ofproto\n match = datapath.ofproto_parser.OFPMatch(in_port=in_port, dl_dst=\n haddr_to_bin(dst), dl_src=haddr_to_bin(src))\n mod = datapath.ofproto_parser.OFPFlowMod(datapath=datapath, match=\n match, cookie=0, command=ofproto.OFPFC_ADD, idle_timeout=0,\n hard_timeout=0, priority=ofproto.OFP_DEFAULT_PRIORITY, flags=\n ofproto.OFPFF_SEND_FLOW_REM, actions=actions)\n\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def _packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n ofproto = datapath.ofproto\n pkt = packet.Packet(msg.data)\n eth = pkt.get_protocol(ethernet.ethernet)\n if eth.ethertype == ether_types.ETH_TYPE_LLDP:\n return\n if eth.ethertype == ether_types.ETH_TYPE_IPV6:\n return\n dst = eth.dst\n src = eth.src\n dpid = datapath.id\n self.mac_to_port.setdefault(dpid, {})\n self.logger.info('packet in DPID:%s MAC_SRC:%s MAC_DST:%s IN_PORT:%s',\n dpid, src, dst, msg.in_port)\n self.mac_to_port[dpid][src] = msg.in_port\n if dst in self.mac_to_port[dpid]:\n out_port = self.mac_to_port[dpid][dst]\n else:\n out_port = ofproto.OFPP_FLOOD\n if out_port != ofproto.OFPP_FLOOD:\n self.logger.info(\n 'add flow s:DPID:%s Match:[ MAC_SRC:%s MAC_DST:%s IN_PORT:%s ], Action:[OUT_PUT:%s] '\n , dpid, src, dst, msg.in_port, out_port)\n self.add_flow(datapath, msg.in_port, dst, src, actions)\n data = None\n if msg.buffer_id == 
ofproto.OFP_NO_BUFFER:\n data = msg.data\n print('PACKET_OUT...')\n",
"step-5": "from ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import MAIN_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.ofproto import ofproto_v1_0\n\nfrom ryu.lib.mac import haddr_to_bin\nfrom ryu.lib.packet import packet\nfrom ryu.lib.packet import ethernet\nfrom ryu.lib.packet import ether_types\n\n\nclass SimpleSwitch(app_manager.RyuApp):\n\t# TODO define OpenFlow 1.0 version for the switch\n\t# add your code here\n\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(SimpleSwitch, self).__init__(*args, **kwargs)\n\t\tself.mac_to_port = {}\n \n \n\tdef add_flow(self, datapath, in_port, dst, src, actions):\n\t\tofproto = datapath.ofproto\n\n\t\tmatch = datapath.ofproto_parser.OFPMatch(\n in_port=in_port,\n dl_dst=haddr_to_bin(dst), dl_src=haddr_to_bin(src))\n\n\t\tmod = datapath.ofproto_parser.OFPFlowMod(\n datapath=datapath, match=match, cookie=0,\n command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,\n priority=ofproto.OFP_DEFAULT_PRIORITY,\n flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)\n\t\t# TODO send modified message out\n\t\t# add your code here\n\n\t@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n\tdef _packet_in_handler(self, ev):\n\t\tmsg = ev.msg\n\t\tdatapath = msg.datapath\n\t\tofproto = datapath.ofproto\n\n\t\tpkt = packet.Packet(msg.data)\n\t\teth = pkt.get_protocol(ethernet.ethernet)\n\n\t\tif eth.ethertype == ether_types.ETH_TYPE_LLDP:\n\t\t\t# ignore lldp packet\n\t\t\treturn\n\t\tif eth.ethertype == ether_types.ETH_TYPE_IPV6:\n\t\t\t# ignore ipv6 packet\n\t\t\treturn \n\t\t\n\t\tdst = eth.dst\n\t\tsrc = eth.src\n\t\tdpid = datapath.id\n\t\tself.mac_to_port.setdefault(dpid, {})\n\n\t\tself.logger.info(\"packet in DPID:%s MAC_SRC:%s MAC_DST:%s IN_PORT:%s\", dpid, src, dst, msg.in_port)\n\n\t\t# learn a mac address to avoid FLOOD next time.\n\t\tself.mac_to_port[dpid][src] = msg.in_port\n\n\t\tif dst in self.mac_to_port[dpid]:\n\t\t\tout_port = 
self.mac_to_port[dpid][dst]\n\t\telse:\n\t\t\tout_port = ofproto.OFPP_FLOOD\n\n\t\t# TODO define the action for output\n\t\t# add your code here\n\n\n # install a flow to avoid packet_in next time\n\t\tif out_port != ofproto.OFPP_FLOOD:\n\t\t\tself.logger.info(\"add flow s:DPID:%s Match:[ MAC_SRC:%s MAC_DST:%s IN_PORT:%s ], Action:[OUT_PUT:%s] \", dpid, src, dst, msg.in_port, out_port)\n\t\t\tself.add_flow(datapath, msg.in_port, dst, src, actions)\n\n\t\tdata = None\n\t\tif msg.buffer_id == ofproto.OFP_NO_BUFFER:\n\t\t\tdata = msg.data\n \n\n\t\t# TODO define the OpenFlow Packet Out\n\t\t# add your code here\n\n\tprint (\"PACKET_OUT...\")\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
def fibonacci(n):
    """Return the nth Fibonacci number, 0-indexed (0, 1, 1, 2, 3, 5, ...).

    n must be an integer greater than or equal to 0.
    """
    # Sliding window over the last two terms of the sequence.
    pair = [0, 1]
    # Positions 0 and 1 are the seed values themselves.
    if n <= 1:
        return pair[n]
    # The seed covers two positions, so n-1 more steps reach position n.
    for _ in range(n - 1):
        pair = [pair[1], pair[0] + pair[1]]
    return pair[1]
def lucas(n):
    """Return the nth Lucas number, 0-indexed (2, 1, 3, 4, 7, 11, ...).

    n must be an integer greater than or equal to 0.
    """
    # Sliding window over the last two terms; Lucas seeds are 2 and 1.
    pair = [2, 1]
    # Positions 0 and 1 are the seed values themselves.
    if n <= 1:
        return pair[n]
    # The seed covers two positions, so n-1 more steps reach position n.
    for _ in range(n - 1):
        pair = [pair[1], pair[0] + pair[1]]
    return pair[1]
def sum_series(n, x=0, y=1):
    """Return the nth term (0-indexed) of a Fibonacci-style sequence.

    The optional arguments select which sequence is used:

    * ``(n, 0, 1)`` -- Fibonacci sequence (0, 1, 1, 2, 3, ...)
    * ``(n, 2, 1)`` -- Lucas sequence (2, 1, 3, 4, 7, ...)
    * ``(n, 3, 2)`` -- "Foo" sequence (3, 2, 5, 7, 12, ...)

    Any other combination (including no optional arguments) falls back to
    the Fibonacci sequence, matching the original behaviour.

    Fixes vs. the previous version: the docstring wrongly advertised
    ``(n, 3, 1)`` for the Foo sequence while the code required ``(3, 2)``,
    and all three sequences were computed on every call even though only
    one result was returned.
    """
    # Pick the seed pair for the requested sequence; only that one is computed.
    if x == 2 and y == 1:
        pair = [2, 1]   # Lucas
    elif x == 3 and y == 2:
        pair = [3, 2]   # Foo
    else:
        pair = [0, 1]   # Fibonacci (default and fallback)

    # Positions 0 and 1 are the seed values themselves.
    if n <= 1:
        return pair[n]
    # The seed covers two positions, so n-1 more steps reach position n.
    for _ in range(n - 1):
        pair = [pair[1], pair[0] + pair[1]]
    return pair[1]
|
normal
|
{
"blob_id": "ca75e23d91eef8a5c5b78c0ea7c903b80640af25",
"index": 7957,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sum_series(n, x=0, y=1):\n \"\"\"sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. \n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.\"\"\"\n fib = [0, 1]\n if n <= 1:\n fibnum = fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n fibnum = fib[1]\n luke = [2, 1]\n if n <= 1:\n lukenum = luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n lukenum = luke[1]\n foo = [3, 2]\n if n <= 1:\n foonum = foo[n]\n else:\n for i in range(n - 1):\n nextnum = foo[0] + foo[1]\n foo = [foo[1], nextnum]\n foonum = foo[1]\n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x == 3 and y == 2:\n return foonum\n else:\n return fibnum\n",
"step-3": "<mask token>\n\n\ndef lucas(n):\n \"\"\"returns the nth number of the Lucas\n sequence. where the first position is indexed at 0\n n must be an iteger greater than or equal to 0\"\"\"\n luke = [2, 1]\n if n <= 1:\n return luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n return luke[1]\n\n\ndef sum_series(n, x=0, y=1):\n \"\"\"sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. \n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.\"\"\"\n fib = [0, 1]\n if n <= 1:\n fibnum = fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n fibnum = fib[1]\n luke = [2, 1]\n if n <= 1:\n lukenum = luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n lukenum = luke[1]\n foo = [3, 2]\n if n <= 1:\n foonum = foo[n]\n else:\n for i in range(n - 1):\n nextnum = foo[0] + foo[1]\n foo = [foo[1], nextnum]\n foonum = foo[1]\n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x == 3 and y == 2:\n return foonum\n else:\n return fibnum\n",
"step-4": "def fibonacci(n):\n \"\"\"returns the nth number of the Fibonacci\n sequence. where the first position is indexed at 0.\n n must be an iteger greater than or equal to 0\"\"\"\n fib = [0, 1]\n if n <= 1:\n return fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n return fib[1]\n\n\ndef lucas(n):\n \"\"\"returns the nth number of the Lucas\n sequence. where the first position is indexed at 0\n n must be an iteger greater than or equal to 0\"\"\"\n luke = [2, 1]\n if n <= 1:\n return luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n return luke[1]\n\n\ndef sum_series(n, x=0, y=1):\n \"\"\"sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. \n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.\"\"\"\n fib = [0, 1]\n if n <= 1:\n fibnum = fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n fibnum = fib[1]\n luke = [2, 1]\n if n <= 1:\n lukenum = luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n lukenum = luke[1]\n foo = [3, 2]\n if n <= 1:\n foonum = foo[n]\n else:\n for i in range(n - 1):\n nextnum = foo[0] + foo[1]\n foo = [foo[1], nextnum]\n foonum = foo[1]\n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x == 3 and y == 2:\n return foonum\n else:\n return fibnum\n",
"step-5": "def fibonacci(n):\n '''returns the nth number of the Fibonacci\n sequence. where the first position is indexed at 0.\n n must be an iteger greater than or equal to 0'''\n #these are the first two numbers in the sequence.\n fib = [0,1]\n #If the users enters a number less than 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n return fib[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = fib[0] + fib[1]\n #shift all the numbers in the list one position to the left.\n fib = [fib[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n return fib[1]\n \ndef lucas(n):\n '''returns the nth number of the Lucas\n sequence. where the first position is indexed at 0\n n must be an iteger greater than or equal to 0'''\n #these are the first two numbers in the Lucas sequence.\n luke = [2,1]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n return luke[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = luke[0] + luke[1]\n #shift all the numbers in the list one position to the left.\n luke = [luke[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n return luke[1]\n \n\n\ndef sum_series(n, x = 0, y = 1):\n\n '''sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. 
\n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.'''\n \n ###Fibonacci sequence calculator....\n #these are the first two numbers in the sequence.\n fib = [0,1]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n fibnum = fib[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = fib[0] + fib[1]\n #shift all the numbers in the list one position to the left.\n fib = [fib[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n fibnum = fib[1] \n ###Lucas sequence calculator...\n #these are the first two numbers in the Lucas sequence.\n luke = [2,1]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n lukenum = luke[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = luke[0] + luke[1]\n #shift all the numbers in the list one position to the left.\n luke = [luke[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. 
\n lukenum = luke[1] \n \n ###Foo sequence\n #these are the first two numbers in the foo sequence.\n foo = [3,2]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n foonum = foo[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = foo[0] + foo[1]\n #shift all the numbers in the list one position to the left.\n foo = [foo[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n foonum = foo[1] \n \n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x==3 and y ==2:\n return foonum\n else:\n return fibnum",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
import json
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.types import VARCHAR,INT,FLOAT,BIGINT
import time
from tqdm import tqdm
# Database connection settings (MySQL via PyMySQL, utf8mb4 charset).
# NOTE(review): credentials are hard-coded — move to config/env for real use.
connect_info = 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4'
engine = create_engine(connect_info) 
sql = '''
    select * from smzdm;
    '''
# Load the scraped comments from the database into a DataFrame.
df = pd.read_sql_query(sql, engine)
# Drop comments shorter than 5 characters (too short to analyse meaningfully).
df_new = df[df['comment'].str.len()>=5]
# Baidu sentiment-analysis API: exchange client credentials for an access
# token. The client_id/client_secret below are placeholders ("你的..." means
# "your ...") and must be filled in with real credentials.
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret'
response = requests.get(host)
if response:
    print(response.json())
access_token = response.json()['access_token']
url = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='+access_token
print(url)
headers={'Content-Type':'application/json'}

# Sentiment-analysis helper function
def sentiment(text):
    """Return the Baidu NLP sentiment score for *text*, or None on failure.

    Posts the text to the sentiment_classify endpoint (module-level ``url``
    and ``headers``) and extracts ``items[0]['sentiment']`` from the JSON
    response.

    Fixes vs. the previous version: on a failed request ``dic`` was never
    assigned, so the trailing ``return dic[...]`` raised ``NameError`` and
    masked the real error; the response indexing was also outside the
    ``try`` block. Now any failure is reported and ``None`` is returned so
    ``progress_apply`` can continue (shows up as NaN in the column).
    """
    body = {'text': text}
    score = None
    try:
        r = requests.post(url, headers=headers, data=json.dumps(body))
        dic = r.json()
        score = dic['items'][0]['sentiment']
    except Exception:
        print('分析失败')
    # Throttle requests to stay under the API's QPS quota.
    time.sleep(0.3)
    return score
# Enable tqdm's progress_apply on pandas objects.
tqdm.pandas()
df_new_senti = df_new.copy()
# Score every comment via the Baidu API, with a tqdm progress bar.
df_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment)
df_new_senti.sort_values(by='author',inplace=True)
df_new_senti['id']=df_new_senti.index
# Persist the scored comments back to the database (replaces the table).
df_new_senti.to_sql(name = 'smzdm_senti',con = engine,if_exists = 'replace',index = False,dtype = {'id':BIGINT,'author': VARCHAR(length=255),'comment':VARCHAR(length=255),'sentiment':FLOAT(12,10)})
|
normal
|
{
"blob_id": "a95e64877a1fc9f8109f1293b4ae9176f4f64647",
"index": 3090,
"step-1": "<mask token>\n\n\ndef sentiment(text):\n global url\n global headers\n body = {'text': text}\n try:\n r = requests.post(url, headers=headers, data=json.dumps(body))\n dic = r.json()\n except Exception as e:\n print('分析失败')\n pass\n time.sleep(0.3)\n return dic['items'][0]['sentiment']\n\n\n<mask token>\n",
"step-2": "<mask token>\nif response:\n print(response.json())\n<mask token>\nprint(url)\n<mask token>\n\n\ndef sentiment(text):\n global url\n global headers\n body = {'text': text}\n try:\n r = requests.post(url, headers=headers, data=json.dumps(body))\n dic = r.json()\n except Exception as e:\n print('分析失败')\n pass\n time.sleep(0.3)\n return dic['items'][0]['sentiment']\n\n\ntqdm.pandas()\n<mask token>\ndf_new_senti.sort_values(by='author', inplace=True)\n<mask token>\ndf_new_senti.to_sql(name='smzdm_senti', con=engine, if_exists='replace',\n index=False, dtype={'id': BIGINT, 'author': VARCHAR(length=255),\n 'comment': VARCHAR(length=255), 'sentiment': FLOAT(12, 10)})\n",
"step-3": "<mask token>\nconnect_info = (\n 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4')\nengine = create_engine(connect_info)\nsql = \"\"\"\n select * from smzdm;\n \"\"\"\ndf = pd.read_sql_query(sql, engine)\ndf_new = df[df['comment'].str.len() >= 5]\nhost = (\n 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret'\n )\nresponse = requests.get(host)\nif response:\n print(response.json())\naccess_token = response.json()['access_token']\nurl = (\n 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='\n + access_token)\nprint(url)\nheaders = {'Content-Type': 'application/json'}\n\n\ndef sentiment(text):\n global url\n global headers\n body = {'text': text}\n try:\n r = requests.post(url, headers=headers, data=json.dumps(body))\n dic = r.json()\n except Exception as e:\n print('分析失败')\n pass\n time.sleep(0.3)\n return dic['items'][0]['sentiment']\n\n\ntqdm.pandas()\ndf_new_senti = df_new.copy()\ndf_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment)\ndf_new_senti.sort_values(by='author', inplace=True)\ndf_new_senti['id'] = df_new_senti.index\ndf_new_senti.to_sql(name='smzdm_senti', con=engine, if_exists='replace',\n index=False, dtype={'id': BIGINT, 'author': VARCHAR(length=255),\n 'comment': VARCHAR(length=255), 'sentiment': FLOAT(12, 10)})\n",
"step-4": "import requests\nimport json\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.types import VARCHAR, INT, FLOAT, BIGINT\nimport time\nfrom tqdm import tqdm\nconnect_info = (\n 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4')\nengine = create_engine(connect_info)\nsql = \"\"\"\n select * from smzdm;\n \"\"\"\ndf = pd.read_sql_query(sql, engine)\ndf_new = df[df['comment'].str.len() >= 5]\nhost = (\n 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret'\n )\nresponse = requests.get(host)\nif response:\n print(response.json())\naccess_token = response.json()['access_token']\nurl = (\n 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='\n + access_token)\nprint(url)\nheaders = {'Content-Type': 'application/json'}\n\n\ndef sentiment(text):\n global url\n global headers\n body = {'text': text}\n try:\n r = requests.post(url, headers=headers, data=json.dumps(body))\n dic = r.json()\n except Exception as e:\n print('分析失败')\n pass\n time.sleep(0.3)\n return dic['items'][0]['sentiment']\n\n\ntqdm.pandas()\ndf_new_senti = df_new.copy()\ndf_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment)\ndf_new_senti.sort_values(by='author', inplace=True)\ndf_new_senti['id'] = df_new_senti.index\ndf_new_senti.to_sql(name='smzdm_senti', con=engine, if_exists='replace',\n index=False, dtype={'id': BIGINT, 'author': VARCHAR(length=255),\n 'comment': VARCHAR(length=255), 'sentiment': FLOAT(12, 10)})\n",
"step-5": "import requests \nimport json\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.types import VARCHAR,INT,FLOAT,BIGINT\nimport time\nfrom tqdm import tqdm\n#数据库联接设置\nconnect_info = 'mysql+pymysql://root:rootroot@localhost:3306/db1?charset=UTF8MB4'\nengine = create_engine(connect_info) \nsql = '''\n select * from smzdm;\n '''\n#从数据库中读取数据\ndf = pd.read_sql_query(sql, engine)\n#排除字数小于5的评论\ndf_new = df[df['comment'].str.len()>=5]\n#设置百度情感分析api\nhost = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=你的client_id&client_secret=你的client_secret'\nresponse = requests.get(host)\nif response:\n print(response.json())\naccess_token = response.json()['access_token']\nurl = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='+access_token\nprint(url)\nheaders={'Content-Type':'application/json'}\n\n#情感分析函数\ndef sentiment(text):\n global url\n global headers\n body={'text':text}\n try:\n r = requests.post(url,headers = headers,data=json.dumps(body))\n dic=r.json()\n except Exception as e:\n print('分析失败')\n pass\n time.sleep(0.3)#设置分析频率,不设置引发QPS超限额错误\n return dic['items'][0]['sentiment']\n\ntqdm.pandas()\ndf_new_senti = df_new.copy()\ndf_new_senti['sentiment'] = df_new['comment'].progress_apply(sentiment)#使用tqdm进度条\ndf_new_senti.sort_values(by='author',inplace=True)\ndf_new_senti['id']=df_new_senti.index\n#保存到数据库\ndf_new_senti.to_sql(name = 'smzdm_senti',con = engine,if_exists = 'replace',index = False,dtype = {'id':BIGINT,'author': VARCHAR(length=255),'comment':VARCHAR(length=255),'sentiment':FLOAT(12,10)})",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from admin_tools.dashboard.modules import DashboardModule
from nodes.models import Node
from slices.models import Slice
class MyThingsDashboardModule(DashboardModule):
    """
    Controller dashboard module to provide an overview to
    the user of the nodes and slices of its groups.
    """
    title = "My Things"
    template = "dashboard/modules/mythings.html"
    # Safe default so is_empty() works even if init_with_context() was never
    # called (previously that raised AttributeError on the missing attribute).
    has_data = False

    def init_with_context(self, context):
        """Fill *context* with the user's slices and per-group node counts."""
        user = context['request'].user
        # Get user slices: those belonging to any of the user's groups.
        slices = Slice.objects.filter(
            group__in=user.groups.all().values_list('pk', flat=True))
        context['slices'] = slices
        # Get user nodes: per group, node counts for each state,
        # in the same order as nodes_states.
        nodes = {}
        nodes_states = ['offline', 'safe', 'production']
        for group in user.groups.all():
            qs_nodes = Node.objects.filter(group=group)
            nodes[group] = [
                qs_nodes.filter(state_set__value=state).count()
                for state in nodes_states
            ]
        context['nodes_states'] = nodes_states
        context['user_nodes'] = nodes
        # initialize to calculate is_empty
        self.has_data = nodes or slices

    def is_empty(self):
        """Return True when there is nothing to display for this user."""
        return not self.has_data
|
normal
|
{
"blob_id": "90324392e763ac6ea78c77b909c4bea667d45e6c",
"index": 5896,
"step-1": "<mask token>\n\n\nclass MyThingsDashboardModule(DashboardModule):\n <mask token>\n <mask token>\n <mask token>\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MyThingsDashboardModule(DashboardModule):\n <mask token>\n title = 'My Things'\n template = 'dashboard/modules/mythings.html'\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n\n def is_empty(self):\n return not self.has_data\n",
"step-3": "<mask token>\n\n\nclass MyThingsDashboardModule(DashboardModule):\n \"\"\"\n Controller dashboard module to provide an overview to\n the user of the nodes and slices of its groups.\n \"\"\"\n title = 'My Things'\n template = 'dashboard/modules/mythings.html'\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n\n def is_empty(self):\n return not self.has_data\n",
"step-4": "from admin_tools.dashboard.modules import DashboardModule\nfrom nodes.models import Node\nfrom slices.models import Slice\n\n\nclass MyThingsDashboardModule(DashboardModule):\n \"\"\"\n Controller dashboard module to provide an overview to\n the user of the nodes and slices of its groups.\n \"\"\"\n title = 'My Things'\n template = 'dashboard/modules/mythings.html'\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n\n def is_empty(self):\n return not self.has_data\n",
"step-5": "from admin_tools.dashboard.modules import DashboardModule\n\nfrom nodes.models import Node\nfrom slices.models import Slice\n\nclass MyThingsDashboardModule(DashboardModule):\n \"\"\"\n Controller dashboard module to provide an overview to\n the user of the nodes and slices of its groups.\n \"\"\"\n title=\"My Things\"\n template = \"dashboard/modules/mythings.html\"\n \n def init_with_context(self, context):\n user = context['request'].user\n \n # Get user slices\n slices = Slice.objects.filter(group__in=user.groups.all().values_list('pk', flat=True))\n context['slices'] = slices\n \n # Get user nodes\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state).count())\n \n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n \n # initialize to calculate is_empty\n self.has_data = nodes or slices\n \n def is_empty(self):\n return not self.has_data\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import pandas as pd
import requests
import re
from bs4 import BeautifulSoup
from datetime import datetime
# Index page listing the current box scores on basketball-reference.com.
nbaBoxUrl = 'https://www.basketball-reference.com/boxscores/'
# CSS class of the per-team box-score tables (not referenced below).
boxScoreClass = 'stats_table'
def getBoxScoreLinks():
    """Return the relative URLs of every game linked from the box-score index page."""
    response = requests.get(nbaBoxUrl)
    soup = BeautifulSoup(response.content, 'html.parser')
    cells = soup.findAll('td', {'class': 'right gamelink'})
    # Each "gamelink" cell holds one or more <a> tags pointing at a game page.
    return [anchor['href'] for cell in cells for anchor in cell.findAll('a')]
def getBoxScoreTeams(soup):
    """Extract both teams from a box-score page's scorebox.

    Returns a list of two dicts with keys ``name``, ``abrv``,
    ``table`` (filled in later by the caller) and ``opponent``.
    """
    scorebox = soup.find('div', {'class': 'scorebox'})
    teams = []
    for anchor in scorebox.find_all('a', href=True):
        href = anchor['href']
        if 'teams' not in href:
            continue
        teams.append({
            'name': anchor.getText(),
            'abrv': href.split('/')[2],
            'table': '',
            'opponent': '',
        })
    # Cross-link: each team records the other team's name as its opponent.
    for team in teams:
        for other in teams:
            if other['name'] != team['name']:
                team['opponent'] = other['name']
    return teams
def getGameDate(soup):
    """Return the game date formatted "MM/DD/YYYY" from the scorebox meta block."""
    for meta_block in soup.find_all('div', {'class': 'scorebox_meta'}):
        first_child = meta_block.find('div')
        # The first child div holds e.g. "10:00 PM, October 25, 2021".
        parsed = datetime.strptime(first_child.string, '%I:%M %p, %B %d, %Y')
        return parsed.strftime("%m/%d/%Y")
def getHomeTeam(url):
    """Return the home team's abbreviation embedded in the box-score URL."""
    # The last path segment looks like "202110250LAC.html"; the first run
    # of letters is the home-team abbreviation.
    slug = url.split('/')[4]
    return re.search("[a-zA-Z]+", slug).group(0)
def getGameId(url):
    """Return the numeric game id (date-based prefix) from the box-score URL."""
    slug = url.split('/')[4]
    # First run of digits in e.g. "202110250LAC.html".
    return re.search(r"\d+", slug).group(0)
def getFileName(url):
    """Return the last URL path segment without its file extension."""
    segment = url.split('/')[4]
    return segment.rsplit('.', 1)[0]
def removeSummaryRows(df):
    """Drop the box-score summary rows ('Team Totals' and 'Reserves' headers)."""
    for label in ('Team Totals', 'Reserves'):
        df = df[df.Starters != label]
    return df
def updateColumns(df):
    """Drop the derived FG% column and rename 'Starters' to 'Players'.

    Uses the ``columns=`` keyword: the positional ``axis`` argument of
    ``DataFrame.drop`` (``df.drop('FG%', 1)``) was deprecated in pandas 1.0
    and removed in pandas 2.0, where it raises TypeError.
    """
    df = df.drop(columns='FG%')
    # rename
    df = df.rename(columns={'Starters': 'Players'})
    return df
def replaceDNP(df):
    """Turn 'Did Not Play' placeholder cells into numeric zeroes."""
    return df.replace('Did Not Play', 0)
def orderColumns(df):
    """Reorder the frame into the fixed export column order."""
    column_order = [
        'Players', 'Team', 'Opponent', 'GameID', 'Date', 'Court',
        'MP', 'FG', 'FGA', '3P', '3PA', 'FT', 'FTA',
        'ORB', 'DRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS',
    ]
    return df[column_order]
def getGameBoxScore(url='https://www.basketball-reference.com/boxscores/202110250LAC.html'):
    """Scrape one game's box score and write it to '<game slug>.csv'.

    ``url`` defaults to the original hard-coded sample game, so existing
    callers keep working; pass any URL returned by getBoxScoreLinks() to
    scrape a different game.
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    # Per-game metadata shared by every player row.
    teams = getBoxScoreTeams(soup)
    gameDate = getGameDate(soup)
    homeTeam = getHomeTeam(url)
    gameId = getGameId(url)
    fileName = getFileName(url)

    # Remove the extra spanning header row so pandas reads a flat table.
    for div in soup.find_all("tr", {'class': 'over_header'}):
        div.decompose()

    masterDf = pd.DataFrame()
    for team in teams:
        team['table'] = soup.find_all(
            "table", {'id': 'box-' + team['abrv'] + '-game-basic'})
        df = pd.read_html(str(team['table']))[0]

        # Constant columns for this team's player rows.
        df['Team'] = team['name']
        df['Opponent'] = team['opponent']
        df['Date'] = gameDate
        df['GameID'] = gameId
        df['Court'] = 'Home' if team['abrv'] == homeTeam else 'Away'

        masterDf = pd.concat([masterDf, df], ignore_index=True)

    # Normalise the combined frame before export.
    masterDf = removeSummaryRows(masterDf)
    masterDf = replaceDNP(masterDf)
    masterDf = updateColumns(masterDf)
    masterDf = orderColumns(masterDf)
    print(masterDf.head(2))
    masterDf.to_csv(fileName + '.csv', index=False, sep='\t', encoding='utf-8')

    # Footer row recording where the data came from.
    with open(fileName + '.csv', 'a') as fd:
        fd.write('\n')
        fd.write('Sample Link:' + '\t' + url)
#gameLinks = getBoxScoreLinks()
# Module entry point: scrapes the sample game whenever this file is run
# (or imported -- there is no __main__ guard).
getGameBoxScore()
|
normal
|
{
"blob_id": "2b3983fd6a8b31604d6d71dfca1d5b6c2c7105e0",
"index": 4818,
"step-1": "<mask token>\n\n\ndef getBoxScoreLinks():\n page = requests.get(nbaBoxUrl)\n soup = BeautifulSoup(page.content, 'html.parser')\n gameLinks = []\n data = soup.findAll('td', {'class': 'right gamelink'})\n for div in data:\n links = div.findAll('a')\n for a in links:\n gameLinks.append(a['href'])\n return gameLinks\n\n\ndef getBoxScoreTeams(soup):\n data = soup.find('div', {'class': 'scorebox'})\n substring = 'teams'\n teams = []\n team = {'name': '', 'abrv': '', 'table': '', 'opponent': ''}\n for a in data.find_all('a', href=True):\n if substring in a['href']:\n new = team.copy()\n new['name'] = a.getText()\n new['abrv'] = a['href'].split('/')[2]\n teams.append(new)\n for team in teams:\n for opponent in teams:\n if team['name'] != opponent['name']:\n team['opponent'] = opponent['name']\n return teams\n\n\ndef getGameDate(soup):\n for div in soup.find_all('div', {'class': 'scorebox_meta'}):\n childdiv = div.find('div')\n datetime_object = datetime.strptime(childdiv.string,\n '%I:%M %p, %B %d, %Y')\n return datetime_object.strftime('%m/%d/%Y')\n\n\n<mask token>\n\n\ndef getGameId(url):\n gameId = url.split('/')[4]\n gameId = re.findall('\\\\d+', gameId)[0]\n return gameId\n\n\ndef getFileName(url):\n fileName = url.split('/')[4]\n fileName = fileName.rsplit('.', 1)[0]\n return fileName\n\n\ndef removeSummaryRows(df):\n df = df[df.Starters != 'Team Totals']\n df = df[df.Starters != 'Reserves']\n return df\n\n\n<mask token>\n\n\ndef replaceDNP(df):\n df = df.replace('Did Not Play', 0)\n return df\n\n\n<mask token>\n\n\ndef getGameBoxScore():\n url = 'https://www.basketball-reference.com/boxscores/202110250LAC.html'\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n teams = getBoxScoreTeams(soup)\n gameDate = getGameDate(soup)\n homeTeam = getHomeTeam(url)\n gameId = getGameId(url)\n fileName = getFileName(url)\n for div in soup.find_all('tr', {'class': 'over_header'}):\n div.decompose()\n masterDf = pd.DataFrame()\n for team in 
teams:\n team['table'] = soup.find_all('table', {'id': 'box-' + team['abrv'] +\n '-game-basic'})\n df = pd.read_html(str(team['table']))[0]\n df['Team'] = team['name']\n df['Opponent'] = team['opponent']\n df['Date'] = gameDate\n df['GameID'] = gameId\n if team['abrv'] == homeTeam:\n df['Court'] = 'Home'\n else:\n df['Court'] = 'Away'\n masterDf = pd.concat([masterDf, df], ignore_index=True)\n masterDf = removeSummaryRows(masterDf)\n masterDf = replaceDNP(masterDf)\n masterDf = updateColumns(masterDf)\n masterDf = orderColumns(masterDf)\n print(masterDf.head(2))\n masterDf.to_csv(fileName + '.csv', index=False, sep='\\t', encoding='utf-8')\n with open(fileName + '.csv', 'a') as fd:\n fd.write('\\n')\n fd.write('Sample Link:' + '\\t' + url)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getBoxScoreLinks():\n page = requests.get(nbaBoxUrl)\n soup = BeautifulSoup(page.content, 'html.parser')\n gameLinks = []\n data = soup.findAll('td', {'class': 'right gamelink'})\n for div in data:\n links = div.findAll('a')\n for a in links:\n gameLinks.append(a['href'])\n return gameLinks\n\n\ndef getBoxScoreTeams(soup):\n data = soup.find('div', {'class': 'scorebox'})\n substring = 'teams'\n teams = []\n team = {'name': '', 'abrv': '', 'table': '', 'opponent': ''}\n for a in data.find_all('a', href=True):\n if substring in a['href']:\n new = team.copy()\n new['name'] = a.getText()\n new['abrv'] = a['href'].split('/')[2]\n teams.append(new)\n for team in teams:\n for opponent in teams:\n if team['name'] != opponent['name']:\n team['opponent'] = opponent['name']\n return teams\n\n\ndef getGameDate(soup):\n for div in soup.find_all('div', {'class': 'scorebox_meta'}):\n childdiv = div.find('div')\n datetime_object = datetime.strptime(childdiv.string,\n '%I:%M %p, %B %d, %Y')\n return datetime_object.strftime('%m/%d/%Y')\n\n\n<mask token>\n\n\ndef getGameId(url):\n gameId = url.split('/')[4]\n gameId = re.findall('\\\\d+', gameId)[0]\n return gameId\n\n\ndef getFileName(url):\n fileName = url.split('/')[4]\n fileName = fileName.rsplit('.', 1)[0]\n return fileName\n\n\ndef removeSummaryRows(df):\n df = df[df.Starters != 'Team Totals']\n df = df[df.Starters != 'Reserves']\n return df\n\n\ndef updateColumns(df):\n df = df.drop('FG%', 1)\n df = df.rename({'Starters': 'Players'}, axis=1)\n return df\n\n\ndef replaceDNP(df):\n df = df.replace('Did Not Play', 0)\n return df\n\n\n<mask token>\n\n\ndef getGameBoxScore():\n url = 'https://www.basketball-reference.com/boxscores/202110250LAC.html'\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n teams = getBoxScoreTeams(soup)\n gameDate = getGameDate(soup)\n homeTeam = getHomeTeam(url)\n gameId = getGameId(url)\n fileName = getFileName(url)\n for div in 
soup.find_all('tr', {'class': 'over_header'}):\n div.decompose()\n masterDf = pd.DataFrame()\n for team in teams:\n team['table'] = soup.find_all('table', {'id': 'box-' + team['abrv'] +\n '-game-basic'})\n df = pd.read_html(str(team['table']))[0]\n df['Team'] = team['name']\n df['Opponent'] = team['opponent']\n df['Date'] = gameDate\n df['GameID'] = gameId\n if team['abrv'] == homeTeam:\n df['Court'] = 'Home'\n else:\n df['Court'] = 'Away'\n masterDf = pd.concat([masterDf, df], ignore_index=True)\n masterDf = removeSummaryRows(masterDf)\n masterDf = replaceDNP(masterDf)\n masterDf = updateColumns(masterDf)\n masterDf = orderColumns(masterDf)\n print(masterDf.head(2))\n masterDf.to_csv(fileName + '.csv', index=False, sep='\\t', encoding='utf-8')\n with open(fileName + '.csv', 'a') as fd:\n fd.write('\\n')\n fd.write('Sample Link:' + '\\t' + url)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getBoxScoreLinks():\n page = requests.get(nbaBoxUrl)\n soup = BeautifulSoup(page.content, 'html.parser')\n gameLinks = []\n data = soup.findAll('td', {'class': 'right gamelink'})\n for div in data:\n links = div.findAll('a')\n for a in links:\n gameLinks.append(a['href'])\n return gameLinks\n\n\ndef getBoxScoreTeams(soup):\n data = soup.find('div', {'class': 'scorebox'})\n substring = 'teams'\n teams = []\n team = {'name': '', 'abrv': '', 'table': '', 'opponent': ''}\n for a in data.find_all('a', href=True):\n if substring in a['href']:\n new = team.copy()\n new['name'] = a.getText()\n new['abrv'] = a['href'].split('/')[2]\n teams.append(new)\n for team in teams:\n for opponent in teams:\n if team['name'] != opponent['name']:\n team['opponent'] = opponent['name']\n return teams\n\n\ndef getGameDate(soup):\n for div in soup.find_all('div', {'class': 'scorebox_meta'}):\n childdiv = div.find('div')\n datetime_object = datetime.strptime(childdiv.string,\n '%I:%M %p, %B %d, %Y')\n return datetime_object.strftime('%m/%d/%Y')\n\n\ndef getHomeTeam(url):\n homeTeam = url.split('/')[4]\n homeTeam = re.findall('[a-zA-Z]+', homeTeam)[0]\n return homeTeam\n\n\ndef getGameId(url):\n gameId = url.split('/')[4]\n gameId = re.findall('\\\\d+', gameId)[0]\n return gameId\n\n\ndef getFileName(url):\n fileName = url.split('/')[4]\n fileName = fileName.rsplit('.', 1)[0]\n return fileName\n\n\ndef removeSummaryRows(df):\n df = df[df.Starters != 'Team Totals']\n df = df[df.Starters != 'Reserves']\n return df\n\n\ndef updateColumns(df):\n df = df.drop('FG%', 1)\n df = df.rename({'Starters': 'Players'}, axis=1)\n return df\n\n\ndef replaceDNP(df):\n df = df.replace('Did Not Play', 0)\n return df\n\n\ndef orderColumns(df):\n df = df[['Players', 'Team', 'Opponent', 'GameID', 'Date', 'Court', 'MP',\n 'FG', 'FGA', '3P', '3PA', 'FT', 'FTA', 'ORB', 'DRB', 'AST', 'STL',\n 'BLK', 'TOV', 'PF', 'PTS']]\n return df\n\n\ndef getGameBoxScore():\n url = 
'https://www.basketball-reference.com/boxscores/202110250LAC.html'\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n teams = getBoxScoreTeams(soup)\n gameDate = getGameDate(soup)\n homeTeam = getHomeTeam(url)\n gameId = getGameId(url)\n fileName = getFileName(url)\n for div in soup.find_all('tr', {'class': 'over_header'}):\n div.decompose()\n masterDf = pd.DataFrame()\n for team in teams:\n team['table'] = soup.find_all('table', {'id': 'box-' + team['abrv'] +\n '-game-basic'})\n df = pd.read_html(str(team['table']))[0]\n df['Team'] = team['name']\n df['Opponent'] = team['opponent']\n df['Date'] = gameDate\n df['GameID'] = gameId\n if team['abrv'] == homeTeam:\n df['Court'] = 'Home'\n else:\n df['Court'] = 'Away'\n masterDf = pd.concat([masterDf, df], ignore_index=True)\n masterDf = removeSummaryRows(masterDf)\n masterDf = replaceDNP(masterDf)\n masterDf = updateColumns(masterDf)\n masterDf = orderColumns(masterDf)\n print(masterDf.head(2))\n masterDf.to_csv(fileName + '.csv', index=False, sep='\\t', encoding='utf-8')\n with open(fileName + '.csv', 'a') as fd:\n fd.write('\\n')\n fd.write('Sample Link:' + '\\t' + url)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef getBoxScoreLinks():\n page = requests.get(nbaBoxUrl)\n soup = BeautifulSoup(page.content, 'html.parser')\n gameLinks = []\n data = soup.findAll('td', {'class': 'right gamelink'})\n for div in data:\n links = div.findAll('a')\n for a in links:\n gameLinks.append(a['href'])\n return gameLinks\n\n\ndef getBoxScoreTeams(soup):\n data = soup.find('div', {'class': 'scorebox'})\n substring = 'teams'\n teams = []\n team = {'name': '', 'abrv': '', 'table': '', 'opponent': ''}\n for a in data.find_all('a', href=True):\n if substring in a['href']:\n new = team.copy()\n new['name'] = a.getText()\n new['abrv'] = a['href'].split('/')[2]\n teams.append(new)\n for team in teams:\n for opponent in teams:\n if team['name'] != opponent['name']:\n team['opponent'] = opponent['name']\n return teams\n\n\ndef getGameDate(soup):\n for div in soup.find_all('div', {'class': 'scorebox_meta'}):\n childdiv = div.find('div')\n datetime_object = datetime.strptime(childdiv.string,\n '%I:%M %p, %B %d, %Y')\n return datetime_object.strftime('%m/%d/%Y')\n\n\ndef getHomeTeam(url):\n homeTeam = url.split('/')[4]\n homeTeam = re.findall('[a-zA-Z]+', homeTeam)[0]\n return homeTeam\n\n\ndef getGameId(url):\n gameId = url.split('/')[4]\n gameId = re.findall('\\\\d+', gameId)[0]\n return gameId\n\n\ndef getFileName(url):\n fileName = url.split('/')[4]\n fileName = fileName.rsplit('.', 1)[0]\n return fileName\n\n\ndef removeSummaryRows(df):\n df = df[df.Starters != 'Team Totals']\n df = df[df.Starters != 'Reserves']\n return df\n\n\ndef updateColumns(df):\n df = df.drop('FG%', 1)\n df = df.rename({'Starters': 'Players'}, axis=1)\n return df\n\n\ndef replaceDNP(df):\n df = df.replace('Did Not Play', 0)\n return df\n\n\ndef orderColumns(df):\n df = df[['Players', 'Team', 'Opponent', 'GameID', 'Date', 'Court', 'MP',\n 'FG', 'FGA', '3P', '3PA', 'FT', 'FTA', 'ORB', 'DRB', 'AST', 'STL',\n 'BLK', 'TOV', 'PF', 'PTS']]\n return df\n\n\ndef getGameBoxScore():\n url = 
'https://www.basketball-reference.com/boxscores/202110250LAC.html'\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n teams = getBoxScoreTeams(soup)\n gameDate = getGameDate(soup)\n homeTeam = getHomeTeam(url)\n gameId = getGameId(url)\n fileName = getFileName(url)\n for div in soup.find_all('tr', {'class': 'over_header'}):\n div.decompose()\n masterDf = pd.DataFrame()\n for team in teams:\n team['table'] = soup.find_all('table', {'id': 'box-' + team['abrv'] +\n '-game-basic'})\n df = pd.read_html(str(team['table']))[0]\n df['Team'] = team['name']\n df['Opponent'] = team['opponent']\n df['Date'] = gameDate\n df['GameID'] = gameId\n if team['abrv'] == homeTeam:\n df['Court'] = 'Home'\n else:\n df['Court'] = 'Away'\n masterDf = pd.concat([masterDf, df], ignore_index=True)\n masterDf = removeSummaryRows(masterDf)\n masterDf = replaceDNP(masterDf)\n masterDf = updateColumns(masterDf)\n masterDf = orderColumns(masterDf)\n print(masterDf.head(2))\n masterDf.to_csv(fileName + '.csv', index=False, sep='\\t', encoding='utf-8')\n with open(fileName + '.csv', 'a') as fd:\n fd.write('\\n')\n fd.write('Sample Link:' + '\\t' + url)\n\n\ngetGameBoxScore()\n",
"step-5": "import pandas as pd\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\nnbaBoxUrl = 'https://www.basketball-reference.com/boxscores/'\n\nboxScoreClass = 'stats_table'\n\ndef getBoxScoreLinks():\n page = requests.get(nbaBoxUrl)\n soup = BeautifulSoup(page.content, 'html.parser')\n gameLinks = []\n data = soup.findAll('td', {'class': 'right gamelink'})\n for div in data:\n links = div.findAll('a')\n for a in links:\n gameLinks.append(a['href'])\n return gameLinks\n\ndef getBoxScoreTeams(soup):\n data = soup.find('div', {'class': 'scorebox'})\n substring = 'teams'\n teams = []\n team = {'name':'', 'abrv':'', 'table' : '', 'opponent' : ''}\n for a in data.find_all('a', href=True):\n if substring in a['href']:\n new = team.copy()\n new['name'] = a.getText()\n new['abrv'] = a['href'].split('/')[2]\n teams.append(new)\n #set opponent\n for team in teams:\n for opponent in teams:\n if team['name'] != opponent['name']:\n team['opponent'] = opponent['name']\n return teams\n\ndef getGameDate(soup):\n for div in soup.find_all('div', {'class': 'scorebox_meta'}):\n childdiv = div.find('div')\n #format date\n datetime_object = datetime.strptime(childdiv.string, '%I:%M %p, %B %d, %Y')\n return datetime_object.strftime(\"%m/%d/%Y\")\n\ndef getHomeTeam(url):\n homeTeam = url.split('/')[4]\n homeTeam = re.findall(\"[a-zA-Z]+\", homeTeam)[0]\n return homeTeam\n\ndef getGameId(url):\n gameId = url.split('/')[4]\n gameId = re.findall(\"\\d+\", gameId)[0]\n return gameId\n\ndef getFileName(url):\n fileName = url.split('/')[4]\n fileName = fileName.rsplit( \".\", 1 )[ 0 ]\n return fileName\n\ndef removeSummaryRows(df):\n df = df[df.Starters != 'Team Totals']\n df = df[df.Starters != 'Reserves']\n return df\n\ndef updateColumns(df):\n df = df.drop('FG%', 1)\n #rename\n df = df.rename({'Starters': 'Players'}, axis=1) \n return df\n\ndef replaceDNP(df):\n df = df.replace('Did Not Play', 0)\n return df\n\ndef orderColumns(df):\n df = 
df[['Players', 'Team', 'Opponent', 'GameID', 'Date', 'Court', 'MP', 'FG', 'FGA', '3P', '3PA', 'FT', 'FTA', 'ORB', 'DRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']]\n return df\n\ndef getGameBoxScore():\n url = 'https://www.basketball-reference.com/boxscores/202110250LAC.html'\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n #get teams\n teams = getBoxScoreTeams(soup)\n gameDate = getGameDate(soup)\n homeTeam = getHomeTeam(url)\n gameId = getGameId(url)\n fileName = getFileName(url)\n\n #Remove extra header\n for div in soup.find_all(\"tr\", {'class':'over_header'}): \n div.decompose()\n\n masterDf = pd.DataFrame()\n for team in teams:\n team['table'] = soup.find_all(\"table\", {'id':'box-'+ team['abrv'] +'-game-basic'})\n #format dataframe\n df = pd.read_html(str(team['table']))[0]\n\n #constants\n df['Team'] = team['name']\n df['Opponent'] = team['opponent']\n df['Date'] = gameDate\n df['GameID'] = gameId\n\n\n if team['abrv'] == homeTeam:\n df['Court'] = 'Home'\n else:\n df['Court'] = 'Away'\n\n masterDf = pd.concat([masterDf, df], ignore_index=True)\n #master_df = master_df.append(df,ignore_index=True)\n\n #format dataframe\n masterDf = removeSummaryRows(masterDf)\n masterDf = replaceDNP(masterDf)\n masterDf = updateColumns(masterDf)\n masterDf = orderColumns(masterDf)\n print(masterDf.head(2))\n masterDf.to_csv(fileName + '.csv', index=False, sep='\\t', encoding='utf-8')\n\n #add footer row\n with open(fileName + '.csv','a') as fd:\n fd.write('\\n')\n fd.write('Sample Link:' + '\\t' + url)\n \n\n#gameLinks = getBoxScoreLinks()\ngetGameBoxScore()\n",
"step-ids": [
8,
9,
11,
12,
15
]
}
|
[
8,
9,
11,
12,
15
] |
# x = 10
#
# def increment():
# x += 1
#
# ^^ Non-working code
# (assigning to x inside the function makes x local to the function,
# so the += would raise UnboundLocalError without ``global x``)
x = 10
def increment(number):
    """Return ``number`` plus one; the caller's variable is not mutated."""
    return number + 1
# If we want to change a global variable,
# we have to do it like this
# (rebind the global name to the function's return value)
x = increment(x)
|
normal
|
{
"blob_id": "a0460b100a750b685f3e831a19379b0e26da4b35",
"index": 7368,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef increment(number):\n number += 1\n return number\n\n\n<mask token>\n",
"step-3": "x = 10\n\n\ndef increment(number):\n number += 1\n return number\n\n\nx = increment(x)\n",
"step-4": "# x = 10\n#\n# def increment():\n# x += 1\n# \n# ^^ Non-working code\n\nx = 10\n\ndef increment(number): \n number += 1\n return number\n\n# If we want to change a global variable,\n# we have to do it like this\nx = increment(x)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
from .models import User
# Register your models here.
@admin.register(User)
class AuthorizationUserAdmin(admin.ModelAdmin):
    """Admin page for User; hides the ``open_id`` field from the edit form."""
    # The class body is non-empty, so the trailing ``pass`` was dead code.
    exclude = ['open_id']
|
normal
|
{
"blob_id": "d3585e7b761fa7b2eeaacf09f84bb6a4abc1cf02",
"index": 6806,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](User)\nclass AuthorizationUserAdmin(admin.ModelAdmin):\n <mask token>\n pass\n",
"step-3": "<mask token>\n\n\[email protected](User)\nclass AuthorizationUserAdmin(admin.ModelAdmin):\n exclude = ['open_id']\n pass\n",
"step-4": "from django.contrib import admin\nfrom .models import User\n\n\[email protected](User)\nclass AuthorizationUserAdmin(admin.ModelAdmin):\n exclude = ['open_id']\n pass\n",
"step-5": "from django.contrib import admin\r\nfrom .models import User\r\n\r\n\r\n# Register your models here.\r\n\r\[email protected](User)\r\nclass AuthorizationUserAdmin(admin.ModelAdmin):\r\n exclude = ['open_id']\r\n pass\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Item pipelines
import logging
import hashlib
from wsgiref.handlers import format_date_time
import time
import itertools
import psycopg2
from psycopg2.extensions import AsIs
from psycopg2.extras import Json
import requests
from scrapy import signals
from scrapy.pipelines.files import FilesPipeline
from twisted.enterprise import adbapi
from twisted.internet import threads
logger = logging.getLogger(__name__)
class DBStorePipeline(object):
    '''
    Save crawled items to a PostgreSQL table.

    DB operations run asynchronously on a twisted adbapi connection pool,
    driven by the reactor loop.
    (References from https://gist.github.com/tzermias/6982723)
    '''
    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy factory hook: build the pipeline and connect spider shutdown
        # so the connection pool is closed when the spider finishes.
        instance = cls(crawler.stats, crawler.settings)
        crawler.signals.connect(instance.spider_closed, signals.spider_closed)
        return instance
    def __init__(self, stats, settings):
        # Instantiate DB; adbapi runs each interaction on a thread pool.
        self.dbpool = adbapi.ConnectionPool('psycopg2', settings['DB_DSN'])
        self.stats = stats
    def spider_closed(self, spider):
        self.dbpool.close()
    def process_item(self, item, spider):
        # Items opt in to persistence by declaring a ``db_table`` attribute;
        # anything else passes through untouched.
        table = getattr(item, "db_table", None)
        if not table:
            return item
        query = self.dbpool.runInteraction(self._save_item, table, item)
        query.addErrback(self._handle_error)
        # Return the item immediately; the insert completes in the background.
        return item
    def _save_item(self, tx, table, item):
        # Runs on a pool thread inside one transaction (``tx`` is the cursor).
        # Fields listed in ``db_skip_fields`` are not stored as columns.
        skip_fields = getattr(item, "db_skip_fields", [])
        cols = [k for k in item if k not in skip_fields]
        self._insert_row(tx, table, cols, item)
        self.stats.inc_value('database/records_added')
        # Items may provide extra rows for a secondary (helper) table.
        if hasattr(item, "db_helper_table_rows"):
            helper_table, helper_rows = item.db_helper_table_rows()
            if helper_rows:
                self._insert_row(tx, helper_table,
                    helper_rows[0].keys(), *helper_rows)
                self.stats.inc_value(
                    'database/records_added', len(helper_rows))
        return item
    def _insert_row(self, tx, table, cols, *rows):
        # Build one multi-row INSERT; values are escaped via mogrify.
        # NOTE(review): ``table`` and ``cols`` are interpolated directly into
        # the SQL -- they must come from trusted item declarations only.
        val_fmt = "({})".format(",".join(itertools.repeat("%s", len(cols))))
        def mk_row_param(row):
            # Order the row's values to match ``cols``.
            return tuple(row[k] for k in cols)
        data_str = ','.join(tx.mogrify(val_fmt, mk_row_param(row)).decode('utf-8')
            for row in rows)
        q = "INSERT INTO {} ({}) VALUES ".format(table, ",".join(cols))
        tx.execute(q + data_str)
    def _handle_error(self, e):
        # Errback for runInteraction failures; logs and swallows the failure.
        logger.error("failed to track item to DB: %s", e)
class UpYunStore(object):
    """FilesPipeline storage backend that uploads files to an UpYun bucket.

    ``OPERATOR``/``SIGNATURE`` are filled in from crawler settings by
    MbCrawlImagesPipeline.from_settings before any upload happens.
    (The original docstring said "Azure blob storage" -- a copy-paste error.)
    """
    OPERATOR = None
    SIGNATURE = None
    # Extra headers sent with every upload request.
    HEADERS = {
        'Cache-Control': 'max-age=172800',
    }

    def __init__(self, uri):
        assert uri.startswith('upyun://')
        self.session = requests.Session()
        # upyun://<bucket>/<prefix...>
        self.bucket, self.prefix = uri[8:].split("/", 1)

    def stat_file(self, path, info):
        """
        TODO fetch and return file meta info from cloud
        """
        return {}

    def persist_file(self, path, buf, info, meta=None, headers=None):
        """Upload the file to UpYun storage in a background thread."""
        # NOTE(review): UpYun's REST API documents the header as
        # "UPYUN <operator>:<signature>" (no colon after UPYUN) -- the
        # original format string below is kept but should be confirmed.
        request_headers = dict(self.HEADERS)  # was defined but never sent
        request_headers.update({
            "Authorization": "UPYUN: {}:{}".format(self.OPERATOR, self.SIGNATURE),
            "Date": format_date_time(int(time.time())),
        })
        url = "http://v0.api.upyun.com:5000/{}/{}{}".format(
            self.bucket, self.prefix, path)

        def upload():
            try:
                # Reuse the pooled session created in __init__ instead of a
                # throwaway module-level requests call.
                res = self.session.put(url, headers=request_headers, data=buf)
                if res.status_code != 200:
                    logger.info(
                        "failed to upload file %s to upyun, response code: %s, text:\n%s",
                        path, res.status_code, res.text)
                else:
                    logger.debug("uploaded file %s to upyun", path)
            except Exception:
                # logger.warn is deprecated; warning() is the supported name.
                logger.warning("upload file %s to upyun failed",
                               path, exc_info=True)
        return threads.deferToThread(upload)
class MbCrawlImagesPipeline(FilesPipeline):
    """FilesPipeline subclass that registers the upyun:// storage scheme."""
    # Copy so we don't mutate the parent class's scheme registry.
    STORE_SCHEMES = dict(FilesPipeline.STORE_SCHEMES)
    STORE_SCHEMES["upyun"] = UpYunStore

    @classmethod
    def from_settings(cls, settings):
        upyunStore = cls.STORE_SCHEMES["upyun"]
        upyunStore.OPERATOR = settings["UPYUN_OPERATOR"]
        # Use the registered class consistently; the original set
        # ``UpYunStore.SIGNATURE`` directly, which breaks silently if a
        # different class is ever registered for the scheme.
        # NOTE(review): the key "SIGNATURE" looks inconsistent with
        # "UPYUN_OPERATOR" -- confirm it shouldn't be "UPYUN_SIGNATURE".
        upyunStore.SIGNATURE = settings["SIGNATURE"]
        return super().from_settings(settings)
|
normal
|
{
"blob_id": "d08e4c85890dab7cb421fa994ef1947d8919d58f",
"index": 8547,
"step-1": "<mask token>\n\n\nclass UpYunStore(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, uri):\n assert uri.startswith('upyun://')\n self.session = requests.Session()\n self.bucket, self.prefix = uri[8:].split('/', 1)\n\n def stat_file(self, path, info):\n \"\"\"\n TODO fetch and return file meta info from cloud\n \"\"\"\n return {}\n\n def persist_file(self, path, buf, info, meta=None, headers=None):\n \"\"\"Upload file to Azure blob storage\"\"\"\n headers = {'Authorization': 'UPYUN: {}:{}'.format(self.OPERATOR,\n self.SIGNATURE), 'Date': format_date_time(int(time.time()))}\n url = 'http://v0.api.upyun.com:5000/{}/{}{}'.format(self.bucket,\n self.prefix, path)\n\n def upload():\n try:\n res = requests.put(url, headers=headers, data=buf)\n if res.status_code != 200:\n logger.info(\n 'failed to upload file %s to upyun, response code: %s, text:\\n%s'\n , path, res.status_code, res.text)\n else:\n logger.debug('uploaded file %s to upyun', path)\n except Exception:\n logger.warn('upload file %s to upyun failed', path,\n exc_info=True)\n return threads.deferToThread(upload)\n\n\nclass MbCrawlImagesPipeline(FilesPipeline):\n STORE_SCHEMES = dict(FilesPipeline.STORE_SCHEMES)\n STORE_SCHEMES['upyun'] = UpYunStore\n\n @classmethod\n def from_settings(cls, settings):\n upyunStore = cls.STORE_SCHEMES['upyun']\n upyunStore.OPERATOR = settings['UPYUN_OPERATOR']\n UpYunStore.SIGNATURE = settings['SIGNATURE']\n return super().from_settings(settings)\n",
"step-2": "<mask token>\n\n\nclass UpYunStore(object):\n OPERATOR = None\n SIGNATURE = None\n HEADERS = {'Cache-Control': 'max-age=172800'}\n\n def __init__(self, uri):\n assert uri.startswith('upyun://')\n self.session = requests.Session()\n self.bucket, self.prefix = uri[8:].split('/', 1)\n\n def stat_file(self, path, info):\n \"\"\"\n TODO fetch and return file meta info from cloud\n \"\"\"\n return {}\n\n def persist_file(self, path, buf, info, meta=None, headers=None):\n \"\"\"Upload file to Azure blob storage\"\"\"\n headers = {'Authorization': 'UPYUN: {}:{}'.format(self.OPERATOR,\n self.SIGNATURE), 'Date': format_date_time(int(time.time()))}\n url = 'http://v0.api.upyun.com:5000/{}/{}{}'.format(self.bucket,\n self.prefix, path)\n\n def upload():\n try:\n res = requests.put(url, headers=headers, data=buf)\n if res.status_code != 200:\n logger.info(\n 'failed to upload file %s to upyun, response code: %s, text:\\n%s'\n , path, res.status_code, res.text)\n else:\n logger.debug('uploaded file %s to upyun', path)\n except Exception:\n logger.warn('upload file %s to upyun failed', path,\n exc_info=True)\n return threads.deferToThread(upload)\n\n\nclass MbCrawlImagesPipeline(FilesPipeline):\n STORE_SCHEMES = dict(FilesPipeline.STORE_SCHEMES)\n STORE_SCHEMES['upyun'] = UpYunStore\n\n @classmethod\n def from_settings(cls, settings):\n upyunStore = cls.STORE_SCHEMES['upyun']\n upyunStore.OPERATOR = settings['UPYUN_OPERATOR']\n UpYunStore.SIGNATURE = settings['SIGNATURE']\n return super().from_settings(settings)\n",
"step-3": "<mask token>\n\n\nclass DBStorePipeline(object):\n <mask token>\n <mask token>\n <mask token>\n\n def spider_closed(self, spider):\n self.dbpool.close()\n <mask token>\n <mask token>\n <mask token>\n\n def _handle_error(self, e):\n logger.error('failed to track item to DB: %s', e)\n\n\nclass UpYunStore(object):\n OPERATOR = None\n SIGNATURE = None\n HEADERS = {'Cache-Control': 'max-age=172800'}\n\n def __init__(self, uri):\n assert uri.startswith('upyun://')\n self.session = requests.Session()\n self.bucket, self.prefix = uri[8:].split('/', 1)\n\n def stat_file(self, path, info):\n \"\"\"\n TODO fetch and return file meta info from cloud\n \"\"\"\n return {}\n\n def persist_file(self, path, buf, info, meta=None, headers=None):\n \"\"\"Upload file to Azure blob storage\"\"\"\n headers = {'Authorization': 'UPYUN: {}:{}'.format(self.OPERATOR,\n self.SIGNATURE), 'Date': format_date_time(int(time.time()))}\n url = 'http://v0.api.upyun.com:5000/{}/{}{}'.format(self.bucket,\n self.prefix, path)\n\n def upload():\n try:\n res = requests.put(url, headers=headers, data=buf)\n if res.status_code != 200:\n logger.info(\n 'failed to upload file %s to upyun, response code: %s, text:\\n%s'\n , path, res.status_code, res.text)\n else:\n logger.debug('uploaded file %s to upyun', path)\n except Exception:\n logger.warn('upload file %s to upyun failed', path,\n exc_info=True)\n return threads.deferToThread(upload)\n\n\nclass MbCrawlImagesPipeline(FilesPipeline):\n STORE_SCHEMES = dict(FilesPipeline.STORE_SCHEMES)\n STORE_SCHEMES['upyun'] = UpYunStore\n\n @classmethod\n def from_settings(cls, settings):\n upyunStore = cls.STORE_SCHEMES['upyun']\n upyunStore.OPERATOR = settings['UPYUN_OPERATOR']\n UpYunStore.SIGNATURE = settings['SIGNATURE']\n return super().from_settings(settings)\n",
"step-4": "<mask token>\n\n\nclass DBStorePipeline(object):\n <mask token>\n\n @classmethod\n def from_crawler(cls, crawler):\n instance = cls(crawler.stats, crawler.settings)\n crawler.signals.connect(instance.spider_closed, signals.spider_closed)\n return instance\n\n def __init__(self, stats, settings):\n self.dbpool = adbapi.ConnectionPool('psycopg2', settings['DB_DSN'])\n self.stats = stats\n\n def spider_closed(self, spider):\n self.dbpool.close()\n\n def process_item(self, item, spider):\n table = getattr(item, 'db_table', None)\n if not table:\n return item\n query = self.dbpool.runInteraction(self._save_item, table, item)\n query.addErrback(self._handle_error)\n return item\n <mask token>\n <mask token>\n\n def _handle_error(self, e):\n logger.error('failed to track item to DB: %s', e)\n\n\nclass UpYunStore(object):\n OPERATOR = None\n SIGNATURE = None\n HEADERS = {'Cache-Control': 'max-age=172800'}\n\n def __init__(self, uri):\n assert uri.startswith('upyun://')\n self.session = requests.Session()\n self.bucket, self.prefix = uri[8:].split('/', 1)\n\n def stat_file(self, path, info):\n \"\"\"\n TODO fetch and return file meta info from cloud\n \"\"\"\n return {}\n\n def persist_file(self, path, buf, info, meta=None, headers=None):\n \"\"\"Upload file to Azure blob storage\"\"\"\n headers = {'Authorization': 'UPYUN: {}:{}'.format(self.OPERATOR,\n self.SIGNATURE), 'Date': format_date_time(int(time.time()))}\n url = 'http://v0.api.upyun.com:5000/{}/{}{}'.format(self.bucket,\n self.prefix, path)\n\n def upload():\n try:\n res = requests.put(url, headers=headers, data=buf)\n if res.status_code != 200:\n logger.info(\n 'failed to upload file %s to upyun, response code: %s, text:\\n%s'\n , path, res.status_code, res.text)\n else:\n logger.debug('uploaded file %s to upyun', path)\n except Exception:\n logger.warn('upload file %s to upyun failed', path,\n exc_info=True)\n return threads.deferToThread(upload)\n\n\nclass MbCrawlImagesPipeline(FilesPipeline):\n 
STORE_SCHEMES = dict(FilesPipeline.STORE_SCHEMES)\n STORE_SCHEMES['upyun'] = UpYunStore\n\n @classmethod\n def from_settings(cls, settings):\n upyunStore = cls.STORE_SCHEMES['upyun']\n upyunStore.OPERATOR = settings['UPYUN_OPERATOR']\n UpYunStore.SIGNATURE = settings['SIGNATURE']\n return super().from_settings(settings)\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Item pipelines\nimport logging\nimport hashlib\nfrom wsgiref.handlers import format_date_time\nimport time\nimport itertools\n\nimport psycopg2\nfrom psycopg2.extensions import AsIs\nfrom psycopg2.extras import Json\nimport requests\nfrom scrapy import signals\nfrom scrapy.pipelines.files import FilesPipeline\nfrom twisted.enterprise import adbapi\nfrom twisted.internet import threads\n\nlogger = logging.getLogger(__name__)\n\n\nclass DBStorePipeline(object):\n '''\n This class save the crawled item to a PostgreSQL table\n The db operation is async and managed by the twisted reactor loop.\n (References from https://gist.github.com/tzermias/6982723)\n '''\n\n @classmethod\n def from_crawler(cls, crawler):\n instance = cls(crawler.stats, crawler.settings)\n crawler.signals.connect(instance.spider_closed, signals.spider_closed)\n return instance\n\n def __init__(self, stats, settings):\n # Instantiate DB\n self.dbpool = adbapi.ConnectionPool('psycopg2', settings['DB_DSN'])\n self.stats = stats\n\n def spider_closed(self, spider):\n self.dbpool.close()\n\n def process_item(self, item, spider):\n table = getattr(item, \"db_table\", None)\n if not table:\n return item\n\n query = self.dbpool.runInteraction(self._save_item, table, item)\n query.addErrback(self._handle_error)\n return item\n\n def _save_item(self, tx, table, item):\n\n skip_fields = getattr(item, \"db_skip_fields\", [])\n\n cols = [k for k in item if k not in skip_fields]\n self._insert_row(tx, table, cols, item)\n self.stats.inc_value('database/records_added')\n if hasattr(item, \"db_helper_table_rows\"):\n helper_table, helper_rows = item.db_helper_table_rows()\n if helper_rows:\n self._insert_row(tx, helper_table,\n helper_rows[0].keys(), *helper_rows)\n self.stats.inc_value(\n 'database/records_added', len(helper_rows))\n\n return item\n\n def _insert_row(self, tx, table, cols, *rows):\n val_fmt = \"({})\".format(\",\".join(itertools.repeat(\"%s\", 
len(cols))))\n\n def mk_row_param(row):\n return tuple(row[k] for k in cols)\n data_str = ','.join(tx.mogrify(val_fmt, mk_row_param(row)).decode('utf-8')\n for row in rows)\n q = \"INSERT INTO {} ({}) VALUES \".format(table, \",\".join(cols))\n tx.execute(q + data_str)\n\n def _handle_error(self, e):\n logger.error(\"failed to track item to DB: %s\", e)\n\n\nclass UpYunStore(object):\n\n OPERATOR = None\n SIGNATURE = None\n\n HEADERS = {\n 'Cache-Control': 'max-age=172800',\n }\n\n def __init__(self, uri):\n assert uri.startswith('upyun://')\n self.session = requests.Session()\n self.bucket, self.prefix = uri[8:].split(\"/\", 1)\n\n def stat_file(self, path, info):\n \"\"\"\n TODO fetch and return file meta info from cloud\n \"\"\"\n return {}\n\n def persist_file(self, path, buf, info, meta=None, headers=None):\n \"\"\"Upload file to Azure blob storage\"\"\"\n headers = {\n \"Authorization\": \"UPYUN: {}:{}\".format(self.OPERATOR, self.SIGNATURE),\n \"Date\": format_date_time(int(time.time())),\n }\n url = \"http://v0.api.upyun.com:5000/{}/{}{}\".format(\n self.bucket, self.prefix, path)\n\n def upload():\n try:\n res = requests.put(url, headers=headers, data=buf)\n if res.status_code != 200:\n logger.info(\n \"failed to upload file %s to upyun, response code: %s, text:\\n%s\",\n path, res.status_code, res.text)\n else:\n logger.debug(\"uploaded file %s to upyun\", path)\n except Exception:\n logger.warn(\"upload file %s to upyun failed\",\n path, exc_info=True)\n return threads.deferToThread(upload)\n\n\nclass MbCrawlImagesPipeline(FilesPipeline):\n STORE_SCHEMES = dict(FilesPipeline.STORE_SCHEMES)\n STORE_SCHEMES[\"upyun\"] = UpYunStore\n\n @classmethod\n def from_settings(cls, settings):\n upyunStore = cls.STORE_SCHEMES[\"upyun\"]\n upyunStore.OPERATOR = settings[\"UPYUN_OPERATOR\"]\n UpYunStore.SIGNATURE = settings[\"SIGNATURE\"]\n return super().from_settings(settings)\n",
"step-ids": [
7,
8,
11,
14,
20
]
}
|
[
7,
8,
11,
14,
20
] |
from mathmodule import *
import sys
print("Welcome to my basic \'Calculator\'")
print("Please choose your best option (+, -, *, /) ")
# user input part
while True:
try:
A = int(input("Now Enter your first Value="))
break
except:
print("Oops!", sys.exc_info()[0], "occurred.")
while True:
mathoparetor = input("Enter your Math oparetor=")
try:
if mathoparetor in ['+','-','*','/']:
break
else:
raise Exception
except:
print("Opp, Enter Math again")
while True:
try:
B = int(input("Now Enter your second Value="))
break
except:
print("Oops!", sys.exc_info()[0], "occurred.")
# programing for perform
if mathoparetor == '+':
print('The addition number is', add(A,B))
elif mathoparetor == '-':
print('The subtraction number is', sub(A,B))
elif mathoparetor == '*':
print('The multiaplication number is', mull(A,B))
elif mathoparetor == '/':
print('The division number is', divi(A,B))
|
normal
|
{
"blob_id": "1cca94040cdd8db9d98f587c62eff7c58eae7535",
"index": 6974,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"Welcome to my basic 'Calculator'\")\nprint('Please choose your best option (+, -, *, /) ')\nwhile True:\n try:\n A = int(input('Now Enter your first Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nwhile True:\n mathoparetor = input('Enter your Math oparetor=')\n try:\n if mathoparetor in ['+', '-', '*', '/']:\n break\n else:\n raise Exception\n except:\n print('Opp, Enter Math again')\nwhile True:\n try:\n B = int(input('Now Enter your second Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nif mathoparetor == '+':\n print('The addition number is', add(A, B))\nelif mathoparetor == '-':\n print('The subtraction number is', sub(A, B))\nelif mathoparetor == '*':\n print('The multiaplication number is', mull(A, B))\nelif mathoparetor == '/':\n print('The division number is', divi(A, B))\n",
"step-3": "from mathmodule import *\nimport sys\nprint(\"Welcome to my basic 'Calculator'\")\nprint('Please choose your best option (+, -, *, /) ')\nwhile True:\n try:\n A = int(input('Now Enter your first Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nwhile True:\n mathoparetor = input('Enter your Math oparetor=')\n try:\n if mathoparetor in ['+', '-', '*', '/']:\n break\n else:\n raise Exception\n except:\n print('Opp, Enter Math again')\nwhile True:\n try:\n B = int(input('Now Enter your second Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nif mathoparetor == '+':\n print('The addition number is', add(A, B))\nelif mathoparetor == '-':\n print('The subtraction number is', sub(A, B))\nelif mathoparetor == '*':\n print('The multiaplication number is', mull(A, B))\nelif mathoparetor == '/':\n print('The division number is', divi(A, B))\n",
"step-4": "from mathmodule import *\nimport sys\n\nprint(\"Welcome to my basic \\'Calculator\\'\")\n\nprint(\"Please choose your best option (+, -, *, /) \")\n\n# user input part \nwhile True:\n try:\n A = int(input(\"Now Enter your first Value=\"))\n break\n except:\n print(\"Oops!\", sys.exc_info()[0], \"occurred.\")\nwhile True:\n mathoparetor = input(\"Enter your Math oparetor=\")\n try:\n if mathoparetor in ['+','-','*','/']:\n break\n else:\n raise Exception\n except:\n print(\"Opp, Enter Math again\")\n\nwhile True:\n try:\n B = int(input(\"Now Enter your second Value=\"))\n break\n except:\n print(\"Oops!\", sys.exc_info()[0], \"occurred.\")\n\n\n\n# programing for perform\nif mathoparetor == '+':\n print('The addition number is', add(A,B))\n\nelif mathoparetor == '-':\n print('The subtraction number is', sub(A,B))\n\nelif mathoparetor == '*':\n print('The multiaplication number is', mull(A,B))\n\nelif mathoparetor == '/':\n print('The division number is', divi(A,B))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models
class ProdutoManager(models.Manager):
def for_categoria(self, categoria):
return self.filter(categoria=categoria)
|
normal
|
{
"blob_id": "d698fa1b43387ee0b73687df2764c30e04ee6fd0",
"index": 2814,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProdutoManager(models.Manager):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ProdutoManager(models.Manager):\n\n def for_categoria(self, categoria):\n return self.filter(categoria=categoria)\n",
"step-4": "from django.db import models\n\n\nclass ProdutoManager(models.Manager):\n\n def for_categoria(self, categoria):\n return self.filter(categoria=categoria)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
print('Hi, I am Nag')
|
normal
|
{
"blob_id": "0ca751e050244fd85c8110d02d5e7a79eb449ada",
"index": 8542,
"step-1": "<mask token>\n",
"step-2": "print('Hi, I am Nag')\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import bpy
class TILA_Config_LogElement(bpy.types.PropertyGroup):
name: bpy.props.StringProperty(default='')
icon: bpy.props.StringProperty(default='BLANK1')
class TILA_Config_LogList(bpy.types.UIList):
bl_idname = "TILA_UL_Config_log_list"
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_SatusList(bpy.types.UIList):
bl_idname = "TILA_UL_Config_status_list"
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_Log():
def __init__(self, log, index_name):
self.log = log
self.index_name = index_name
def append(self, name, icon='BLANK1'):
element = self.log.add()
element.name = name
element.icon = icon
setattr(bpy.context.window_manager, self.index_name, len(self.log)-1)
def info(self, name):
self.append(name, icon='INFO')
def warning(self, name):
self.append(name, icon='ERROR')
def error(self, name):
self.append(name, icon='CANCEL')
def start(self, name):
self.append(name, icon='TRIA_RIGHT')
def done(self, name):
self.append(name, icon='CHECKMARK')
|
normal
|
{
"blob_id": "7fa7a632078ce4f0052e3cadf11d5efd47a1fad5",
"index": 831,
"step-1": "<mask token>\n\n\nclass TILA_Config_LogList(bpy.types.UIList):\n <mask token>\n <mask token>\n\n\nclass TILA_Config_SatusList(bpy.types.UIList):\n bl_idname = 'TILA_UL_Config_status_list'\n\n def draw_item(self, context, layout, data, item, icon, active_data,\n active_propname, index):\n row = layout.row(align=True)\n row.label(text=item.name, icon=item.icon)\n\n\nclass TILA_Config_Log:\n\n def __init__(self, log, index_name):\n self.log = log\n self.index_name = index_name\n\n def append(self, name, icon='BLANK1'):\n element = self.log.add()\n element.name = name\n element.icon = icon\n setattr(bpy.context.window_manager, self.index_name, len(self.log) - 1)\n\n def info(self, name):\n self.append(name, icon='INFO')\n\n def warning(self, name):\n self.append(name, icon='ERROR')\n\n def error(self, name):\n self.append(name, icon='CANCEL')\n\n def start(self, name):\n self.append(name, icon='TRIA_RIGHT')\n\n def done(self, name):\n self.append(name, icon='CHECKMARK')\n",
"step-2": "<mask token>\n\n\nclass TILA_Config_LogList(bpy.types.UIList):\n <mask token>\n\n def draw_item(self, context, layout, data, item, icon, active_data,\n active_propname, index):\n row = layout.row(align=True)\n row.label(text=item.name, icon=item.icon)\n\n\nclass TILA_Config_SatusList(bpy.types.UIList):\n bl_idname = 'TILA_UL_Config_status_list'\n\n def draw_item(self, context, layout, data, item, icon, active_data,\n active_propname, index):\n row = layout.row(align=True)\n row.label(text=item.name, icon=item.icon)\n\n\nclass TILA_Config_Log:\n\n def __init__(self, log, index_name):\n self.log = log\n self.index_name = index_name\n\n def append(self, name, icon='BLANK1'):\n element = self.log.add()\n element.name = name\n element.icon = icon\n setattr(bpy.context.window_manager, self.index_name, len(self.log) - 1)\n\n def info(self, name):\n self.append(name, icon='INFO')\n\n def warning(self, name):\n self.append(name, icon='ERROR')\n\n def error(self, name):\n self.append(name, icon='CANCEL')\n\n def start(self, name):\n self.append(name, icon='TRIA_RIGHT')\n\n def done(self, name):\n self.append(name, icon='CHECKMARK')\n",
"step-3": "<mask token>\n\n\nclass TILA_Config_LogList(bpy.types.UIList):\n bl_idname = 'TILA_UL_Config_log_list'\n\n def draw_item(self, context, layout, data, item, icon, active_data,\n active_propname, index):\n row = layout.row(align=True)\n row.label(text=item.name, icon=item.icon)\n\n\nclass TILA_Config_SatusList(bpy.types.UIList):\n bl_idname = 'TILA_UL_Config_status_list'\n\n def draw_item(self, context, layout, data, item, icon, active_data,\n active_propname, index):\n row = layout.row(align=True)\n row.label(text=item.name, icon=item.icon)\n\n\nclass TILA_Config_Log:\n\n def __init__(self, log, index_name):\n self.log = log\n self.index_name = index_name\n\n def append(self, name, icon='BLANK1'):\n element = self.log.add()\n element.name = name\n element.icon = icon\n setattr(bpy.context.window_manager, self.index_name, len(self.log) - 1)\n\n def info(self, name):\n self.append(name, icon='INFO')\n\n def warning(self, name):\n self.append(name, icon='ERROR')\n\n def error(self, name):\n self.append(name, icon='CANCEL')\n\n def start(self, name):\n self.append(name, icon='TRIA_RIGHT')\n\n def done(self, name):\n self.append(name, icon='CHECKMARK')\n",
"step-4": "<mask token>\n\n\nclass TILA_Config_LogElement(bpy.types.PropertyGroup):\n name: bpy.props.StringProperty(default='')\n icon: bpy.props.StringProperty(default='BLANK1')\n\n\nclass TILA_Config_LogList(bpy.types.UIList):\n bl_idname = 'TILA_UL_Config_log_list'\n\n def draw_item(self, context, layout, data, item, icon, active_data,\n active_propname, index):\n row = layout.row(align=True)\n row.label(text=item.name, icon=item.icon)\n\n\nclass TILA_Config_SatusList(bpy.types.UIList):\n bl_idname = 'TILA_UL_Config_status_list'\n\n def draw_item(self, context, layout, data, item, icon, active_data,\n active_propname, index):\n row = layout.row(align=True)\n row.label(text=item.name, icon=item.icon)\n\n\nclass TILA_Config_Log:\n\n def __init__(self, log, index_name):\n self.log = log\n self.index_name = index_name\n\n def append(self, name, icon='BLANK1'):\n element = self.log.add()\n element.name = name\n element.icon = icon\n setattr(bpy.context.window_manager, self.index_name, len(self.log) - 1)\n\n def info(self, name):\n self.append(name, icon='INFO')\n\n def warning(self, name):\n self.append(name, icon='ERROR')\n\n def error(self, name):\n self.append(name, icon='CANCEL')\n\n def start(self, name):\n self.append(name, icon='TRIA_RIGHT')\n\n def done(self, name):\n self.append(name, icon='CHECKMARK')\n",
"step-5": "import bpy\n\n\nclass TILA_Config_LogElement(bpy.types.PropertyGroup):\n\tname: bpy.props.StringProperty(default='')\n\ticon: bpy.props.StringProperty(default='BLANK1')\n\nclass TILA_Config_LogList(bpy.types.UIList):\n\tbl_idname = \"TILA_UL_Config_log_list\"\n\t\n\tdef draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n\t\trow = layout.row(align=True)\n\t\trow.label(text=item.name, icon=item.icon)\n\nclass TILA_Config_SatusList(bpy.types.UIList):\n\tbl_idname = \"TILA_UL_Config_status_list\"\n\t\n\tdef draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n\t\trow = layout.row(align=True)\n\t\trow.label(text=item.name, icon=item.icon)\n\nclass TILA_Config_Log():\n\tdef __init__(self, log, index_name):\n\t\tself.log = log\n\t\tself.index_name = index_name\n\n\tdef append(self, name, icon='BLANK1'):\n\t\telement = self.log.add()\n\t\telement.name = name\n\t\telement.icon = icon\n\t\tsetattr(bpy.context.window_manager, self.index_name, len(self.log)-1)\n\t\n\tdef info(self, name):\n\t\tself.append(name, icon='INFO')\n\n\tdef warning(self, name):\n\t\tself.append(name, icon='ERROR')\n\n\tdef error(self, name):\n\t\tself.append(name, icon='CANCEL')\n\n\tdef start(self, name):\n\t\tself.append(name, icon='TRIA_RIGHT')\n\t\n\tdef done(self, name):\n\t\tself.append(name, icon='CHECKMARK')\n",
"step-ids": [
12,
13,
14,
15,
17
]
}
|
[
12,
13,
14,
15,
17
] |
""" Guess the number! """
import random, generic
def check_answer(player_guess, guess_value):
"""
Compares a player's guess and the number to guess
Returns True if the player guessed correctly
Returns False by default
"""
end_game = False
if player_guess > guess_value:
print('guess too high!')
elif player_guess < guess_value:
print('guess too low!')
else:
print('correct!')
end_game = True
return end_game
def check_input(min_guess_range, max_guess_range):
""" Asks user to enter guess and returns a guess within defined min and max guess range """
while True:
try:
playerGuess = int(input('enter your guess: '))
assert min_guess_range <= playerGuess <= max_guess_range
except AssertionError:
print('guess should be between {0} - {1}!'.format(min_guess_range, max_guess_range))
except ValueError:
print('numbers only!')
else:
return playerGuess
def guess_number(min_guess_range, max_guess_range):
""" Returns a guess that is within defined min and max guess range """
print(f'guess the number between {min_guess_range} and {max_guess_range}!')
return check_input(min_guess_range, max_guess_range)
def generate_guess_value(min_guess_range=1, max_guess_range=10):
"""
Returns a random number to guess between a defined min and max range.
Min and Max range can be custom. Default values are 1 - 10
"""
return random.randrange(min_guess_range, max_guess_range), min_guess_range, max_guess_range
def main():
run_game = True
while run_game == True:
guess_value, min_guess_range, max_guess_range = generate_guess_value(1,4)
guess_count, guess_limit = 1, 3
end_game = False
while end_game == False:
print(f'You have {guess_limit - guess_count + 1} remaining. ')
player_guess = guess_number(min_guess_range, max_guess_range)
player_won = check_answer(player_guess, guess_value)
guess_count = guess_count + 1
if player_won == True:
print(f'You win! congrats! ')
end_game = True
elif guess_count > guess_limit:
print(f'You ran out of guesses! you lose!')
end_game = True
run_game = generic.run_again()
if __name__ == '__main__':
main()
# [*] number to guess is generated within min - max range
# [*] guess limit is set
# [*] guess is made -> check guess within range
# [*] guess made compared to number to guess
# [*] loop again until guess limit runs out or until guess made matchess number to guess
|
normal
|
{
"blob_id": "a1e54a0f593149c1d97e64342c99f0ab8aa28fa9",
"index": 6215,
"step-1": "<mask token>\n\n\ndef check_answer(player_guess, guess_value):\n \"\"\"\n\tCompares a player's guess and the number to guess\n\tReturns True if the player guessed correctly\n\tReturns False by default\n\t\"\"\"\n end_game = False\n if player_guess > guess_value:\n print('guess too high!')\n elif player_guess < guess_value:\n print('guess too low!')\n else:\n print('correct!')\n end_game = True\n return end_game\n\n\n<mask token>\n\n\ndef guess_number(min_guess_range, max_guess_range):\n \"\"\" Returns a guess that is within defined min and max guess range \"\"\"\n print(f'guess the number between {min_guess_range} and {max_guess_range}!')\n return check_input(min_guess_range, max_guess_range)\n\n\n<mask token>\n\n\ndef main():\n run_game = True\n while run_game == True:\n guess_value, min_guess_range, max_guess_range = generate_guess_value(\n 1, 4)\n guess_count, guess_limit = 1, 3\n end_game = False\n while end_game == False:\n print(f'You have {guess_limit - guess_count + 1} remaining. ')\n player_guess = guess_number(min_guess_range, max_guess_range)\n player_won = check_answer(player_guess, guess_value)\n guess_count = guess_count + 1\n if player_won == True:\n print(f'You win! congrats! ')\n end_game = True\n elif guess_count > guess_limit:\n print(f'You ran out of guesses! you lose!')\n end_game = True\n run_game = generic.run_again()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_answer(player_guess, guess_value):\n \"\"\"\n\tCompares a player's guess and the number to guess\n\tReturns True if the player guessed correctly\n\tReturns False by default\n\t\"\"\"\n end_game = False\n if player_guess > guess_value:\n print('guess too high!')\n elif player_guess < guess_value:\n print('guess too low!')\n else:\n print('correct!')\n end_game = True\n return end_game\n\n\ndef check_input(min_guess_range, max_guess_range):\n \"\"\" Asks user to enter guess and returns a guess within defined min and max guess range \"\"\"\n while True:\n try:\n playerGuess = int(input('enter your guess: '))\n assert min_guess_range <= playerGuess <= max_guess_range\n except AssertionError:\n print('guess should be between {0} - {1}!'.format(\n min_guess_range, max_guess_range))\n except ValueError:\n print('numbers only!')\n else:\n return playerGuess\n\n\ndef guess_number(min_guess_range, max_guess_range):\n \"\"\" Returns a guess that is within defined min and max guess range \"\"\"\n print(f'guess the number between {min_guess_range} and {max_guess_range}!')\n return check_input(min_guess_range, max_guess_range)\n\n\ndef generate_guess_value(min_guess_range=1, max_guess_range=10):\n \"\"\"\n\tReturns a random number to guess between a defined min and max range.\n\tMin and Max range can be custom. Default values are 1 - 10\n\t\"\"\"\n return random.randrange(min_guess_range, max_guess_range\n ), min_guess_range, max_guess_range\n\n\ndef main():\n run_game = True\n while run_game == True:\n guess_value, min_guess_range, max_guess_range = generate_guess_value(\n 1, 4)\n guess_count, guess_limit = 1, 3\n end_game = False\n while end_game == False:\n print(f'You have {guess_limit - guess_count + 1} remaining. ')\n player_guess = guess_number(min_guess_range, max_guess_range)\n player_won = check_answer(player_guess, guess_value)\n guess_count = guess_count + 1\n if player_won == True:\n print(f'You win! congrats! 
')\n end_game = True\n elif guess_count > guess_limit:\n print(f'You ran out of guesses! you lose!')\n end_game = True\n run_game = generic.run_again()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef check_answer(player_guess, guess_value):\n \"\"\"\n\tCompares a player's guess and the number to guess\n\tReturns True if the player guessed correctly\n\tReturns False by default\n\t\"\"\"\n end_game = False\n if player_guess > guess_value:\n print('guess too high!')\n elif player_guess < guess_value:\n print('guess too low!')\n else:\n print('correct!')\n end_game = True\n return end_game\n\n\ndef check_input(min_guess_range, max_guess_range):\n \"\"\" Asks user to enter guess and returns a guess within defined min and max guess range \"\"\"\n while True:\n try:\n playerGuess = int(input('enter your guess: '))\n assert min_guess_range <= playerGuess <= max_guess_range\n except AssertionError:\n print('guess should be between {0} - {1}!'.format(\n min_guess_range, max_guess_range))\n except ValueError:\n print('numbers only!')\n else:\n return playerGuess\n\n\ndef guess_number(min_guess_range, max_guess_range):\n \"\"\" Returns a guess that is within defined min and max guess range \"\"\"\n print(f'guess the number between {min_guess_range} and {max_guess_range}!')\n return check_input(min_guess_range, max_guess_range)\n\n\ndef generate_guess_value(min_guess_range=1, max_guess_range=10):\n \"\"\"\n\tReturns a random number to guess between a defined min and max range.\n\tMin and Max range can be custom. Default values are 1 - 10\n\t\"\"\"\n return random.randrange(min_guess_range, max_guess_range\n ), min_guess_range, max_guess_range\n\n\ndef main():\n run_game = True\n while run_game == True:\n guess_value, min_guess_range, max_guess_range = generate_guess_value(\n 1, 4)\n guess_count, guess_limit = 1, 3\n end_game = False\n while end_game == False:\n print(f'You have {guess_limit - guess_count + 1} remaining. ')\n player_guess = guess_number(min_guess_range, max_guess_range)\n player_won = check_answer(player_guess, guess_value)\n guess_count = guess_count + 1\n if player_won == True:\n print(f'You win! congrats! 
')\n end_game = True\n elif guess_count > guess_limit:\n print(f'You ran out of guesses! you lose!')\n end_game = True\n run_game = generic.run_again()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport random, generic\n\n\ndef check_answer(player_guess, guess_value):\n \"\"\"\n\tCompares a player's guess and the number to guess\n\tReturns True if the player guessed correctly\n\tReturns False by default\n\t\"\"\"\n end_game = False\n if player_guess > guess_value:\n print('guess too high!')\n elif player_guess < guess_value:\n print('guess too low!')\n else:\n print('correct!')\n end_game = True\n return end_game\n\n\ndef check_input(min_guess_range, max_guess_range):\n \"\"\" Asks user to enter guess and returns a guess within defined min and max guess range \"\"\"\n while True:\n try:\n playerGuess = int(input('enter your guess: '))\n assert min_guess_range <= playerGuess <= max_guess_range\n except AssertionError:\n print('guess should be between {0} - {1}!'.format(\n min_guess_range, max_guess_range))\n except ValueError:\n print('numbers only!')\n else:\n return playerGuess\n\n\ndef guess_number(min_guess_range, max_guess_range):\n \"\"\" Returns a guess that is within defined min and max guess range \"\"\"\n print(f'guess the number between {min_guess_range} and {max_guess_range}!')\n return check_input(min_guess_range, max_guess_range)\n\n\ndef generate_guess_value(min_guess_range=1, max_guess_range=10):\n \"\"\"\n\tReturns a random number to guess between a defined min and max range.\n\tMin and Max range can be custom. Default values are 1 - 10\n\t\"\"\"\n return random.randrange(min_guess_range, max_guess_range\n ), min_guess_range, max_guess_range\n\n\ndef main():\n run_game = True\n while run_game == True:\n guess_value, min_guess_range, max_guess_range = generate_guess_value(\n 1, 4)\n guess_count, guess_limit = 1, 3\n end_game = False\n while end_game == False:\n print(f'You have {guess_limit - guess_count + 1} remaining. ')\n player_guess = guess_number(min_guess_range, max_guess_range)\n player_won = check_answer(player_guess, guess_value)\n guess_count = guess_count + 1\n if player_won == True:\n print(f'You win! 
congrats! ')\n end_game = True\n elif guess_count > guess_limit:\n print(f'You ran out of guesses! you lose!')\n end_game = True\n run_game = generic.run_again()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\" Guess the number! \"\"\"\n\nimport random, generic\n\n\ndef check_answer(player_guess, guess_value):\n\t\"\"\"\n\tCompares a player's guess and the number to guess\n\tReturns True if the player guessed correctly\n\tReturns False by default\n\t\"\"\"\n\n\tend_game = False\n\n\tif player_guess > guess_value:\n\t\tprint('guess too high!')\n\telif player_guess < guess_value:\n\t\tprint('guess too low!')\n\telse:\n\t\tprint('correct!')\n\t\tend_game = True\n\treturn end_game\n\n\ndef check_input(min_guess_range, max_guess_range):\n\t\"\"\" Asks user to enter guess and returns a guess within defined min and max guess range \"\"\"\n\twhile True:\n\t\ttry:\n\t\t\tplayerGuess = int(input('enter your guess: '))\n\t\t\tassert min_guess_range <= playerGuess <= max_guess_range\n\n\t\texcept AssertionError:\n\t\t\tprint('guess should be between {0} - {1}!'.format(min_guess_range, max_guess_range))\n\t\texcept ValueError:\n\t\t\tprint('numbers only!')\n\t\telse:\n\t\t\treturn playerGuess\n\n\ndef guess_number(min_guess_range, max_guess_range):\n\t\"\"\" Returns a guess that is within defined min and max guess range \"\"\"\n\tprint(f'guess the number between {min_guess_range} and {max_guess_range}!')\n\treturn check_input(min_guess_range, max_guess_range)\n\n\ndef generate_guess_value(min_guess_range=1, max_guess_range=10):\n\t\"\"\"\n\tReturns a random number to guess between a defined min and max range.\n\tMin and Max range can be custom. Default values are 1 - 10\n\t\"\"\"\n\treturn random.randrange(min_guess_range, max_guess_range), min_guess_range, max_guess_range\n\n\ndef main():\n\trun_game = True\n\twhile run_game == True:\n\t\tguess_value, min_guess_range, max_guess_range = generate_guess_value(1,4)\n\t\tguess_count, guess_limit = 1, 3\n\n\t\tend_game = False\n\t\twhile end_game == False:\n\t\t\tprint(f'You have {guess_limit - guess_count + 1} remaining. 
')\n\t\t\tplayer_guess = guess_number(min_guess_range, max_guess_range)\n\t\t\tplayer_won = check_answer(player_guess, guess_value)\n\t\t\tguess_count = guess_count + 1\n\t\t\t\n\t\t\tif player_won == True:\n\t\t\t\tprint(f'You win! congrats! ')\n\t\t\t\tend_game = True\n\t\t\telif guess_count > guess_limit:\n\t\t\t\tprint(f'You ran out of guesses! you lose!')\n\t\t\t\tend_game = True\n\n\t\trun_game = generic.run_again()\n\nif __name__ == '__main__':\n\tmain()\n\n# [*] number to guess is generated within min - max range\n# [*] guess limit is set\n# [*] guess is made -> check guess within range\n# [*] guess made compared to number to guess\n# [*] loop again until guess limit runs out or until guess made matchess number to guess\n\n\n\n\n\n\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 31 13:42:47 2018
@author: zhan
"""
from scipy.spatial.distance import pdist, squareform, cdist
import numpy as np
import scipy.io as sci
import os,sys
import datetime
###################################################################
# I_tr:features of training set for image data
# I_te:features of testing set for image data
# T_tr:features of training set for text data
# T_te:features of testing set for text data
# L_tr:category label of training set
# L_te:category label of testing set
###############################################################
def unifyKnnKernel(Z,tr_n_I, te_n_I, tr_n_T, te_n_T,k):
    """Score test images against test texts through a k-NN graph on Z.

    :param Z: full pairwise similarity matrix over the stacked features
        [I_tr; I_te; T_tr; T_te] (rows/columns in that order).
    :param tr_n_I: number of training image samples.
    :param te_n_I: number of testing image samples.
    :param tr_n_T: number of training text samples.
    :param te_n_T: number of testing text samples.
    :param k: number of nearest *training* neighbours kept per test sample.
    :return: (te_n_I, te_n_T) matrix W_IT; entry (i, j) scores how well
        test image i matches test text j.
    """
    # Row indices of all *test* samples (test images, then test texts).
    x1 = np.concatenate([range(tr_n_I,tr_n_I+te_n_I),
                range(tr_n_I+te_n_I+tr_n_T,tr_n_I+te_n_I+tr_n_T+te_n_T)]);
    # Row indices of all *training* samples (training images, then texts).
    x2 = np.concatenate([range(0,tr_n_I),
                range(tr_n_I+te_n_I,tr_n_I+te_n_I+tr_n_T)]);
    # Column indices of all training samples (same ordering as x2).
    y1 = np.concatenate([range(0,tr_n_I), range(tr_n_I+te_n_I,tr_n_I+te_n_I+tr_n_T)]);
    # W: test-vs-training similarities; Y: training-vs-training similarities.
    W = Z[x1,:];
    W = W[:,y1];
    W = W;  # NOTE(review): no-op assignment, kept as-is
    Y = Z[x2,:];
    Y = Y[:,y1];
    Y = Y;  # NOTE(review): no-op assignment, kept as-is
    # Per test row: similarities sorted descending, and the matching indices.
    KN = -np.sort(-W);
    I = np.argsort(-W);
    # Keep only the k strongest training neighbours per test sample; zero the rest.
    for i in range(0,te_n_I + te_n_T):
        k1 = np.reshape(KN[i,0:k], [1, k]);
        knn = np.concatenate([k1, np.zeros([1,tr_n_I + tr_n_T-k])],1);
        W[i,I[i,:]] = knn;
    # Split back into test-image rows and test-text rows.
    WI = W[0:te_n_I, :];
    WT = W[te_n_I:te_n_I+te_n_T, :];

    WI_s = np.reshape(np.sum(WI, 1), [len(WI),1]);
    # NOTE(review): len(WI) is reused for WT's shape here -- this assumes
    # te_n_I == te_n_T; confirm for datasets with unequal test splits.
    WT_s = np.reshape(np.sum(WT, 1), [len(WI),1]);
    # Row-normalise so each test sample's neighbour weights sum to 1.
    WI = WI/np.tile(WI_s, [1, tr_n_I+tr_n_T]);
    WT = WT/np.tile(WT_s, [1, tr_n_T+tr_n_I]);

    #W = np.concatenate([WI,WT]);
    # Y0 marks "same training pair" entries (image i with text i); Y1 forces
    # those entries to 1 while keeping Y elsewhere.  NOTE(review): this
    # pairing assumes tr_n_I == tr_n_T (aligned image/text training pairs).
    m = np.reshape(range(tr_n_I), [tr_n_I,1]);
    m1 = np.tile(np.concatenate([m, m]),[1,(tr_n_I+tr_n_T)]);
    Y0 = (m1 == m1.T); 
    Y1 = np.multiply(Y,(1.-Y0))+Y0;
    h = Y1;
    # Propagate: image neighbour weights -> training graph -> text weights.
    W_IT = np.matmul(np.matmul(WI,h), WT.T);
    
    return W_IT
def computer_av(distance, label):
    """Mean average precision (mAP) over the top-50 retrieved items.

    :param distance: (m, n) similarity matrix; larger value = better match.
    :param label: indexable collection of per-sample label vectors; an item
        is relevant when its label vector equals the query's elementwise.
        NOTE(review): row index i and column index sort[i, j] address the
        same `label` array, which presumes m == n -- confirm with callers.
    :return: mean of the per-query average precisions (queries with no
        relevant item in the top ranks contribute 0).
    """
    m, n = np.shape(distance)
    av_precision = np.zeros([m, 1])
    sort = np.argsort(-distance)  # per row: column indices, best first
    # Guard: the original iterated range(50) unconditionally and raised an
    # IndexError whenever fewer than 50 candidates exist.
    depth = min(50, n)
    for i in range(m):
        cumulate = 0.0
        tp_counter = 0.0
        for j in range(depth):
            if np.sum(np.abs(label[sort[i, j]] - label[i])) == 0:
                tp_counter += 1.0
                # Precision at this (relevant) rank.
                cumulate = cumulate + (float(tp_counter) / float(j + 1))
        if tp_counter != 0:
            av_precision[i] = cumulate / float(tp_counter)
    mean_precision = np.mean(av_precision)
    return mean_precision
if __name__ == '__main__':
    # Load pre-extracted image/text features and labels (I_*, T_*, L_*).
    data1 = sci.loadmat('best_data.mat')
    begin = datetime.datetime.now()  # NOTE(review): timings are collected but never reported
    # Pairwise cosine distances over the stacked [I_tr; I_te; T_tr; T_te]
    # features, mapped from distance in [0, 2] to similarity in [0, 1].
    D1 = pdist(np.concatenate([data1['I_tr'], data1['I_te'],
                               data1['T_tr'], data1['T_te']]), 'cosine')
    Z1 = 1.0 - squareform(D1) / 2.0
    for k in range(10, 1000, 10):  # sweep the neighbourhood size
        distance = unifyKnnKernel(Z1,
                                  len(data1['I_tr']), len(data1['I_te']),
                                  len(data1['T_tr']), len(data1['T_te']),
                                  k)
        end = datetime.datetime.now()

        re1 = computer_av(distance, data1['L_te'].T)
        re2 = computer_av(distance.T, data1['L_te'].T)
        avg = (re1 + re2) / 2.0
        print(k)  # parenthesised so the line is valid under Python 2 and 3
        print('The KNN test result:ItoT:{: .4}; TtoI: {: .4}; avg: {: .4}'.format(re1, re2, avg))

        # NOTE(review): the console labels re1 "ItoT" while the log labels it
        # "T2I" -- one of the two is presumably wrong; confirm intended naming.
        # `with` guarantees the log file is closed even if a write fails.
        with open('knn_test.txt', 'a') as log_file:
            log_file.write('k: %s\tT2I: %s\tI2T: %s\tAVG: %s\n' % (k, re1, re2, avg))
|
normal
|
{
"blob_id": "db140bf66f3e3a84a60a6617ea4c03cc6a1bc56d",
"index": 6271,
"step-1": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 31 13:42:47 2018\n\n@author: zhan\n\"\"\"\nfrom scipy.spatial.distance import pdist, squareform, cdist\nimport numpy as np\nimport scipy.io as sci\nimport os,sys\nimport datetime\n\n###################################################################\n# I_tr:features of training set for image data\n# I_te:features of testing set for image data\n# T_te:features of training set for text data\n# T_te:features of testing set for text data\n# L_tr:category label of training set\n# L_te:category label of testing set\n\n###############################################################\n\n\ndef unifyKnnKernel(Z,tr_n_I, te_n_I, tr_n_T, te_n_T,k):\n x1 = np.concatenate([range(tr_n_I,tr_n_I+te_n_I),\n range(tr_n_I+te_n_I+tr_n_T,tr_n_I+te_n_I+tr_n_T+te_n_T)]);\n x2 = np.concatenate([range(0,tr_n_I),\n range(tr_n_I+te_n_I,tr_n_I+te_n_I+tr_n_T)]);\n y1 = np.concatenate([range(0,tr_n_I), range(tr_n_I+te_n_I,tr_n_I+te_n_I+tr_n_T)]);\n W = Z[x1,:];\n W = W[:,y1];\n W = W;\n Y = Z[x2,:];\n Y = Y[:,y1];\n Y = Y;\n KN = -np.sort(-W);\n I = np.argsort(-W);\n for i in range(0,te_n_I + te_n_T):\n k1 = np.reshape(KN[i,0:k], [1, k]);\n knn = np.concatenate([k1, np.zeros([1,tr_n_I + tr_n_T-k])],1);\n W[i,I[i,:]] = knn;\n WI = W[0:te_n_I, :];\n WT = W[te_n_I:te_n_I+te_n_T, :];\n\n WI_s = np.reshape(np.sum(WI, 1), [len(WI),1]);\n WT_s = np.reshape(np.sum(WT, 1), [len(WI),1]);\n WI = WI/np.tile(WI_s, [1, tr_n_I+tr_n_T]);\n WT = WT/np.tile(WT_s, [1, tr_n_T+tr_n_I]);\n\n #W = np.concatenate([WI,WT]);\n m = np.reshape(range(tr_n_I), [tr_n_I,1]);\n m1 = np.tile(np.concatenate([m, m]),[1,(tr_n_I+tr_n_T)]);\n Y0 = (m1 == m1.T); \n Y1 = np.multiply(Y,(1.-Y0))+Y0;\n h = Y1;\n W_IT = np.matmul(np.matmul(WI,h), WT.T);\n \n return W_IT\n\ndef computer_av(distance, label):\n m, n = np.shape(distance)\n av_precision = np.zeros([m, 1])\n sort = np.argsort(-distance)\n for i in range(m):\n cumulate = 0.0\n tp_counter = 0.0\n 
for j in range(50):\n if np.sum(np.abs(label[sort[i,j]] - label[i])) == 0:\n tp_counter += 1.0\n cumulate = cumulate + (float(tp_counter)/ float(j+1))\n \n if tp_counter !=0:\n av_precision[i] = cumulate/float(tp_counter)\n mean_precision = np.mean(av_precision)\n return mean_precision \n\n \nif __name__ == '__main__':\n data1 = sci.loadmat('best_data.mat') \n begin = datetime.datetime.now()\n D1 = pdist(np.concatenate([data1['I_tr'], data1['I_te'], \n data1['T_tr'], data1['T_te']]),'cosine');\n Z1 = 1.0-squareform(D1)/2.0;\n h = []\n p = []\n for k in range(10, 1000, 10): \n distance = unifyKnnKernel(Z1,\n len(data1['I_tr']),len(data1['I_te']),\n len(data1['T_tr']),len(data1['T_te']),\n k)\n end = datetime.datetime.now()\n \n \n re1 = computer_av(distance,data1['L_te'].T)\n re2 = computer_av(distance.T, data1['L_te'].T)\n avg = (re1 + re2)/2.0\n print k\n print('The KNN test result:ItoT:{: .4}; TtoI: {: .4}; avg: {: .4}'.format(re1, re2, avg))\n \n\n f1 = open('knn_test.txt', \"a\")\n f1.write('k: ')\n f1.write(str(k))\n f1.write('\\t')\n f1.write('T2I: ')\n f1.write(str(re1))\n f1.write('\\t')\n f1.write('I2T: ')\n f1.write(str(re2))\n f1.write('\\t')\n f1.write('AVG: ')\n f1.write(str(avg))\n f1.write('\\n')\n f1.close()\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from rest_framework import serializers
from core.models import Curriculo
class CurriculoSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the public fields of the Curriculo model."""

    class Meta:
        model = Curriculo
        # Identifier plus the user-editable fields and both timestamps.
        fields = (
            'id',
            'name',
            'description',
            'image',
            'create_at',
            'update_at',
        )
|
normal
|
{
"blob_id": "029f4f015f558dbd4d6096b00c53f5f0fe69883d",
"index": 1322,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CurriculoSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Curriculo\n fields = 'id', 'name', 'description', 'image', 'create_at', 'update_at'\n",
"step-3": "from rest_framework import serializers\nfrom core.models import Curriculo\n\n\nclass CurriculoSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Curriculo\n fields = 'id', 'name', 'description', 'image', 'create_at', 'update_at'\n",
"step-4": "from rest_framework import serializers\nfrom core.models import Curriculo\n\nclass CurriculoSerializer(serializers.ModelSerializer):\n class Meta:\n model = Curriculo\n fields = ('id','name', 'description','image','create_at','update_at')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
DIM Station Test
~~~~~~~~~~~~~~~~
Unit test for DIM Station
"""
import unittest
from dimp import ID, NetworkID
class StationTestCase(unittest.TestCase):
def test_identifier(self):
print('\n---------------- %s' % self)
str1 = 'gsp-s001@x77uVYBT1G48CLzW9iwe2dr5jhUNEM772G'
id1 = ID(str1)
self.assertEqual(id1.address.network, NetworkID.Station)
arr1 = [str1]
self.assertTrue(id1 in arr1)
def test_btc(self):
total_money = 2100 * 10000
package = 50
print('total BTC: %d, first package: %d' % (total_money, package))
spent = 0
order = 0
day = 0
year = 0
while (spent + package) <= total_money:
spent += package
order += 1
if order % (6 * 24) == 0:
day += 1
if day % 365 == 0:
year += 1
print('year %d, day %d: package=%f, spent=%f' % (year, day, package, spent))
if year % 4 == 0:
package /= 2.0
print('BTC OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (year, day, package, spent, (total_money - spent)))
def test_dimt(self):
total_money = 15 * 10000 * 10000
package = 2 ** 20
print('total money: %d, first package: %d' % (total_money, package))
spent = 0
day = 0
year = 0
while (spent + package) <= total_money and package >= 1:
spent += package
day += 1
if day % 365 == 0:
year += 1
print('year %d, day %d: package=%f, spent=%f' % (year, day, package, spent))
if year % 2 == 0:
package /= 2.0
print('DIMT OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (year, day, package, spent, (total_money - spent)))
# Run the test methods above when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
normal
|
{
"blob_id": "533d0b883a0bbbb148f04826e4c0a2bcc31732e9",
"index": 6702,
"step-1": "<mask token>\n\n\nclass StationTestCase(unittest.TestCase):\n\n def test_identifier(self):\n print('\\n---------------- %s' % self)\n str1 = 'gsp-s001@x77uVYBT1G48CLzW9iwe2dr5jhUNEM772G'\n id1 = ID(str1)\n self.assertEqual(id1.address.network, NetworkID.Station)\n arr1 = [str1]\n self.assertTrue(id1 in arr1)\n <mask token>\n\n def test_dimt(self):\n total_money = 15 * 10000 * 10000\n package = 2 ** 20\n print('total money: %d, first package: %d' % (total_money, package))\n spent = 0\n day = 0\n year = 0\n while spent + package <= total_money and package >= 1:\n spent += package\n day += 1\n if day % 365 == 0:\n year += 1\n print('year %d, day %d: package=%f, spent=%f' % (year, day,\n package, spent))\n if year % 2 == 0:\n package /= 2.0\n print('DIMT OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (\n year, day, package, spent, total_money - spent))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass StationTestCase(unittest.TestCase):\n\n def test_identifier(self):\n print('\\n---------------- %s' % self)\n str1 = 'gsp-s001@x77uVYBT1G48CLzW9iwe2dr5jhUNEM772G'\n id1 = ID(str1)\n self.assertEqual(id1.address.network, NetworkID.Station)\n arr1 = [str1]\n self.assertTrue(id1 in arr1)\n\n def test_btc(self):\n total_money = 2100 * 10000\n package = 50\n print('total BTC: %d, first package: %d' % (total_money, package))\n spent = 0\n order = 0\n day = 0\n year = 0\n while spent + package <= total_money:\n spent += package\n order += 1\n if order % (6 * 24) == 0:\n day += 1\n if day % 365 == 0:\n year += 1\n print('year %d, day %d: package=%f, spent=%f' % (year,\n day, package, spent))\n if year % 4 == 0:\n package /= 2.0\n print('BTC OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (\n year, day, package, spent, total_money - spent))\n\n def test_dimt(self):\n total_money = 15 * 10000 * 10000\n package = 2 ** 20\n print('total money: %d, first package: %d' % (total_money, package))\n spent = 0\n day = 0\n year = 0\n while spent + package <= total_money and package >= 1:\n spent += package\n day += 1\n if day % 365 == 0:\n year += 1\n print('year %d, day %d: package=%f, spent=%f' % (year, day,\n package, spent))\n if year % 2 == 0:\n package /= 2.0\n print('DIMT OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (\n year, day, package, spent, total_money - spent))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass StationTestCase(unittest.TestCase):\n\n def test_identifier(self):\n print('\\n---------------- %s' % self)\n str1 = 'gsp-s001@x77uVYBT1G48CLzW9iwe2dr5jhUNEM772G'\n id1 = ID(str1)\n self.assertEqual(id1.address.network, NetworkID.Station)\n arr1 = [str1]\n self.assertTrue(id1 in arr1)\n\n def test_btc(self):\n total_money = 2100 * 10000\n package = 50\n print('total BTC: %d, first package: %d' % (total_money, package))\n spent = 0\n order = 0\n day = 0\n year = 0\n while spent + package <= total_money:\n spent += package\n order += 1\n if order % (6 * 24) == 0:\n day += 1\n if day % 365 == 0:\n year += 1\n print('year %d, day %d: package=%f, spent=%f' % (year,\n day, package, spent))\n if year % 4 == 0:\n package /= 2.0\n print('BTC OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (\n year, day, package, spent, total_money - spent))\n\n def test_dimt(self):\n total_money = 15 * 10000 * 10000\n package = 2 ** 20\n print('total money: %d, first package: %d' % (total_money, package))\n spent = 0\n day = 0\n year = 0\n while spent + package <= total_money and package >= 1:\n spent += package\n day += 1\n if day % 365 == 0:\n year += 1\n print('year %d, day %d: package=%f, spent=%f' % (year, day,\n package, spent))\n if year % 2 == 0:\n package /= 2.0\n print('DIMT OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (\n year, day, package, spent, total_money - spent))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nfrom dimp import ID, NetworkID\n\n\nclass StationTestCase(unittest.TestCase):\n\n def test_identifier(self):\n print('\\n---------------- %s' % self)\n str1 = 'gsp-s001@x77uVYBT1G48CLzW9iwe2dr5jhUNEM772G'\n id1 = ID(str1)\n self.assertEqual(id1.address.network, NetworkID.Station)\n arr1 = [str1]\n self.assertTrue(id1 in arr1)\n\n def test_btc(self):\n total_money = 2100 * 10000\n package = 50\n print('total BTC: %d, first package: %d' % (total_money, package))\n spent = 0\n order = 0\n day = 0\n year = 0\n while spent + package <= total_money:\n spent += package\n order += 1\n if order % (6 * 24) == 0:\n day += 1\n if day % 365 == 0:\n year += 1\n print('year %d, day %d: package=%f, spent=%f' % (year,\n day, package, spent))\n if year % 4 == 0:\n package /= 2.0\n print('BTC OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (\n year, day, package, spent, total_money - spent))\n\n def test_dimt(self):\n total_money = 15 * 10000 * 10000\n package = 2 ** 20\n print('total money: %d, first package: %d' % (total_money, package))\n spent = 0\n day = 0\n year = 0\n while spent + package <= total_money and package >= 1:\n spent += package\n day += 1\n if day % 365 == 0:\n year += 1\n print('year %d, day %d: package=%f, spent=%f' % (year, day,\n package, spent))\n if year % 2 == 0:\n package /= 2.0\n print('DIMT OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (\n year, day, package, spent, total_money - spent))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n DIM Station Test\n ~~~~~~~~~~~~~~~~\n\n Unit test for DIM Station\n\"\"\"\n\nimport unittest\n\nfrom dimp import ID, NetworkID\n\n\nclass StationTestCase(unittest.TestCase):\n\n def test_identifier(self):\n print('\\n---------------- %s' % self)\n str1 = 'gsp-s001@x77uVYBT1G48CLzW9iwe2dr5jhUNEM772G'\n id1 = ID(str1)\n self.assertEqual(id1.address.network, NetworkID.Station)\n arr1 = [str1]\n self.assertTrue(id1 in arr1)\n\n def test_btc(self):\n total_money = 2100 * 10000\n package = 50\n print('total BTC: %d, first package: %d' % (total_money, package))\n spent = 0\n order = 0\n day = 0\n year = 0\n while (spent + package) <= total_money:\n spent += package\n order += 1\n if order % (6 * 24) == 0:\n day += 1\n if day % 365 == 0:\n year += 1\n print('year %d, day %d: package=%f, spent=%f' % (year, day, package, spent))\n if year % 4 == 0:\n package /= 2.0\n print('BTC OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (year, day, package, spent, (total_money - spent)))\n\n def test_dimt(self):\n total_money = 15 * 10000 * 10000\n package = 2 ** 20\n print('total money: %d, first package: %d' % (total_money, package))\n spent = 0\n day = 0\n year = 0\n while (spent + package) <= total_money and package >= 1:\n spent += package\n day += 1\n if day % 365 == 0:\n year += 1\n print('year %d, day %d: package=%f, spent=%f' % (year, day, package, spent))\n if year % 2 == 0:\n package /= 2.0\n print('DIMT OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (year, day, package, spent, (total_money - spent)))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import pytest
import json
import os.path
import importlib
import jsonpickle
from fixture.application import Application
# Module-level cache shared by the fixtures below: the Application under
# test and the parsed JSON configuration.  Both are populated lazily by app().
fixture = None
config = None
@pytest.fixture
def app(request):
    """Return a logged-in Application, reusing a cached instance when possible.

    The Application and the parsed JSON config live in module globals so the
    browser session survives between tests; a fresh Application is created
    only when none exists or the cached one is no longer valid.
    """
    global fixture
    global config
    browser = request.config.getoption("--browser")
    if config is None:
        # Resolve the config file relative to this conftest.py, not the CWD.
        conf_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), request.config.getoption("--config"))
        with open(conf_file_path) as config_file:
            config = json.load(config_file)
    if fixture is None or not fixture.is_valid():
        fixture = Application(browser=browser, base_url=config["baseUrl"])

    # Re-ensure login on every use: a previous test may have logged out.
    fixture.session.ensure_login(name=config["login"], pwd=config["password"])

    return fixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
    """Session-wide teardown: log out and destroy the cached Application."""
    global fixture

    def _teardown():
        fixture.session.ensure_logout()
        fixture.destroy()

    request.addfinalizer(_teardown)
    return fixture
def pytest_addoption(parser):
    """Register custom CLI options: target browser and JSON config file name."""
    parser.addoption("--browser", action="store", default="firefox")
    parser.addoption("--config", action="store", default="config.json")
def pytest_generate_tests(metafunc):
    """Parametrize prefixed fixtures from external data.

    ``data_<name>`` fixtures load ``testdata`` from ``data.<name>`` modules;
    ``json_<name>`` fixtures load ``data/<name>.json``.  Both branches were
    previously duplicated; a prefix->loader table removes the repetition.
    """
    loaders = {"data_": load_from_module, "json_": load_from_json}
    for fixture in metafunc.fixturenames:
        for prefix, loader in loaders.items():
            if fixture.startswith(prefix):
                testdata = loader(fixture[len(prefix):])
                metafunc.parametrize(fixture, testdata,
                                     ids=[repr(g) for g in testdata])
                break
def load_from_module(module):
    """Import ``data.<module>`` and return its ``testdata`` attribute."""
    mod = importlib.import_module('data.' + module)
    return mod.testdata
def load_from_json(jsonfile):
    """Read ``data/<jsonfile>.json`` (relative to this file) via jsonpickle."""
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        f'data/{jsonfile}.json')
    with open(path) as fh:
        return jsonpickle.decode(fh.read())
|
normal
|
{
"blob_id": "0c0fb3bfb81be5ef6a60584eafeefec61f171679",
"index": 9124,
"step-1": "<mask token>\n\n\[email protected](scope='session', autouse=True)\ndef stop(request):\n global fixture\n\n def finalizer():\n fixture.session.ensure_logout()\n fixture.destroy()\n request.addfinalizer(finalizer)\n return fixture\n\n\n<mask token>\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith('data_'):\n testdata = load_from_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n elif fixture.startswith('json_'):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n\n\ndef load_from_module(module):\n return importlib.import_module(f'data.{module}').testdata\n\n\ndef load_from_json(jsonfile):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n f'data/{jsonfile}.json')) as file:\n return jsonpickle.decode(file.read())\n",
"step-2": "<mask token>\n\n\[email protected]\ndef app(request):\n global fixture\n global config\n browser = request.config.getoption('--browser')\n if config is None:\n conf_file_path = os.path.join(os.path.dirname(os.path.abspath(\n __file__)), request.config.getoption('--config'))\n with open(conf_file_path) as config_file:\n config = json.load(config_file)\n if fixture is None or not fixture.is_valid():\n fixture = Application(browser=browser, base_url=config['baseUrl'])\n fixture.session.ensure_login(name=config['login'], pwd=config['password'])\n return fixture\n\n\[email protected](scope='session', autouse=True)\ndef stop(request):\n global fixture\n\n def finalizer():\n fixture.session.ensure_logout()\n fixture.destroy()\n request.addfinalizer(finalizer)\n return fixture\n\n\n<mask token>\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith('data_'):\n testdata = load_from_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n elif fixture.startswith('json_'):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n\n\ndef load_from_module(module):\n return importlib.import_module(f'data.{module}').testdata\n\n\ndef load_from_json(jsonfile):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n f'data/{jsonfile}.json')) as file:\n return jsonpickle.decode(file.read())\n",
"step-3": "<mask token>\n\n\[email protected]\ndef app(request):\n global fixture\n global config\n browser = request.config.getoption('--browser')\n if config is None:\n conf_file_path = os.path.join(os.path.dirname(os.path.abspath(\n __file__)), request.config.getoption('--config'))\n with open(conf_file_path) as config_file:\n config = json.load(config_file)\n if fixture is None or not fixture.is_valid():\n fixture = Application(browser=browser, base_url=config['baseUrl'])\n fixture.session.ensure_login(name=config['login'], pwd=config['password'])\n return fixture\n\n\[email protected](scope='session', autouse=True)\ndef stop(request):\n global fixture\n\n def finalizer():\n fixture.session.ensure_logout()\n fixture.destroy()\n request.addfinalizer(finalizer)\n return fixture\n\n\ndef pytest_addoption(parser):\n parser.addoption('--browser', action='store', default='firefox')\n parser.addoption('--config', action='store', default='config.json')\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith('data_'):\n testdata = load_from_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n elif fixture.startswith('json_'):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n\n\ndef load_from_module(module):\n return importlib.import_module(f'data.{module}').testdata\n\n\ndef load_from_json(jsonfile):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n f'data/{jsonfile}.json')) as file:\n return jsonpickle.decode(file.read())\n",
"step-4": "<mask token>\nfixture = None\nconfig = None\n\n\[email protected]\ndef app(request):\n global fixture\n global config\n browser = request.config.getoption('--browser')\n if config is None:\n conf_file_path = os.path.join(os.path.dirname(os.path.abspath(\n __file__)), request.config.getoption('--config'))\n with open(conf_file_path) as config_file:\n config = json.load(config_file)\n if fixture is None or not fixture.is_valid():\n fixture = Application(browser=browser, base_url=config['baseUrl'])\n fixture.session.ensure_login(name=config['login'], pwd=config['password'])\n return fixture\n\n\[email protected](scope='session', autouse=True)\ndef stop(request):\n global fixture\n\n def finalizer():\n fixture.session.ensure_logout()\n fixture.destroy()\n request.addfinalizer(finalizer)\n return fixture\n\n\ndef pytest_addoption(parser):\n parser.addoption('--browser', action='store', default='firefox')\n parser.addoption('--config', action='store', default='config.json')\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith('data_'):\n testdata = load_from_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n elif fixture.startswith('json_'):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n\n\ndef load_from_module(module):\n return importlib.import_module(f'data.{module}').testdata\n\n\ndef load_from_json(jsonfile):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n f'data/{jsonfile}.json')) as file:\n return jsonpickle.decode(file.read())\n",
"step-5": "import pytest\nimport json\nimport os.path\nimport importlib\nimport jsonpickle\nfrom fixture.application import Application\n\n\nfixture = None\nconfig = None\n\n\[email protected]\ndef app(request):\n global fixture\n global config\n browser = request.config.getoption(\"--browser\")\n if config is None:\n conf_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), request.config.getoption(\"--config\"))\n with open(conf_file_path) as config_file:\n config = json.load(config_file)\n if fixture is None or not fixture.is_valid():\n fixture = Application(browser=browser, base_url=config[\"baseUrl\"])\n\n fixture.session.ensure_login(name=config[\"login\"], pwd=config[\"password\"])\n\n return fixture\n\n\[email protected](scope=\"session\", autouse=True)\ndef stop(request):\n global fixture\n\n def finalizer():\n fixture.session.ensure_logout()\n fixture.destroy()\n\n request.addfinalizer(finalizer)\n return fixture\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--browser\", action=\"store\", default=\"firefox\")\n parser.addoption(\"--config\", action=\"store\", default=\"config.json\")\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith(\"data_\"):\n testdata = load_from_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in testdata])\n elif fixture.startswith(\"json_\"):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in testdata])\n\n\ndef load_from_module(module):\n return importlib.import_module(f'data.{module}').testdata\n\n\ndef load_from_json(jsonfile):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), f'data/{jsonfile}.json')) as file:\n return jsonpickle.decode(file.read())\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
# Exercise 30: convert P British pounds to dollars or cents, using the
# exchange rate $2.80 = 1 pound.  (The original header line was bare prose,
# which made the whole script a SyntaxError.)
RATE = 2.80  # dollars per pound

x = int(input("Desea convertir sus libras a dolar(1) o a centavos(2)"))
if x == 1:
    d = float(input("¿Cuantas libras desea convertir a dólar?\n"))
    # Pounds -> dollars: multiply by the rate.  (The original divided,
    # which computed dollars -> pounds instead.)
    conversion = d * RATE
elif x == 2:
    c = float(input("¿Cuantas libras desea convertir a centavos?\n"))
    # Pounds -> cents: convert to dollars first, then 100 cents per dollar.
    # (The original divided the pounds by 100, ignoring the rate entirely.)
    conversion = c * RATE * 100
else:
    # Guard: any other option previously crashed later with a NameError.
    conversion = 0.0
    print("Opcion no valida")
print("El resultado es:")
print(float(conversion))
|
normal
|
{
"blob_id": "ebc2acbcbab787b07c97b0a4ea8fbaeb9d8e30aa",
"index": 9770,
"step-1": "30. Convertir P libras inglesas a D dólares y C centavos. Usar el tipo de cambio $2.80 = 1 libra\r\np=2.80\r\n\r\nx=int(input(\"Desea convertir sus libras a dolar(1) o a centavos(2)\"))\r\n\r\nif x == 1:\r\n d=float(input(\"¿Cuantas libras desea convertir a dólar?\\n\"))\r\n conversion = (d/p)\r\nif x == 2:\r\n c=float(input(\"¿Cuantas libras desea convertir a centavos?\\n\"))\r\n conversion = c/100\r\nprint(\"El resultado es:\")\r\nprint(float(conversion))\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# 来源知乎 https://zhuanlan.zhihu.com/p/51987247
# 博客 https://www.cnblogs.com/yangecnu/p/Introduce-Binary-Search-Tree.html
"""
二叉查找树 (Binary Search Tree, BST)
特点 : left < root < right
若任意节点的左子树不空,则左子树上所有结点的 值均小于它的根结点的值;
若任意节点的右子树不空,则右子树上所有结点的值均大于它的根结点的值;
任意节点的左、右子树也分别为二叉查找树;
没有键值相等的节点(no duplicate nodes)。
缺点: 不平衡 所以引入平衡二叉树(常用实现方法有红黑树、AVL、替罪羊树、Treap、伸展树等)
本代码实现了 BST
查找 : 任意值 / 最大值 / 最小值 (查找所需最大次数等于高度)
插入 (递归 迭代) : 插入结果一定是插成叶节点了
删除 (递归 迭代): 当删除的节点没有子节点时 当删除的节点只有1个子节点时 当删除的节点有2个子节点时
"""
import logging
import functools
import time
# Module-wide logging setup: only ERROR and above are emitted, prefixed with
# timestamp, logger name and level.
logging.basicConfig(
    level=logging.ERROR,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)  # NOTE(review): appears unused in the visible code
class Node():
    """A binary-tree node: a payload plus optional left/right children."""

    def __init__(self, data=None):
        self._data = data
        # Children start empty; they are attached later through the setters.
        self._left = None
        self._right = None

    def __str__(self):
        # Recursive textual dump: children render via their own __str__
        # (a missing child prints as None).
        return 'Node:<data:%s>, <left:%s>, <right:%s>' % (
            str(self._data), str(self._left), str(self._right))

    @property
    def data(self):
        """Payload stored in this node."""
        return self._data

    @data.setter
    def data(self, new_data):
        self._data = new_data

    @property
    def left(self):
        """Left child node, or None."""
        return self._left

    @left.setter
    def left(self, new_left):
        self._left = new_left

    @property
    def right(self):
        """Right child node, or None."""
        return self._right

    @right.setter
    def right(self, new_right):
        self._right = new_right
def check_null(func):
    """Guard decorator for BinarySearchTree methods.

    Runs `func` only when the tree is non-empty.  On an empty tree the two
    insert helpers bootstrap the root instead (args[0] is the value), and
    every other method just reports that the tree is empty.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.__bool__():  # BinarySearchTree.__bool__: True iff a root exists
            return func(self, *args, **kwargs)
        if func.__name__ in ('_insert', '_insert2'):
            self._root = Node(args[0])
        else:
            print('The tree is empty')
    return wrapper
# class Ad():
# def nam(self):
# pass
#
# print(Ad().nam.__name__)
# # nam
class BinarySearchTree():
"""
如果非空,那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。
左右子树都为二叉搜索树。
"""
    def __init__(self):
        # Root Node of the tree; None denotes an empty tree.
        self._root = None
def __str__(self):
""" yield 迭代器 """
tree2list = [x.data for x in self._generate_node()]
return 'the BinarySearchTree is %s' % tree2list
def __bool__(self):
if self._root is not None:
return True
else:
return False
@staticmethod
def _redirect(pre_node, is_left, target): # staticmethod no need pass self 与类对象无关
"""
将target节点赋值成 pre_node的is_left/right子节点
:param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点
"""
if is_left:
pre_node.left = target
else:
pre_node.right = target
def _generate_node(self):
queue = [self._root]
while queue:
node = queue.pop(0)
yield node
queue.extend([x for x in (node.left, node.right) if x != None])
# (node.left, node.right) is tuple
    @check_null
    def _metal_find(self, value, node, alert=True):
        """Internal lookup shared by find/insert/delete.

        Walks down from `node` comparing `value` against each node's data
        while tracking the parent and which side the walk descended.

        :param value: value to search for.
        :param node: subtree root to start from (pass the tree root so the
            returned parent/side information is meaningful).
        :param alert: when True, print a message if the value is absent.
        :return: (node, _pre_node, is_left) -- the matching node (or None
            when absent), its parent, and True/False/None for a left/right/
            no descent.
        """
        # if you want the pre_node and is_left get the specific value, let the node=root
        is_left, _pre_node = None, None
        while node and value != node.data:
            # _pre_node tracks the parent of the node being examined
            _pre_node = node
            if value < node.data:
                node = node.left
                # is_left tracks whether we descended to a left child
                is_left = True
            elif value > node.data:
                node = node.right
                is_left = False
        # If the loop fell off the tree, node is None (value absent);
        # otherwise node is the match and is returned below.
        if alert and node is None: # alert and (node is None)
            print('There is no node<%s>' % value)
        return node, _pre_node, is_left
def find(self, value):
"""暴露给外面的接口,按值查找,返回节点"""
# *_ 除第一个外的其他返回值
result, *_ = self._metal_find(value, self._root)
return result
@check_null
def _insert(self, value, node): # node 实际往往是root
"""
recursive insert method
:param node: 树中存在的某个节点
:return: node: 插入的节点node 这样其实插入的node(value) 是叶节点
"""
# _insert函数最终结果是
# 1 找到value==node.data的节点即已有这个节点,执行print(),再返回这个节点
# 2 node is None,然后将此节点新建出来,执行node = Node(value)
if node is None:
node = Node(value)
else:
if value < node.data:
# _insert()返回待插入的节点 当前节点的左子节点 指向待插入的节点
node.left = self._insert(value, node.left)
elif value > node.data:
# _insert()返回待插入的节点 当前节点的右子节点 指向待插入的节点
node.right = self._insert(value, node.right)
else:
print('have the same value')
return node # 注意将node返回
@check_null
def _insert2(self, value):
"""
Iterative insert method
先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点
"""
result, pre_node, is_left = self._metal_find(value, self._root, False) # 查找
if result is None: # 没找到通过self._redirect() 赋值
self._redirect(pre_node, is_left, Node(value))
else: # 找到说明已经存在
print('already have the value')
# 默认走循环的实现, 递归的程序栈很容易爆掉,并且test_insert()测试了下循环比递归快很多
def insert(self, value, isrecursion=False):
if isrecursion:
self._insert(value, self._root)
else:
self._insert2(value)
@check_null
def _find_extremum(self, node, by='max'):
"""
找 max min 节点
:return node:
"""
if by == 'max':
while node.right:
node = node.right
elif by == 'min':
while node.left:
node = node.left
return node
def findmax(self):
return self._find_extremum(self._root)
def findmin(self):
return self._find_extremum(self._root, by='min')
@check_null
def _delete(self, value, node):
""" recursion delete
step1: 通过value 与 node.data比较来找到要删除的节点
step2: 要删除的节点又有三种situations
situation1: 要删除的节点 是叶节点,没有子节点。
situation2: 要删除的节点 只有一个子节点。
situation3: 要删除的节点 有两个子节点。
:return: 删除完value以后的新的node
"""
if not node:
print('can\'t find')
else: # step1
# If the key to be deleted is smaller than the root's
# key then it lies in left subtree
if value < node.data:
node.left = self._delete(value, node.left)
# If the kye to be delete is greater than the root's key
# then it lies in right subtree
elif value > node.data:
node.right = self._delete(value, node.right)
# If key is same as root's key, then this is the node
# to be deleted
else: # step2
# Node with two children: Get the inorder successor 中序继承者
# 最后node.left = self._delete(tmp.data, node.left)其实转化成了
# 后边 Node with only one child or no child 的情形
### 可以找左子树的最大值或者右子树的最小值作为successor
### 而左子树的最大值或者右子树的最小值必然只有一个或零个节点
### 所以转化成了前边 Node with only one child or no child 的情形
if node.left and node.right:
# find the largest in the left subtree as successor
tmp = self._find_extremum(node.left) # default by max
# Copy the inorder successor's content to this node
node.data = tmp.data
# Delete the inorder successor
node.left = self._delete(tmp.data, node.left)
# Node with only one child or no child
else:
if node.left is None:
node = node.right
else:
node = node.left
return node # 最后层层返回
@check_null
def _delete2(self, value, node):
"""非递归删除
首先: 找到要删除的节点result
再次: 找到并删除result的successor,再将successor的data赋给要删除的节点result
讨论复杂的2个节点的情况:
1 找到value所在的节点result,该节点有两个子节点
2 找到result的左子节点的max记为tmp,tmp只有0或1个节点
3 从result中删除tmp,tmp只有0或1个节点,
4 ...
"""
# 首先: 找到要删除的节点result
result, pre_node, is_left = self._metal_find(value, node)
if result is None:
return
# 有2个节点的情况
if result.left and result.right:
tmp = self._find_extremum(result.left) # 再次: 找到result的successor
self._delete2(tmp.data, result) # 再次: 删除result的successor 这步会走后边else里 "# 有1个或者没有" 的情形
result.data = tmp.data # 再将successor的data赋给要删除的节点result
# 有1个或者没有
else:
if result.left is None:
# print('---')
# print(id(result),id(result.right)) # 46446408 1352705168
result = result.right
# print(id(result)) # 1352705168
else:
result = result.left
# 将 result 赋成 pre_node 的 is_left节点 维护
self._redirect(pre_node, is_left, result) # 对节点pre_node的子节点进行赋值
def delete(self, value, isrecursion=False):
if isrecursion:
return self._delete(value, self._root)
else:
return self._delete2(value, self._root)
def test_insert(value):
    """Time *value* sequential inserts, iteratively then recursively."""

    def _timed_run(count, recursive):
        # Fresh tree per run so the two variants are comparable.
        tree = BinarySearchTree()
        begin = time.time()
        for num in range(count):
            tree.insert(num, isrecursion=recursive)
        elapsed = time.time() - begin
        print('the isrecursion control=%s, the time is: %s' % (recursive, elapsed))

    for flag in (False, True):
        _timed_run(value, flag)
def main():
    """Small demo: build a tree, look up, insert, delete, printing state."""
    # test_insert(100)
    tree = BinarySearchTree()
    for value in (7, 2, 9, 1, 4, 8, 10):
        tree.insert(value)

    print(tree)
    print(tree.find(4))
    tree.insert(3)
    print(tree)
    tree.delete(2)
    print(tree)
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "fa46bd784dcfeee4f9012ffb6ab6731d2764c9fa",
"index": 8484,
"step-1": "<mask token>\n\n\nclass BinarySearchTree:\n <mask token>\n\n def __init__(self):\n self._root = None\n\n def __str__(self):\n \"\"\" yield 迭代器 \"\"\"\n tree2list = [x.data for x in self._generate_node()]\n return 'the BinarySearchTree is %s' % tree2list\n\n def __bool__(self):\n if self._root is not None:\n return True\n else:\n return False\n\n @staticmethod\n def _redirect(pre_node, is_left, target):\n \"\"\"\n 将target节点赋值成 pre_node的is_left/right子节点\n :param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点\n \"\"\"\n if is_left:\n pre_node.left = target\n else:\n pre_node.right = target\n\n def _generate_node(self):\n queue = [self._root]\n while queue:\n node = queue.pop(0)\n yield node\n queue.extend([x for x in (node.left, node.right) if x != None])\n <mask token>\n\n def find(self, value):\n \"\"\"暴露给外面的接口,按值查找,返回节点\"\"\"\n result, *_ = self._metal_find(value, self._root)\n return result\n\n @check_null\n def _insert(self, value, node):\n \"\"\"\n recursive insert method\n :param node: 树中存在的某个节点\n :return: node: 插入的节点node 这样其实插入的node(value) 是叶节点\n \"\"\"\n if node is None:\n node = Node(value)\n elif value < node.data:\n node.left = self._insert(value, node.left)\n elif value > node.data:\n node.right = self._insert(value, node.right)\n else:\n print('have the same value')\n return node\n\n @check_null\n def _insert2(self, value):\n \"\"\"\n Iterative insert method\n 先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点\n \"\"\"\n result, pre_node, is_left = self._metal_find(value, self._root, False)\n if result is None:\n self._redirect(pre_node, is_left, Node(value))\n else:\n print('already have the value')\n\n def insert(self, value, isrecursion=False):\n if isrecursion:\n self._insert(value, self._root)\n else:\n self._insert2(value)\n\n @check_null\n def _find_extremum(self, node, by='max'):\n \"\"\"\n 找 max min 节点\n :return node:\n \"\"\"\n if by == 'max':\n while node.right:\n node = node.right\n elif by == 'min':\n while 
node.left:\n node = node.left\n return node\n\n def findmax(self):\n return self._find_extremum(self._root)\n <mask token>\n\n @check_null\n def _delete(self, value, node):\n \"\"\" recursion delete\n step1: 通过value 与 node.data比较来找到要删除的节点\n step2: 要删除的节点又有三种situations\n situation1: 要删除的节点 是叶节点,没有子节点。\n situation2: 要删除的节点 只有一个子节点。\n situation3: 要删除的节点 有两个子节点。\n :return: 删除完value以后的新的node\n \"\"\"\n if not node:\n print(\"can't find\")\n elif value < node.data:\n node.left = self._delete(value, node.left)\n elif value > node.data:\n node.right = self._delete(value, node.right)\n elif node.left and node.right:\n tmp = self._find_extremum(node.left)\n node.data = tmp.data\n node.left = self._delete(tmp.data, node.left)\n elif node.left is None:\n node = node.right\n else:\n node = node.left\n return node\n\n @check_null\n def _delete2(self, value, node):\n \"\"\"非递归删除\n 首先: 找到要删除的节点result\n 再次: 找到并删除result的successor,再将successor的data赋给要删除的节点result\n 讨论复杂的2个节点的情况:\n 1 找到value所在的节点result,该节点有两个子节点\n 2 找到result的左子节点的max记为tmp,tmp只有0或1个节点\n 3 从result中删除tmp,tmp只有0或1个节点,\n 4 ...\n \"\"\"\n result, pre_node, is_left = self._metal_find(value, node)\n if result is None:\n return\n if result.left and result.right:\n tmp = self._find_extremum(result.left)\n self._delete2(tmp.data, result)\n result.data = tmp.data\n else:\n if result.left is None:\n result = result.right\n else:\n result = result.left\n self._redirect(pre_node, is_left, result)\n\n def delete(self, value, isrecursion=False):\n if isrecursion:\n return self._delete(value, self._root)\n else:\n return self._delete2(value, self._root)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Node:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass BinarySearchTree:\n \"\"\"\n 如果非空,那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。\n 左右子树都为二叉搜索树。\n \"\"\"\n\n def __init__(self):\n self._root = None\n\n def __str__(self):\n \"\"\" yield 迭代器 \"\"\"\n tree2list = [x.data for x in self._generate_node()]\n return 'the BinarySearchTree is %s' % tree2list\n\n def __bool__(self):\n if self._root is not None:\n return True\n else:\n return False\n\n @staticmethod\n def _redirect(pre_node, is_left, target):\n \"\"\"\n 将target节点赋值成 pre_node的is_left/right子节点\n :param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点\n \"\"\"\n if is_left:\n pre_node.left = target\n else:\n pre_node.right = target\n\n def _generate_node(self):\n queue = [self._root]\n while queue:\n node = queue.pop(0)\n yield node\n queue.extend([x for x in (node.left, node.right) if x != None])\n\n @check_null\n def _metal_find(self, value, node, alert=True):\n \"\"\"\n 内部接口: 实现了基本的查找功能,并且实现了跟踪父节点和判断是否为左右子节点的功能\n 思 路: 比较简单\n :param value:\n :param node:\n :param alert:\n :return: node, _pre_node, is_left\n 找到的node, 该节点的父节点_pre_node, 该节点是_pre_node的左还是右节点bool(is_left)\n \"\"\"\n is_left, _pre_node = None, None\n while node and value != node.data:\n _pre_node = node\n if value < node.data:\n node = node.left\n is_left = True\n elif value > node.data:\n node = node.right\n is_left = False\n if alert and node is None:\n print('There is no node<%s>' % value)\n return node, _pre_node, is_left\n\n def find(self, value):\n \"\"\"暴露给外面的接口,按值查找,返回节点\"\"\"\n result, *_ = self._metal_find(value, self._root)\n return result\n\n @check_null\n def _insert(self, value, node):\n \"\"\"\n recursive insert method\n :param node: 树中存在的某个节点\n :return: node: 插入的节点node 这样其实插入的node(value) 是叶节点\n \"\"\"\n if node is None:\n node = Node(value)\n elif value < node.data:\n node.left = self._insert(value, 
node.left)\n elif value > node.data:\n node.right = self._insert(value, node.right)\n else:\n print('have the same value')\n return node\n\n @check_null\n def _insert2(self, value):\n \"\"\"\n Iterative insert method\n 先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点\n \"\"\"\n result, pre_node, is_left = self._metal_find(value, self._root, False)\n if result is None:\n self._redirect(pre_node, is_left, Node(value))\n else:\n print('already have the value')\n\n def insert(self, value, isrecursion=False):\n if isrecursion:\n self._insert(value, self._root)\n else:\n self._insert2(value)\n\n @check_null\n def _find_extremum(self, node, by='max'):\n \"\"\"\n 找 max min 节点\n :return node:\n \"\"\"\n if by == 'max':\n while node.right:\n node = node.right\n elif by == 'min':\n while node.left:\n node = node.left\n return node\n\n def findmax(self):\n return self._find_extremum(self._root)\n\n def findmin(self):\n return self._find_extremum(self._root, by='min')\n\n @check_null\n def _delete(self, value, node):\n \"\"\" recursion delete\n step1: 通过value 与 node.data比较来找到要删除的节点\n step2: 要删除的节点又有三种situations\n situation1: 要删除的节点 是叶节点,没有子节点。\n situation2: 要删除的节点 只有一个子节点。\n situation3: 要删除的节点 有两个子节点。\n :return: 删除完value以后的新的node\n \"\"\"\n if not node:\n print(\"can't find\")\n elif value < node.data:\n node.left = self._delete(value, node.left)\n elif value > node.data:\n node.right = self._delete(value, node.right)\n elif node.left and node.right:\n tmp = self._find_extremum(node.left)\n node.data = tmp.data\n node.left = self._delete(tmp.data, node.left)\n elif node.left is None:\n node = node.right\n else:\n node = node.left\n return node\n\n @check_null\n def _delete2(self, value, node):\n \"\"\"非递归删除\n 首先: 找到要删除的节点result\n 再次: 找到并删除result的successor,再将successor的data赋给要删除的节点result\n 讨论复杂的2个节点的情况:\n 1 找到value所在的节点result,该节点有两个子节点\n 2 找到result的左子节点的max记为tmp,tmp只有0或1个节点\n 3 从result中删除tmp,tmp只有0或1个节点,\n 4 ...\n \"\"\"\n result, pre_node, is_left = 
self._metal_find(value, node)\n if result is None:\n return\n if result.left and result.right:\n tmp = self._find_extremum(result.left)\n self._delete2(tmp.data, result)\n result.data = tmp.data\n else:\n if result.left is None:\n result = result.right\n else:\n result = result.left\n self._redirect(pre_node, is_left, result)\n\n def delete(self, value, isrecursion=False):\n if isrecursion:\n return self._delete(value, self._root)\n else:\n return self._delete2(value, self._root)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Node:\n\n def __init__(self, data=None):\n self._data = data\n self._left, self._right = None, None\n\n def __str__(self):\n return 'Node:<data:%s>, <left:%s>, <right:%s>' % (str(self._data),\n str(self._left), str(self._right))\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, value):\n self._data = value\n <mask token>\n\n @left.setter\n def left(self, value):\n self._left = value\n\n @property\n def right(self):\n return self._right\n\n @right.setter\n def right(self, value):\n self._right = value\n\n\n<mask token>\n\n\nclass BinarySearchTree:\n \"\"\"\n 如果非空,那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。\n 左右子树都为二叉搜索树。\n \"\"\"\n\n def __init__(self):\n self._root = None\n\n def __str__(self):\n \"\"\" yield 迭代器 \"\"\"\n tree2list = [x.data for x in self._generate_node()]\n return 'the BinarySearchTree is %s' % tree2list\n\n def __bool__(self):\n if self._root is not None:\n return True\n else:\n return False\n\n @staticmethod\n def _redirect(pre_node, is_left, target):\n \"\"\"\n 将target节点赋值成 pre_node的is_left/right子节点\n :param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点\n \"\"\"\n if is_left:\n pre_node.left = target\n else:\n pre_node.right = target\n\n def _generate_node(self):\n queue = [self._root]\n while queue:\n node = queue.pop(0)\n yield node\n queue.extend([x for x in (node.left, node.right) if x != None])\n\n @check_null\n def _metal_find(self, value, node, alert=True):\n \"\"\"\n 内部接口: 实现了基本的查找功能,并且实现了跟踪父节点和判断是否为左右子节点的功能\n 思 路: 比较简单\n :param value:\n :param node:\n :param alert:\n :return: node, _pre_node, is_left\n 找到的node, 该节点的父节点_pre_node, 该节点是_pre_node的左还是右节点bool(is_left)\n \"\"\"\n is_left, _pre_node = None, None\n while node and value != node.data:\n _pre_node = node\n if value < node.data:\n node = node.left\n is_left = True\n elif value > node.data:\n node = node.right\n is_left = False\n if alert and node is None:\n print('There is no node<%s>' % value)\n return node, 
_pre_node, is_left\n\n def find(self, value):\n \"\"\"暴露给外面的接口,按值查找,返回节点\"\"\"\n result, *_ = self._metal_find(value, self._root)\n return result\n\n @check_null\n def _insert(self, value, node):\n \"\"\"\n recursive insert method\n :param node: 树中存在的某个节点\n :return: node: 插入的节点node 这样其实插入的node(value) 是叶节点\n \"\"\"\n if node is None:\n node = Node(value)\n elif value < node.data:\n node.left = self._insert(value, node.left)\n elif value > node.data:\n node.right = self._insert(value, node.right)\n else:\n print('have the same value')\n return node\n\n @check_null\n def _insert2(self, value):\n \"\"\"\n Iterative insert method\n 先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点\n \"\"\"\n result, pre_node, is_left = self._metal_find(value, self._root, False)\n if result is None:\n self._redirect(pre_node, is_left, Node(value))\n else:\n print('already have the value')\n\n def insert(self, value, isrecursion=False):\n if isrecursion:\n self._insert(value, self._root)\n else:\n self._insert2(value)\n\n @check_null\n def _find_extremum(self, node, by='max'):\n \"\"\"\n 找 max min 节点\n :return node:\n \"\"\"\n if by == 'max':\n while node.right:\n node = node.right\n elif by == 'min':\n while node.left:\n node = node.left\n return node\n\n def findmax(self):\n return self._find_extremum(self._root)\n\n def findmin(self):\n return self._find_extremum(self._root, by='min')\n\n @check_null\n def _delete(self, value, node):\n \"\"\" recursion delete\n step1: 通过value 与 node.data比较来找到要删除的节点\n step2: 要删除的节点又有三种situations\n situation1: 要删除的节点 是叶节点,没有子节点。\n situation2: 要删除的节点 只有一个子节点。\n situation3: 要删除的节点 有两个子节点。\n :return: 删除完value以后的新的node\n \"\"\"\n if not node:\n print(\"can't find\")\n elif value < node.data:\n node.left = self._delete(value, node.left)\n elif value > node.data:\n node.right = self._delete(value, node.right)\n elif node.left and node.right:\n tmp = self._find_extremum(node.left)\n node.data = tmp.data\n node.left = self._delete(tmp.data, 
node.left)\n elif node.left is None:\n node = node.right\n else:\n node = node.left\n return node\n\n @check_null\n def _delete2(self, value, node):\n \"\"\"非递归删除\n 首先: 找到要删除的节点result\n 再次: 找到并删除result的successor,再将successor的data赋给要删除的节点result\n 讨论复杂的2个节点的情况:\n 1 找到value所在的节点result,该节点有两个子节点\n 2 找到result的左子节点的max记为tmp,tmp只有0或1个节点\n 3 从result中删除tmp,tmp只有0或1个节点,\n 4 ...\n \"\"\"\n result, pre_node, is_left = self._metal_find(value, node)\n if result is None:\n return\n if result.left and result.right:\n tmp = self._find_extremum(result.left)\n self._delete2(tmp.data, result)\n result.data = tmp.data\n else:\n if result.left is None:\n result = result.right\n else:\n result = result.left\n self._redirect(pre_node, is_left, result)\n\n def delete(self, value, isrecursion=False):\n if isrecursion:\n return self._delete(value, self._root)\n else:\n return self._delete2(value, self._root)\n\n\n<mask token>\n",
"step-4": "<mask token>\nlogging.basicConfig(level=logging.ERROR, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n<mask token>\n\n\nclass Node:\n\n def __init__(self, data=None):\n self._data = data\n self._left, self._right = None, None\n\n def __str__(self):\n return 'Node:<data:%s>, <left:%s>, <right:%s>' % (str(self._data),\n str(self._left), str(self._right))\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, value):\n self._data = value\n\n @property\n def left(self):\n return self._left\n\n @left.setter\n def left(self, value):\n self._left = value\n\n @property\n def right(self):\n return self._right\n\n @right.setter\n def right(self, value):\n self._right = value\n\n\ndef check_null(func):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kw):\n if self.__bool__():\n return func(self, *args, **kw)\n elif func.__name__ in ['_insert', '_insert2']:\n self._root = Node(args[0])\n else:\n print('The tree is empty')\n return wrapper\n\n\nclass BinarySearchTree:\n \"\"\"\n 如果非空,那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。\n 左右子树都为二叉搜索树。\n \"\"\"\n\n def __init__(self):\n self._root = None\n\n def __str__(self):\n \"\"\" yield 迭代器 \"\"\"\n tree2list = [x.data for x in self._generate_node()]\n return 'the BinarySearchTree is %s' % tree2list\n\n def __bool__(self):\n if self._root is not None:\n return True\n else:\n return False\n\n @staticmethod\n def _redirect(pre_node, is_left, target):\n \"\"\"\n 将target节点赋值成 pre_node的is_left/right子节点\n :param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点\n \"\"\"\n if is_left:\n pre_node.left = target\n else:\n pre_node.right = target\n\n def _generate_node(self):\n queue = [self._root]\n while queue:\n node = queue.pop(0)\n yield node\n queue.extend([x for x in (node.left, node.right) if x != None])\n\n @check_null\n def _metal_find(self, value, node, alert=True):\n \"\"\"\n 内部接口: 实现了基本的查找功能,并且实现了跟踪父节点和判断是否为左右子节点的功能\n 思 路: 比较简单\n :param value:\n :param node:\n 
:param alert:\n :return: node, _pre_node, is_left\n 找到的node, 该节点的父节点_pre_node, 该节点是_pre_node的左还是右节点bool(is_left)\n \"\"\"\n is_left, _pre_node = None, None\n while node and value != node.data:\n _pre_node = node\n if value < node.data:\n node = node.left\n is_left = True\n elif value > node.data:\n node = node.right\n is_left = False\n if alert and node is None:\n print('There is no node<%s>' % value)\n return node, _pre_node, is_left\n\n def find(self, value):\n \"\"\"暴露给外面的接口,按值查找,返回节点\"\"\"\n result, *_ = self._metal_find(value, self._root)\n return result\n\n @check_null\n def _insert(self, value, node):\n \"\"\"\n recursive insert method\n :param node: 树中存在的某个节点\n :return: node: 插入的节点node 这样其实插入的node(value) 是叶节点\n \"\"\"\n if node is None:\n node = Node(value)\n elif value < node.data:\n node.left = self._insert(value, node.left)\n elif value > node.data:\n node.right = self._insert(value, node.right)\n else:\n print('have the same value')\n return node\n\n @check_null\n def _insert2(self, value):\n \"\"\"\n Iterative insert method\n 先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点\n \"\"\"\n result, pre_node, is_left = self._metal_find(value, self._root, False)\n if result is None:\n self._redirect(pre_node, is_left, Node(value))\n else:\n print('already have the value')\n\n def insert(self, value, isrecursion=False):\n if isrecursion:\n self._insert(value, self._root)\n else:\n self._insert2(value)\n\n @check_null\n def _find_extremum(self, node, by='max'):\n \"\"\"\n 找 max min 节点\n :return node:\n \"\"\"\n if by == 'max':\n while node.right:\n node = node.right\n elif by == 'min':\n while node.left:\n node = node.left\n return node\n\n def findmax(self):\n return self._find_extremum(self._root)\n\n def findmin(self):\n return self._find_extremum(self._root, by='min')\n\n @check_null\n def _delete(self, value, node):\n \"\"\" recursion delete\n step1: 通过value 与 node.data比较来找到要删除的节点\n step2: 要删除的节点又有三种situations\n situation1: 要删除的节点 
是叶节点,没有子节点。\n situation2: 要删除的节点 只有一个子节点。\n situation3: 要删除的节点 有两个子节点。\n :return: 删除完value以后的新的node\n \"\"\"\n if not node:\n print(\"can't find\")\n elif value < node.data:\n node.left = self._delete(value, node.left)\n elif value > node.data:\n node.right = self._delete(value, node.right)\n elif node.left and node.right:\n tmp = self._find_extremum(node.left)\n node.data = tmp.data\n node.left = self._delete(tmp.data, node.left)\n elif node.left is None:\n node = node.right\n else:\n node = node.left\n return node\n\n @check_null\n def _delete2(self, value, node):\n \"\"\"非递归删除\n 首先: 找到要删除的节点result\n 再次: 找到并删除result的successor,再将successor的data赋给要删除的节点result\n 讨论复杂的2个节点的情况:\n 1 找到value所在的节点result,该节点有两个子节点\n 2 找到result的左子节点的max记为tmp,tmp只有0或1个节点\n 3 从result中删除tmp,tmp只有0或1个节点,\n 4 ...\n \"\"\"\n result, pre_node, is_left = self._metal_find(value, node)\n if result is None:\n return\n if result.left and result.right:\n tmp = self._find_extremum(result.left)\n self._delete2(tmp.data, result)\n result.data = tmp.data\n else:\n if result.left is None:\n result = result.right\n else:\n result = result.left\n self._redirect(pre_node, is_left, result)\n\n def delete(self, value, isrecursion=False):\n if isrecursion:\n return self._delete(value, self._root)\n else:\n return self._delete2(value, self._root)\n\n\ndef test_insert(value):\n\n def _test(value, control=False):\n tree = BinarySearchTree()\n start = time.time()\n for i in range(value):\n tree.insert(i, isrecursion=control)\n end = time.time()\n print('the isrecursion control=%s, the time is: %s' % (control, end -\n start))\n _test(value)\n _test(value, control=True)\n\n\ndef main():\n tree = BinarySearchTree()\n nums = [7, 2, 9, 1, 4, 8, 10]\n for i in nums:\n tree.insert(i)\n print(tree)\n print(tree.find(4))\n tree.insert(3)\n print(tree)\n tree.delete(2)\n print(tree)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# 来源知乎 https://zhuanlan.zhihu.com/p/51987247\n# 博客 https://www.cnblogs.com/yangecnu/p/Introduce-Binary-Search-Tree.html\n\"\"\"\n二叉查找树 (Binary Search Tree, BST)\n特点 : left < root < right\n 若任意节点的左子树不空,则左子树上所有结点的 值均小于它的根结点的值;\n 若任意节点的右子树不空,则右子树上所有结点的值均大于它的根结点的值;\n 任意节点的左、右子树也分别为二叉查找树;\n 没有键值相等的节点(no duplicate nodes)。\n缺点: 不平衡 所以引入平衡二叉树(常用实现方法有红黑树、AVL、替罪羊树、Treap、伸展树等)\n\n本代码实现了 BST\n查找 : 任意值 / 最大值 / 最小值 (查找所需最大次数等于高度)\n插入 (递归 迭代) : 插入结果一定是插成叶节点了\n删除 (递归 迭代): 当删除的节点没有子节点时 当删除的节点只有1个子节点时 当删除的节点有2个子节点时\n\"\"\"\n\nimport logging\nimport functools\nimport time\n\nlogging.basicConfig(\n level=logging.ERROR,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\n\n\nclass Node():\n def __init__(self, data=None):\n self._data = data\n self._left, self._right = None, None\n\n def __str__(self):\n return 'Node:<data:%s>, <left:%s>, <right:%s>' % (\n str(self._data), str(self._left), str(self._right))\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, value):\n self._data = value\n\n @property\n def left(self):\n return self._left\n\n @left.setter\n def left(self, value):\n self._left = value\n\n @property\n def right(self):\n return self._right\n\n @right.setter\n def right(self, value):\n self._right = value\n\n\ndef check_null(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kw):\n if self.__bool__(): # check if the BinarySearchTree() object is None\n return func(self, *args, **kw)\n else:\n if func.__name__ in ['_insert', '_insert2']:\n self._root = Node(args[0])\n else:\n print('The tree is empty')\n\n return wrapper\n\n\n# class Ad():\n# def nam(self):\n# pass\n#\n# print(Ad().nam.__name__)\n# # nam\n\nclass BinarySearchTree():\n \"\"\"\n 如果非空,那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。\n 左右子树都为二叉搜索树。\n \"\"\"\n\n def __init__(self):\n self._root = None\n\n def __str__(self):\n \"\"\" yield 迭代器 \"\"\"\n tree2list = [x.data for x in self._generate_node()]\n return 'the 
BinarySearchTree is %s' % tree2list\n\n def __bool__(self):\n if self._root is not None:\n return True\n else:\n return False\n\n @staticmethod\n def _redirect(pre_node, is_left, target): # staticmethod no need pass self 与类对象无关\n \"\"\"\n 将target节点赋值成 pre_node的is_left/right子节点\n :param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点\n \"\"\"\n if is_left:\n pre_node.left = target\n else:\n pre_node.right = target\n\n def _generate_node(self):\n queue = [self._root]\n while queue:\n node = queue.pop(0)\n yield node\n queue.extend([x for x in (node.left, node.right) if x != None])\n # (node.left, node.right) is tuple\n\n @check_null\n def _metal_find(self, value, node, alert=True):\n \"\"\"\n 内部接口: 实现了基本的查找功能,并且实现了跟踪父节点和判断是否为左右子节点的功能\n 思 路: 比较简单\n :param value:\n :param node:\n :param alert:\n :return: node, _pre_node, is_left\n 找到的node, 该节点的父节点_pre_node, 该节点是_pre_node的左还是右节点bool(is_left)\n \"\"\"\n # if you want the pre_node and is_left get the specific value, let the node=root\n is_left, _pre_node = None, None\n while node and value != node.data:\n # _pre_node 作用跟踪父节点\n _pre_node = node\n if value < node.data:\n node = node.left\n # is_left 作用跟踪是否为左子节点\n is_left = True\n elif value > node.data:\n node = node.right\n is_left = False\n # while 循环完没找到,则node is None\n # while 循环完找到的话,则node is not None 跳过if,return 找到的node\n if alert and node is None: # alert and (node is None)\n print('There is no node<%s>' % value)\n return node, _pre_node, is_left\n\n def find(self, value):\n \"\"\"暴露给外面的接口,按值查找,返回节点\"\"\"\n # *_ 除第一个外的其他返回值\n result, *_ = self._metal_find(value, self._root)\n return result\n\n @check_null\n def _insert(self, value, node): # node 实际往往是root\n \"\"\"\n recursive insert method\n :param node: 树中存在的某个节点\n :return: node: 插入的节点node 这样其实插入的node(value) 是叶节点\n \"\"\"\n # _insert函数最终结果是\n # 1 找到value==node.data的节点即已有这个节点,执行print(),再返回这个节点\n # 2 node is None,然后将此节点新建出来,执行node = Node(value)\n if node is None:\n node = Node(value)\n else:\n if value < 
node.data:\n # _insert()返回待插入的节点 当前节点的左子节点 指向待插入的节点\n node.left = self._insert(value, node.left)\n elif value > node.data:\n # _insert()返回待插入的节点 当前节点的右子节点 指向待插入的节点\n node.right = self._insert(value, node.right)\n else:\n print('have the same value')\n\n return node # 注意将node返回\n\n @check_null\n def _insert2(self, value):\n \"\"\"\n Iterative insert method\n 先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点\n \"\"\"\n result, pre_node, is_left = self._metal_find(value, self._root, False) # 查找\n if result is None: # 没找到通过self._redirect() 赋值\n self._redirect(pre_node, is_left, Node(value))\n else: # 找到说明已经存在\n print('already have the value')\n\n # 默认走循环的实现, 递归的程序栈很容易爆掉,并且test_insert()测试了下循环比递归快很多\n def insert(self, value, isrecursion=False):\n if isrecursion:\n self._insert(value, self._root)\n else:\n self._insert2(value)\n\n @check_null\n def _find_extremum(self, node, by='max'):\n \"\"\"\n 找 max min 节点\n :return node:\n \"\"\"\n if by == 'max':\n while node.right:\n node = node.right\n elif by == 'min':\n while node.left:\n node = node.left\n return node\n\n def findmax(self):\n return self._find_extremum(self._root)\n\n def findmin(self):\n return self._find_extremum(self._root, by='min')\n\n @check_null\n def _delete(self, value, node):\n \"\"\" recursion delete\n step1: 通过value 与 node.data比较来找到要删除的节点\n step2: 要删除的节点又有三种situations\n situation1: 要删除的节点 是叶节点,没有子节点。\n situation2: 要删除的节点 只有一个子节点。\n situation3: 要删除的节点 有两个子节点。\n :return: 删除完value以后的新的node\n \"\"\"\n if not node:\n print('can\\'t find')\n else: # step1\n\n # If the key to be deleted is smaller than the root's\n # key then it lies in left subtree\n if value < node.data:\n node.left = self._delete(value, node.left)\n\n # If the kye to be delete is greater than the root's key\n # then it lies in right subtree\n elif value > node.data:\n node.right = self._delete(value, node.right)\n\n # If key is same as root's key, then this is the node\n # to be deleted\n else: # step2\n\n # Node with two 
children: Get the inorder successor 中序继承者\n\n # 最后node.left = self._delete(tmp.data, node.left)其实转化成了\n # 后边 Node with only one child or no child 的情形\n ### 可以找左子树的最大值或者右子树的最小值作为successor\n ### 而左子树的最大值或者右子树的最小值必然只有一个或零个节点\n ### 所以转化成了前边 Node with only one child or no child 的情形\n\n if node.left and node.right:\n # find the largest in the left subtree as successor\n tmp = self._find_extremum(node.left) # default by max\n # Copy the inorder successor's content to this node\n node.data = tmp.data\n # Delete the inorder successor\n node.left = self._delete(tmp.data, node.left)\n\n # Node with only one child or no child\n else:\n if node.left is None:\n node = node.right\n else:\n node = node.left\n return node # 最后层层返回\n\n @check_null\n def _delete2(self, value, node):\n \"\"\"非递归删除\n 首先: 找到要删除的节点result\n 再次: 找到并删除result的successor,再将successor的data赋给要删除的节点result\n 讨论复杂的2个节点的情况:\n 1 找到value所在的节点result,该节点有两个子节点\n 2 找到result的左子节点的max记为tmp,tmp只有0或1个节点\n 3 从result中删除tmp,tmp只有0或1个节点,\n 4 ...\n \"\"\"\n # 首先: 找到要删除的节点result\n result, pre_node, is_left = self._metal_find(value, node)\n if result is None:\n return\n # 有2个节点的情况\n if result.left and result.right:\n tmp = self._find_extremum(result.left) # 再次: 找到result的successor\n self._delete2(tmp.data, result) # 再次: 删除result的successor 这步会走后边else里 \"# 有1个或者没有\" 的情形\n result.data = tmp.data # 再将successor的data赋给要删除的节点result\n # 有1个或者没有\n else:\n if result.left is None:\n # print('---')\n # print(id(result),id(result.right)) # 46446408 1352705168\n result = result.right\n # print(id(result)) # 1352705168\n else:\n result = result.left\n # 将 result 赋成 pre_node 的 is_left节点 维护\n self._redirect(pre_node, is_left, result) # 对节点pre_node的子节点进行赋值\n\n def delete(self, value, isrecursion=False):\n if isrecursion:\n return self._delete(value, self._root)\n else:\n return self._delete2(value, self._root)\n\n\ndef test_insert(value):\n def _test(value, control=False):\n tree = BinarySearchTree()\n start = time.time()\n for i in range(value):\n 
tree.insert(i, isrecursion=control)\n end = time.time()\n print('the isrecursion control=%s, the time is: %s' % (control, end - start))\n\n _test(value)\n _test(value, control=True)\n\n\ndef main():\n # test_insert(100)\n tree = BinarySearchTree()\n nums = [7, 2, 9, 1, 4, 8, 10]\n for i in nums:\n tree.insert(i)\n\n print(tree)\n print(tree.find(4))\n tree.insert(3)\n print(tree)\n tree.delete(2)\n print(tree)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
15,
19,
26,
31,
34
]
}
|
[
15,
19,
26,
31,
34
] |
from django.contrib.auth.models import User
from django.db import models
class QueuedSpace(models.Model):
""" Stores space json for possible further editing before being sent to the server.
q_etag should update on every save so conflicts can be checked for in queued items.
"""
space_id = models.IntegerField(blank=True, null=True)
json = models.TextField()
q_etag = models.CharField(max_length=40, blank=True)
status = models.CharField(max_length=25, blank=True)
last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)
modified_by = models.ForeignKey(User, blank=True, null=True, related_name='modified_by')
approved_by = models.ForeignKey(User, blank=True, null=True, related_name='approved_by')
def __unicode__(self):
return "id: %s (marked %s on %s by %s)" % (self.space_id, self.status, self.last_modified, self.modified_by)
#TODO: put in an etag generator
|
normal
|
{
"blob_id": "ff09993a4f8fed65fa00c065eb5cfa41e7f9dcc1",
"index": 4411,
"step-1": "<mask token>\n\n\nclass QueuedSpace(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.\n status, self.last_modified, self.modified_by)\n",
"step-2": "<mask token>\n\n\nclass QueuedSpace(models.Model):\n <mask token>\n space_id = models.IntegerField(blank=True, null=True)\n json = models.TextField()\n q_etag = models.CharField(max_length=40, blank=True)\n status = models.CharField(max_length=25, blank=True)\n last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)\n modified_by = models.ForeignKey(User, blank=True, null=True,\n related_name='modified_by')\n approved_by = models.ForeignKey(User, blank=True, null=True,\n related_name='approved_by')\n\n def __unicode__(self):\n return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.\n status, self.last_modified, self.modified_by)\n",
"step-3": "<mask token>\n\n\nclass QueuedSpace(models.Model):\n \"\"\" Stores space json for possible further editing before being sent to the server.\n q_etag should update on every save so conflicts can be checked for in queued items.\n \"\"\"\n space_id = models.IntegerField(blank=True, null=True)\n json = models.TextField()\n q_etag = models.CharField(max_length=40, blank=True)\n status = models.CharField(max_length=25, blank=True)\n last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)\n modified_by = models.ForeignKey(User, blank=True, null=True,\n related_name='modified_by')\n approved_by = models.ForeignKey(User, blank=True, null=True,\n related_name='approved_by')\n\n def __unicode__(self):\n return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.\n status, self.last_modified, self.modified_by)\n",
"step-4": "from django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass QueuedSpace(models.Model):\n \"\"\" Stores space json for possible further editing before being sent to the server.\n q_etag should update on every save so conflicts can be checked for in queued items.\n \"\"\"\n space_id = models.IntegerField(blank=True, null=True)\n json = models.TextField()\n q_etag = models.CharField(max_length=40, blank=True)\n status = models.CharField(max_length=25, blank=True)\n last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)\n modified_by = models.ForeignKey(User, blank=True, null=True,\n related_name='modified_by')\n approved_by = models.ForeignKey(User, blank=True, null=True,\n related_name='approved_by')\n\n def __unicode__(self):\n return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.\n status, self.last_modified, self.modified_by)\n",
"step-5": "from django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass QueuedSpace(models.Model):\n \"\"\" Stores space json for possible further editing before being sent to the server.\n q_etag should update on every save so conflicts can be checked for in queued items.\n \"\"\"\n space_id = models.IntegerField(blank=True, null=True)\n json = models.TextField()\n q_etag = models.CharField(max_length=40, blank=True)\n status = models.CharField(max_length=25, blank=True)\n last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)\n modified_by = models.ForeignKey(User, blank=True, null=True, related_name='modified_by')\n approved_by = models.ForeignKey(User, blank=True, null=True, related_name='approved_by')\n\n def __unicode__(self):\n return \"id: %s (marked %s on %s by %s)\" % (self.space_id, self.status, self.last_modified, self.modified_by)\n\n #TODO: put in an etag generator\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
from wordcloud import WordCloud, ImageColorGenerator
import numpy as np
from PIL import Image
def word2cloud(text: str, mask_image: Image=None):
if mask_image == None:
wc = WordCloud(font_path='simhei.ttf', width=800, height=600, mode='RGBA',
background_color=None).generate(text)
else:
mask = np.array(mask_image) # 使用mask,最好界限分明对比强烈的图形
image_colors = ImageColorGenerator(mask) # 提取蒙版颜色
wc = WordCloud(mask=mask, color_func=image_colors,
width=800, height=600,
font_path='simhei.ttf', mode='RGBA',
background_color=None).generate(text)
img_res = wc.to_image()
return img_res
# 这个大小只是大概,若要精细化,可用结巴统计词频
# freq=jieba.analyse.extract_tags(text, topK=200, withWeight=True)
# freq={w[0]:w[1] for w in freq}
# WordCloud(...).generate_from_frequencies(freq)
# plt.imshow(wc,interpolation='bilinear') # 插值颜色均匀
# plt.axis('off')
# plt.show()
#wc.to_file('wordcloud.png') # 保存
|
normal
|
{
"blob_id": "f9310aa6c26ec10041dac272fa17ac21f74c21ac",
"index": 9326,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef word2cloud(text: str, mask_image: Image=None):\n if mask_image == None:\n wc = WordCloud(font_path='simhei.ttf', width=800, height=600, mode=\n 'RGBA', background_color=None).generate(text)\n else:\n mask = np.array(mask_image)\n image_colors = ImageColorGenerator(mask)\n wc = WordCloud(mask=mask, color_func=image_colors, width=800,\n height=600, font_path='simhei.ttf', mode='RGBA',\n background_color=None).generate(text)\n img_res = wc.to_image()\n return img_res\n",
"step-3": "from wordcloud import WordCloud, ImageColorGenerator\nimport numpy as np\nfrom PIL import Image\n\n\ndef word2cloud(text: str, mask_image: Image=None):\n if mask_image == None:\n wc = WordCloud(font_path='simhei.ttf', width=800, height=600, mode=\n 'RGBA', background_color=None).generate(text)\n else:\n mask = np.array(mask_image)\n image_colors = ImageColorGenerator(mask)\n wc = WordCloud(mask=mask, color_func=image_colors, width=800,\n height=600, font_path='simhei.ttf', mode='RGBA',\n background_color=None).generate(text)\n img_res = wc.to_image()\n return img_res\n",
"step-4": "# -*- coding: utf-8 -*-\nfrom wordcloud import WordCloud, ImageColorGenerator\nimport numpy as np\nfrom PIL import Image\n\ndef word2cloud(text: str, mask_image: Image=None):\n if mask_image == None:\n wc = WordCloud(font_path='simhei.ttf', width=800, height=600, mode='RGBA',\n background_color=None).generate(text)\n else:\n mask = np.array(mask_image) # 使用mask,最好界限分明对比强烈的图形\n image_colors = ImageColorGenerator(mask) # 提取蒙版颜色\n wc = WordCloud(mask=mask, color_func=image_colors,\n width=800, height=600,\n font_path='simhei.ttf', mode='RGBA',\n background_color=None).generate(text)\n img_res = wc.to_image()\n return img_res\n\n\n# 这个大小只是大概,若要精细化,可用结巴统计词频\n# freq=jieba.analyse.extract_tags(text, topK=200, withWeight=True)\n# freq={w[0]:w[1] for w in freq}\n# WordCloud(...).generate_from_frequencies(freq)\n\n# plt.imshow(wc,interpolation='bilinear') # 插值颜色均匀\n# plt.axis('off')\n# plt.show()\n\n#wc.to_file('wordcloud.png') # 保存",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sys import stdin
Read = stdin.readline
INF = int(1e9)
n, m = map(int, Read().split())
graph = [[INF] * (n+1) for _ in range(n+1)]
for i in range(1, n+1):
for j in range(1, n+1):
if i == j:
graph[i][j] = 0
for _ in range(m):
a, b = map(int, Read().split())
graph[a][b] = 1
for k in range(1, n+1):
for i in range(1, n+1):
for j in range(1, n+1):
graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])
result = 0
for i in range(1, n+1):
count = 0
for j in range(1, n+1):
if graph[i][j] != INF or graph[j][i] != INF:
count += 1
if count == n:
result += 1
print(result)
|
normal
|
{
"blob_id": "6ec39aa712c8abe610418e410883ff168d73126d",
"index": 3292,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, n + 1):\n for j in range(1, n + 1):\n if i == j:\n graph[i][j] = 0\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\nfor k in range(1, n + 1):\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\n<mask token>\nfor i in range(1, n + 1):\n count = 0\n for j in range(1, n + 1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n if count == n:\n result += 1\nprint(result)\n",
"step-3": "<mask token>\nRead = stdin.readline\nINF = int(1000000000.0)\nn, m = map(int, Read().split())\ngraph = [([INF] * (n + 1)) for _ in range(n + 1)]\nfor i in range(1, n + 1):\n for j in range(1, n + 1):\n if i == j:\n graph[i][j] = 0\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\nfor k in range(1, n + 1):\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\nresult = 0\nfor i in range(1, n + 1):\n count = 0\n for j in range(1, n + 1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n if count == n:\n result += 1\nprint(result)\n",
"step-4": "from sys import stdin\nRead = stdin.readline\nINF = int(1000000000.0)\nn, m = map(int, Read().split())\ngraph = [([INF] * (n + 1)) for _ in range(n + 1)]\nfor i in range(1, n + 1):\n for j in range(1, n + 1):\n if i == j:\n graph[i][j] = 0\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\nfor k in range(1, n + 1):\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\nresult = 0\nfor i in range(1, n + 1):\n count = 0\n for j in range(1, n + 1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n if count == n:\n result += 1\nprint(result)\n",
"step-5": "from sys import stdin\nRead = stdin.readline\nINF = int(1e9)\n\nn, m = map(int, Read().split())\ngraph = [[INF] * (n+1) for _ in range(n+1)]\n\nfor i in range(1, n+1):\n for j in range(1, n+1):\n if i == j:\n graph[i][j] = 0\n\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\n\nfor k in range(1, n+1):\n for i in range(1, n+1):\n for j in range(1, n+1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\n\nresult = 0\nfor i in range(1, n+1):\n count = 0\n for j in range(1, n+1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n\n if count == n:\n result += 1\n\nprint(result)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def fonct(valeur, a= None):
if type(a) is list:
a.append(valeur)
# a+= valeur
elif type(a) is tuple:
a += tuple((valeur,))
elif type(a) is str:
a += str(valeur)
elif type(a) is set:
a.add(valeur)
else:
a+= valeur
return(a)
print(fonct(4, [1, 2, 3])) # [1, 2, 3, 4]
print(fonct(4, 'eg' )) # eg4
print(fonct(4, (1,2,3))) # (1, 2, 3, 4)
print(fonct(4, {1, 2, 3})) # (1, 2, 3, 4)
|
normal
|
{
"blob_id": "2a13fffa105a5dd546c30c892e59888eb6ead996",
"index": 4645,
"step-1": "<mask token>\n",
"step-2": "def fonct(valeur, a=None):\n if type(a) is list:\n a.append(valeur)\n elif type(a) is tuple:\n a += tuple((valeur,))\n elif type(a) is str:\n a += str(valeur)\n elif type(a) is set:\n a.add(valeur)\n else:\n a += valeur\n return a\n\n\n<mask token>\n",
"step-3": "def fonct(valeur, a=None):\n if type(a) is list:\n a.append(valeur)\n elif type(a) is tuple:\n a += tuple((valeur,))\n elif type(a) is str:\n a += str(valeur)\n elif type(a) is set:\n a.add(valeur)\n else:\n a += valeur\n return a\n\n\nprint(fonct(4, [1, 2, 3]))\nprint(fonct(4, 'eg'))\nprint(fonct(4, (1, 2, 3)))\nprint(fonct(4, {1, 2, 3}))\n",
"step-4": "def fonct(valeur, a= None):\n if type(a) is list:\n a.append(valeur)\n # a+= valeur\n elif type(a) is tuple: \n a += tuple((valeur,)) \n elif type(a) is str: \n a += str(valeur) \n elif type(a) is set: \n a.add(valeur) \n else:\n a+= valeur\n return(a)\n\nprint(fonct(4, [1, 2, 3])) # [1, 2, 3, 4]\nprint(fonct(4, 'eg' )) # eg4\nprint(fonct(4, (1,2,3))) # (1, 2, 3, 4)\nprint(fonct(4, {1, 2, 3})) # (1, 2, 3, 4)\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pynucastro as pyna
rl = pyna.ReacLibLibrary()
h_burn = rl.linking_nuclei(["h1", "he4",
"c12", "c13",
"n13", "n14", "n15",
"o14", "o15", "o16","o17","o18",
"f17", "f18","f19",
"ne18", "ne19", "ne20",
"mg22", "mg24"],
with_reverse=False)
rc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=["fe56"])
rc.write_network()
comp = pyna.Composition(rc.get_nuclei())
comp.set_solar_like()
rc.plot(outfile="cno_extras.png", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13])
rc.plot(outfile="cno_extras_hide_alpha.png", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13],
rotated=True, highlight_filter_function=lambda r: r.Q > 0,
curved_edges=True, hide_xalpha=True)
|
normal
|
{
"blob_id": "39b07f1a515787e80a1fb822e67e19e2301b894a",
"index": 3285,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrc.write_network()\n<mask token>\ncomp.set_solar_like()\nrc.plot(outfile='cno_extras.png', rho=1000000.0, T=100000000.0, comp=comp,\n Z_range=[1, 13], N_range=[1, 13])\nrc.plot(outfile='cno_extras_hide_alpha.png', rho=1000000.0, T=100000000.0,\n comp=comp, Z_range=[1, 13], N_range=[1, 13], rotated=True,\n highlight_filter_function=lambda r: r.Q > 0, curved_edges=True,\n hide_xalpha=True)\n",
"step-3": "<mask token>\nrl = pyna.ReacLibLibrary()\nh_burn = rl.linking_nuclei(['h1', 'he4', 'c12', 'c13', 'n13', 'n14', 'n15',\n 'o14', 'o15', 'o16', 'o17', 'o18', 'f17', 'f18', 'f19', 'ne18', 'ne19',\n 'ne20', 'mg22', 'mg24'], with_reverse=False)\nrc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=['fe56'])\nrc.write_network()\ncomp = pyna.Composition(rc.get_nuclei())\ncomp.set_solar_like()\nrc.plot(outfile='cno_extras.png', rho=1000000.0, T=100000000.0, comp=comp,\n Z_range=[1, 13], N_range=[1, 13])\nrc.plot(outfile='cno_extras_hide_alpha.png', rho=1000000.0, T=100000000.0,\n comp=comp, Z_range=[1, 13], N_range=[1, 13], rotated=True,\n highlight_filter_function=lambda r: r.Q > 0, curved_edges=True,\n hide_xalpha=True)\n",
"step-4": "import pynucastro as pyna\nrl = pyna.ReacLibLibrary()\nh_burn = rl.linking_nuclei(['h1', 'he4', 'c12', 'c13', 'n13', 'n14', 'n15',\n 'o14', 'o15', 'o16', 'o17', 'o18', 'f17', 'f18', 'f19', 'ne18', 'ne19',\n 'ne20', 'mg22', 'mg24'], with_reverse=False)\nrc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=['fe56'])\nrc.write_network()\ncomp = pyna.Composition(rc.get_nuclei())\ncomp.set_solar_like()\nrc.plot(outfile='cno_extras.png', rho=1000000.0, T=100000000.0, comp=comp,\n Z_range=[1, 13], N_range=[1, 13])\nrc.plot(outfile='cno_extras_hide_alpha.png', rho=1000000.0, T=100000000.0,\n comp=comp, Z_range=[1, 13], N_range=[1, 13], rotated=True,\n highlight_filter_function=lambda r: r.Q > 0, curved_edges=True,\n hide_xalpha=True)\n",
"step-5": "import pynucastro as pyna\n\nrl = pyna.ReacLibLibrary()\n\nh_burn = rl.linking_nuclei([\"h1\", \"he4\",\n \"c12\", \"c13\",\n \"n13\", \"n14\", \"n15\",\n \"o14\", \"o15\", \"o16\",\"o17\",\"o18\",\n \"f17\", \"f18\",\"f19\",\n \"ne18\", \"ne19\", \"ne20\",\n \"mg22\", \"mg24\"],\n with_reverse=False)\n\n\nrc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=[\"fe56\"])\n\nrc.write_network()\n\ncomp = pyna.Composition(rc.get_nuclei())\ncomp.set_solar_like()\n\nrc.plot(outfile=\"cno_extras.png\", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13])\nrc.plot(outfile=\"cno_extras_hide_alpha.png\", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13],\n rotated=True, highlight_filter_function=lambda r: r.Q > 0,\n curved_edges=True, hide_xalpha=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import mxnet as mx
import numpy as np
import logging
# Example performance:
# INFO:root:Epoch[34] Train-accuracy=0.601388
# INFO:root:Epoch[34] Validation-accuracy=0.620949
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# running device
dev = mx.gpu()
# batch size and input shape
batch_size = 64
data_shape = (3, 36, 36)
# training data info for learning rate reduction
num_examples = 20000
epoch_size = num_examples / batch_size
lr_factor_epoch = 15
# model saving parameter
model_prefix = "./models/sample_net"
# train data iterator
train = mx.io.ImageRecordIter(
path_imgrec = "tr.rec",
mean_r = 128,
mean_g = 128,
mean_b = 128,
scale = 0.0078125,
max_aspect_ratio = 0.35,
data_shape = data_shape,
batch_size = batch_size,
rand_crop = True,
rand_mirror = True)
# validate data iterator
val = mx.io.ImageRecordIter(
path_imgrec = "va.rec",
mean_r = 128,
mean_b = 128,
mean_g = 128,
scale = 0.0078125,
rand_crop = False,
rand_mirror = False,
data_shape = data_shape,
batch_size = batch_size)
# network definition
# stage 1
net = mx.sym.Variable("data")
net = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=32, pad=(2, 2))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=64, pad=(2, 2))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Pooling(data=net, pool_type="max", kernel=(3, 3), stride=(2, 2))
# stage 2
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=128, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Pooling(data=net, pool_type="max", kernel=(3, 3), stride=(2, 2))
# stage 3
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Pooling(data=net, pool_type="avg", kernel=(9, 9), stride=(1, 1))
# stage 4
net = mx.sym.Flatten(data=net)
net = mx.sym.Dropout(data=net, p=0.25)
net = mx.sym.FullyConnected(data=net, num_hidden=121)
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
# Model parameter
# This model will reduce learning rate by factor 0.1 for every 15 epoch
model = mx.model.FeedForward(
ctx = dev,
symbol = net,
num_epoch = 35,
learning_rate = 0.01,
momentum = 0.9,
wd = 0.0001,
clip_gradient = 5,
lr_scheduler = mx.lr_scheduler.FactorScheduler(step=epoch_size * lr_factor_epoch, factor = 0.1),
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34))
# fit the model
model.fit(
X = train,
eval_data = val,
batch_end_callback = mx.callback.Speedometer(batch_size, 50),
epoch_end_callback = mx.callback.do_checkpoint(model_prefix))
|
normal
|
{
"blob_id": "e82b9aa0f7dc669b3d5622c093b766c7e168221c",
"index": 5757,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlogger.setLevel(logging.DEBUG)\n<mask token>\nmodel.fit(X=train, eval_data=val, batch_end_callback=mx.callback.\n Speedometer(batch_size, 50), epoch_end_callback=mx.callback.\n do_checkpoint(model_prefix))\n",
"step-3": "<mask token>\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\ndev = mx.gpu()\nbatch_size = 64\ndata_shape = 3, 36, 36\nnum_examples = 20000\nepoch_size = num_examples / batch_size\nlr_factor_epoch = 15\nmodel_prefix = './models/sample_net'\ntrain = mx.io.ImageRecordIter(path_imgrec='tr.rec', mean_r=128, mean_g=128,\n mean_b=128, scale=0.0078125, max_aspect_ratio=0.35, data_shape=\n data_shape, batch_size=batch_size, rand_crop=True, rand_mirror=True)\nval = mx.io.ImageRecordIter(path_imgrec='va.rec', mean_r=128, mean_b=128,\n mean_g=128, scale=0.0078125, rand_crop=False, rand_mirror=False,\n data_shape=data_shape, batch_size=batch_size)\nnet = mx.sym.Variable('data')\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=32, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=64, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='max', kernel=(3, 3), stride=(2, 2))\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=128, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='max', kernel=(3, 3), stride=(2, 2))\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='avg', kernel=(9, 9), stride=(1, 1))\nnet = mx.sym.Flatten(data=net)\nnet = mx.sym.Dropout(data=net, p=0.25)\nnet = mx.sym.FullyConnected(data=net, num_hidden=121)\nnet = 
mx.symbol.SoftmaxOutput(data=net, name='softmax')\nmodel = mx.model.FeedForward(ctx=dev, symbol=net, num_epoch=35,\n learning_rate=0.01, momentum=0.9, wd=0.0001, clip_gradient=5,\n lr_scheduler=mx.lr_scheduler.FactorScheduler(step=epoch_size *\n lr_factor_epoch, factor=0.1), initializer=mx.init.Xavier(factor_type=\n 'in', magnitude=2.34))\nmodel.fit(X=train, eval_data=val, batch_end_callback=mx.callback.\n Speedometer(batch_size, 50), epoch_end_callback=mx.callback.\n do_checkpoint(model_prefix))\n",
"step-4": "import mxnet as mx\nimport numpy as np\nimport logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\ndev = mx.gpu()\nbatch_size = 64\ndata_shape = 3, 36, 36\nnum_examples = 20000\nepoch_size = num_examples / batch_size\nlr_factor_epoch = 15\nmodel_prefix = './models/sample_net'\ntrain = mx.io.ImageRecordIter(path_imgrec='tr.rec', mean_r=128, mean_g=128,\n mean_b=128, scale=0.0078125, max_aspect_ratio=0.35, data_shape=\n data_shape, batch_size=batch_size, rand_crop=True, rand_mirror=True)\nval = mx.io.ImageRecordIter(path_imgrec='va.rec', mean_r=128, mean_b=128,\n mean_g=128, scale=0.0078125, rand_crop=False, rand_mirror=False,\n data_shape=data_shape, batch_size=batch_size)\nnet = mx.sym.Variable('data')\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=32, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=64, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='max', kernel=(3, 3), stride=(2, 2))\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=128, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='max', kernel=(3, 3), stride=(2, 2))\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='avg', kernel=(9, 9), stride=(1, 1))\nnet = mx.sym.Flatten(data=net)\nnet = mx.sym.Dropout(data=net, p=0.25)\nnet = 
mx.sym.FullyConnected(data=net, num_hidden=121)\nnet = mx.symbol.SoftmaxOutput(data=net, name='softmax')\nmodel = mx.model.FeedForward(ctx=dev, symbol=net, num_epoch=35,\n learning_rate=0.01, momentum=0.9, wd=0.0001, clip_gradient=5,\n lr_scheduler=mx.lr_scheduler.FactorScheduler(step=epoch_size *\n lr_factor_epoch, factor=0.1), initializer=mx.init.Xavier(factor_type=\n 'in', magnitude=2.34))\nmodel.fit(X=train, eval_data=val, batch_end_callback=mx.callback.\n Speedometer(batch_size, 50), epoch_end_callback=mx.callback.\n do_checkpoint(model_prefix))\n",
"step-5": "import mxnet as mx\nimport numpy as np\nimport logging\n\n# Example performance:\n# INFO:root:Epoch[34] Train-accuracy=0.601388\n# INFO:root:Epoch[34] Validation-accuracy=0.620949\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n# running device\ndev = mx.gpu()\n# batch size and input shape\nbatch_size = 64\ndata_shape = (3, 36, 36)\n# training data info for learning rate reduction\nnum_examples = 20000\nepoch_size = num_examples / batch_size\nlr_factor_epoch = 15\n# model saving parameter\nmodel_prefix = \"./models/sample_net\"\n\n# train data iterator\ntrain = mx.io.ImageRecordIter(\n path_imgrec = \"tr.rec\",\n mean_r = 128,\n mean_g = 128,\n mean_b = 128,\n scale = 0.0078125,\n max_aspect_ratio = 0.35,\n data_shape = data_shape,\n batch_size = batch_size,\n rand_crop = True,\n rand_mirror = True)\n\n# validate data iterator\nval = mx.io.ImageRecordIter(\n path_imgrec = \"va.rec\",\n mean_r = 128,\n mean_b = 128,\n mean_g = 128,\n scale = 0.0078125,\n rand_crop = False,\n rand_mirror = False,\n data_shape = data_shape,\n batch_size = batch_size)\n\n# network definition\n# stage 1\nnet = mx.sym.Variable(\"data\")\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=32, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=64, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Pooling(data=net, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n# stage 2\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=128, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Pooling(data=net, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n# 
stage 3\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Pooling(data=net, pool_type=\"avg\", kernel=(9, 9), stride=(1, 1))\n# stage 4\nnet = mx.sym.Flatten(data=net)\nnet = mx.sym.Dropout(data=net, p=0.25)\nnet = mx.sym.FullyConnected(data=net, num_hidden=121)\nnet = mx.symbol.SoftmaxOutput(data=net, name='softmax')\n\n# Model parameter\n# This model will reduce learning rate by factor 0.1 for every 15 epoch\nmodel = mx.model.FeedForward(\n ctx = dev,\n symbol = net,\n num_epoch = 35,\n learning_rate = 0.01,\n momentum = 0.9,\n wd = 0.0001,\n clip_gradient = 5,\n lr_scheduler = mx.lr_scheduler.FactorScheduler(step=epoch_size * lr_factor_epoch, factor = 0.1),\n initializer = mx.init.Xavier(factor_type=\"in\", magnitude=2.34))\n\n# fit the model\nmodel.fit(\n X = train,\n eval_data = val,\n batch_end_callback = mx.callback.Speedometer(batch_size, 50),\n epoch_end_callback = mx.callback.do_checkpoint(model_prefix))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Script for converting the new csv files to the desirable json format
'''
import codecs
import json
import re
def creeper():
'''
Settings for creeper file
'''
ccPrefix = False
inFilename = u'creeper.csv'
outFilename = u'Creeper.json'
mappingFile = u'creeper-mappings.json'
run(inFilename, outFilename, ccPrefix, mappingFile=mappingFile)
def mediaCreeper():
'''
Settings for mediaCreeper file
'''
ccPrefix = True
inFilename = u'mediacreeper.csv'
outFilename = u'MediaCreeper.json'
run(inFilename, outFilename, ccPrefix)
def run(inFilename, outFilename, ccPrefix,
mappingFile=None, source=u'http://b19.se/data/'):
'''
Run either file depending on settings
'''
# load mappings
mappings = {}
if mappingFile:
f = codecs.open(mappingFile, 'r', 'utf-8')
mappings = json.load(f)
f.close()
# load csv
f = codecs.open(inFilename, 'r', 'utf-8')
lines = f.read().split('\n')
f.close()
data = {}
dates = []
for l in lines:
if len(l) == 0 or l.startswith(u'#'):
continue
start, end, cc, caption, updated = l.split(';')
if ccPrefix:
caption = u'[%s] %s' % (cc, caption)
if caption in mappings.keys():
caption = mappings[caption]
if caption in data.keys():
data[caption].append([start, end])
else:
data[caption] = [[start, end], ]
dates.append(updated)
# create metadata entry
dates = sorted(list(set(dates)))
metadata = {
'source': source,
'oldest data': dates[0],
'newest data': dates[-1]}
data[u'@metadata'] = metadata
# output
f = codecs.open(outFilename, 'w', 'utf-8')
# f.write(json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False))
# compactify it without minimizing
txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
txt = re.sub(
r'\[\n "([^"]*)", \n "([^"]*)"\n \]',
r'["\1", "\2"]',
txt)
txt = txt.replace(u', \n [', u',\n [')
f.write(txt)
f.close()
if __name__ == '__main__':
creeper()
mediaCreeper()
|
normal
|
{
"blob_id": "5a5b2d0ade5b66981218b4ecf15a2253b7d665f9",
"index": 3273,
"step-1": "<mask token>\n\n\ndef mediaCreeper():\n \"\"\"\n Settings for mediaCreeper file\n \"\"\"\n ccPrefix = True\n inFilename = u'mediacreeper.csv'\n outFilename = u'MediaCreeper.json'\n run(inFilename, outFilename, ccPrefix)\n\n\ndef run(inFilename, outFilename, ccPrefix, mappingFile=None, source=\n u'http://b19.se/data/'):\n \"\"\"\n Run either file depending on settings\n \"\"\"\n mappings = {}\n if mappingFile:\n f = codecs.open(mappingFile, 'r', 'utf-8')\n mappings = json.load(f)\n f.close()\n f = codecs.open(inFilename, 'r', 'utf-8')\n lines = f.read().split('\\n')\n f.close()\n data = {}\n dates = []\n for l in lines:\n if len(l) == 0 or l.startswith(u'#'):\n continue\n start, end, cc, caption, updated = l.split(';')\n if ccPrefix:\n caption = u'[%s] %s' % (cc, caption)\n if caption in mappings.keys():\n caption = mappings[caption]\n if caption in data.keys():\n data[caption].append([start, end])\n else:\n data[caption] = [[start, end]]\n dates.append(updated)\n dates = sorted(list(set(dates)))\n metadata = {'source': source, 'oldest data': dates[0], 'newest data':\n dates[-1]}\n data[u'@metadata'] = metadata\n f = codecs.open(outFilename, 'w', 'utf-8')\n txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)\n txt = re.sub(\n '\\\\[\\\\n \"([^\"]*)\", \\\\n \"([^\"]*)\"\\\\n \\\\]',\n '[\"\\\\1\", \"\\\\2\"]', txt)\n txt = txt.replace(u', \\n [', u',\\n [')\n f.write(txt)\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef creeper():\n \"\"\"\n Settings for creeper file\n \"\"\"\n ccPrefix = False\n inFilename = u'creeper.csv'\n outFilename = u'Creeper.json'\n mappingFile = u'creeper-mappings.json'\n run(inFilename, outFilename, ccPrefix, mappingFile=mappingFile)\n\n\ndef mediaCreeper():\n \"\"\"\n Settings for mediaCreeper file\n \"\"\"\n ccPrefix = True\n inFilename = u'mediacreeper.csv'\n outFilename = u'MediaCreeper.json'\n run(inFilename, outFilename, ccPrefix)\n\n\ndef run(inFilename, outFilename, ccPrefix, mappingFile=None, source=\n u'http://b19.se/data/'):\n \"\"\"\n Run either file depending on settings\n \"\"\"\n mappings = {}\n if mappingFile:\n f = codecs.open(mappingFile, 'r', 'utf-8')\n mappings = json.load(f)\n f.close()\n f = codecs.open(inFilename, 'r', 'utf-8')\n lines = f.read().split('\\n')\n f.close()\n data = {}\n dates = []\n for l in lines:\n if len(l) == 0 or l.startswith(u'#'):\n continue\n start, end, cc, caption, updated = l.split(';')\n if ccPrefix:\n caption = u'[%s] %s' % (cc, caption)\n if caption in mappings.keys():\n caption = mappings[caption]\n if caption in data.keys():\n data[caption].append([start, end])\n else:\n data[caption] = [[start, end]]\n dates.append(updated)\n dates = sorted(list(set(dates)))\n metadata = {'source': source, 'oldest data': dates[0], 'newest data':\n dates[-1]}\n data[u'@metadata'] = metadata\n f = codecs.open(outFilename, 'w', 'utf-8')\n txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)\n txt = re.sub(\n '\\\\[\\\\n \"([^\"]*)\", \\\\n \"([^\"]*)\"\\\\n \\\\]',\n '[\"\\\\1\", \"\\\\2\"]', txt)\n txt = txt.replace(u', \\n [', u',\\n [')\n f.write(txt)\n f.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef creeper():\n \"\"\"\n Settings for creeper file\n \"\"\"\n ccPrefix = False\n inFilename = u'creeper.csv'\n outFilename = u'Creeper.json'\n mappingFile = u'creeper-mappings.json'\n run(inFilename, outFilename, ccPrefix, mappingFile=mappingFile)\n\n\ndef mediaCreeper():\n \"\"\"\n Settings for mediaCreeper file\n \"\"\"\n ccPrefix = True\n inFilename = u'mediacreeper.csv'\n outFilename = u'MediaCreeper.json'\n run(inFilename, outFilename, ccPrefix)\n\n\ndef run(inFilename, outFilename, ccPrefix, mappingFile=None, source=\n u'http://b19.se/data/'):\n \"\"\"\n Run either file depending on settings\n \"\"\"\n mappings = {}\n if mappingFile:\n f = codecs.open(mappingFile, 'r', 'utf-8')\n mappings = json.load(f)\n f.close()\n f = codecs.open(inFilename, 'r', 'utf-8')\n lines = f.read().split('\\n')\n f.close()\n data = {}\n dates = []\n for l in lines:\n if len(l) == 0 or l.startswith(u'#'):\n continue\n start, end, cc, caption, updated = l.split(';')\n if ccPrefix:\n caption = u'[%s] %s' % (cc, caption)\n if caption in mappings.keys():\n caption = mappings[caption]\n if caption in data.keys():\n data[caption].append([start, end])\n else:\n data[caption] = [[start, end]]\n dates.append(updated)\n dates = sorted(list(set(dates)))\n metadata = {'source': source, 'oldest data': dates[0], 'newest data':\n dates[-1]}\n data[u'@metadata'] = metadata\n f = codecs.open(outFilename, 'w', 'utf-8')\n txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)\n txt = re.sub(\n '\\\\[\\\\n \"([^\"]*)\", \\\\n \"([^\"]*)\"\\\\n \\\\]',\n '[\"\\\\1\", \"\\\\2\"]', txt)\n txt = txt.replace(u', \\n [', u',\\n [')\n f.write(txt)\n f.close()\n\n\nif __name__ == '__main__':\n creeper()\n mediaCreeper()\n",
"step-4": "<mask token>\nimport codecs\nimport json\nimport re\n\n\ndef creeper():\n \"\"\"\n Settings for creeper file\n \"\"\"\n ccPrefix = False\n inFilename = u'creeper.csv'\n outFilename = u'Creeper.json'\n mappingFile = u'creeper-mappings.json'\n run(inFilename, outFilename, ccPrefix, mappingFile=mappingFile)\n\n\ndef mediaCreeper():\n \"\"\"\n Settings for mediaCreeper file\n \"\"\"\n ccPrefix = True\n inFilename = u'mediacreeper.csv'\n outFilename = u'MediaCreeper.json'\n run(inFilename, outFilename, ccPrefix)\n\n\ndef run(inFilename, outFilename, ccPrefix, mappingFile=None, source=\n u'http://b19.se/data/'):\n \"\"\"\n Run either file depending on settings\n \"\"\"\n mappings = {}\n if mappingFile:\n f = codecs.open(mappingFile, 'r', 'utf-8')\n mappings = json.load(f)\n f.close()\n f = codecs.open(inFilename, 'r', 'utf-8')\n lines = f.read().split('\\n')\n f.close()\n data = {}\n dates = []\n for l in lines:\n if len(l) == 0 or l.startswith(u'#'):\n continue\n start, end, cc, caption, updated = l.split(';')\n if ccPrefix:\n caption = u'[%s] %s' % (cc, caption)\n if caption in mappings.keys():\n caption = mappings[caption]\n if caption in data.keys():\n data[caption].append([start, end])\n else:\n data[caption] = [[start, end]]\n dates.append(updated)\n dates = sorted(list(set(dates)))\n metadata = {'source': source, 'oldest data': dates[0], 'newest data':\n dates[-1]}\n data[u'@metadata'] = metadata\n f = codecs.open(outFilename, 'w', 'utf-8')\n txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)\n txt = re.sub(\n '\\\\[\\\\n \"([^\"]*)\", \\\\n \"([^\"]*)\"\\\\n \\\\]',\n '[\"\\\\1\", \"\\\\2\"]', txt)\n txt = txt.replace(u', \\n [', u',\\n [')\n f.write(txt)\n f.close()\n\n\nif __name__ == '__main__':\n creeper()\n mediaCreeper()\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\nScript for converting the new csv files to the desirable json format\n'''\nimport codecs\nimport json\nimport re\n\n\ndef creeper():\n '''\n Settings for creeper file\n '''\n ccPrefix = False\n inFilename = u'creeper.csv'\n outFilename = u'Creeper.json'\n mappingFile = u'creeper-mappings.json'\n run(inFilename, outFilename, ccPrefix, mappingFile=mappingFile)\n\n\ndef mediaCreeper():\n '''\n Settings for mediaCreeper file\n '''\n ccPrefix = True\n inFilename = u'mediacreeper.csv'\n outFilename = u'MediaCreeper.json'\n run(inFilename, outFilename, ccPrefix)\n\n\ndef run(inFilename, outFilename, ccPrefix,\n mappingFile=None, source=u'http://b19.se/data/'):\n '''\n Run either file depending on settings\n '''\n # load mappings\n mappings = {}\n if mappingFile:\n f = codecs.open(mappingFile, 'r', 'utf-8')\n mappings = json.load(f)\n f.close()\n\n # load csv\n f = codecs.open(inFilename, 'r', 'utf-8')\n lines = f.read().split('\\n')\n f.close()\n data = {}\n dates = []\n for l in lines:\n if len(l) == 0 or l.startswith(u'#'):\n continue\n start, end, cc, caption, updated = l.split(';')\n if ccPrefix:\n caption = u'[%s] %s' % (cc, caption)\n if caption in mappings.keys():\n caption = mappings[caption]\n if caption in data.keys():\n data[caption].append([start, end])\n else:\n data[caption] = [[start, end], ]\n dates.append(updated)\n\n # create metadata entry\n dates = sorted(list(set(dates)))\n metadata = {\n 'source': source,\n 'oldest data': dates[0],\n 'newest data': dates[-1]}\n data[u'@metadata'] = metadata\n\n # output\n f = codecs.open(outFilename, 'w', 'utf-8')\n # f.write(json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False))\n\n # compactify it without minimizing\n txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)\n txt = re.sub(\n r'\\[\\n \"([^\"]*)\", \\n \"([^\"]*)\"\\n \\]',\n r'[\"\\1\", \"\\2\"]',\n txt)\n txt = txt.replace(u', \\n [', u',\\n [')\n f.write(txt)\n 
f.close()\n\n\nif __name__ == '__main__':\n creeper()\n mediaCreeper()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import numpy as np
z = np.linspace(2, 10, 5)  # 5 evenly spaced values from 2 to 10 inclusive
# OUT: array([ 2.,  4.,  6.,  8., 10.])

np.random.seed(0)  # fixed seed so the sampled values below are reproducible
z1 = np.random.randint(10, size=6)
# OUT: array([5, 0, 3, 3, 7, 9])

# --- Boolean masking ---
z = np.array([1, 2, 3, 4, 5])
z < 3
# OUT: array([ True,  True, False, False, False])
z[z < 3]
# OUT: array([1, 2])

# --- Elementwise arithmetic (also -, *, /), array-array and array-scalar ---
a = np.array([1, 2, 3, 4, 5])
b = np.array([6, 7, 8, 9, 10])
a + b
# OUT: array([ 7,  9, 11, 13, 15])
a + 30
# OUT: array([31, 32, 33, 34, 35])

# --- 2-D arrays and indexing ---
a = np.array([[1, 2, 3], [4, 5, 6]])
print(a)
# OUT: [[1 2 3]
#       [4 5 6]]
a.shape    # (2, 3) -- attribute, not a method: a.shape() raises TypeError
a.ndim     # 2      -- attribute as well
a[0, 2]    # 3
a[0, :]    # array([1, 2, 3])  (first row)
a[:, 1]    # array([2, 5])     (second column; the old note said [2, 4] -- wrong)
np.min(a)  # 1 (likewise np.max / np.sum)

# --- Constructors ---
np.zeros(5)                        # array([0., 0., 0., 0., 0.])
np.zeros_like([[10, 10], [1, 1]])  # array([[0, 0], [0, 0]])
np.ones((3, 2))                    # the shape must be a single tuple argument
# OUT: array([[1., 1.],
#             [1., 1.],
#             [1., 1.]])
np.full((2, 2), 100)
# OUT: array([[100, 100],
#             [100, 100]])
np.full_like(np.empty((2, 2)), 10, dtype=int)  # first arg supplies the shape
# OUT: array([[10, 10],
#             [10, 10]])

# --- Random constructors ---
np.random.rand(2, 4)                   # uniform [0, 1), shape (2, 4)
np.random.randint(10)                  # one int in [0, 10) (10 excluded)
np.random.randint(5, 10, size=(2, 2))  # ints in [5, 10), shape (2, 2)

# --- Universal functions ---
a = [np.pi, -np.pi, 0]
np.cos(a)      # OUT: array([-1., -1.,  1.])
np.arange(10)  # OUT: array([0, 1, ..., 9])

# --- Stacking ---
v1 = np.array([1, 2, 3])
v2 = np.array([4, 5, 6])
np.vstack([v1, v2, v1])
# OUT: [[1 2 3]
#       [4 5 6]
#       [1 2 3]]

# --- Fancy indexing ---
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
# a[[1, 2, 8]] -> array([2, 3, 9])

# --- Loading text data (illustrative only: "name.txt" does not exist and
# "type" is not a real dtype, so these lines stay commented out to keep the
# cheat sheet runnable) ---
# filedata = np.genfromtxt("name.txt", delimiter=",")
# filedata = filedata.astype("float32")
# filedata[filedata > 50]
# (filedata > 50) & (filedata < 100)

# --- dtype reference ---
# bool     Boolean (True or False) stored as a bit
# intp     Platform integer (normally either int32 or int64)
# int8     Byte (-128 to 127)
# int16    Integer (-32768 to 32767)
# int32    Integer (-2**31 to 2**31 - 1)
# int64    Integer (-2**63 to 2**63 - 1)
# uint8    Unsigned integer (0 to 255)
# uint16   Unsigned integer (0 to 65535)
# uint32   Unsigned integer (0 to 2**32 - 1)
# uint64   Unsigned integer (0 to 2**64 - 1)
# float16  Half precision float: sign bit, 5 bits exponent, 10 bits mantissa
# float32  Single precision float: sign bit, 8 bits exponent, 23 bits mantissa
# float64  Double precision float: sign bit, 11 bits exponent, 52 bits mantissa

a = np.arange(7, dtype='f')  # 'f' = single precision float
# dtype character codes:
# i int / u unsigned int / f float32 / d float64 / b bool
# D complex / S bytes / U unicode / V void

x = np.arange(0, 10, 2)  # array([0, 2, 4, 6, 8])
y = np.arange(5)         # array([0, 1, 2, 3, 4])
m = np.vstack([x, y])    # [[0, 2, 4, 6, 8],
                         #  [0, 1, 2, 3, 4]]
xy = np.hstack([x, y])   # array([0, 2, 4, 6, 8, 0, 1, 2, 3, 4])
|
normal
|
{
"blob_id": "be5147efda879165107378527ebf44890c03be75",
"index": 6679,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(0)\n<mask token>\nz < 3\nz[z < 3]\n<mask token>\na + b\na + 30\n<mask token>\nprint(a)\na.shape()\na.ndim()\na[0, 2]\na[0, :]\na[:, 1]\nnp.min(a)\nnp.zeros(5)\nnp.zeros_like([[10, 10], [1, 1]])\nnp.ones(3, 2)\nnp.full((2, 2), 100)\nnp.full_like((2, 2), 10, dtype=np.int)\nnp.random.rand(2, 4)\nnp.random.randint(10)\nnp.random.randint(5, 10, size=(2, 2))\n<mask token>\nnp.cos(a)\nnp.arange(10)\n<mask token>\nnp.vstack([v1, v2, v1])\n<mask token>\n",
"step-3": "<mask token>\nz = np.linspace(2, 10, 5)\nnp.random.seed(0)\nz1 = np.random.randint(10, size=6)\nz = np.array([1, 2, 3, 4, 5])\nz < 3\nz[z < 3]\na = np.array([1, 2, 3, 4, 5])\nb = np.array([6, 7, 8, 9, 10])\na + b\na + 30\na = np.array([[1, 2, 3], [4, 5, 6]])\nprint(a)\na.shape()\na.ndim()\na[0, 2]\na[0, :]\na[:, 1]\nnp.min(a)\nnp.zeros(5)\nnp.zeros_like([[10, 10], [1, 1]])\nnp.ones(3, 2)\nnp.full((2, 2), 100)\nnp.full_like((2, 2), 10, dtype=np.int)\nnp.random.rand(2, 4)\nnp.random.randint(10)\nnp.random.randint(5, 10, size=(2, 2))\na = [np.pi, -np.pi, 0]\nnp.cos(a)\nnp.arange(10)\nv1 = np.array([1, 2, 3])\nv2 = np.array([4, 5, 6])\nnp.vstack([v1, v2, v1])\na = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])\nfiledata = np.genfromtxt('name.txt', delimiter=',')\nfiledata = filedata.astype('type')\na = np.arange(7, dtype='f')\nx = np.arange(0, 10, 2)\ny = np.arange(5)\nm = np.vstack([x, y])\nxy = np.hstack([x, y])\n",
"step-4": "import numpy as np\nz = np.linspace(2, 10, 5)\nnp.random.seed(0)\nz1 = np.random.randint(10, size=6)\nz = np.array([1, 2, 3, 4, 5])\nz < 3\nz[z < 3]\na = np.array([1, 2, 3, 4, 5])\nb = np.array([6, 7, 8, 9, 10])\na + b\na + 30\na = np.array([[1, 2, 3], [4, 5, 6]])\nprint(a)\na.shape()\na.ndim()\na[0, 2]\na[0, :]\na[:, 1]\nnp.min(a)\nnp.zeros(5)\nnp.zeros_like([[10, 10], [1, 1]])\nnp.ones(3, 2)\nnp.full((2, 2), 100)\nnp.full_like((2, 2), 10, dtype=np.int)\nnp.random.rand(2, 4)\nnp.random.randint(10)\nnp.random.randint(5, 10, size=(2, 2))\na = [np.pi, -np.pi, 0]\nnp.cos(a)\nnp.arange(10)\nv1 = np.array([1, 2, 3])\nv2 = np.array([4, 5, 6])\nnp.vstack([v1, v2, v1])\na = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])\nfiledata = np.genfromtxt('name.txt', delimiter=',')\nfiledata = filedata.astype('type')\na = np.arange(7, dtype='f')\nx = np.arange(0, 10, 2)\ny = np.arange(5)\nm = np.vstack([x, y])\nxy = np.hstack([x, y])\n",
"step-5": "import numpy as np\n\n\nz = np.linspace(2,10,5) #from 2 to 10, with 5 elements\n# OUT: array( [ 2. , 4. , 6. , 8. , 10. ] )\n\nnp.random.seed(0)\nz1 = np.random.randint(10, size = 6)\n# OUT: array( [5, 0, 3, 3, 7, 9] )\n\nz = np.array([1,2,3,4,5])\nz < 3\n# OUT: array([T,T,F,F,F])\nz[z<3]\n# OUT: array([1,2])\n\na = np.array([1,2,3,4,5])\nb = np.array([6,7,8,9,10])\n\na + b # - * /\n# OUT: array([7,9,11,13,15])\na + 30 # - * /\n# OUT: array([31,32,33,34,35])\n\na = np.array([[1,2,3],[4,5,6]])\nprint(a)\n# OUT: [[1 2 3]\n# [4 5 6]]\na.shape()\n# OUT: (2,3)\na.ndim()\n# OUT: 2\na[0,2]\n# OUT: 3\na[0,:]\n# array([1,2,3])\na[:,1]\n# array([2,4])\n\nnp.min(a) #or MAX|SUM\n# OUT: 1\n\n\n\nnp.zeros(5)\n# OUT: array([0.,0.,0.,0.,0.])\nnp.zeros_like([[10,10],[1,1]])\n# OUT: [[0,0],[0,0]]\nnp.ones(3,2)\n# OUT: array([[1,1],\n#\t [1,1],\n#\t [1,1]])\nnp.full((2,2),100)\n# OUT: array([[100,100],\n#\t [100,100]])\nnp.full_like((2,2), 10, dtype = np.int)\n# OUT: [[10,10][10,10]]\n\n\nnp.random.rand(2,4)\n#OUT: array([[x,x,x,x],\n#\t [x,x,x,x]])\n\nnp.random.randint(10) \n#OUT: x # random from 0 to 10 (non include)\n\nnp.random.randint(5,10, size=(2,2)) #from 5 to 10(non include)\n#OUT: array([[x,x],\n#\t [x,x]])\n\n\na = [np.pi,-np.pi,0]\nnp.cos(a) \n#OUT: [-1,-1,1]\n\n\nnp.arange(10)\n#OUT: [0,1,...,9]\n\n\nv1 = np.array([1,2,3])\nv2 = np.array([4,5,6])\n\nnp.vstack([v1,v2,v1])\n\n#1 2 3\n#4 5 6\n#1 2 3\n\n\n\na = np.array([1,2,3,4,5,6,7,8,9])\n#a[[1,2,8]]\n#OUT: 2,3,9\n\n\nfiledata = np.genfromtxt(\"name.txt\", delimiter = \",\")\n# ?\nfiledata = filedata.astype(\"type\") #!\n# filedata[filedata > 50] \n# ((filedata > 50) & (filedata < 100))\n\n\n\n\n# bool Boolean (True or False) stored as a bit\n# inti Platform integer (normally either int32 or int64)\n# int8 Byte (-128 to 127)\n# int16 Integer (-32768 to 32767)\n# int32 Integer (-2 ** 31 to 2 ** 31 -1)\n# int64 Integer (-2 ** 63 to 2 ** 63 -1)\n# uint8 Unsigned integer (0 to 255)\n# uint16 Unsigned integer (0 to 
65535)\n# uint32 Unsigned integer (0 to 2 ** 32 - 1)\n# uint64 Unsigned integer (0 to 2 ** 64 - 1)\n# float16 Half precision float: sign bit, 5 bits exponent, 10 bits mantissa\n# float32 Single precision float: sign bit, 8 bits exponent, 23 bits mantissa\n# float64 Double precision float: sign bit, 11 bits exponent, 52 bits mantissa\n\n\na = np.arange(7, dtype='f')\n# Integer i\n# Unsigned integer u\n# Single precision float f\n# Double precision float d\n# Boolean b\n# Complex D\n# String S\n# Unicode U\n# Void V\n\n\n\nx = np.arange(0,10,2) # x=([0,2,4,6,8])\ny = np.arange(5) # y=([0,1,2,3,4])\nm = np.vstack([x,y]) # m=([[0,2,4,6,8],\n # [0,1,2,3,4]])\nxy = np.hstack([x,y]) # xy =([0,2,4,6,8,0,1,2,3,4])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Kontrollülesanne 7.4c - Elutee number (tähtaeg 28.okt. (incl))
Maksimaalne failide arv: 1
Töö liik: Individuaaltöö
Numeroloogias peetakse tähtsaks elutee numbrit, mille arvutamiseks tuleb liita kokku sünnikuupäeva ja -aasta numbrid
nii, et jõutakse lõpuks ühe numbrini.
Näiteks, oletame, et sünnikuupäev on 15.05.1975. Teha tuleb niisiis järgnev tehe: 1+5+5+1+9+7+5 = 33, 3+3 = 6, seega on
elutee number 6.
Aga kui sünnikuupäevaks on nt. 17.11.1981, siis arvutada tuleb järgmiselt: 1+7+1+1+1+9+8+1 = 29, 2+9 = 11, 1+1=2.
Elutee numbrit arvutab järgmine (rekursiivne) funktsioon, mis võtab argumendiks sünnikuupäeva:
#argument s on sõne, esialgu see on kuupäev, edasi juba arvutatud arv
def elutee(s):
#abimuutaja numbri arvutamiseks
n = 0
# tsükkel, mis vaatab iga sümboli sõnes
for i in s:
if i != ".":
n += int(i) # arvutame summat
# kui saadud arv on väiksem kui 10, siis ongi elutee number käes
if n < 10:
return n
# kui saadud arv on 10 või suurem, siis on vaja uuesti arvutada,
#selleks kasutame jälle sama funktsiooni
else:
return elutee(str(n))
Failis sunnikuupaevad.txt on mingi hulk sünnikuupäevi, iga sünnikuupäev eraldi real. Kirjutada programm, mis tekitab
selle faili põhjal 9 tekstifaili nimedega eluteenumber1.txt, eluteenumber2.txt, ..., eluteenumber9.txt ning jagab
sünnikuupäevad nendesse failidesse vastavalt elutee numbrile (elutee numbri arvutamiseks kasutada funktsiooni elutee).
Näiteks sünnikuupäev 15.05.1975 tuleb kirjutada faili eluteenumber6.txt.
Näide programmi tööst:
Kui faili sunnikuupaevad.txt sisu on
07.02.1969
17.11.1981
29.03.1955
siis faili eluteenumber7.txt sisu peab olema
07.02.1969
29.03.1955
ja faili eluteenumber2.txt sisu peab olema
17.11.1981
Kõik ülejäänud 7 faili peavad selle näite korral küll tekkima, aga jääma tühjaks.
"""
def elutee(s):
    """Return the numerological "life path" number for the string s.

    Sums every digit in s (the dots of a date such as "15.05.1975" are
    skipped) and keeps reducing the digit sum recursively until a single
    digit remains, e.g. "15.05.1975" -> 33 -> 6.
    """
    total = sum(int(ch) for ch in s if ch != ".")
    # A single digit is the final life-path number; otherwise reduce again.
    if total < 10:
        return total
    return elutee(str(total))
# Make sure all nine output files eluteenumber1.txt .. eluteenumber9.txt exist
# (every file must be created even when no birth date maps to its number).
# Append mode creates a missing file without truncating an existing one, which
# matches the original behaviour; unlike the original, each handle is closed
# right away instead of being leaked.
for number in range(1, 10):
    with open("eluteenumber" + str(number) + ".txt", "a"):
        pass

# Distribute each birth date from sunnikuupaevad.txt into the output file
# named after its life-path number. The original shadowed the builtin name
# "file" for both the input and output handles and never closed the input
# file; the "with" blocks close both deterministically.
with open("sunnikuupaevad.txt", encoding="UTF-8") as source:
    for row in source:
        out_name = "eluteenumber" + str(elutee(row.strip())) + ".txt"
        with open(out_name, "a", encoding="UTF-8") as out_file:
            out_file.write(row)
|
normal
|
{
"blob_id": "971187dc0e0f02282c8945940d07c011e247667a",
"index": 9401,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef elutee(s):\n n = 0\n for i in s:\n if i != '.':\n n += int(i)\n if n < 10:\n return n\n else:\n return elutee(str(n))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef elutee(s):\n n = 0\n for i in s:\n if i != '.':\n n += int(i)\n if n < 10:\n return n\n else:\n return elutee(str(n))\n\n\nfor i in range(1, 10):\n fileName = 'eluteenumber' + str(i) + '.txt'\n f = open(fileName, 'a')\n<mask token>\nfor row in file:\n fileName = 'eluteenumber' + str(elutee(row.strip())) + '.txt'\n file = open(fileName, 'a', encoding='UTF-8')\n file.write(str(row))\n file.close()\nfile.close()\n",
"step-4": "<mask token>\n\n\ndef elutee(s):\n n = 0\n for i in s:\n if i != '.':\n n += int(i)\n if n < 10:\n return n\n else:\n return elutee(str(n))\n\n\nfor i in range(1, 10):\n fileName = 'eluteenumber' + str(i) + '.txt'\n f = open(fileName, 'a')\nfile = open('sunnikuupaevad.txt', encoding='UTF-8')\nfor row in file:\n fileName = 'eluteenumber' + str(elutee(row.strip())) + '.txt'\n file = open(fileName, 'a', encoding='UTF-8')\n file.write(str(row))\n file.close()\nfile.close()\n",
"step-5": "\"\"\"\nKontrollülesanne 7.4c - Elutee number (tähtaeg 28.okt. (incl))\nMaksimaalne failide arv: 1\nTöö liik: Individuaaltöö\n\n\nNumeroloogias peetakse tähtsaks elutee numbrit, mille arvutamiseks tuleb liita kokku sünnikuupäeva ja -aasta numbrid\nnii, et jõutakse lõpuks ühe numbrini.\n\nNäiteks, oletame, et sünnikuupäev on 15.05.1975. Teha tuleb niisiis järgnev tehe: 1+5+5+1+9+7+5 = 33, 3+3 = 6, seega on\nelutee number 6.\n\nAga kui sünnikuupäevaks on nt. 17.11.1981, siis arvutada tuleb järgmiselt: 1+7+1+1+1+9+8+1 = 29, 2+9 = 11, 1+1=2.\n\nElutee numbrit arvutab järgmine (rekursiivne) funktsioon, mis võtab argumendiks sünnikuupäeva:\n\n#argument s on sõne, esialgu see on kuupäev, edasi juba arvutatud arv\ndef elutee(s):\n #abimuutaja numbri arvutamiseks\n n = 0\n # tsükkel, mis vaatab iga sümboli sõnes\n for i in s:\n if i != \".\":\n n += int(i) # arvutame summat\n # kui saadud arv on väiksem kui 10, siis ongi elutee number käes\n if n < 10:\n return n\n # kui saadud arv on 10 või suurem, siis on vaja uuesti arvutada,\n #selleks kasutame jälle sama funktsiooni\n else:\n return elutee(str(n))\nFailis sunnikuupaevad.txt on mingi hulk sünnikuupäevi, iga sünnikuupäev eraldi real. 
Kirjutada programm, mis tekitab\nselle faili põhjal 9 tekstifaili nimedega eluteenumber1.txt, eluteenumber2.txt, ..., eluteenumber9.txt ning jagab\nsünnikuupäevad nendesse failidesse vastavalt elutee numbrile (elutee numbri arvutamiseks kasutada funktsiooni elutee).\nNäiteks sünnikuupäev 15.05.1975 tuleb kirjutada faili eluteenumber6.txt.\n\nNäide programmi tööst:\n\nKui faili sunnikuupaevad.txt sisu on\n\n 07.02.1969\n 17.11.1981\n 29.03.1955\nsiis faili eluteenumber7.txt sisu peab olema\n\n 07.02.1969\n 29.03.1955\nja faili eluteenumber2.txt sisu peab olema\n\n 17.11.1981\nKõik ülejäänud 7 faili peavad selle näite korral küll tekkima, aga jääma tühjaks.\n\"\"\"\n\ndef elutee(s):\n #abimuutaja numbri arvutamiseks\n n = 0\n # tsükkel, mis vaatab iga sümboli sõnes\n for i in s:\n if i != \".\":\n n += int(i) # arvutame summat\n # kui saadud arv on väiksem kui 10, siis ongi elutee number käes\n if n < 10:\n return n\n # kui saadud arv on 10 või suurem, siis on vaja uuesti arvutada,\n #selleks kasutame jälle sama funktsiooni\n else:\n return elutee(str(n))\n\nfor i in range(1,10):\n fileName = \"eluteenumber\" + str(i) + \".txt\"\n f = open(fileName, \"a\")\n\n# inputFile = input(\"Palun sisestage sünnikuupäevade faili nimi: \") TEST EI TAHA FAILI SISESTAMIST NÄHAGI!\nfile = open(\"sunnikuupaevad.txt\", encoding=\"UTF-8\")\n\nfor row in file:\n fileName = \"eluteenumber\" + str(elutee(row.strip())) + \".txt\"\n file = open(fileName, \"a\", encoding=\"UTF-8\")\n file.write(str(row))\n file.close()\nfile.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Auxiliary functions for calculating the utility of achieving a certain data rate (for a UE).
Attention: The absolute reward that's achieved with different utilities cannot be compared directly (diff ranges)!
"""
import numpy as np
from deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY
def linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):
    """Linear utility: equals the data rate, saturating at ``max_dr``.

    :param curr_dr: Current data rate (must be non-negative)
    :param max_dr: Maximum data rate at which the utility stops increasing
    :return: Utility value clipped to [MIN_UTILITY, MAX_UTILITY]
    """
    assert curr_dr >= 0 and max_dr >= 0
    assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, (
        "The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!")
    clipped = np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)
    return clipped
def step_utility(curr_dr, req_dr):
    """Step utility: flat minimum below the requirement, flat maximum at/above it.

    :param curr_dr: Current data rate
    :param req_dr: Required data rate
    :return: MAX_UTILITY when the requirement is met, else MIN_UTILITY
    """
    requirement_met = curr_dr >= req_dr
    return MAX_UTILITY if requirement_met else MIN_UTILITY
def log_utility(curr_dr):
    """Logarithmic utility: 10 * log10(data rate), clipped to the utility range.

    Steep gains at low data rates that flatten out for higher ones: the clip
    yields -20 for rates <= 0.01, 0 at rate 1, and +20 for rates >= 100.
    A rate of exactly 0 maps straight to MIN_UTILITY since log10(0) is
    undefined.

    :param curr_dr: Current data rate
    :return: Utility value in [MIN_UTILITY, MAX_UTILITY]
    """
    # This scaling only makes sense with the fixed [-20, 20] utility range.
    assert MIN_UTILITY == -20 and MAX_UTILITY == 20, "The chosen log utility requires min/max utility to be -20/+20"
    if curr_dr == 0:
        return MIN_UTILITY
    raw_utility = 10 * np.log10(curr_dr)
    return np.clip(raw_utility, MIN_UTILITY, MAX_UTILITY)
|
normal
|
{
"blob_id": "e3de072d6bce2ecc105306c06b9a9aa0362130ff",
"index": 6234,
"step-1": "<mask token>\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n",
"step-2": "<mask token>\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n",
"step-3": "<mask token>\n\n\ndef linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):\n \"\"\"\n Utility that directly equals the data rate, increasing linearly up to a given maximum.\n\n :param max_dr: Maximum data rate at which the utility does not increase further\n :return: Utility\n \"\"\"\n assert curr_dr >= 0 and max_dr >= 0\n assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, 'The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!'\n return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY\n\n\ndef linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):\n \"\"\"\n Utility that directly equals the data rate, increasing linearly up to a given maximum.\n\n :param max_dr: Maximum data rate at which the utility does not increase further\n :return: Utility\n \"\"\"\n assert curr_dr >= 0 and max_dr >= 0\n assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, 'The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!'\n return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n",
"step-5": "\"\"\"\nAuxiliary functions for calculating the utility of achieving a certain data rate (for a UE).\nAttention: The absolute reward that's achieved with different utilities cannot be compared directly (diff ranges)!\n\"\"\"\nimport numpy as np\n\nfrom deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY\n\n\ndef linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):\n \"\"\"\n Utility that directly equals the data rate, increasing linearly up to a given maximum.\n\n :param max_dr: Maximum data rate at which the utility does not increase further\n :return: Utility\n \"\"\"\n assert curr_dr >= 0 and max_dr >= 0\n assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, \\\n \"The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!\"\n return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n # 4*log(0.1+x) looks good: around -10 for no dr; 0 for 0.9 dr; slightly positive for more\n # 10*log10(0.1+x) is even better because it's steeper, is exactly -10 for dr=0, and flatter for larger dr\n # with many UEs where each UE only gets around 0.1 data rate, 100*log(0.9+x) looks good (eg, 50 UEs on medium env)\n\n # better: 10*log10(x) --> clip to [-20, 20]; -20 for <= 0.01 dr; +20 for >= 100 dr\n # ensure min/max 
utility are set correctly for this utility function\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, \"The chosen log utility requires min/max utility to be -20/+20\"\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import log
import core
import time
# Tool metadata echoed into the generated report header.
__description__ = 'OS X Auditor'
__author__ = 'Atarimaster & @Jipe_'
__version__ = '0.5.0'
# Root of the filesystem tree being audited (joined with etc/localtime below).
ROOT_PATH = '/'
# Effective uid/gid of the auditing process, kept as strings for report text.
Euid = str(os.geteuid())
Egid = str(os.getegid())
def generate_header():
    """Assemble the report header as a dict.

    Keys: 'description' (tool banner with version, timestamp and euid/egid),
    'audit_path', 'system_version' and 'timezone'. Calling this also fills
    the global OSX_VERSION via GetAuditedSystemVersion().
    """
    banner = "Report generated by %s v%s on %s running as %s/%s" % (
        __description__, __version__, time.strftime('%x %X %Z'), Euid, Egid)
    # Values are evaluated in insertion order, so the version probe still
    # runs before the timezone probe, exactly as before.
    return {
        'description': banner,
        'audit_path': "Audited system path: " + ROOT_PATH.decode("utf-8"),
        'system_version': "Version of the audited system: " + GetAuditedSystemVersion(),
        'timezone': "Current timezone of the audited system: " + GetAuditedSystemTimezone(),
    }
def GetAuditedSystemVersion():
    """Return a human readable version string for the audited system.

    Reads /System/Library/CoreServices/SystemVersion.plist through
    core.UniversalReadPlist. Side effect: fills the global OSX_VERSION dict
    with the build string and the split numeric version. Returns
    "Unknown system version" when the plist cannot be read.
    """
    global OSX_VERSION
    SysVersion = "Unknown system version"
    SystemVersionPlist = core.UniversalReadPlist("/System/Library/CoreServices/SystemVersion.plist")
    if SystemVersionPlist:
        if "ProductName" in SystemVersionPlist:
            SysVersion = SystemVersionPlist["ProductName"]
        if "ProductVersion" in SystemVersionPlist:
            SysVersion += " " + SystemVersionPlist["ProductVersion"]
        if "ProductBuildVersion" in SystemVersionPlist:
            SysVersion += " build " + SystemVersionPlist["ProductBuildVersion"]
        # Use .get() so a plist missing one of these keys cannot raise
        # KeyError (the membership tests above show the keys are not
        # guaranteed), and pad the dotted version so a two-part value such
        # as "10.11" cannot raise IndexError on the patch component.
        product_version = SystemVersionPlist.get("ProductVersion") or "0.0.0"
        parts = (product_version.split('.') + ['0', '0'])[:3]
        OSX_VERSION = {
            "ProductBuildVersion": SystemVersionPlist.get("ProductBuildVersion"),
            "ProductVersion": SystemVersionPlist.get("ProductVersion"),
            "MajorVersion": int(parts[0]),
            "MinorVersion": int(parts[1]),
            "PatchVersion": int(parts[2])
        }
    else:
        log.PrintAndLog(u"Cannot determine the system version", "ERROR")
    return SysVersion
def GetAuditedSystemTimezone():
    """ Return the current system timezone, e.g. "Europe/Paris".

    Resolves the etc/localtime symlink under ROOT_PATH and returns its
    last two path components. Returns "Unknown" when the link cannot be
    read (the original code then crashed, since it indexed the unset
    False value).
    """
    try:
        Timezone = os.path.realpath(os.path.join(ROOT_PATH, "etc/localtime"))
        Timezone = Timezone.split("/")
    except Exception as e:
        # Bug fix: this was a bare PrintAndLog() call (NameError) -- the
        # helper lives in the imported log module, as used elsewhere here.
        log.PrintAndLog(u"Cannot read the timezone" + str(e.args).decode("utf-8"), "ERROR")
        return "Unknown"
    return Timezone[-2] + "/" + Timezone[-1]
|
normal
|
{
"blob_id": "547d67bce7eb05e55e02c73a22342ca572e89f39",
"index": 9959,
"step-1": "<mask token>\n\n\ndef GetAuditedSystemVersion():\n global OSX_VERSION\n SysVersion = 'Unknown system version'\n SystemVersionPlist = False\n SystemVersionPlist = core.UniversalReadPlist(\n '/System/Library/CoreServices/SystemVersion.plist')\n if SystemVersionPlist:\n if 'ProductName' in SystemVersionPlist:\n SysVersion = SystemVersionPlist['ProductName']\n if 'ProductVersion' in SystemVersionPlist:\n SysVersion += ' ' + SystemVersionPlist['ProductVersion']\n if 'ProductBuildVersion' in SystemVersionPlist:\n SysVersion += ' build ' + SystemVersionPlist['ProductBuildVersion']\n OSX_VERSION = {'ProductBuildVersion': SystemVersionPlist[\n 'ProductBuildVersion'], 'ProductVersion': SystemVersionPlist[\n 'ProductVersion'], 'MajorVersion': int(SystemVersionPlist[\n 'ProductVersion'].split('.')[0]), 'MinorVersion': int(\n SystemVersionPlist['ProductVersion'].split('.')[1]),\n 'PatchVersion': int(SystemVersionPlist['ProductVersion'].split(\n '.')[2])}\n else:\n log.PrintAndLog(u'Cannot determine the system version', 'ERROR')\n return SysVersion\n\n\ndef GetAuditedSystemTimezone():\n \"\"\" Return the current system timezone \"\"\"\n Timezone = False\n try:\n Timezone = os.path.realpath(os.path.join(ROOT_PATH, 'etc/localtime'))\n Timezone = Timezone.split('/')\n except Exception as e:\n PrintAndLog(u'Cannot read the timezone' + str(e.args).decode(\n 'utf-8'), 'ERROR')\n return Timezone[-2] + '/' + Timezone[-1]\n",
"step-2": "<mask token>\n\n\ndef generate_header():\n header = {}\n description = ('Report generated by ' + __description__ + ' v' +\n __version__ + ' on ' + time.strftime('%x %X %Z') + ' running as ' +\n Euid + '/' + Egid)\n header['description'] = description\n audit_path = 'Audited system path: ' + ROOT_PATH.decode('utf-8')\n header['audit_path'] = audit_path\n AuditedSystemVersion = GetAuditedSystemVersion()\n sysv = 'Version of the audited system: ' + AuditedSystemVersion\n header['system_version'] = sysv\n Timezone = GetAuditedSystemTimezone()\n tz = 'Current timezone of the audited system: ' + Timezone\n header['timezone'] = tz\n return header\n\n\ndef GetAuditedSystemVersion():\n global OSX_VERSION\n SysVersion = 'Unknown system version'\n SystemVersionPlist = False\n SystemVersionPlist = core.UniversalReadPlist(\n '/System/Library/CoreServices/SystemVersion.plist')\n if SystemVersionPlist:\n if 'ProductName' in SystemVersionPlist:\n SysVersion = SystemVersionPlist['ProductName']\n if 'ProductVersion' in SystemVersionPlist:\n SysVersion += ' ' + SystemVersionPlist['ProductVersion']\n if 'ProductBuildVersion' in SystemVersionPlist:\n SysVersion += ' build ' + SystemVersionPlist['ProductBuildVersion']\n OSX_VERSION = {'ProductBuildVersion': SystemVersionPlist[\n 'ProductBuildVersion'], 'ProductVersion': SystemVersionPlist[\n 'ProductVersion'], 'MajorVersion': int(SystemVersionPlist[\n 'ProductVersion'].split('.')[0]), 'MinorVersion': int(\n SystemVersionPlist['ProductVersion'].split('.')[1]),\n 'PatchVersion': int(SystemVersionPlist['ProductVersion'].split(\n '.')[2])}\n else:\n log.PrintAndLog(u'Cannot determine the system version', 'ERROR')\n return SysVersion\n\n\ndef GetAuditedSystemTimezone():\n \"\"\" Return the current system timezone \"\"\"\n Timezone = False\n try:\n Timezone = os.path.realpath(os.path.join(ROOT_PATH, 'etc/localtime'))\n Timezone = Timezone.split('/')\n except Exception as e:\n PrintAndLog(u'Cannot read the timezone' + 
str(e.args).decode(\n 'utf-8'), 'ERROR')\n return Timezone[-2] + '/' + Timezone[-1]\n",
"step-3": "<mask token>\n__description__ = 'OS X Auditor'\n__author__ = 'Atarimaster & @Jipe_'\n__version__ = '0.5.0'\nROOT_PATH = '/'\nEuid = str(os.geteuid())\nEgid = str(os.getegid())\n\n\ndef generate_header():\n header = {}\n description = ('Report generated by ' + __description__ + ' v' +\n __version__ + ' on ' + time.strftime('%x %X %Z') + ' running as ' +\n Euid + '/' + Egid)\n header['description'] = description\n audit_path = 'Audited system path: ' + ROOT_PATH.decode('utf-8')\n header['audit_path'] = audit_path\n AuditedSystemVersion = GetAuditedSystemVersion()\n sysv = 'Version of the audited system: ' + AuditedSystemVersion\n header['system_version'] = sysv\n Timezone = GetAuditedSystemTimezone()\n tz = 'Current timezone of the audited system: ' + Timezone\n header['timezone'] = tz\n return header\n\n\ndef GetAuditedSystemVersion():\n global OSX_VERSION\n SysVersion = 'Unknown system version'\n SystemVersionPlist = False\n SystemVersionPlist = core.UniversalReadPlist(\n '/System/Library/CoreServices/SystemVersion.plist')\n if SystemVersionPlist:\n if 'ProductName' in SystemVersionPlist:\n SysVersion = SystemVersionPlist['ProductName']\n if 'ProductVersion' in SystemVersionPlist:\n SysVersion += ' ' + SystemVersionPlist['ProductVersion']\n if 'ProductBuildVersion' in SystemVersionPlist:\n SysVersion += ' build ' + SystemVersionPlist['ProductBuildVersion']\n OSX_VERSION = {'ProductBuildVersion': SystemVersionPlist[\n 'ProductBuildVersion'], 'ProductVersion': SystemVersionPlist[\n 'ProductVersion'], 'MajorVersion': int(SystemVersionPlist[\n 'ProductVersion'].split('.')[0]), 'MinorVersion': int(\n SystemVersionPlist['ProductVersion'].split('.')[1]),\n 'PatchVersion': int(SystemVersionPlist['ProductVersion'].split(\n '.')[2])}\n else:\n log.PrintAndLog(u'Cannot determine the system version', 'ERROR')\n return SysVersion\n\n\ndef GetAuditedSystemTimezone():\n \"\"\" Return the current system timezone \"\"\"\n Timezone = False\n try:\n Timezone = 
os.path.realpath(os.path.join(ROOT_PATH, 'etc/localtime'))\n Timezone = Timezone.split('/')\n except Exception as e:\n PrintAndLog(u'Cannot read the timezone' + str(e.args).decode(\n 'utf-8'), 'ERROR')\n return Timezone[-2] + '/' + Timezone[-1]\n",
"step-4": "import os\nimport log\nimport core\nimport time\n__description__ = 'OS X Auditor'\n__author__ = 'Atarimaster & @Jipe_'\n__version__ = '0.5.0'\nROOT_PATH = '/'\nEuid = str(os.geteuid())\nEgid = str(os.getegid())\n\n\ndef generate_header():\n header = {}\n description = ('Report generated by ' + __description__ + ' v' +\n __version__ + ' on ' + time.strftime('%x %X %Z') + ' running as ' +\n Euid + '/' + Egid)\n header['description'] = description\n audit_path = 'Audited system path: ' + ROOT_PATH.decode('utf-8')\n header['audit_path'] = audit_path\n AuditedSystemVersion = GetAuditedSystemVersion()\n sysv = 'Version of the audited system: ' + AuditedSystemVersion\n header['system_version'] = sysv\n Timezone = GetAuditedSystemTimezone()\n tz = 'Current timezone of the audited system: ' + Timezone\n header['timezone'] = tz\n return header\n\n\ndef GetAuditedSystemVersion():\n global OSX_VERSION\n SysVersion = 'Unknown system version'\n SystemVersionPlist = False\n SystemVersionPlist = core.UniversalReadPlist(\n '/System/Library/CoreServices/SystemVersion.plist')\n if SystemVersionPlist:\n if 'ProductName' in SystemVersionPlist:\n SysVersion = SystemVersionPlist['ProductName']\n if 'ProductVersion' in SystemVersionPlist:\n SysVersion += ' ' + SystemVersionPlist['ProductVersion']\n if 'ProductBuildVersion' in SystemVersionPlist:\n SysVersion += ' build ' + SystemVersionPlist['ProductBuildVersion']\n OSX_VERSION = {'ProductBuildVersion': SystemVersionPlist[\n 'ProductBuildVersion'], 'ProductVersion': SystemVersionPlist[\n 'ProductVersion'], 'MajorVersion': int(SystemVersionPlist[\n 'ProductVersion'].split('.')[0]), 'MinorVersion': int(\n SystemVersionPlist['ProductVersion'].split('.')[1]),\n 'PatchVersion': int(SystemVersionPlist['ProductVersion'].split(\n '.')[2])}\n else:\n log.PrintAndLog(u'Cannot determine the system version', 'ERROR')\n return SysVersion\n\n\ndef GetAuditedSystemTimezone():\n \"\"\" Return the current system timezone \"\"\"\n Timezone = 
False\n try:\n Timezone = os.path.realpath(os.path.join(ROOT_PATH, 'etc/localtime'))\n Timezone = Timezone.split('/')\n except Exception as e:\n PrintAndLog(u'Cannot read the timezone' + str(e.args).decode(\n 'utf-8'), 'ERROR')\n return Timezone[-2] + '/' + Timezone[-1]\n",
"step-5": "import os\nimport log\nimport core\nimport time\n\n__description__ = 'OS X Auditor'\n__author__ = 'Atarimaster & @Jipe_'\n__version__ = '0.5.0'\n\nROOT_PATH = '/'\n\nEuid = str(os.geteuid())\nEgid = str(os.getegid())\n\ndef generate_header():\n header = {}\n\n # Description(Audited By)\n description = \"Report generated by \" + __description__ + \" v\" + __version__ + \" on \" + time.strftime('%x %X %Z') + \" running as \" + Euid + \"/\" + Egid\n header['description'] = description\n\n # Audited Path\n audit_path = \"Audited system path: \" + ROOT_PATH.decode(\"utf-8\")\n header['audit_path'] = audit_path\n\n # System Version\n AuditedSystemVersion = GetAuditedSystemVersion()\n sysv = \"Version of the audited system: \" + AuditedSystemVersion\n header['system_version'] = sysv\n\n # Current Timezone\n Timezone = GetAuditedSystemTimezone()\n tz = \"Current timezone of the audited system: \" + Timezone\n header['timezone'] = tz\n\n return header\n\ndef GetAuditedSystemVersion():\n global OSX_VERSION\n\n SysVersion = \"Unknown system version\"\n SystemVersionPlist = False\n\n SystemVersionPlist = core.UniversalReadPlist(\"/System/Library/CoreServices/SystemVersion.plist\")\n\n if SystemVersionPlist:\n if \"ProductName\" in SystemVersionPlist: SysVersion = SystemVersionPlist[\"ProductName\"]\n if \"ProductVersion\" in SystemVersionPlist: SysVersion += \" \" + SystemVersionPlist[\"ProductVersion\"]\n if \"ProductBuildVersion\" in SystemVersionPlist: SysVersion += \" build \" + SystemVersionPlist[\"ProductBuildVersion\"]\n\n OSX_VERSION = {\n \"ProductBuildVersion\": SystemVersionPlist[\"ProductBuildVersion\"],\n \"ProductVersion\": SystemVersionPlist[\"ProductVersion\"],\n \"MajorVersion\": int(SystemVersionPlist[\"ProductVersion\"].split('.')[0]),\n \"MinorVersion\": int(SystemVersionPlist[\"ProductVersion\"].split('.')[1]),\n \"PatchVersion\": int(SystemVersionPlist[\"ProductVersion\"].split('.')[2])\n }\n\n else:\n log.PrintAndLog(u\"Cannot determine the 
system version\", \"ERROR\")\n\n return SysVersion\n\ndef GetAuditedSystemTimezone():\n \"\"\" Return the current system timezone \"\"\"\n\n Timezone = False\n try:\n Timezone = os.path.realpath(os.path.join(ROOT_PATH, \"etc/localtime\"))\n Timezone = Timezone.split(\"/\")\n except Exception as e:\n PrintAndLog(u\"Cannot read the timezone\" + str(e.args).decode(\"utf-8\"), \"ERROR\")\n\n return Timezone[-2] + \"/\" + Timezone[-1]",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#coding: utf-8
import logging
from threading import Thread
from ldap import SCOPE_BASE
from seafevents.ldap_syncer.ldap_conn import LdapConn
from seafevents.ldap_syncer.utils import bytes2str, add_group_uuid_pair
from seaserv import get_group_dn_pairs
logger = logging.getLogger(__name__)
def migrate_dn_pairs(settings):
    """Backfill group-uuid pairs for groups that were recorded by DN.

    Looks every known (group_id, dn) pair up on the configured LDAP
    servers and stores the directory uuid alongside the group id.
    Aborts the whole migration if any server cannot be reached.
    """
    pairs = get_group_dn_pairs()
    if pairs is None:
        logger.warning('get group dn pairs from db failed when migrate dn pairs.')
        return

    # Process the pairs in reverse of the stored order, as before.
    pairs.reverse()
    for pair in pairs:
        for cfg in settings.ldap_configs:
            conn = LdapConn(cfg.host, cfg.user_dn, cfg.passwd, cfg.follow_referrals)
            conn.create_conn()
            if not conn.conn:
                logger.warning('connect ldap server [%s] failed.' % cfg.user_dn)
                return

            # Base-scope lookup of the group entry itself, fetching only
            # the attribute that carries the directory uuid.
            search = conn.paged_search if cfg.use_page_result else conn.search
            results = search(pair.dn, SCOPE_BASE, '(objectClass=*)', [cfg.group_uuid_attr])
            conn.unbind_conn()

            results = bytes2str(results)
            if results:
                uuid = results[0][1][cfg.group_uuid_attr][0]
                add_group_uuid_pair(pair.group_id, uuid)
class LdapSync(Thread):
    """Base worker thread for one LDAP synchronization pass.

    Subclasses override the get_data_from_* hooks and sync_data();
    run() drives the fetch/compare/apply cycle.
    """

    def __init__(self, settings):
        Thread.__init__(self)
        self.settings = settings

    def run(self):
        if self.settings.enable_group_sync:
            migrate_dn_pairs(settings=self.settings)
        self.start_sync()
        self.show_sync_result()

    def show_sync_result(self):
        # Hook: subclasses report sync statistics here.
        pass

    def start_sync(self):
        ldap_data = self.get_data_from_ldap()
        if ldap_data is None:
            return
        db_data = self.get_data_from_db()
        if db_data is None:
            return
        self.sync_data(db_data, ldap_data)

    def get_data_from_db(self):
        # Hook: subclasses return the current state from the database.
        return None

    def get_data_from_ldap(self):
        merged = {}
        for config in self.settings.ldap_configs:
            server_data = self.get_data_from_ldap_by_server(config)
            # A single failing server fails the whole fetch.
            if server_data is None:
                return None
            # First server to provide a key wins; remember which config
            # the entry came from.
            for key in server_data.keys():
                if key not in merged:
                    merged[key] = server_data[key]
                    merged[key].config = config
        return merged

    def get_data_from_ldap_by_server(self, config):
        # Hook: subclasses fetch entries from one configured server.
        return None

    def sync_data(self, data_db, data_ldap):
        # Hook: subclasses reconcile DB state against LDAP state.
        pass
|
normal
|
{
"blob_id": "8cc0393082448bb8f61068b5c96e89ef3aee77ed",
"index": 235,
"step-1": "<mask token>\n\n\nclass LdapSync(Thread):\n\n def __init__(self, settings):\n Thread.__init__(self)\n self.settings = settings\n\n def run(self):\n if self.settings.enable_group_sync:\n migrate_dn_pairs(settings=self.settings)\n self.start_sync()\n self.show_sync_result()\n\n def show_sync_result(self):\n pass\n\n def start_sync(self):\n data_ldap = self.get_data_from_ldap()\n if data_ldap is None:\n return\n data_db = self.get_data_from_db()\n if data_db is None:\n return\n self.sync_data(data_db, data_ldap)\n\n def get_data_from_db(self):\n return None\n\n def get_data_from_ldap(self):\n ret = {}\n for config in self.settings.ldap_configs:\n cur_ret = self.get_data_from_ldap_by_server(config)\n if cur_ret is None:\n return None\n for key in cur_ret.keys():\n if key not in ret:\n ret[key] = cur_ret[key]\n ret[key].config = config\n return ret\n\n def get_data_from_ldap_by_server(self, config):\n return None\n\n def sync_data(self, data_db, data_ldap):\n pass\n",
"step-2": "<mask token>\n\n\ndef migrate_dn_pairs(settings):\n grp_dn_pairs = get_group_dn_pairs()\n if grp_dn_pairs is None:\n logger.warning(\n 'get group dn pairs from db failed when migrate dn pairs.')\n return\n grp_dn_pairs.reverse()\n for grp_dn_pair in grp_dn_pairs:\n for config in settings.ldap_configs:\n search_filter = '(objectClass=*)'\n ldap_conn = LdapConn(config.host, config.user_dn, config.passwd,\n config.follow_referrals)\n ldap_conn.create_conn()\n if not ldap_conn.conn:\n logger.warning('connect ldap server [%s] failed.' % config.\n user_dn)\n return\n if config.use_page_result:\n results = ldap_conn.paged_search(grp_dn_pair.dn, SCOPE_BASE,\n search_filter, [config.group_uuid_attr])\n else:\n results = ldap_conn.search(grp_dn_pair.dn, SCOPE_BASE,\n search_filter, [config.group_uuid_attr])\n ldap_conn.unbind_conn()\n results = bytes2str(results)\n if not results:\n continue\n else:\n uuid = results[0][1][config.group_uuid_attr][0]\n add_group_uuid_pair(grp_dn_pair.group_id, uuid)\n\n\nclass LdapSync(Thread):\n\n def __init__(self, settings):\n Thread.__init__(self)\n self.settings = settings\n\n def run(self):\n if self.settings.enable_group_sync:\n migrate_dn_pairs(settings=self.settings)\n self.start_sync()\n self.show_sync_result()\n\n def show_sync_result(self):\n pass\n\n def start_sync(self):\n data_ldap = self.get_data_from_ldap()\n if data_ldap is None:\n return\n data_db = self.get_data_from_db()\n if data_db is None:\n return\n self.sync_data(data_db, data_ldap)\n\n def get_data_from_db(self):\n return None\n\n def get_data_from_ldap(self):\n ret = {}\n for config in self.settings.ldap_configs:\n cur_ret = self.get_data_from_ldap_by_server(config)\n if cur_ret is None:\n return None\n for key in cur_ret.keys():\n if key not in ret:\n ret[key] = cur_ret[key]\n ret[key].config = config\n return ret\n\n def get_data_from_ldap_by_server(self, config):\n return None\n\n def sync_data(self, data_db, data_ldap):\n pass\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\ndef migrate_dn_pairs(settings):\n grp_dn_pairs = get_group_dn_pairs()\n if grp_dn_pairs is None:\n logger.warning(\n 'get group dn pairs from db failed when migrate dn pairs.')\n return\n grp_dn_pairs.reverse()\n for grp_dn_pair in grp_dn_pairs:\n for config in settings.ldap_configs:\n search_filter = '(objectClass=*)'\n ldap_conn = LdapConn(config.host, config.user_dn, config.passwd,\n config.follow_referrals)\n ldap_conn.create_conn()\n if not ldap_conn.conn:\n logger.warning('connect ldap server [%s] failed.' % config.\n user_dn)\n return\n if config.use_page_result:\n results = ldap_conn.paged_search(grp_dn_pair.dn, SCOPE_BASE,\n search_filter, [config.group_uuid_attr])\n else:\n results = ldap_conn.search(grp_dn_pair.dn, SCOPE_BASE,\n search_filter, [config.group_uuid_attr])\n ldap_conn.unbind_conn()\n results = bytes2str(results)\n if not results:\n continue\n else:\n uuid = results[0][1][config.group_uuid_attr][0]\n add_group_uuid_pair(grp_dn_pair.group_id, uuid)\n\n\nclass LdapSync(Thread):\n\n def __init__(self, settings):\n Thread.__init__(self)\n self.settings = settings\n\n def run(self):\n if self.settings.enable_group_sync:\n migrate_dn_pairs(settings=self.settings)\n self.start_sync()\n self.show_sync_result()\n\n def show_sync_result(self):\n pass\n\n def start_sync(self):\n data_ldap = self.get_data_from_ldap()\n if data_ldap is None:\n return\n data_db = self.get_data_from_db()\n if data_db is None:\n return\n self.sync_data(data_db, data_ldap)\n\n def get_data_from_db(self):\n return None\n\n def get_data_from_ldap(self):\n ret = {}\n for config in self.settings.ldap_configs:\n cur_ret = self.get_data_from_ldap_by_server(config)\n if cur_ret is None:\n return None\n for key in cur_ret.keys():\n if key not in ret:\n ret[key] = cur_ret[key]\n ret[key].config = config\n return ret\n\n def get_data_from_ldap_by_server(self, config):\n return None\n\n def sync_data(self, data_db, 
data_ldap):\n pass\n",
"step-4": "import logging\nfrom threading import Thread\nfrom ldap import SCOPE_BASE\nfrom seafevents.ldap_syncer.ldap_conn import LdapConn\nfrom seafevents.ldap_syncer.utils import bytes2str, add_group_uuid_pair\nfrom seaserv import get_group_dn_pairs\nlogger = logging.getLogger(__name__)\n\n\ndef migrate_dn_pairs(settings):\n grp_dn_pairs = get_group_dn_pairs()\n if grp_dn_pairs is None:\n logger.warning(\n 'get group dn pairs from db failed when migrate dn pairs.')\n return\n grp_dn_pairs.reverse()\n for grp_dn_pair in grp_dn_pairs:\n for config in settings.ldap_configs:\n search_filter = '(objectClass=*)'\n ldap_conn = LdapConn(config.host, config.user_dn, config.passwd,\n config.follow_referrals)\n ldap_conn.create_conn()\n if not ldap_conn.conn:\n logger.warning('connect ldap server [%s] failed.' % config.\n user_dn)\n return\n if config.use_page_result:\n results = ldap_conn.paged_search(grp_dn_pair.dn, SCOPE_BASE,\n search_filter, [config.group_uuid_attr])\n else:\n results = ldap_conn.search(grp_dn_pair.dn, SCOPE_BASE,\n search_filter, [config.group_uuid_attr])\n ldap_conn.unbind_conn()\n results = bytes2str(results)\n if not results:\n continue\n else:\n uuid = results[0][1][config.group_uuid_attr][0]\n add_group_uuid_pair(grp_dn_pair.group_id, uuid)\n\n\nclass LdapSync(Thread):\n\n def __init__(self, settings):\n Thread.__init__(self)\n self.settings = settings\n\n def run(self):\n if self.settings.enable_group_sync:\n migrate_dn_pairs(settings=self.settings)\n self.start_sync()\n self.show_sync_result()\n\n def show_sync_result(self):\n pass\n\n def start_sync(self):\n data_ldap = self.get_data_from_ldap()\n if data_ldap is None:\n return\n data_db = self.get_data_from_db()\n if data_db is None:\n return\n self.sync_data(data_db, data_ldap)\n\n def get_data_from_db(self):\n return None\n\n def get_data_from_ldap(self):\n ret = {}\n for config in self.settings.ldap_configs:\n cur_ret = self.get_data_from_ldap_by_server(config)\n if cur_ret is None:\n 
return None\n for key in cur_ret.keys():\n if key not in ret:\n ret[key] = cur_ret[key]\n ret[key].config = config\n return ret\n\n def get_data_from_ldap_by_server(self, config):\n return None\n\n def sync_data(self, data_db, data_ldap):\n pass\n",
"step-5": "#coding: utf-8\nimport logging\nfrom threading import Thread\n\nfrom ldap import SCOPE_BASE\nfrom seafevents.ldap_syncer.ldap_conn import LdapConn\nfrom seafevents.ldap_syncer.utils import bytes2str, add_group_uuid_pair\n\nfrom seaserv import get_group_dn_pairs\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef migrate_dn_pairs(settings):\n grp_dn_pairs = get_group_dn_pairs()\n if grp_dn_pairs is None:\n logger.warning('get group dn pairs from db failed when migrate dn pairs.')\n return\n\n grp_dn_pairs.reverse()\n for grp_dn_pair in grp_dn_pairs:\n for config in settings.ldap_configs:\n search_filter = '(objectClass=*)'\n ldap_conn = LdapConn(config.host, config.user_dn, config.passwd, config.follow_referrals)\n ldap_conn.create_conn()\n if not ldap_conn.conn:\n logger.warning('connect ldap server [%s] failed.' % config.user_dn)\n return\n\n if config.use_page_result:\n results = ldap_conn.paged_search(grp_dn_pair.dn, SCOPE_BASE,\n search_filter,\n [config.group_uuid_attr])\n else:\n results = ldap_conn.search(grp_dn_pair.dn, SCOPE_BASE,\n search_filter,\n [config.group_uuid_attr])\n ldap_conn.unbind_conn()\n results = bytes2str(results)\n\n if not results:\n continue\n else:\n uuid = results[0][1][config.group_uuid_attr][0]\n add_group_uuid_pair(grp_dn_pair.group_id, uuid)\n\n\nclass LdapSync(Thread):\n def __init__(self, settings):\n Thread.__init__(self)\n self.settings = settings\n\n def run(self):\n if self.settings.enable_group_sync:\n migrate_dn_pairs(settings=self.settings)\n self.start_sync()\n self.show_sync_result()\n\n def show_sync_result(self):\n pass\n\n def start_sync(self):\n data_ldap = self.get_data_from_ldap()\n if data_ldap is None:\n return\n\n data_db = self.get_data_from_db()\n if data_db is None:\n return\n\n self.sync_data(data_db, data_ldap)\n\n def get_data_from_db(self):\n return None\n\n def get_data_from_ldap(self):\n ret = {}\n\n for config in self.settings.ldap_configs:\n cur_ret = 
self.get_data_from_ldap_by_server(config)\n # If get data from one server failed, then the result is failed\n if cur_ret is None:\n return None\n for key in cur_ret.keys():\n if key not in ret:\n ret[key] = cur_ret[key]\n ret[key].config = config\n\n return ret\n\n def get_data_from_ldap_by_server(self, config):\n return None\n\n def sync_data(self, data_db, data_ldap):\n pass\n",
"step-ids": [
9,
10,
11,
12,
13
]
}
|
[
9,
10,
11,
12,
13
] |
# ============> Plain integer arithmetic <============
x = 7
x = 7 // 3 # floor division: 7 // 3 == 2, type int
#x = 7 / 3 # true division: 7 / 3 == 2.3333333333333335, type float
#x = 7 % 3 # remainder: 7 % 3 == 1, type int
#print("x is {}" .format(x))
#print(type(x))
# ============> Floating-point accuracy vs precision <============
# .1 + .1 + .1 - .3 evaluates to 5.551115123125783e-17, not exactly 0,
# because binary floats cannot represent 0.1 exactly; the decimal module
# works around this with exact decimal arithmetic.
from decimal import *
x = .1 + .1 + .1 -.3
print("x is {}" .format(x))
print(type(x))
# ============> Solving the accuracy problem with Decimal <============
# The result below is exactly 0.00 and its type is decimal.Decimal.
# Prefer Decimal (constructed from strings) when dealing with money.
from decimal import *
a = Decimal('.10') # constructed from a string so the value is exact
b = Decimal('.30')
x = a + a + a - b
print("x is {}" .format(x))
print(type(x))
|
normal
|
{
"blob_id": "62a7958ba5ebb6da866d6ef156e52136df22f235",
"index": 107,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('x is {}'.format(x))\nprint(type(x))\n<mask token>\nprint('x is {}'.format(x))\nprint(type(x))\n",
"step-3": "x = 7\nx = 7 // 3\n<mask token>\nx = 0.1 + 0.1 + 0.1 - 0.3\nprint('x is {}'.format(x))\nprint(type(x))\n<mask token>\na = Decimal('.10')\nb = Decimal('.30')\nx = a + a + a - b\nprint('x is {}'.format(x))\nprint(type(x))\n",
"step-4": "x = 7\nx = 7 // 3\nfrom decimal import *\nx = 0.1 + 0.1 + 0.1 - 0.3\nprint('x is {}'.format(x))\nprint(type(x))\nfrom decimal import *\na = Decimal('.10')\nb = Decimal('.30')\nx = a + a + a - b\nprint('x is {}'.format(x))\nprint(type(x))\n",
"step-5": "\n# =============>This is a Normal mathematical tasks<==========\nx = 7\nx = 7 // 3 # rounds the number = 2 ans class int\n#x = 7 / 3 # gives the floating number = 2.33333335 ans class float\n#x = 7 % 3 # gives the reminder = 1 ans class int\n\n#print(\"x is {}\" .format(x))\n#print(type(x))\n# ================>This is how to add decimal accuracy vs procession<================\n# x = .1 + .1 + .1 -.3 the answer is 5.551115123125783 because python doe not understand accuracy and precision to overcome do the import * from decimal\nfrom decimal import *\nx = .1 + .1 + .1 -.3\nprint(\"x is {}\" .format(x))\nprint(type(x))\n# =============>How to solve the above problem accuracy<===============\n# And the type is class decimal.Decimal\n# When dealing with money use this method\nfrom decimal import *\na = Decimal('.10') # it will conver from string\nb = Decimal('.30')\nx = a + a + a - b\nprint(\"x is {}\" .format(x))\nprint(type(x))\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Flask app for testing the OpenID Connect extension.
"""
import json
from unittest.mock import MagicMock, Mock
from flask import Flask, g
import flask_oidc
from tests.json_snippets import *
oidc = None
def index():
    """Plain-text landing view; reachable only with a valid session."""
    headers = {'Content-Type': 'text/plain; charset=utf-8'}
    return 'too many secrets', 200, headers
def get_at():
    """Expose the current user's OIDC access token as plain text."""
    token = oidc.get_access_token()
    return token, 200, {'Content-Type': 'text/plain; charset=utf-8'}
def get_rt():
    """Expose the current user's OIDC refresh token as plain text."""
    token = oidc.get_refresh_token()
    return token, 200, {'Content-Type': 'text/plain; charset=utf-8'}
def get_test1():
    """Protected endpoint body used to exercise Keycloak resource 'test1'."""
    headers = {'Content-Type': 'text/plain; charset=utf-8'}
    return 'successful call to test1', 200, headers
def get_test2():
    """Protected endpoint body used to exercise Keycloak resource 'test2'."""
    headers = {'Content-Type': 'text/plain; charset=utf-8'}
    return 'successful call to test2', 200, headers
def get_test3():
    """Protected endpoint body used to exercise Keycloak resource 'test3'."""
    headers = {'Content-Type': 'text/plain; charset=utf-8'}
    return 'successful call to test3', 200, headers
def get_unprotected():
    """Endpoint body registered with authorization enforcement disabled."""
    headers = {'Content-Type': 'text/plain; charset=utf-8'}
    return 'successful call to unprotected', 200, headers
def raw_api():
    """Return the token info that accept_token stored on flask.g."""
    token_info = g.oidc_token_info
    return {'token': token_info}
def api():
    """JSON-rendered variant of raw_api()."""
    payload = raw_api()
    return json.dumps(payload)
def get_test4():
    """Protected endpoint body wired with a custom validation callback."""
    headers = {'Content-Type': 'text/plain; charset=utf-8'}
    return 'successful call to test4', 200, headers
callback_method = Mock()
def create_app(config, oidc_overrides=None):
    """Create the Flask test app and wire up the OIDC-protected routes.

    config: Flask config mapping applied to the app.
    oidc_overrides: optional kwargs forwarded to OpenIDConnect().
    Side effect: stores the extension in the module-level `oidc` global.
    Returns the configured Flask app.
    """
    global oidc

    app = Flask(__name__)
    app.config.update(config)
    if oidc_overrides is None:
        oidc_overrides = {}
    app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)
    oidc = app.oidc

    # Session-checked views.
    app.route('/')(app.oidc.check(index))
    app.route('/at')(app.oidc.check(get_at))
    app.route('/rt')(app.oidc.check(get_rt))

    # Check standalone usage
    rendered = app.oidc.accept_token(True, ['openid'], auth_header_key='Authorization')(api)
    app.route('/api', methods=['GET', 'POST'])(rendered)

    # Keycloak authorization test endpoints (/test1..4, /unprotected).
    configure_keycloak_test_uris(app)

    # Check combination with an external API renderer like Flask-RESTful
    unrendered = app.oidc.accept_token(True, ['openid'], render_errors=False, auth_header_key='Authorization')(raw_api)

    def externally_rendered_api(*args, **kwds):
        # JSON-encode whatever the unrendered view produced, preserving the
        # (body, status, headers) tuple shape when present.
        inner_response = unrendered(*args, **kwds)
        if isinstance(inner_response, tuple):
            raw_response, response_code, headers = inner_response
            rendered_response = json.dumps(raw_response), response_code, headers
        else:
            rendered_response = json.dumps(inner_response)
        return rendered_response

    app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api)

    return app
def configure_keycloak_test_uris(app):
    """Register the Keycloak-authorization test endpoints on *app*."""
    def protect(view, **kwargs):
        # Wrap a view with authorization enforcement enabled.
        return app.oidc.check_authorization(True, **kwargs)(view)

    for path, view in (('/test1', get_test1), ('/test2', get_test2), ('/test3', get_test3)):
        app.route(path, methods=['GET', 'POST'])(protect(view))

    # /test4 additionally runs the shared (mocked) validation callback.
    callback_method.return_value = True
    app.route('/test4', methods=['GET', 'POST'])(protect(get_test4, validation_func=callback_method))

    # /unprotected goes through the decorator with enforcement disabled.
    unprotected = app.oidc.check_authorization(False)(get_unprotected)
    app.route('/unprotected', methods=['GET'])(unprotected)
def _configure_mock_object(test_app):
    """Replace the OIDC extension's Keycloak plumbing with mocks.

    Token validation always succeeds, authorize() returns the canned
    valid RPT and get_access_token() returns the canned access token.
    """
    ext = test_app.oidc
    ext.validate_token = Mock(return_value=True)
    ext.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)
    kc = ext.keycloakApi
    kc.authorize = Mock(return_value=valid_rpt)
    kc.get_access_token = Mock(return_value=access_token)
    kc._get_realm_pub_key = Mock(return_value="abc")
def configure_mock_object_version1(test_app):
    """Mock setup: JWT grants test1+test2; resource lookups yield test1 then test2."""
    _configure_mock_object(test_app)
    kc = test_app.oidc.keycloakApi
    kc.jwt_decode = Mock(return_value=decoded_jwt_with_permission_test1_and_test2)
    kc.get_resource_info = Mock(side_effect=[resource_test1, resource_test2])
def configure_mock_version2(test_app):
    """Mock setup: JWT grants permission test3 only."""
    _configure_mock_object(test_app)
    kc = test_app.oidc.keycloakApi
    kc.jwt_decode.return_value = decoded_jwt_with_permission_test3
    kc.get_resource_info = Mock(side_effect=[resource_test3])
def configure_mock_version3(test_app):
    """Mock setup where JWT decoding yields None (i.e. decoding fails)."""
    _configure_mock_object(test_app)
    api = test_app.oidc.keycloakApi
    api.jwt_decode.return_value = None
    api.get_resource_info = Mock(side_effect=[resource_test3])
|
normal
|
{
"blob_id": "ef3fa538828315845de5e2f7d4949f690e44276e",
"index": 6009,
"step-1": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api\n )\n return app\n\n\n<mask token>\n\n\ndef configure_mock_object_version1(test_app):\n 
_configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api\n )\n return app\n\n\n<mask token>\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = 
Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\n<mask token>\n\n\ndef get_test4():\n return 'successful call to test4', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', methods=['GET', 
'POST'])(externally_rendered_api\n )\n return app\n\n\ndef configure_keycloak_test_uris(app):\n test1 = app.oidc.check_authorization(True)(get_test1)\n app.route('/test1', methods=['GET', 'POST'])(test1)\n test2 = app.oidc.check_authorization(True)(get_test2)\n app.route('/test2', methods=['GET', 'POST'])(test2)\n test3 = app.oidc.check_authorization(True)(get_test3)\n app.route('/test3', methods=['GET', 'POST'])(test3)\n callback_method.return_value = True\n test4 = app.oidc.check_authorization(True, validation_func=callback_method\n )(get_test4)\n app.route('/test4', methods=['GET', 'POST'])(test4)\n unprotected = app.oidc.check_authorization(False)(get_unprotected)\n app.route('/unprotected', methods=['GET'])(unprotected)\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = 
[resource_test3]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\ndef api():\n return json.dumps(raw_api())\n\n\ndef get_test4():\n return 'successful call to test4', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', 
methods=['GET', 'POST'])(externally_rendered_api\n )\n return app\n\n\ndef configure_keycloak_test_uris(app):\n test1 = app.oidc.check_authorization(True)(get_test1)\n app.route('/test1', methods=['GET', 'POST'])(test1)\n test2 = app.oidc.check_authorization(True)(get_test2)\n app.route('/test2', methods=['GET', 'POST'])(test2)\n test3 = app.oidc.check_authorization(True)(get_test3)\n app.route('/test3', methods=['GET', 'POST'])(test3)\n callback_method.return_value = True\n test4 = app.oidc.check_authorization(True, validation_func=callback_method\n )(get_test4)\n app.route('/test4', methods=['GET', 'POST'])(test4)\n unprotected = app.oidc.check_authorization(False)(get_unprotected)\n app.route('/unprotected', methods=['GET'])(unprotected)\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = 
[resource_test3]\n\n\ndef configure_mock_version3(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = None\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n",
"step-5": "\"\"\"\nFlask app for testing the OpenID Connect extension.\n\"\"\"\n\nimport json\nfrom unittest.mock import MagicMock, Mock\n\nfrom flask import Flask, g\nimport flask_oidc\nfrom tests.json_snippets import *\n\noidc = None\n\n\ndef index():\n return \"too many secrets\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_rt():\n return oidc.get_refresh_token(), 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test1():\n return \"successful call to test1\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test2():\n return \"successful call to test2\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test3():\n return \"successful call to test3\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_unprotected():\n return \"successful call to unprotected\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\ndef api():\n return json.dumps(raw_api())\n\n\ndef get_test4():\n return \"successful call to test4\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ncallback_method = Mock()\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n # Check standalone usage\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key='Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n\n configure_keycloak_test_uris(app)\n\n # Check combination with an external API renderer like Flask-RESTful\n unrendered = 
app.oidc.accept_token(True, ['openid'], render_errors=False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api)\n return app\n\n\ndef configure_keycloak_test_uris(app):\n test1 = app.oidc.check_authorization(True)(get_test1)\n app.route('/test1', methods=['GET', 'POST'])(test1)\n test2 = app.oidc.check_authorization(True)(get_test2)\n app.route('/test2', methods=['GET', 'POST'])(test2)\n test3 = app.oidc.check_authorization(True)(get_test3)\n app.route('/test3', methods=['GET', 'POST'])(test3)\n\n callback_method.return_value = True\n\n test4 = app.oidc.check_authorization(True, validation_func=callback_method)(get_test4)\n app.route('/test4', methods=['GET', 'POST'])(test4)\n\n unprotected = app.oidc.check_authorization(False)(get_unprotected)\n app.route('/unprotected', methods=['GET'])(unprotected)\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = \"abc\"\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = 
decoded_jwt_with_permission_test1_and_test2\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1, resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = decoded_jwt_with_permission_test3\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\ndef configure_mock_version3(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = None\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n",
"step-ids": [
10,
11,
13,
15,
19
]
}
|
[
10,
11,
13,
15,
19
] |
'''
Given a sentence containing the letters "с" and "т", determine which of them
occurs later when scanning the text left to right. If a letter occurs several
times, its LAST occurrence must be considered. A conditional loop must not be
used.
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == '__main__':
    text = input("Введите предложение: ")

    # The task asks for the LAST occurrence of each letter, so use rindex()
    # instead of index() (which returns the first occurrence).
    x1 = text.rindex("с")
    x2 = text.rindex("т")
    if x1 > x2:
        # Fixed typo in the message: "Бурква" -> "Буква".
        print("Буква 'с' встречается позже")
    else:
        print("Буква 'т' встречается позже")
|
normal
|
{
"blob_id": "4bad45f8c135463fadea9b3eed52ab045a51e8db",
"index": 2520,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n text = input('Введите предложение: ')\n x1 = text.index('с')\n x2 = text.index('т')\n if x1 > x2:\n print(\"Бурква 'с' встречается позже\")\n else:\n print(\"Бурква 'т' встречается позже\")\n",
"step-3": "'''\nДано предложение, в котором имеются буквы с и т. Определить, какая из них встречается\nпозже (при просмотре слова слева направо). Если таких букв несколько, то должны\nучитываться последние из них. Оператор цикла с условием не использовать.\n'''\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nif __name__ == '__main__':\n text = input(\"Введите предложение: \")\n\n x1 = text.index(\"с\")\n x2 = text.index(\"т\")\n if x1 > x2:\n print(\"Бурква 'с' встречается позже\")\n else:\n print(\"Бурква 'т' встречается позже\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
from django import template
from django.conf import settings
from django.utils.html import format_html
# Tag registry; Django discovers the tags defined in this module through it.
register = template.Library()


@register.simple_tag
def website_title():
    """Return settings.WEBSITE_TITLE for use as a template tag."""
    return settings.WEBSITE_TITLE
def split_page(result_obj):
    """Render Bootstrap pagination markup for a Django Page object.

    At most 3 page links are shown on either side of the current page, plus
    previous/next arrows when the corresponding page exists.

    :param result_obj: a django.core.paginator.Page instance
    :return: the pagination HTML as a plain string
    """
    parts = ["<nav>", "<ul class='pagination pull-right'>"]

    if result_obj.has_previous():
        parts.append("<li>")
        parts.append("<a href='?page=" + str(result_obj.previous_page_number())
                     + "' aria-label='Previous'>")
        parts.append("<span aria-hidden='true'>&laquo;</span>")
        parts.append("</a></li>")

    for i in result_obj.paginator.page_range:
        # Only render links within 3 pages of the current one.
        if abs(result_obj.number - i) <= 3:
            if i == result_obj.number:
                parts.append("<li class='active'><a href='?page=" + str(i)
                             + "'>" + str(i) + "</a></li>")
            else:
                parts.append("<li ><a href='?page=" + str(i) + "'>" + str(i)
                             + "</a></li>")

    if result_obj.has_next():
        parts.append("<li><a href='?page=" + str(result_obj.next_page_number())
                     + "' aria-label='Next'>")
        parts.append("<span aria-hidden='true'>&raquo;</span></a></li>")

    # BUG FIX: previously the closing </ul></nav> was only emitted when a
    # "next" page existed, producing unclosed tags on the last page.
    parts.append("</ul></nav>")
    return "".join(parts)
@register.simple_tag
def test(string):
    """Echo tag: return the given argument unchanged."""
    return string
|
normal
|
{
"blob_id": "c2c51dcd05c21e91e591de25fc2de034c88c48a1",
"index": 9052,
"step-1": "<mask token>\n\n\ndef split_page(result_obj):\n \"\"\"\n 分页模块,后台传入一个分页结果集就可以\n :param result_obj:\n :return:\n \"\"\"\n return_str = '<nav>'\n return_str += \"<ul class='pagination pull-right'>\"\n if result_obj.has_previous():\n return_str += '<li>'\n return_str += \"<a href='?page=\" + str(result_obj.previous_page_number()\n ) + \"' aria-label='Previous'>\"\n return_str += \"<span aria-hidden='true'>«</span>\"\n return_str += '</a></li>'\n for i in result_obj.paginator.page_range:\n hide_page_num = abs(result_obj.number - i)\n if hide_page_num <= 3:\n return_str += '<li '\n if i == result_obj.number:\n return_str += \"class='active'><a href='?page=\" + str(i\n ) + \"'>\" + str(i) + '</a></li>'\n else:\n return_str += \"><a href='?page=\" + str(i) + \"'>\" + str(i\n ) + '</a></li>'\n if result_obj.has_next():\n return_str += \"<li><a href='?page=\" + str(result_obj.next_page_number()\n ) + \"' aria-label='Next'>\"\n return_str += (\n \"<span aria-hidden='true'>»</span></a></li></ul></nav>\")\n return return_str\n\n\[email protected]_tag\ndef test(string):\n return string\n",
"step-2": "<mask token>\n\n\[email protected]_tag\ndef website_title():\n return settings.WEBSITE_TITLE\n\n\ndef split_page(result_obj):\n \"\"\"\n 分页模块,后台传入一个分页结果集就可以\n :param result_obj:\n :return:\n \"\"\"\n return_str = '<nav>'\n return_str += \"<ul class='pagination pull-right'>\"\n if result_obj.has_previous():\n return_str += '<li>'\n return_str += \"<a href='?page=\" + str(result_obj.previous_page_number()\n ) + \"' aria-label='Previous'>\"\n return_str += \"<span aria-hidden='true'>«</span>\"\n return_str += '</a></li>'\n for i in result_obj.paginator.page_range:\n hide_page_num = abs(result_obj.number - i)\n if hide_page_num <= 3:\n return_str += '<li '\n if i == result_obj.number:\n return_str += \"class='active'><a href='?page=\" + str(i\n ) + \"'>\" + str(i) + '</a></li>'\n else:\n return_str += \"><a href='?page=\" + str(i) + \"'>\" + str(i\n ) + '</a></li>'\n if result_obj.has_next():\n return_str += \"<li><a href='?page=\" + str(result_obj.next_page_number()\n ) + \"' aria-label='Next'>\"\n return_str += (\n \"<span aria-hidden='true'>»</span></a></li></ul></nav>\")\n return return_str\n\n\[email protected]_tag\ndef test(string):\n return string\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\[email protected]_tag\ndef website_title():\n return settings.WEBSITE_TITLE\n\n\ndef split_page(result_obj):\n \"\"\"\n 分页模块,后台传入一个分页结果集就可以\n :param result_obj:\n :return:\n \"\"\"\n return_str = '<nav>'\n return_str += \"<ul class='pagination pull-right'>\"\n if result_obj.has_previous():\n return_str += '<li>'\n return_str += \"<a href='?page=\" + str(result_obj.previous_page_number()\n ) + \"' aria-label='Previous'>\"\n return_str += \"<span aria-hidden='true'>«</span>\"\n return_str += '</a></li>'\n for i in result_obj.paginator.page_range:\n hide_page_num = abs(result_obj.number - i)\n if hide_page_num <= 3:\n return_str += '<li '\n if i == result_obj.number:\n return_str += \"class='active'><a href='?page=\" + str(i\n ) + \"'>\" + str(i) + '</a></li>'\n else:\n return_str += \"><a href='?page=\" + str(i) + \"'>\" + str(i\n ) + '</a></li>'\n if result_obj.has_next():\n return_str += \"<li><a href='?page=\" + str(result_obj.next_page_number()\n ) + \"' aria-label='Next'>\"\n return_str += (\n \"<span aria-hidden='true'>»</span></a></li></ul></nav>\")\n return return_str\n\n\[email protected]_tag\ndef test(string):\n return string\n",
"step-4": "from django import template\nfrom django.conf import settings\nfrom django.utils.html import format_html\nregister = template.Library()\n\n\[email protected]_tag\ndef website_title():\n return settings.WEBSITE_TITLE\n\n\ndef split_page(result_obj):\n \"\"\"\n 分页模块,后台传入一个分页结果集就可以\n :param result_obj:\n :return:\n \"\"\"\n return_str = '<nav>'\n return_str += \"<ul class='pagination pull-right'>\"\n if result_obj.has_previous():\n return_str += '<li>'\n return_str += \"<a href='?page=\" + str(result_obj.previous_page_number()\n ) + \"' aria-label='Previous'>\"\n return_str += \"<span aria-hidden='true'>«</span>\"\n return_str += '</a></li>'\n for i in result_obj.paginator.page_range:\n hide_page_num = abs(result_obj.number - i)\n if hide_page_num <= 3:\n return_str += '<li '\n if i == result_obj.number:\n return_str += \"class='active'><a href='?page=\" + str(i\n ) + \"'>\" + str(i) + '</a></li>'\n else:\n return_str += \"><a href='?page=\" + str(i) + \"'>\" + str(i\n ) + '</a></li>'\n if result_obj.has_next():\n return_str += \"<li><a href='?page=\" + str(result_obj.next_page_number()\n ) + \"' aria-label='Next'>\"\n return_str += (\n \"<span aria-hidden='true'>»</span></a></li></ul></nav>\")\n return return_str\n\n\[email protected]_tag\ndef test(string):\n return string\n",
"step-5": "#!/usr/bin/env python\nfrom django import template\nfrom django.conf import settings\nfrom django.utils.html import format_html\n\n\nregister = template.Library()\n\[email protected]_tag\ndef website_title():\n return settings.WEBSITE_TITLE\n\n\ndef split_page(result_obj):\n \"\"\"\n 分页模块,后台传入一个分页结果集就可以\n :param result_obj:\n :return:\n \"\"\"\n return_str = \"<nav>\"\n return_str += \"<ul class='pagination pull-right'>\"\n if result_obj.has_previous():\n return_str += \"<li>\"\n return_str += \"<a href='?page=\" + str(result_obj.previous_page_number()) + \"' aria-label='Previous'>\"\n return_str += \"<span aria-hidden='true'>«</span>\"\n return_str += \"</a></li>\"\n\n for i in result_obj.paginator.page_range:\n # print(i,result_obj.paginator.page_range,result_obj.number)\n hide_page_num = abs(result_obj.number - i)\n if hide_page_num <= 3: # 3为当前页前后显示多少个\n return_str += \"<li \"\n if i == result_obj.number:\n return_str += \"class='active'><a href='?page=\" + str(i) + \"'>\" + str(i) + \"</a></li>\"\n else:\n return_str += \"><a href='?page=\" + str(i) + \"'>\" + str(i) + \"</a></li>\"\n\n if result_obj.has_next():\n return_str += \"<li><a href='?page=\" + str(result_obj.next_page_number()) + \"' aria-label='Next'>\"\n return_str += \"<span aria-hidden='true'>»</span></a></li></ul></nav>\"\n\n #return format_html(return_str)\n return return_str\n\n\[email protected]_tag\ndef test(string):\n return string\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# coding: utf-8
# In[5]:
import os
import numpy as np
import pandas as pd
from PIL import Image
import argparse
import time
import shutil
from sklearn.metrics import accuracy_score, mean_squared_error
import torch
import torch.optim
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.models as models
import matplotlib.image as mpimg
class ProtestDataset(Dataset):
    """Dataset backed by a tab-separated annotation file plus an image dir.

    Each sample is {"image": ..., "label": {...}} where the label dict holds
    the protest flag (column 1), violence score (column 2) and visual
    attributes (columns 3+).
    """

    def __init__(self, txt_file, img_dir, transform=None):
        """
        Args:
            txt_file: path to the tab-separated annotation file; '-' cells
                (missing annotations) are replaced by 0.
            img_dir: directory containing the images named in column 0.
            transform: optional transform applied to each image.
        """
        self.label_frame = pd.read_csv(txt_file, delimiter="\t").replace('-', 0)
        self.img_dir = img_dir
        self.transform = transform

    def __len__(self):
        # One sample per annotation row.
        return len(self.label_frame)

    def __getitem__(self, idx):
        row = self.label_frame.iloc[idx]
        image = pil_loader(os.path.join(self.img_dir, row.iloc[0]))

        label = {
            'protest': row.iloc[1:2].values.astype('float'),
            'violence': row.iloc[2:3].values.astype('float'),
            'visattr': row.iloc[3:].values.astype('float'),
        }
        sample = {"image": image, "label": label}
        if self.transform is not None:
            sample["image"] = self.transform(sample["image"])
        return sample
class ProtestDatasetEval(Dataset):
    """Inference-only dataset: iterates over every image in a directory.

    No annotation file is required; each sample carries the image path so
    callers can match predictions back to files.
    """

    def __init__(self, img_dir):
        """
        Args:
            img_dir: directory with the images to evaluate.
        """
        self.img_dir = img_dir
        # NOTE(review): Grayscale(num_output_channels=1) yields a 1-channel
        # tensor while Normalize uses 3-channel ImageNet stats -- confirm this
        # combination actually runs (marked "testtest" in the original).
        self.transform = transforms.Compose([
            transforms.Resize(125),
            transforms.CenterCrop(100),
            transforms.Grayscale(num_output_channels=1),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        self.img_list = sorted(os.listdir(img_dir))

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        path = os.path.join(self.img_dir, self.img_list[idx])
        sample = {"imgpath": path, "image": self.transform(pil_loader(path))}
        return sample
class FinalLayer(nn.Module):
    """Replacement head for resnet50: 2048 features -> 12 sigmoid outputs.

    Output layout (see the loss code): column 0 = protest, column 1 =
    violence, columns 2-11 = visual attributes.
    """

    def __init__(self):
        super(FinalLayer, self).__init__()
        # Keep the fc/sigmoid attribute names: checkpoints reference them.
        self.fc = nn.Linear(2048, 12)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        return self.sigmoid(self.fc(x))
def pil_loader(path):
    """Open *path* with PIL and return it converted to RGB.

    convert('RGB') forces the pixel data to be read while the file handle is
    still open; the handle is closed before returning.
    """
    with open(path, 'rb') as fp:
        return Image.open(fp).convert('RGB')
def modified_resnet():
    """Return a pretrained resnet50 whose fc head is replaced by FinalLayer."""
    net = models.resnet50(pretrained=True)
    net.fc = FinalLayer()
    return net
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times."""
        self.val = val
        self.sum += val * n
        self.count += n
        # Guard against division by zero when only n == 0 updates occurred.
        if self.count:
            self.avg = self.sum / self.count
class Lighting(object):
    """AlexNet-style PCA ("fancy") lighting noise augmentation.

    https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py
    """

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd  # stddev of the per-channel noise scale
        self.eigval = eigval      # PCA eigenvalues, 3 elements
        self.eigvec = eigvec      # PCA eigenvectors, 3x3

    def __call__(self, img):
        if self.alphastd == 0:
            # Augmentation disabled: pass the image through untouched.
            return img
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        scaled = (self.eigvec.type_as(img).clone()
                  .mul(alpha.view(1, 3).expand(3, 3))
                  .mul(self.eigval.view(1, 3).expand(3, 3)))
        rgb = scaled.sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img))
# for indexing output of the model:
# column 0 = protest score, column 1 = violence score,
# columns 2-11 = the ten visual-attribute scores (see calculate_loss)
protest_idx = Variable(torch.LongTensor([0]))
violence_idx = Variable(torch.LongTensor([1]))
visattr_idx = Variable(torch.LongTensor(range(2,12)))
# lowest validation loss seen so far; updated across epochs in main()
best_loss = float("inf")
def calculate_loss(output, target, criterions, weights = [1, 10, 5]):
    """Compute the weighted per-task losses and batch metrics.

    Args:
        output: model output of shape (batch, 12); column 0 is the protest
            score, column 1 the violence score, columns 2-11 the visual
            attributes (indexed via the module-level protest_idx /
            violence_idx / visattr_idx Variables).
        target: dict of Variables with keys 'protest', 'violence', 'visattr'.
        criterions: [protest BCE, violence MSE, visattr BCE] loss modules.
        weights: per-task loss weights (protest, violence, visattr).

    Returns:
        (losses, scores, N_protest): the list of weighted task losses, a
        dict with 'protest_acc', 'violence_mse', 'visattr_acc', and the
        number of protest images in the batch.

    NOTE(review): `weights` is a mutable default argument shared across
    calls; it is never mutated here, so this is safe as written.
    """
    # number of protest images (violence/visattr losses only apply to these)
    N_protest = int(target['protest'].data.sum())
    batch_size = len(target['protest'])  # NOTE(review): unused
    if N_protest == 0:
        # if no protest image in target, only the protest head has a
        # meaningful target, so compute just that one loss
        outputs = [None]
        # protest output
        outputs[0] = output.index_select(1, protest_idx)
        targets = [None]
        # protest target
        targets[0] = target['protest'].float()
        losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(1)]
        scores = {}
        scores['protest_acc'] = accuracy_score((outputs[0]).data.round(), targets[0].data)
        scores['violence_mse'] = 0
        scores['visattr_acc'] = 0
        return losses, scores, N_protest
    # mask of non-protest rows; used below to zero-fill their violence and
    # visual-attribute outputs so they contribute nothing to those losses
    not_protest_mask = (1 - target['protest']).byte()
    outputs = [None] * 4
    # protest output
    outputs[0] = output.index_select(1, protest_idx)
    # violence output (zeroed for non-protest images)
    outputs[1] = output.index_select(1, violence_idx)
    outputs[1].masked_fill_(not_protest_mask, 0)
    # visual attribute output (zeroed for non-protest images; the mask is
    # repeated across the 10 attribute columns)
    outputs[2] = output.index_select(1, visattr_idx)
    outputs[2].masked_fill_(not_protest_mask.repeat(1, 10),0)
    targets = [None] * 4
    targets[0] = target['protest'].float()
    targets[1] = target['violence'].float()
    targets[2] = target['visattr'].float()
    scores = {}
    # protest accuracy for this batch
    scores['protest_acc'] = accuracy_score(outputs[0].data.round(), targets[0].data)
    # violence MSE for this batch, averaged over protest images only
    scores['violence_mse'] = ((outputs[1].data - targets[1].data).pow(2)).sum() / float(N_protest)
    # mean accuracy for visual attributes, counted over protest images only
    # (non-protest rows are masked out of the comparison before counting)
    comparison = (outputs[2].data.round() == targets[2].data)
    comparison.masked_fill_(not_protest_mask.repeat(1, 10).data,0)
    n_right = comparison.float().sum()
    mean_acc = n_right / float(N_protest*10)
    scores['visattr_acc'] = mean_acc
    # return weighted loss
    losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(len(criterions))]
    return losses, scores, N_protest
def train(train_loader, model, criterions, optimizer, epoch):
    """Run one training epoch over train_loader.

    Args:
        train_loader: DataLoader yielding {'image': tensor, 'label': dict}.
        model: network producing (batch, 12) multi-task outputs.
        criterions: [protest BCE, violence MSE, visattr BCE] losses.
        optimizer: optimizer stepping the trainable parameters.
        epoch: current epoch index (used for logging only).

    Returns:
        List of per-batch total (summed weighted) losses for this epoch.

    Relies on the module-level `args` namespace (cuda, print_freq).
    """
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_protest = AverageMeter()
    loss_v = AverageMeter()
    protest_acc = AverageMeter()
    violence_mse = AverageMeter()
    visattr_acc = AverageMeter()
    end = time.time()
    loss_history = []
    for i, sample in enumerate(train_loader):
        # measure data loading batch_time
        input, target = sample['image'], sample['label']
        data_time.update(time.time() - end)
        if args.cuda:
            input = input.cuda()
            for k, v in target.items():
                target[k] = v.cuda()
        target_var = {}
        for k,v in target.items():
            target_var[k] = Variable(v)
        input_var = Variable(input)
        output = model(input_var)
        losses, scores, N_protest = calculate_loss(output, target_var, criterions)
        optimizer.zero_grad()
        # total loss = sum of the already-weighted task losses
        loss = 0
        for l in losses:
            loss += l
        # back prop
        loss.backward()
        optimizer.step()
        if N_protest:
            loss_protest.update(losses[0].data, input.size(0))
            # non-protest-head losses (violence + visattr), averaged over
            # the protest images only
            loss_v.update(loss.data - losses[0].data, N_protest)
        else:
            # when there is no protest image in the batch
            loss_protest.update(losses[0].data, input.size(0))
        loss_history.append(loss.data)
        protest_acc.update(scores['protest_acc'], input.size(0))
        violence_mse.update(scores['violence_mse'], N_protest)
        visattr_acc.update(scores['visattr_acc'], N_protest)
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}] '
                  'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '
                  'Data {data_time.val:.2f} ({data_time.avg:.2f}) '
                  'Loss {loss_val:.3f} ({loss_avg:.3f}) '
                  'Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f}) '
                  'Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '
                  'Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'
                  .format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time,
                   loss_val=loss_protest.val + loss_v.val,
                   loss_avg = loss_protest.avg + loss_v.avg,
                   protest_acc = protest_acc, violence_mse = violence_mse,
                   visattr_acc = visattr_acc))
    return loss_history
def validate(val_loader, model, criterions, epoch):
    """Run one evaluation pass over val_loader.

    Returns:
        (mean total loss, list of per-batch total losses).

    Relies on the module-level `args` namespace (cuda, print_freq).
    NOTE(review): runs under model.eval() but without torch.no_grad(), so
    autograd state is still built for every batch; wrapping the loop in
    no_grad would reduce memory — confirm before changing.
    """
    model.eval()
    batch_time = AverageMeter()
    data_time = AverageMeter()  # NOTE(review): created but never updated
    loss_protest = AverageMeter()
    loss_v = AverageMeter()
    protest_acc = AverageMeter()
    violence_mse = AverageMeter()
    visattr_acc = AverageMeter()
    end = time.time()
    loss_history = []
    for i, sample in enumerate(val_loader):
        # measure data loading batch_time
        input, target = sample['image'], sample['label']
        if args.cuda:
            input = input.cuda()
            for k, v in target.items():
                target[k] = v.cuda()
        input_var = Variable(input)
        target_var = {}
        for k,v in target.items():
            target_var[k] = Variable(v)
        output = model(input_var)
        losses, scores, N_protest = calculate_loss(output, target_var, criterions)
        # total loss = sum of the already-weighted task losses
        loss = 0
        for l in losses:
            loss += l
        if N_protest:
            loss_protest.update(losses[0].data, input.size(0))
            loss_v.update(loss.data - losses[0].data, N_protest)
        else:
            # when no protest images
            loss_protest.update(losses[0].data, input.size(0))
        loss_history.append(loss.data)
        protest_acc.update(scores['protest_acc'], input.size(0))
        violence_mse.update(scores['violence_mse'], N_protest)
        visattr_acc.update(scores['visattr_acc'], N_protest)
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '
                  'Loss {loss_val:.3f} ({loss_avg:.3f}) '
                  'Protest Acc {protest_acc.val:.3f} ({protest_acc.avg:.3f}) '
                  'Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '
                  'Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'
                  .format(
                   epoch, i, len(val_loader), batch_time=batch_time,
                   loss_val =loss_protest.val + loss_v.val,
                   loss_avg = loss_protest.avg + loss_v.avg,
                   protest_acc = protest_acc,
                   violence_mse = violence_mse, visattr_acc = visattr_acc))
    # epoch-level summary line
    print(' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} '
          'Violence MSE {violence_mse.avg:.5f} '
          'Vis Attr Acc {visattr_acc.avg:.3f} '
          .format(loss_avg = loss_protest.avg + loss_v.avg,
                  protest_acc = protest_acc,
                  violence_mse = violence_mse, visattr_acc = visattr_acc))
    return loss_protest.avg + loss_v.avg, loss_history
def adjust_learning_rate(optimizer, epoch):
    """Set the learning rate to the initial LR decayed by a factor of 0.4
    every 4 epochs.

    (The formula below is 0.4 ** (epoch // 4); the earlier docstring's
    "0.5 every 5 epochs" did not match it.)  Reads args.lr as the base LR.
    """
    lr = args.lr * (0.4 ** (epoch // 4))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Persist *state* to *filename*; when *is_best*, also copy it to
    'model_best.pth.tar' so the best checkpoint is always retrievable."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
def main():
    """Entry point: build the model and data pipelines, then train and
    validate for args.epochs, checkpointing every epoch and tracking the
    best validation loss in the module-level `best_loss`."""
    global best_loss
    loss_history_train = []
    loss_history_val = []
    # dataset layout: <data_dir>/{train,test}/ images plus tab-separated
    # annotation files at the top level
    data_dir = args.data_dir
    img_dir_train = os.path.join(data_dir, "train")
    img_dir_val = os.path.join(data_dir, "test")
    txt_file_train = os.path.join(data_dir, "annot_train.txt")
    txt_file_val = os.path.join(data_dir, "annot_test.txt")
    # load pretrained resnet50 with a modified last fully connected layer
    model = modified_resnet()
    # we need three different criterion for training: protest (BCE),
    # violence (MSE), visual attributes (BCE)
    criterion_protest = nn.BCELoss()
    criterion_violence = nn.MSELoss()
    criterion_visattr = nn.BCELoss()
    criterions = [criterion_protest, criterion_violence, criterion_visattr]
    if args.cuda and not torch.cuda.is_available():
        raise Exception("No GPU Found")
    if args.cuda:
        model = model.cuda()
        criterions = [criterion.cuda() for criterion in criterions]
    # we are not training the frozen layers
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.SGD(
        parameters, args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay
        )
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_loss = checkpoint['best_loss']
            args.start_epoch = checkpoint['epoch']  # NOTE(review): duplicate of the assignment two lines up
            model.load_state_dict(checkpoint['state_dict'])
            loss_history_train = checkpoint['loss_history_train']
            loss_history_val = checkpoint['loss_history_val']
            if args.change_lr:
                # caller asked to override the saved LR with the fresh one
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr
            else:
                optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # eigenvalues/eigenvectors used by the AlexNet-style Lighting transform
    eigval = torch.Tensor([0.2175, 0.0188, 0.0045])
    eigvec = torch.Tensor([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
    # training pipeline: aggressive augmentation (crop/rotate/flip/jitter
    # plus PCA lighting noise)
    train_dataset = ProtestDataset(
        txt_file = txt_file_train,
        img_dir = img_dir_train,
        transform = transforms.Compose([
            transforms.RandomResizedCrop(100),
            transforms.RandomRotation(30),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness = 0.4,
                contrast = 0.7,
                saturation = 0.4,
                ),
            transforms.ToTensor(),
            Lighting(0.1, eigval, eigvec),
            normalize,
        ]))
    # validation pipeline: deterministic resize + center crop only
    val_dataset = ProtestDataset(
        txt_file = txt_file_val,
        img_dir = img_dir_val,
        transform = transforms.Compose([
            transforms.Resize(125),
            transforms.CenterCrop(100),
            transforms.ToTensor(),
            normalize,
        ]))
    train_loader = DataLoader(
        train_dataset,
        num_workers = args.workers,
        batch_size = args.batch_size,
        shuffle = True
        )
    val_loader = DataLoader(
        val_dataset,
        num_workers = args.workers,
        batch_size = args.batch_size)
    # main loop: decay LR, train one epoch, validate, checkpoint
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        loss_history_train_this = train(train_loader, model, criterions,
                                        optimizer, epoch)
        loss_val, loss_history_val_this = validate(val_loader, model,
                                                   criterions, epoch)
        loss_history_train.append(loss_history_train_this)
        loss_history_val.append(loss_history_val_this)
        is_best = loss_val < best_loss
        if is_best:
            print('best model!!')
        best_loss = min(loss_val, best_loss)
        save_checkpoint({
            'epoch' : epoch + 1,
            'state_dict' : model.state_dict(),
            'best_loss' : best_loss,
            'optimizer' : optimizer.state_dict(),
            'loss_history_train': loss_history_train,
            'loss_history_val': loss_history_val
        }, is_best)
if __name__ == "__main__":
    # command-line interface; parse_known_args() is used below so stray
    # launcher/notebook arguments do not crash the script
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir",
                        type=str,
                        default = "",
                        help = "directory path to dataset",
                        )
    parser.add_argument("--cuda",
                        action = "store_true",
                        help = "use cuda?",
                        )
    parser.add_argument("--workers",
                        type = int,
                        default = 0,
                        help = "number of workers",
                        )
    parser.add_argument("--batch_size",
                        type = int,
                        default = 8,
                        help = "batch size",
                        )
    parser.add_argument("--epochs",
                        type = int,
                        default = 10,
                        help = "number of epochs",
                        )
    parser.add_argument("--weight_decay",
                        type = float,
                        default = 1e-4,
                        help = "weight decay",
                        )
    parser.add_argument("--lr",
                        type = float,
                        default = 0.01,
                        help = "learning rate",
                        )
    parser.add_argument("--momentum",
                        type = float,
                        default = 0.9,
                        help = "momentum",
                        )
    parser.add_argument("--print_freq",
                        type = int,
                        default = 10,
                        help = "print frequency",
                        )
    parser.add_argument('--resume',
                        default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--change_lr',
                        action = "store_true",
                        help = "Use this if you want to \
                        change learning rate when resuming")
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='manual epoch number (useful on restarts)')
    args, unknown = parser.parse_known_args()
    if args.cuda:
        # move the index tensors used by calculate_loss onto the GPU so
        # index_select operates on same-device tensors
        protest_idx = protest_idx.cuda()
        violence_idx = violence_idx.cuda()
        visattr_idx = visattr_idx.cuda()
    main()
|
normal
|
{
"blob_id": "f3a3746c48617754aad5ae8d0d7a0b8908c34562",
"index": 7852,
"step-1": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n <mask token>\n <mask token>\n\n def __len__(self):\n return len(self.label_frame)\n <mask token>\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def __init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = 
eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n <mask token>\n <mask token>\n\n def __len__(self):\n return len(self.label_frame)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest': protest, 'violence': violence, 'visattr': visattr}\n sample = {'image': image, 'label': label}\n if self.transform:\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def __init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n 
self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n <mask token>\n\n def __init__(self, txt_file, img_dir, transform=None):\n \"\"\"\n Args:\n txt_file: Path to txt file with annotation\n img_dir: Directory with images\n transform: Optional transform to be applied on a sample.\n \"\"\"\n self.label_frame = pd.read_csv(txt_file, delimiter='\\t').replace('-', 0\n )\n self.img_dir = img_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.label_frame)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest': protest, 'violence': violence, 'visattr': visattr}\n sample = {'image': image, 'label': label}\n if self.transform:\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def __init__(self):\n super(FinalLayer, 
self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n \"\"\"\n dataset for training and evaluation\n \"\"\"\n\n def __init__(self, txt_file, img_dir, transform=None):\n \"\"\"\n Args:\n txt_file: Path to txt file with annotation\n img_dir: Directory with images\n transform: Optional transform to be applied on a sample.\n \"\"\"\n self.label_frame = pd.read_csv(txt_file, delimiter='\\t').replace('-', 0\n )\n self.img_dir = img_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.label_frame)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest': protest, 'violence': violence, 'visattr': visattr}\n sample = {'image': image, 'label': label}\n if self.transform:\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def 
__init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n\n\ndef train(train_loader, model, criterions, optimizer, epoch):\n \"\"\"training the model\"\"\"\n model.train()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n end = time.time()\n loss_history = []\n for i, sample in enumerate(train_loader):\n input, target = sample['image'], sample['label']\n data_time.update(time.time() - end)\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n target_var = {}\n for k, v in target.items():\n target_var[k] = Variable(v)\n input_var = Variable(input)\n output = model(input_var)\n losses, scores, N_protest = 
calculate_loss(output, target_var,\n criterions)\n optimizer.zero_grad()\n loss = 0\n for l in losses:\n loss += l\n loss.backward()\n optimizer.step()\n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n batch_time.update(time.time() - end)\n end = time.time()\n if i % args.print_freq == 0:\n print(\n 'Epoch: [{0}][{1}/{2}] Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f}) Loss {loss_val:.3f} ({loss_avg:.3f}) Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f}) Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f}) Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss_val=loss_protest.val + loss_v.val,\n loss_avg=loss_protest.avg + loss_v.avg, protest_acc=\n protest_acc, violence_mse=violence_mse, visattr_acc=\n visattr_acc))\n return loss_history\n\n\ndef validate(val_loader, model, criterions, epoch):\n \"\"\"Validating\"\"\"\n model.eval()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n end = time.time()\n loss_history = []\n for i, sample in enumerate(val_loader):\n input, target = sample['image'], sample['label']\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n input_var = Variable(input)\n target_var = {}\n for k, v in target.items():\n target_var[k] = Variable(v)\n output = model(input_var)\n losses, scores, N_protest = calculate_loss(output, target_var,\n criterions)\n loss = 0\n 
for l in losses:\n loss += l\n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n batch_time.update(time.time() - end)\n end = time.time()\n if i % args.print_freq == 0:\n print(\n 'Epoch: [{0}][{1}/{2}]\\tTime {batch_time.val:.2f} ({batch_time.avg:.2f}) Loss {loss_val:.3f} ({loss_avg:.3f}) Protest Acc {protest_acc.val:.3f} ({protest_acc.avg:.3f}) Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f}) Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(epoch, i, len(val_loader), batch_time=batch_time,\n loss_val=loss_protest.val + loss_v.val, loss_avg=\n loss_protest.avg + loss_v.avg, protest_acc=protest_acc,\n violence_mse=violence_mse, visattr_acc=visattr_acc))\n print(\n ' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} Violence MSE {violence_mse.avg:.5f} Vis Attr Acc {visattr_acc.avg:.3f} '\n .format(loss_avg=loss_protest.avg + loss_v.avg, protest_acc=\n protest_acc, violence_mse=violence_mse, visattr_acc=visattr_acc))\n return loss_protest.avg + loss_v.avg, loss_history\n\n\n<mask token>\n",
"step-5": "\n# coding: utf-8\n\n# In[5]:\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport argparse\nimport time\nimport shutil\nfrom sklearn.metrics import accuracy_score, mean_squared_error\n\nimport torch\nimport torch.optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torchvision.models as models\nimport matplotlib.image as mpimg\n\nclass ProtestDataset(Dataset):\n \"\"\"\n dataset for training and evaluation\n \"\"\"\n def __init__(self, txt_file, img_dir, transform = None):\n \"\"\"\n Args:\n txt_file: Path to txt file with annotation\n img_dir: Directory with images\n transform: Optional transform to be applied on a sample.\n \"\"\"\n self.label_frame = pd.read_csv(txt_file, delimiter=\"\\t\").replace('-', 0)\n self.img_dir = img_dir\n self.transform = transform\n def __len__(self):\n return len(self.label_frame)\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir,\n self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n \n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest':protest, 'violence':violence, 'visattr':visattr}\n\n sample = {\"image\":image, \"label\":label}\n if self.transform:\n sample[\"image\"] = self.transform(sample[\"image\"])\n return sample\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([\n transforms.Resize(125),\n transforms.CenterCrop(100),\n transforms.Grayscale(num_output_channels=1), #testtest\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 
0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\n self.img_list = sorted(os.listdir(img_dir))\n def __len__(self):\n return len(self.img_list)\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir,\n self.img_list[idx])\n image = pil_loader(imgpath)\n # we need this variable to check if the image is protest or not)\n sample = {\"imgpath\":imgpath, \"image\":image}\n sample[\"image\"] = self.transform(sample[\"image\"])\n return sample\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n def __init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\ndef pil_loader(path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\ndef modified_resnet():\n # load pretrained resnet with a modified last fully connected layer\n model = models.resnet50(pretrained = True)\n model.fc = FinalLayer()\n return model\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n \n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone() .mul(alpha.view(1, 3).expand(3, 3)) .mul(self.eigval.view(1, 3).expand(3, 3)) .sum(1).squeeze()\n\n 
return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n# for indexing output of the model\nprotest_idx = Variable(torch.LongTensor([0]))\nviolence_idx = Variable(torch.LongTensor([1]))\nvisattr_idx = Variable(torch.LongTensor(range(2,12)))\nbest_loss = float(\"inf\")\n\ndef calculate_loss(output, target, criterions, weights = [1, 10, 5]):\n \"\"\"Calculate loss\"\"\"\n # number of protest images\n N_protest = int(target['protest'].data.sum())\n batch_size = len(target['protest'])\n\n if N_protest == 0:\n # if no protest image in target\n outputs = [None]\n # protest output\n outputs[0] = output.index_select(1, protest_idx)\n targets = [None]\n # protest target\n targets[0] = target['protest'].float()\n losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(1)]\n scores = {}\n scores['protest_acc'] = accuracy_score((outputs[0]).data.round(), targets[0].data)\n scores['violence_mse'] = 0\n scores['visattr_acc'] = 0\n return losses, scores, N_protest\n\n # used for filling 0 for non-protest images\n not_protest_mask = (1 - target['protest']).byte()\n\n outputs = [None] * 4\n # protest output\n outputs[0] = output.index_select(1, protest_idx)\n # violence output\n outputs[1] = output.index_select(1, violence_idx)\n outputs[1].masked_fill_(not_protest_mask, 0)\n # visual attribute output\n outputs[2] = output.index_select(1, visattr_idx)\n outputs[2].masked_fill_(not_protest_mask.repeat(1, 10),0)\n\n\n targets = [None] * 4\n\n targets[0] = target['protest'].float()\n targets[1] = target['violence'].float()\n targets[2] = target['visattr'].float()\n\n scores = {}\n # protest accuracy for this batch\n scores['protest_acc'] = accuracy_score(outputs[0].data.round(), targets[0].data)\n # violence MSE for this batch\n scores['violence_mse'] = ((outputs[1].data - targets[1].data).pow(2)).sum() / float(N_protest)\n # mean accuracy for visual attribute for this batch\n comparison = (outputs[2].data.round() == targets[2].data)\n 
comparison.masked_fill_(not_protest_mask.repeat(1, 10).data,0)\n n_right = comparison.float().sum()\n mean_acc = n_right / float(N_protest*10)\n scores['visattr_acc'] = mean_acc\n\n # return weighted loss\n losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(len(criterions))]\n\n return losses, scores, N_protest\n\n\n\ndef train(train_loader, model, criterions, optimizer, epoch):\n \"\"\"training the model\"\"\"\n\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n\n end = time.time()\n loss_history = []\n for i, sample in enumerate(train_loader):\n # measure data loading batch_time\n input, target = sample['image'], sample['label']\n data_time.update(time.time() - end)\n\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n target_var = {}\n for k,v in target.items():\n target_var[k] = Variable(v)\n\n input_var = Variable(input)\n output = model(input_var)\n\n losses, scores, N_protest = calculate_loss(output, target_var, criterions)\n\n optimizer.zero_grad()\n loss = 0\n for l in losses:\n loss += l\n # back prop\n loss.backward()\n optimizer.step()\n \n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n # when there is no protest image in the batch\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}] '\n 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '\n 'Data {data_time.val:.2f} ({data_time.avg:.2f}) '\n 'Loss 
{loss_val:.3f} ({loss_avg:.3f}) '\n 'Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f}) '\n 'Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '\n 'Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time,\n loss_val=loss_protest.val + loss_v.val,\n loss_avg = loss_protest.avg + loss_v.avg,\n protest_acc = protest_acc, violence_mse = violence_mse,\n visattr_acc = visattr_acc))\n\n return loss_history\n\ndef validate(val_loader, model, criterions, epoch):\n \"\"\"Validating\"\"\"\n model.eval()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n\n end = time.time()\n loss_history = []\n for i, sample in enumerate(val_loader):\n # measure data loading batch_time\n input, target = sample['image'], sample['label']\n\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n input_var = Variable(input)\n\n target_var = {}\n for k,v in target.items():\n target_var[k] = Variable(v)\n\n output = model(input_var)\n\n losses, scores, N_protest = calculate_loss(output, target_var, criterions)\n loss = 0\n for l in losses:\n loss += l\n\n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n # when no protest images\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '\n 'Loss {loss_val:.3f} ({loss_avg:.3f}) '\n 'Protest Acc 
{protest_acc.val:.3f} ({protest_acc.avg:.3f}) '\n 'Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '\n 'Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(\n epoch, i, len(val_loader), batch_time=batch_time,\n loss_val =loss_protest.val + loss_v.val,\n loss_avg = loss_protest.avg + loss_v.avg,\n protest_acc = protest_acc,\n violence_mse = violence_mse, visattr_acc = visattr_acc))\n\n print(' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} '\n 'Violence MSE {violence_mse.avg:.5f} '\n 'Vis Attr Acc {visattr_acc.avg:.3f} '\n .format(loss_avg = loss_protest.avg + loss_v.avg,\n protest_acc = protest_acc,\n violence_mse = violence_mse, visattr_acc = visattr_acc))\n return loss_protest.avg + loss_v.avg, loss_history\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 0.5 every 5 epochs\"\"\"\n lr = args.lr * (0.4 ** (epoch // 4))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n \"\"\"Save checkpoints\"\"\"\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'model_best.pth.tar')\n\n\ndef main():\n global best_loss\n loss_history_train = []\n loss_history_val = []\n data_dir = args.data_dir\n img_dir_train = os.path.join(data_dir, \"train\")\n img_dir_val = os.path.join(data_dir, \"test\")\n txt_file_train = os.path.join(data_dir, \"annot_train.txt\")\n txt_file_val = os.path.join(data_dir, \"annot_test.txt\")\n\n # load pretrained resnet50 with a modified last fully connected layer\n model = modified_resnet()\n\n # we need three different criterion for training\n criterion_protest = nn.BCELoss()\n criterion_violence = nn.MSELoss()\n criterion_visattr = nn.BCELoss()\n criterions = [criterion_protest, criterion_violence, criterion_visattr]\n\n if args.cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU Found\")\n if args.cuda:\n model = model.cuda()\n 
criterions = [criterion.cuda() for criterion in criterions]\n # we are not training the frozen layers\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n\n optimizer = torch.optim.SGD(\n parameters, args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay\n )\n\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_loss = checkpoint['best_loss']\n args.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n loss_history_train = checkpoint['loss_history_train']\n loss_history_val = checkpoint['loss_history_val']\n if args.change_lr:\n for param_group in optimizer.param_groups:\n param_group['lr'] = args.lr\n else:\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n eigval = torch.Tensor([0.2175, 0.0188, 0.0045])\n eigvec = torch.Tensor([[-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203]])\n\n train_dataset = ProtestDataset(\n txt_file = txt_file_train,\n img_dir = img_dir_train,\n transform = transforms.Compose([\n transforms.RandomResizedCrop(100),\n transforms.RandomRotation(30),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(\n brightness = 0.4,\n contrast = 0.7,\n saturation = 0.4,\n ),\n transforms.ToTensor(),\n Lighting(0.1, eigval, eigvec),\n normalize,\n ]))\n val_dataset = ProtestDataset(\n txt_file = txt_file_val,\n img_dir = img_dir_val,\n transform = transforms.Compose([\n transforms.Resize(125),\n transforms.CenterCrop(100),\n transforms.ToTensor(),\n normalize,\n ]))\n train_loader = DataLoader(\n train_dataset,\n num_workers = args.workers,\n 
batch_size = args.batch_size,\n shuffle = True\n )\n val_loader = DataLoader(\n val_dataset,\n num_workers = args.workers,\n batch_size = args.batch_size)\n\n for epoch in range(args.start_epoch, args.epochs):\n adjust_learning_rate(optimizer, epoch)\n loss_history_train_this = train(train_loader, model, criterions,\n optimizer, epoch)\n loss_val, loss_history_val_this = validate(val_loader, model,\n criterions, epoch)\n loss_history_train.append(loss_history_train_this)\n loss_history_val.append(loss_history_val_this)\n\n is_best = loss_val < best_loss\n if is_best:\n print('best model!!')\n best_loss = min(loss_val, best_loss)\n\n\n save_checkpoint({\n 'epoch' : epoch + 1,\n 'state_dict' : model.state_dict(),\n 'best_loss' : best_loss,\n 'optimizer' : optimizer.state_dict(),\n 'loss_history_train': loss_history_train,\n 'loss_history_val': loss_history_val\n }, is_best)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_dir\",\n type=str,\n default = \"\",\n help = \"directory path to dataset\",\n )\n parser.add_argument(\"--cuda\",\n action = \"store_true\",\n help = \"use cuda?\",\n )\n parser.add_argument(\"--workers\",\n type = int,\n default = 0,\n help = \"number of workers\",\n )\n parser.add_argument(\"--batch_size\",\n type = int,\n default = 8,\n help = \"batch size\",\n )\n parser.add_argument(\"--epochs\",\n type = int,\n default = 10,\n help = \"number of epochs\",\n )\n parser.add_argument(\"--weight_decay\",\n type = float,\n default = 1e-4,\n help = \"weight decay\",\n )\n parser.add_argument(\"--lr\",\n type = float,\n default = 0.01,\n help = \"learning rate\",\n )\n parser.add_argument(\"--momentum\",\n type = float,\n default = 0.9,\n help = \"momentum\",\n )\n parser.add_argument(\"--print_freq\",\n type = int,\n default = 10,\n help = \"print frequency\",\n )\n parser.add_argument('--resume',\n default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n 
parser.add_argument('--change_lr',\n action = \"store_true\",\n help = \"Use this if you want to \\\n change learning rate when resuming\")\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\n args, unknown = parser.parse_known_args()\n\n if args.cuda:\n protest_idx = protest_idx.cuda()\n violence_idx = violence_idx.cuda()\n visattr_idx = visattr_idx.cuda()\n\n\n main()\n\n",
"step-ids": [
20,
21,
22,
25,
35
]
}
|
[
20,
21,
22,
25,
35
] |
# Read the two integers to add from standard input (prompt text is
# Indonesian for "enter"; kept verbatim as it is user-facing output).
x = int(input('masukkan'))
y = int(input('masukkan'))
def jumlah(x, y):
    """Return the sum of *x* and *y*."""
    return x + y
# Show the interactively entered sum ("hasil dari" = "result of"),
# then a fixed demonstration call: jumlah(2, 4) + 1 == 7.
print('hasil dari', x, '+', y, '=', jumlah(x, y))
k = jumlah(2, 4) + 1
print(k)
|
normal
|
{
"blob_id": "d8482da6b9983d990da980c3a5edab0c49a28229",
"index": 2219,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef jumlah(x, y):\n hasil = x + y\n return hasil\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef jumlah(x, y):\n hasil = x + y\n return hasil\n\n\nprint('hasil dari', x, '+', y, '=', jumlah(x, y))\n<mask token>\nprint(k)\n",
"step-4": "x = int(input('masukkan'))\ny = int(input('masukkan'))\n\n\ndef jumlah(x, y):\n hasil = x + y\n return hasil\n\n\nprint('hasil dari', x, '+', y, '=', jumlah(x, y))\nk = jumlah(2, 4) + 1\nprint(k)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
import random
import time
import numpy as np
class NeuralNetwork:
digits = [
[
1,1,1,1,1,
1,0,0,0,1,
1,0,0,0,1,
1,0,0,0,1,
1,1,1,1,1
],
[
0,0,1,0,0,
0,0,1,0,0,
0,0,1,0,0,
0,0,1,0,0,
0,0,1,0,0
],
[
1,1,1,1,1,
0,0,0,0,1,
1,1,1,1,1,
1,0,0,0,0,
1,1,1,1,1
],
[
1,1,1,1,1,
0,0,0,0,1,
1,1,1,1,1,
0,0,0,0,1,
1,1,1,1,1
],
[
1,0,0,0,1,
1,0,0,0,1,
1,1,1,1,1,
0,0,0,0,1,
0,0,0,0,1
],
[
1,1,1,1,1,
1,0,0,0,0,
1,1,1,1,1,
0,0,0,0,1,
1,1,1,1,1
],
[
1,1,1,1,1,
1,0,0,0,0,
1,1,1,1,1,
1,0,0,0,1,
1,1,1,1,1
],
[
1,1,1,1,1,
0,0,0,1,0,
0,0,1,0,0,
0,1,0,0,0,
1,0,0,0,0
],
[
1,1,1,1,1,
1,0,0,0,1,
1,1,1,1,1,
1,0,0,0,1,
1,1,1,1,1
],
[
1,1,1,1,1,
1,0,0,0,1,
1,1,1,1,1,
0,0,0,0,1,
0,0,0,0,1
]
]
base_output = [
[1,0,0,0,0,0,0,0,0,0],
[0,1,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0],
[0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,0,1,0],
[0,0,0,0,0,0,0,0,0,1]
]
show_operations = False
def __init__(self, seed = 5, alpha = 0.1, min_error_percentage = 0.0005, input_size = 25, output_size = 10, hidden_num = 5):
self.seed = seed
self.alpha = alpha
self.min_error_percentage = min_error_percentage
self.input_size = input_size
self.output_size = output_size
self.hidden_num = hidden_num
def withSeed(self, seed):
self.seed = seed
return self
def withAlpha(self, alpha):
self.alpha = alpha
return self
def withMinErrorPercentage(self, min_error_percentage):
self.min_error_percentage = min_error_percentage
return self
def verbose(self, show_operations):
self.show_operations = show_operations
return self
def withHiddenLabels(self, hidden_num):
self.hidden_num = hidden_num
return self
def randomize(self):
random.seed(self.seed)
neural_network = [
[
[random.randint(-1, 0) for _ in range(self.input_size + 1)] for _ in range(self.hidden_num)
],
[
[random.randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in range(self.output_size)
]
]
return neural_network
def sigmoid(self, x):
return 1 / (1 + math.exp(-x))
def product(self, v, w):
return sum([a * b for a, b in zip(v, w)])
def neuron_output(self, weights, inputs):
return self.sigmoid(self.product(weights, inputs))
def ffnn(self, neural_network, inputs):
outputs = []
for label in neural_network:
inputs = inputs + [1]
output = [self.neuron_output(neuron, inputs) for neuron in label]
outputs.append(output)
inputs = output
return outputs
def back_propagation(self, digit, inputs, target):
hidden_output, output = self.ffnn(digit, inputs)
new_output = []
new_hidden = []
error = sum((output - target) * (output - target) for output, target in zip(output, target)) * 0.5
delta_output = [output * (1 - output) * (output - target) for output, target in zip(output, target)]
for i, output_neuron in enumerate(digit[-1]):
for j, hidden_output_current in enumerate(hidden_output + [1]):
output_neuron[j] -= delta_output[i] * hidden_output_current * self.alpha
new_output.append(output_neuron)
if (self.show_operations):
print("Neuron weights: ", i, output_neuron)
hidden_delta = [hidden_output_current * (1 - hidden_output_current) * self.product(delta_output, [n[i] for n in digit[-1]]) for i, hidden_output_current in enumerate(hidden_output)]
for i, hidden_neuron in enumerate(digit[0]):
for j, input_ in enumerate(inputs + [1]):
hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha
new_hidden.append(hidden_neuron)
if (self.show_operations):
print("Hidden neuron weights: ", i, hidden_neuron)
return new_hidden, new_output, error
def randomTraining(self):
print("Starting training...")
start = time.time()
output = self.randomize()
sq_error = 1
iterations = 1
print("Initial random network: ", output)
while sq_error > self.min_error_percentage:
sq_error = 0
for i in range(len(self.digits)):
hidden, output, error = self.back_propagation(output, self.digits[i], self.base_output[i])
output = [hidden, output]
sq_error += error
sq_error = sq_error / len(self.digits)
if (self.show_operations):
print("Iterations: ", iterations, ", error percentage: ", sq_error)
iterations += 1
self.output_data = output
end = time.time()
elapsed = end - start
print("Trained finished in: ", elapsed, " seconds")
print("Total iterations: ", iterations)
print("Error percentage: ", sq_error)
print("Output result: ", self.output_data)
def guessWith(self, output):
index = 0
closest_dif = abs(output[0] - 1)
for i, value in enumerate(output):
current_dif = abs(value - 1)
if (current_dif < closest_dif):
closest_dif = current_dif
index = i
return index
    def test(self, input_):
        """Run one input vector through the trained network and print the
        raw output activations plus the resulting digit guess.

        Requires ``self.output_data`` to be populated first (it is set by
        ``randomTraining``).
        """
        result = self.ffnn(self.output_data, input_)[-1]
        print("Output: ", result)
        print("Your number probably is: ", self.guessWith(result))
|
normal
|
{
"blob_id": "0af45914c8c111a42b0b9684f5f0ee19ef5eeb70",
"index": 7548,
"step-1": "<mask token>\n\n\nclass NeuralNetwork:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def withSeed(self, seed):\n self.seed = seed\n return self\n <mask token>\n\n def withMinErrorPercentage(self, min_error_percentage):\n self.min_error_percentage = min_error_percentage\n return self\n\n def verbose(self, show_operations):\n self.show_operations = show_operations\n return self\n <mask token>\n\n def randomize(self):\n random.seed(self.seed)\n neural_network = [[[random.randint(-1, 0) for _ in range(self.\n input_size + 1)] for _ in range(self.hidden_num)], [[random.\n randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in\n range(self.output_size)]]\n return neural_network\n\n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test(self, input_):\n result = self.ffnn(self.output_data, input_)[-1]\n print('Output: ', result)\n print('Your number probably is: ', self.guessWith(result))\n",
"step-2": "<mask token>\n\n\nclass NeuralNetwork:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def withSeed(self, seed):\n self.seed = seed\n return self\n <mask token>\n\n def withMinErrorPercentage(self, min_error_percentage):\n self.min_error_percentage = min_error_percentage\n return self\n\n def verbose(self, show_operations):\n self.show_operations = show_operations\n return self\n <mask token>\n\n def randomize(self):\n random.seed(self.seed)\n neural_network = [[[random.randint(-1, 0) for _ in range(self.\n input_size + 1)] for _ in range(self.hidden_num)], [[random.\n randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in\n range(self.output_size)]]\n return neural_network\n\n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n\n def product(self, v, w):\n return sum([(a * b) for a, b in zip(v, w)])\n\n def neuron_output(self, weights, inputs):\n return self.sigmoid(self.product(weights, inputs))\n <mask token>\n\n def back_propagation(self, digit, inputs, target):\n hidden_output, output = self.ffnn(digit, inputs)\n new_output = []\n new_hidden = []\n error = sum((output - target) * (output - target) for output,\n target in zip(output, target)) * 0.5\n delta_output = [(output * (1 - output) * (output - target)) for \n output, target in zip(output, target)]\n for i, output_neuron in enumerate(digit[-1]):\n for j, hidden_output_current in enumerate(hidden_output + [1]):\n output_neuron[j] -= delta_output[i\n ] * hidden_output_current * self.alpha\n new_output.append(output_neuron)\n if self.show_operations:\n print('Neuron weights: ', i, output_neuron)\n hidden_delta = [(hidden_output_current * (1 - hidden_output_current\n ) * self.product(delta_output, [n[i] for n in digit[-1]])) for \n i, hidden_output_current in enumerate(hidden_output)]\n for i, hidden_neuron in enumerate(digit[0]):\n for j, input_ in enumerate(inputs + [1]):\n hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha\n new_hidden.append(hidden_neuron)\n if 
self.show_operations:\n print('Hidden neuron weights: ', i, hidden_neuron)\n return new_hidden, new_output, error\n\n def randomTraining(self):\n print('Starting training...')\n start = time.time()\n output = self.randomize()\n sq_error = 1\n iterations = 1\n print('Initial random network: ', output)\n while sq_error > self.min_error_percentage:\n sq_error = 0\n for i in range(len(self.digits)):\n hidden, output, error = self.back_propagation(output, self.\n digits[i], self.base_output[i])\n output = [hidden, output]\n sq_error += error\n sq_error = sq_error / len(self.digits)\n if self.show_operations:\n print('Iterations: ', iterations, ', error percentage: ',\n sq_error)\n iterations += 1\n self.output_data = output\n end = time.time()\n elapsed = end - start\n print('Trained finished in: ', elapsed, ' seconds')\n print('Total iterations: ', iterations)\n print('Error percentage: ', sq_error)\n print('Output result: ', self.output_data)\n\n def guessWith(self, output):\n index = 0\n closest_dif = abs(output[0] - 1)\n for i, value in enumerate(output):\n current_dif = abs(value - 1)\n if current_dif < closest_dif:\n closest_dif = current_dif\n index = i\n return index\n\n def test(self, input_):\n result = self.ffnn(self.output_data, input_)[-1]\n print('Output: ', result)\n print('Your number probably is: ', self.guessWith(result))\n",
"step-3": "<mask token>\n\n\nclass NeuralNetwork:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def withSeed(self, seed):\n self.seed = seed\n return self\n <mask token>\n\n def withMinErrorPercentage(self, min_error_percentage):\n self.min_error_percentage = min_error_percentage\n return self\n\n def verbose(self, show_operations):\n self.show_operations = show_operations\n return self\n <mask token>\n\n def randomize(self):\n random.seed(self.seed)\n neural_network = [[[random.randint(-1, 0) for _ in range(self.\n input_size + 1)] for _ in range(self.hidden_num)], [[random.\n randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in\n range(self.output_size)]]\n return neural_network\n\n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n\n def product(self, v, w):\n return sum([(a * b) for a, b in zip(v, w)])\n\n def neuron_output(self, weights, inputs):\n return self.sigmoid(self.product(weights, inputs))\n\n def ffnn(self, neural_network, inputs):\n outputs = []\n for label in neural_network:\n inputs = inputs + [1]\n output = [self.neuron_output(neuron, inputs) for neuron in label]\n outputs.append(output)\n inputs = output\n return outputs\n\n def back_propagation(self, digit, inputs, target):\n hidden_output, output = self.ffnn(digit, inputs)\n new_output = []\n new_hidden = []\n error = sum((output - target) * (output - target) for output,\n target in zip(output, target)) * 0.5\n delta_output = [(output * (1 - output) * (output - target)) for \n output, target in zip(output, target)]\n for i, output_neuron in enumerate(digit[-1]):\n for j, hidden_output_current in enumerate(hidden_output + [1]):\n output_neuron[j] -= delta_output[i\n ] * hidden_output_current * self.alpha\n new_output.append(output_neuron)\n if self.show_operations:\n print('Neuron weights: ', i, output_neuron)\n hidden_delta = [(hidden_output_current * (1 - hidden_output_current\n ) * self.product(delta_output, [n[i] for n in digit[-1]])) for \n i, 
hidden_output_current in enumerate(hidden_output)]\n for i, hidden_neuron in enumerate(digit[0]):\n for j, input_ in enumerate(inputs + [1]):\n hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha\n new_hidden.append(hidden_neuron)\n if self.show_operations:\n print('Hidden neuron weights: ', i, hidden_neuron)\n return new_hidden, new_output, error\n\n def randomTraining(self):\n print('Starting training...')\n start = time.time()\n output = self.randomize()\n sq_error = 1\n iterations = 1\n print('Initial random network: ', output)\n while sq_error > self.min_error_percentage:\n sq_error = 0\n for i in range(len(self.digits)):\n hidden, output, error = self.back_propagation(output, self.\n digits[i], self.base_output[i])\n output = [hidden, output]\n sq_error += error\n sq_error = sq_error / len(self.digits)\n if self.show_operations:\n print('Iterations: ', iterations, ', error percentage: ',\n sq_error)\n iterations += 1\n self.output_data = output\n end = time.time()\n elapsed = end - start\n print('Trained finished in: ', elapsed, ' seconds')\n print('Total iterations: ', iterations)\n print('Error percentage: ', sq_error)\n print('Output result: ', self.output_data)\n\n def guessWith(self, output):\n index = 0\n closest_dif = abs(output[0] - 1)\n for i, value in enumerate(output):\n current_dif = abs(value - 1)\n if current_dif < closest_dif:\n closest_dif = current_dif\n index = i\n return index\n\n def test(self, input_):\n result = self.ffnn(self.output_data, input_)[-1]\n print('Output: ', result)\n print('Your number probably is: ', self.guessWith(result))\n",
"step-4": "<mask token>\n\n\nclass NeuralNetwork:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, seed=5, alpha=0.1, min_error_percentage=0.0005,\n input_size=25, output_size=10, hidden_num=5):\n self.seed = seed\n self.alpha = alpha\n self.min_error_percentage = min_error_percentage\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_num = hidden_num\n\n def withSeed(self, seed):\n self.seed = seed\n return self\n <mask token>\n\n def withMinErrorPercentage(self, min_error_percentage):\n self.min_error_percentage = min_error_percentage\n return self\n\n def verbose(self, show_operations):\n self.show_operations = show_operations\n return self\n <mask token>\n\n def randomize(self):\n random.seed(self.seed)\n neural_network = [[[random.randint(-1, 0) for _ in range(self.\n input_size + 1)] for _ in range(self.hidden_num)], [[random.\n randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in\n range(self.output_size)]]\n return neural_network\n\n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n\n def product(self, v, w):\n return sum([(a * b) for a, b in zip(v, w)])\n\n def neuron_output(self, weights, inputs):\n return self.sigmoid(self.product(weights, inputs))\n\n def ffnn(self, neural_network, inputs):\n outputs = []\n for label in neural_network:\n inputs = inputs + [1]\n output = [self.neuron_output(neuron, inputs) for neuron in label]\n outputs.append(output)\n inputs = output\n return outputs\n\n def back_propagation(self, digit, inputs, target):\n hidden_output, output = self.ffnn(digit, inputs)\n new_output = []\n new_hidden = []\n error = sum((output - target) * (output - target) for output,\n target in zip(output, target)) * 0.5\n delta_output = [(output * (1 - output) * (output - target)) for \n output, target in zip(output, target)]\n for i, output_neuron in enumerate(digit[-1]):\n for j, hidden_output_current in enumerate(hidden_output + [1]):\n output_neuron[j] -= delta_output[i\n ] * 
hidden_output_current * self.alpha\n new_output.append(output_neuron)\n if self.show_operations:\n print('Neuron weights: ', i, output_neuron)\n hidden_delta = [(hidden_output_current * (1 - hidden_output_current\n ) * self.product(delta_output, [n[i] for n in digit[-1]])) for \n i, hidden_output_current in enumerate(hidden_output)]\n for i, hidden_neuron in enumerate(digit[0]):\n for j, input_ in enumerate(inputs + [1]):\n hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha\n new_hidden.append(hidden_neuron)\n if self.show_operations:\n print('Hidden neuron weights: ', i, hidden_neuron)\n return new_hidden, new_output, error\n\n def randomTraining(self):\n print('Starting training...')\n start = time.time()\n output = self.randomize()\n sq_error = 1\n iterations = 1\n print('Initial random network: ', output)\n while sq_error > self.min_error_percentage:\n sq_error = 0\n for i in range(len(self.digits)):\n hidden, output, error = self.back_propagation(output, self.\n digits[i], self.base_output[i])\n output = [hidden, output]\n sq_error += error\n sq_error = sq_error / len(self.digits)\n if self.show_operations:\n print('Iterations: ', iterations, ', error percentage: ',\n sq_error)\n iterations += 1\n self.output_data = output\n end = time.time()\n elapsed = end - start\n print('Trained finished in: ', elapsed, ' seconds')\n print('Total iterations: ', iterations)\n print('Error percentage: ', sq_error)\n print('Output result: ', self.output_data)\n\n def guessWith(self, output):\n index = 0\n closest_dif = abs(output[0] - 1)\n for i, value in enumerate(output):\n current_dif = abs(value - 1)\n if current_dif < closest_dif:\n closest_dif = current_dif\n index = i\n return index\n\n def test(self, input_):\n result = self.ffnn(self.output_data, input_)[-1]\n print('Output: ', result)\n print('Your number probably is: ', self.guessWith(result))\n",
"step-5": "import math\nimport random\nimport time\nimport numpy as np\n\nclass NeuralNetwork:\n\n digits = [\n [\n 1,1,1,1,1,\n 1,0,0,0,1,\n 1,0,0,0,1,\n 1,0,0,0,1,\n 1,1,1,1,1\n ],\n [\n 0,0,1,0,0,\n 0,0,1,0,0,\n 0,0,1,0,0,\n 0,0,1,0,0,\n 0,0,1,0,0\n ],\n [\n 1,1,1,1,1,\n 0,0,0,0,1,\n 1,1,1,1,1,\n 1,0,0,0,0,\n 1,1,1,1,1\n ],\n [\n 1,1,1,1,1,\n 0,0,0,0,1,\n 1,1,1,1,1,\n 0,0,0,0,1,\n 1,1,1,1,1\n ],\n [\n 1,0,0,0,1,\n 1,0,0,0,1,\n 1,1,1,1,1,\n 0,0,0,0,1,\n 0,0,0,0,1\n ],\n [\n 1,1,1,1,1,\n 1,0,0,0,0,\n 1,1,1,1,1,\n 0,0,0,0,1,\n 1,1,1,1,1\n ],\n [\n 1,1,1,1,1,\n 1,0,0,0,0,\n 1,1,1,1,1,\n 1,0,0,0,1,\n 1,1,1,1,1\n ],\n [\n 1,1,1,1,1,\n 0,0,0,1,0,\n 0,0,1,0,0,\n 0,1,0,0,0,\n 1,0,0,0,0\n ],\n [\n 1,1,1,1,1,\n 1,0,0,0,1,\n 1,1,1,1,1,\n 1,0,0,0,1,\n 1,1,1,1,1\n ],\n [\n 1,1,1,1,1,\n 1,0,0,0,1,\n 1,1,1,1,1,\n 0,0,0,0,1,\n 0,0,0,0,1\n ]\n ]\n\n base_output = [\n [1,0,0,0,0,0,0,0,0,0],\n [0,1,0,0,0,0,0,0,0,0],\n [0,0,1,0,0,0,0,0,0,0],\n [0,0,0,1,0,0,0,0,0,0],\n [0,0,0,0,1,0,0,0,0,0],\n [0,0,0,0,0,1,0,0,0,0],\n [0,0,0,0,0,0,1,0,0,0],\n [0,0,0,0,0,0,0,1,0,0],\n [0,0,0,0,0,0,0,0,1,0],\n [0,0,0,0,0,0,0,0,0,1]\n ]\n\n show_operations = False\n\n def __init__(self, seed = 5, alpha = 0.1, min_error_percentage = 0.0005, input_size = 25, output_size = 10, hidden_num = 5):\n self.seed = seed\n self.alpha = alpha\n self.min_error_percentage = min_error_percentage\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_num = hidden_num\n \n def withSeed(self, seed):\n self.seed = seed\n return self\n\n def withAlpha(self, alpha):\n self.alpha = alpha\n return self\n\n def withMinErrorPercentage(self, min_error_percentage):\n self.min_error_percentage = min_error_percentage\n return self\n\n def verbose(self, show_operations):\n self.show_operations = show_operations\n return self\n\n def withHiddenLabels(self, hidden_num):\n self.hidden_num = hidden_num\n return self\n\n def randomize(self):\n random.seed(self.seed)\n neural_network = [\n [\n [random.randint(-1, 0) 
for _ in range(self.input_size + 1)] for _ in range(self.hidden_num)\n ],\n [\n [random.randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in range(self.output_size)\n ]\n ]\n return neural_network\n \n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n\n def product(self, v, w):\n return sum([a * b for a, b in zip(v, w)])\n\n def neuron_output(self, weights, inputs):\n return self.sigmoid(self.product(weights, inputs))\n\n def ffnn(self, neural_network, inputs):\n outputs = []\n for label in neural_network:\n inputs = inputs + [1]\n output = [self.neuron_output(neuron, inputs) for neuron in label]\n outputs.append(output)\n inputs = output\n return outputs\n\n def back_propagation(self, digit, inputs, target):\n hidden_output, output = self.ffnn(digit, inputs)\n new_output = []\n new_hidden = []\n \n error = sum((output - target) * (output - target) for output, target in zip(output, target)) * 0.5\n delta_output = [output * (1 - output) * (output - target) for output, target in zip(output, target)]\n \n for i, output_neuron in enumerate(digit[-1]):\n for j, hidden_output_current in enumerate(hidden_output + [1]):\n output_neuron[j] -= delta_output[i] * hidden_output_current * self.alpha\n new_output.append(output_neuron)\n if (self.show_operations):\n print(\"Neuron weights: \", i, output_neuron)\n \n hidden_delta = [hidden_output_current * (1 - hidden_output_current) * self.product(delta_output, [n[i] for n in digit[-1]]) for i, hidden_output_current in enumerate(hidden_output)]\n \n for i, hidden_neuron in enumerate(digit[0]):\n for j, input_ in enumerate(inputs + [1]):\n hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha\n new_hidden.append(hidden_neuron)\n if (self.show_operations):\n print(\"Hidden neuron weights: \", i, hidden_neuron)\n\n return new_hidden, new_output, error \n \n def randomTraining(self):\n print(\"Starting training...\")\n start = time.time()\n output = self.randomize()\n sq_error = 1\n iterations = 1\n\n print(\"Initial 
random network: \", output)\n\n while sq_error > self.min_error_percentage:\n sq_error = 0\n for i in range(len(self.digits)):\n hidden, output, error = self.back_propagation(output, self.digits[i], self.base_output[i])\n output = [hidden, output]\n sq_error += error\n sq_error = sq_error / len(self.digits)\n if (self.show_operations):\n print(\"Iterations: \", iterations, \", error percentage: \", sq_error)\n iterations += 1\n \n self.output_data = output\n end = time.time()\n elapsed = end - start\n print(\"Trained finished in: \", elapsed, \" seconds\")\n print(\"Total iterations: \", iterations)\n print(\"Error percentage: \", sq_error)\n print(\"Output result: \", self.output_data)\n\n def guessWith(self, output):\n index = 0\n closest_dif = abs(output[0] - 1)\n for i, value in enumerate(output):\n current_dif = abs(value - 1)\n if (current_dif < closest_dif):\n closest_dif = current_dif\n index = i\n return index\n\n def test(self, input_):\n result = self.ffnn(self.output_data, input_)[-1]\n print(\"Output: \", result)\n print(\"Your number probably is: \", self.guessWith(result))\n",
"step-ids": [
7,
12,
13,
14,
19
]
}
|
[
7,
12,
13,
14,
19
] |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
# import Python so we can mock the parts we need to here.
import IPython.core.display
import IPython.core.magic
import datalab.utils.commands
def noop_decorator(func):
    """Return *func* unchanged; stands in for IPython's magic-registration
    decorators so the module under test can be imported outside a notebook."""
    return func
# Replace IPython's registration decorators and display wrappers with no-ops
# so the magics can be imported and exercised without a live notebook kernel.
IPython.core.magic.register_line_cell_magic = noop_decorator
IPython.core.magic.register_line_magic = noop_decorator
IPython.core.magic.register_cell_magic = noop_decorator
IPython.core.display.HTML = lambda x: x
IPython.core.display.JSON = lambda x: x
class TestCases(unittest.TestCase):

  def test_chart_cell(self):
    """Render a geo chart from a two-row table and sanity-check the markup."""
    rows = [{'country': 'US', 'quantity': 100}, {'country': 'ZA', 'quantity': 50}]
    IPython.get_ipython().user_ns = {}
    markup = datalab.utils.commands._chart._chart_cell(
        {'chart': 'geo', 'data': rows, 'fields': None}, '')
    self.assertGreater(markup.find('charts.render('), 0)
    self.assertGreater(markup.find('\'geo\''), 0)
    self.assertGreater(markup.find('"fields": "*"'), 0)
    # Cell order within a row is not guaranteed, so accept either ordering.
    us_cells = ('{"c": [{"v": "US"}, {"v": 100}]}', '{"c": [{"v": 100}, {"v": "US"}]}')
    za_cells = ('{"c": [{"v": "ZA"}, {"v": 50}]}', '{"c": [{"v": 50}, {"v": "ZA"}]}')
    self.assertTrue(any(markup.find(cell) > 0 for cell in us_cells))
    self.assertTrue(any(markup.find(cell) > 0 for cell in za_cells))

  def test_chart_magic(self):
    """Placeholder — TODO(gram): complete this test."""
    pass
|
normal
|
{
"blob_id": "445e91edbeb88a3e300761342b28369fd9833fbb",
"index": 5727,
"step-1": "<mask token>\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-2": "<mask token>\n\n\ndef noop_decorator(func):\n return func\n\n\n<mask token>\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-3": "<mask token>\n\n\ndef noop_decorator(func):\n return func\n\n\nIPython.core.magic.register_line_cell_magic = noop_decorator\nIPython.core.magic.register_line_magic = noop_decorator\nIPython.core.magic.register_cell_magic = noop_decorator\nIPython.core.display.HTML = lambda x: x\nIPython.core.display.JSON = lambda x: x\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport unittest\nimport IPython.core.display\nimport IPython.core.magic\nimport datalab.utils.commands\n\n\ndef noop_decorator(func):\n return func\n\n\nIPython.core.magic.register_line_cell_magic = noop_decorator\nIPython.core.magic.register_line_magic = noop_decorator\nIPython.core.magic.register_cell_magic = noop_decorator\nIPython.core.display.HTML = lambda x: x\nIPython.core.display.JSON = lambda x: x\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-5": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing permissions and limitations under\n# the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport unittest\n\n# import Python so we can mock the parts we need to here.\nimport IPython.core.display\nimport IPython.core.magic\n\nimport datalab.utils.commands\n\n\ndef noop_decorator(func):\n return func\n\n\nIPython.core.magic.register_line_cell_magic = noop_decorator\nIPython.core.magic.register_line_magic = noop_decorator\nIPython.core.magic.register_cell_magic = noop_decorator\nIPython.core.display.HTML = lambda x: x\nIPython.core.display.JSON = lambda x: x\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA', 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo', 'data': t, 'fields': None},\n '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find('\\'geo\\'') > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n # TODO(gram): complete this test\n pass\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from math import *
from numpy import *
from random import *
import numpy as np
import matplotlib.pyplot as plt
from colorama import Fore, Back, Style
from gridworld import q_to_arrow
# Dimensions of the cliff-walking grid (rows x columns).
N_ROWS = 6
N_COLUMNS = 10
class State(object):
    """One cell of the cliff-walking grid.

    Holds the cell's coordinates, its cliff/goal flags, and one Q-value
    per action (indexed north, east, south, west).
    """

    def __init__(self, i, j, is_cliff=False, is_goal=False):
        self.i = i
        self.j = j
        self.is_cliff = is_cliff
        self.is_goal = is_goal
        # One Q-value per action: north, east, south, west.
        self.q_values = np.zeros(4)

    def __str__(self):
        return '({}, {})'.format(self.i, self.j)

    def is_terminal(self):
        """A state is terminal when it is the goal or part of the cliff."""
        return self.is_goal or self.is_cliff

    def get_max_q_index(self):
        """Index of the best action; ties are broken uniformly at random."""
        ties = np.argwhere(self.q_values == np.max(self.q_values))
        if len(ties) > 1:
            return ties[randint(0, len(ties) - 1)][0]
        return np.argmax(self.q_values)

    def get_max_q_value(self):
        """Largest Q-value currently stored for this state."""
        return np.max(self.q_values)
def initialize_states():
    """Build the N_ROWS x N_COLUMNS grid of State objects.

    The bottom row holds the cliff between the start (bottom-left) and
    the goal cell in the bottom-right corner.
    """
    grid = [[State(row, col) for col in range(N_COLUMNS)] for row in range(N_ROWS)]
    bottom_row = grid[-1]
    for cell in bottom_row[1:N_COLUMNS - 1]:
        cell.is_cliff = True
    bottom_row[-1].is_goal = True
    return grid
# The reward function defines what reward I get for transitioning between the first and second state
def reward(s_1, s_2):
    """Reward for the transition ``s_1`` -> ``s_2``.

    Moves out of a terminal state yield 0; stepping onto the goal yields
    +10, falling off the cliff -100, and any ordinary move costs -1.
    """
    if s_1.is_goal or s_1.is_cliff:
        return 0
    if s_2.is_goal:
        return 10
    if s_2.is_cliff:
        return -100
    return -1
""" the transition function takes state and action and results in a new state, depending on their attributes. The method takes the whole state-space as an argument (since the transition depends on the attributes of the states in the state-space), which could for example be the "states" matrix from above, the current state s from the state-space (with its attributes), and the current action, which takes the form of a "difference vector. For example, dx = 0, dy = 1 means: Move to the south. dx = -1, dy = 0 means: Move to the left"""
def transition(stsp, s, di, dj):
    """State reached by taking move (di, dj) from state ``s``.

    Terminal states absorb (the agent stays put), and moves that would
    leave the grid also leave the agent in place; otherwise the
    neighbouring state from the state space ``stsp`` is returned.
    """
    if s.is_cliff or s.is_goal:
        return s
    new_i, new_j = s.i + di, s.j + dj
    if 0 <= new_i < N_ROWS and 0 <= new_j < N_COLUMNS:
        return stsp[new_i][new_j]
    return s
gamma = 1  # discount factor (undiscounted episodic task)
learning_rate = 0.01  # TD step size used by both sarsa() and q_learning()
def action_to_diff_vector(action):
    """Map an action index (0=N, 1=E, 2=S, 3=W) to a (di, dj) grid offset.

    Unknown action indices yield None, mirroring the original fallthrough.
    """
    offsets = {
        0: (-1, 0),  # north
        1: (0, 1),   # east
        2: (1, 0),   # south
        3: (0, -1),  # west
    }
    return offsets.get(action)
def action_to_verbose(action):
    """Human-readable name for an action index; None if unrecognised."""
    names = {0: 'NORTH', 1: 'EAST', 2: 'SOUTH', 3: 'WEST'}
    return names.get(action)
def sarsa(state, next_state, action, next_state_action):
    """One SARSA (on-policy TD) update for Q(state, action).

    The target is r + gamma * Q(next_state, next_state_action), i.e. it
    uses the action the policy actually chose for the successor state.

    Args:
        state: the State being updated.
        next_state: the State reached by taking ``action``.
        action: index of the action taken in ``state``.
        next_state_action: index of the action chosen in ``next_state``.

    Returns:
        (r, new_q): the transition reward and the updated Q-value for
        ``action`` in ``state``; the caller stores new_q back.
    """
    r = reward(state, next_state)  # compute once instead of twice
    td_target = r + gamma * next_state.q_values[next_state_action]
    new_q = state.q_values[action] + learning_rate * (td_target - state.q_values[action])
    return r, new_q
def q_learning(state, next_state, action, next_state_action):
    """One Q-learning (off-policy TD) update for Q(state, action).

    The target uses max_a Q(next_state, a); ``next_state_action`` is
    ignored and is kept only for signature compatibility with ``sarsa``.

    Returns:
        (r, new_q): the transition reward and the updated Q-value for
        ``action`` in ``state``; the caller stores new_q back.
    """
    r = reward(state, next_state)  # compute once instead of twice
    td_target = r + gamma * next_state.get_max_q_value()
    new_q = state.q_values[action] + learning_rate * (td_target - state.q_values[action])
    return r, new_q
N_STEPS = 10000  # number of training episodes per run
METHOD = 'BOTH'  # which algorithm(s) to run: 'Q_LEARNING', 'SARSA' or 'BOTH'
EPSILONS = [0.05, 0.1, 0.25]  # exploration rates to compare
def run_code(use_q_learning=False, _epsilon=0.01):
    """Run N_STEPS training episodes of SARSA or Q-learning on the grid.

    Args:
        use_q_learning: if True use the off-policy Q-learning update,
            otherwise the on-policy SARSA update.
        _epsilon: initial epsilon for the epsilon-greedy policy.

    Returns:
        (mistakes_array, states, episode_rewards): per-episode distance
        from the reference value table (see check_accuracy), the trained
        state grid, and the cumulative reward curve.
    """
    states = initialize_states()
    decay = 1  # epsilon decay multiplier per episode (1 == no decay)
    min_epsilon = 0.00001
    epsilon = _epsilon
    episode_rewards = []
    mistakes_array = [] # array which tracks error from convergence on each step
    for i in range(N_STEPS):
        # every episode starts from the fixed bottom-left corner of the grid
        current_state = states[N_ROWS-1][0]
        # iterate until reaching a terminal state
        epsilon = max(min_epsilon, epsilon * decay)
        episode_reward = 0
        while not current_state.is_terminal():
            # epsilon-greedy selection of the action to take now
            if random() < epsilon:
                next_action = randint(0, 3)
            else:
                next_action = current_state.get_max_q_index()
            di, dj = action_to_diff_vector(next_action)
            next_state = transition(states, current_state, di, dj)
            # epsilon-greedy choice of the successor action (used by SARSA)
            if random() < epsilon:
                next_state_action = randint(0, 3)
            else:
                next_state_action = next_state.get_max_q_index()
            # NOTE(review): the local name `reward` shadows the module-level
            # reward() function for the rest of this function's scope.
            if use_q_learning:
                reward, current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action)
            else:
                reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action)
            # print(current_state, next_state, action_to_verbose(next_action), di, dj)
            episode_reward += reward
            current_state = next_state
        # episode_rewards stores the running (cumulative) sum of rewards
        if len(episode_rewards):
            episode_rewards.append(episode_rewards[-1] + episode_reward)
        else:
            episode_rewards.append(episode_reward)
        '''
        if (i % 100 == 0):
            print(i)
        '''
        mistakes_array.append(check_accuracy(states))
    return np.array(mistakes_array), states, episode_rewards
def check_accuracy(states):
    """Total absolute distance between the learned state values and the
    reference optimal value table for this grid.

    The learned value of a state is taken to be its best Q-value.
    """
    optimal_values = np.array([
        [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6],
        [-2, -1, 0, 1, 2, 3, 4, 5, 6, 7],
        [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8],
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ])
    return sum(
        abs(optimal_values[row][col] - max(states[row][col].q_values))
        for row in range(N_ROWS)
        for col in range(N_COLUMNS)
    )
def plot_errors(mistakes_sarsa, mistakes_q_learning):
    """Plot the convergence-error curves for all runs and save a PNG.

    Args:
        mistakes_sarsa: list of (epsilon, error_array) tuples for SARSA.
        mistakes_q_learning: same, for Q-learning.
    """
    plt.gca().invert_yaxis()
    legend = []
    for mistake_sarsa in mistakes_sarsa:
        plt.plot(mistake_sarsa[1])
        legend.append(r'SARSA $\epsilon={}$'.format(mistake_sarsa[0]))
    for mistake_q_learning in mistakes_q_learning:
        plt.plot(mistake_q_learning[1])
        legend.append(r'Q-learning $\epsilon={}$'.format(mistake_q_learning[0]))
    # NOTE(review): grid(which='y') looks like it was meant to be
    # grid(axis='y') — `which` selects major/minor ticks; confirm intent.
    plt.grid(which='y')
    plt.legend(legend)
    plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))
    # plt.show()
def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):
    """Render a heatmap of each state's best Q-value into one subplot slot.

    Args:
        states: trained grid of State objects.
        method: label for the subplot title ('SARSA' or 'Q_LEARNING').
        epsilon: exploration rate shown in the subplot title.
        PLOTS: index of the subplot slot to draw into (0-5 on a 3x2 grid).
        fig, ax: the shared matplotlib figure and its 3x2 axes array.
    """
    final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in range(N_ROWS)])
    # Slots 0-2 fill column 0 top-to-bottom; slots 3-5 fill column 1.
    if PLOTS > 2:
        ax = ax[PLOTS % 3, 1]
    else:
        ax = ax[PLOTS, 0]
    ax.imshow(final_grid, aspect='auto', cmap='coolwarm')
    # fig, ax = plt.subplots()
    ax.set_xticks(np.arange(N_COLUMNS))
    ax.set_yticks(np.arange(N_ROWS))
    ax.set_xticklabels([i for i in range(N_COLUMNS)])
    ax.set_yticklabels([i for i in range(N_ROWS)])
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    for i in range(N_ROWS):
        for j in range(N_COLUMNS):
            text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)),
                           ha="center", va="center", color="w")
    fig.tight_layout()
    ax.set_title("{}; $\epsilon={}$".format(method, epsilon))
    # NOTE(review): this loop builds str_ but never uses it — dead code.
    for i in range(N_ROWS):
        str_ = ""
        for j in range(N_COLUMNS):
            str_ += str(int(final_grid[i][j])) + ", "
    # NOTE(review): rebinds the local parameter only; the caller keeps its
    # own PLOTS counter, so this line has no external effect.
    PLOTS += 1
    # plt.savefig('CLIFF_WALKING: {}-{}-{}.png'.format(N_STEPS, epsilon, method))
    # plt.show()
def display_optimal_policy(states, method, epsilon):
    """Print the greedy policy as an ASCII grid: an arrow per ordinary
    cell, a green cell for the goal and red cells for the cliff.
    """
    print("{}; ε = {}".format(method, epsilon))
    print('-' * 60)
    for row in states:
        print('|', end='')
        for cell in row:
            if cell.is_goal:
                print(Back.GREEN + ' ', end='')
                print(Style.RESET_ALL + ' | ', end='')
            elif cell.is_cliff:
                print(Back.RED + ' ', end='')
                print(Style.RESET_ALL + ' | ', end='')
            else:
                print(' {} | '.format(q_to_arrow(cell.get_max_q_index())), end='')
        # Terminate the row, then draw the horizontal rule beneath it.
        print('')
        print('-' * 60)
# ---------------------------------------------------------------------------
# Driver: run every requested method/epsilon combination, plot the results.
# ---------------------------------------------------------------------------
if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:
    # Fix: corrected the "invalidt" typo in the error message.
    print('invalid method. must be Q_LEARNING or SARSA or both')
    import sys; sys.exit()

mistakes_q_learning = []
mistakes_sarsa = []
PLOTS = 0
fig, axes = plt.subplots(3, 2)
rewards = []
# Run Q-learning for every exploration rate and record its results.
for epsilon in EPSILONS:
    if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':
        _mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon)
        plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes)
        display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)
        mistakes_q_learning.append((epsilon, _mistakes_q_learning))
        rewards.append(('Q_LEARNING', epsilon, episode_rewards))
        PLOTS += 1

# Run SARSA for every exploration rate and record its results.
for epsilon in EPSILONS:
    if METHOD == 'SARSA' or METHOD == 'BOTH':
        _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon)
        plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes)
        display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)
        mistakes_sarsa.append((epsilon, _mistakes_sarsa))
        rewards.append(('SARSA', epsilon, episode_rewards))
        PLOTS += 1

plt.savefig('all_runs.png')
plt.show()

# Plot the cumulative episode rewards of every run on a single figure.
for reward in rewards:
    plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))
plt.xlabel('Episodes')
plt.ylabel('Sum of rewards during episode')
plt.legend()
# Fix: savefig must run before show -- show() leaves the current figure
# empty afterwards, so the original show-then-savefig order wrote a blank
# 'episode_rewards.png'.
plt.savefig('episode_rewards.png')
plt.show()

plot_errors(mistakes_sarsa, mistakes_q_learning)
|
normal
|
{
"blob_id": "cb2e800cc2802031847b170a462778e5c0b3c6f9",
"index": 40,
"step-1": "<mask token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<mask token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<mask token>\n\n\ndef 
run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\n<mask token>\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<mask token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<mask token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = 
next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<mask token>\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\n<mask token>\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n 
plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\n<mask token>\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\n<mask token>\n",
"step-3": "<mask token>\nN_ROWS = 6\nN_COLUMNS = 10\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<mask token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\ngamma = 1\nlearning_rate = 0.01\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, 
action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\nN_STEPS = 10000\nMETHOD = 'BOTH'\nEPSILONS = [0.05, 0.1, 0.25]\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += 
abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n final_grid = np.array([[max(states[i][j].q_values) for j in range(\n N_COLUMNS)] for i in range(N_ROWS)])\n if PLOTS > 2:\n ax = ax[PLOTS % 3, 1]\n else:\n ax = ax[PLOTS, 0]\n ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n ax.set_xticks(np.arange(N_COLUMNS))\n ax.set_yticks(np.arange(N_ROWS))\n ax.set_xticklabels([i for i in range(N_COLUMNS)])\n ax.set_yticklabels([i for i in range(N_ROWS)])\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=\n 'anchor')\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)\n ), ha='center', va='center', color='w')\n fig.tight_layout()\n ax.set_title('{}; $\\\\epsilon={}$'.format(method, epsilon))\n for i in range(N_ROWS):\n str_ = ''\n for j in range(N_COLUMNS):\n str_ += str(int(final_grid[i][j])) + ', '\n PLOTS += 1\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | 
'.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\nif METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:\n print('invalidt method. must be Q_LEARNING or SARSA or both')\n import sys\n sys.exit()\nmistakes_q_learning = []\nmistakes_sarsa = []\nPLOTS = 0\nfig, axes = plt.subplots(3, 2)\nrewards = []\nfor epsilon in EPSILONS:\n if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':\n _mistakes_q_learning, end_states_q_learning, episode_rewards = (\n run_code(use_q_learning=True, _epsilon=epsilon))\n plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING',\n epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)\n mistakes_q_learning.append((epsilon, _mistakes_q_learning))\n rewards.append(('Q_LEARNING', epsilon, episode_rewards))\n PLOTS += 1\nfor epsilon in EPSILONS:\n if METHOD == 'SARSA' or METHOD == 'BOTH':\n _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(\n use_q_learning=False, _epsilon=epsilon)\n plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS,\n fig, axes)\n display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)\n mistakes_sarsa.append((epsilon, _mistakes_sarsa))\n rewards.append(('SARSA', epsilon, episode_rewards))\n PLOTS += 1\nplt.savefig('all_runs.png')\nplt.show()\nfor reward in rewards:\n plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))\n plt.xlabel('Episodes')\n plt.ylabel('Sum of rewards during episode')\nplt.legend()\nplt.show()\nplt.savefig('episode_rewards.png')\nplot_errors(mistakes_sarsa, mistakes_q_learning)\n",
"step-4": "from math import *\nfrom numpy import *\nfrom random import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom colorama import Fore, Back, Style\nfrom gridworld import q_to_arrow\nN_ROWS = 6\nN_COLUMNS = 10\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<mask token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\ngamma = 1\nlearning_rate = 0.01\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), 
state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\nN_STEPS = 10000\nMETHOD = 'BOTH'\nEPSILONS = [0.05, 0.1, 0.25]\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 
5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n final_grid = np.array([[max(states[i][j].q_values) for j in range(\n N_COLUMNS)] for i in range(N_ROWS)])\n if PLOTS > 2:\n ax = ax[PLOTS % 3, 1]\n else:\n ax = ax[PLOTS, 0]\n ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n ax.set_xticks(np.arange(N_COLUMNS))\n ax.set_yticks(np.arange(N_ROWS))\n ax.set_xticklabels([i for i in range(N_COLUMNS)])\n ax.set_yticklabels([i for i in range(N_ROWS)])\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=\n 'anchor')\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)\n ), ha='center', va='center', color='w')\n fig.tight_layout()\n ax.set_title('{}; $\\\\epsilon={}$'.format(method, epsilon))\n for i in range(N_ROWS):\n str_ = ''\n for j in range(N_COLUMNS):\n str_ += str(int(final_grid[i][j])) + ', '\n PLOTS += 1\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', 
end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\nif METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:\n print('invalidt method. must be Q_LEARNING or SARSA or both')\n import sys\n sys.exit()\nmistakes_q_learning = []\nmistakes_sarsa = []\nPLOTS = 0\nfig, axes = plt.subplots(3, 2)\nrewards = []\nfor epsilon in EPSILONS:\n if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':\n _mistakes_q_learning, end_states_q_learning, episode_rewards = (\n run_code(use_q_learning=True, _epsilon=epsilon))\n plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING',\n epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)\n mistakes_q_learning.append((epsilon, _mistakes_q_learning))\n rewards.append(('Q_LEARNING', epsilon, episode_rewards))\n PLOTS += 1\nfor epsilon in EPSILONS:\n if METHOD == 'SARSA' or METHOD == 'BOTH':\n _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(\n use_q_learning=False, _epsilon=epsilon)\n plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS,\n fig, axes)\n display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)\n mistakes_sarsa.append((epsilon, _mistakes_sarsa))\n rewards.append(('SARSA', epsilon, episode_rewards))\n PLOTS += 1\nplt.savefig('all_runs.png')\nplt.show()\nfor reward in rewards:\n plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))\n plt.xlabel('Episodes')\n plt.ylabel('Sum of rewards during episode')\nplt.legend()\nplt.show()\nplt.savefig('episode_rewards.png')\nplot_errors(mistakes_sarsa, mistakes_q_learning)\n",
"step-5": "from math import *\nfrom numpy import *\nfrom random import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom colorama import Fore, Back, Style\nfrom gridworld import q_to_arrow\n\n\nN_ROWS = 6\nN_COLUMNS = 10\n\nclass State(object):\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n # north, east, south, west\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n # This is the set of states, all initialised with default values\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n\n # make the cliff\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n\n states[-1][-1].is_goal = True\n return states\n\n\n# The reward function defines what reward I get for transitioning between the first and second state\ndef reward(s_1, s_2):\n if (s_1.is_goal or s_1.is_cliff):\n return 0\n elif (s_2.is_goal):\n return 10\n elif (s_2.is_cliff):\n return -100\n else:\n return -1\n\n\"\"\" the transition function takes state and action and results in a new state, depending on their attributes. The method takes the whole state-space as an argument (since the transition depends on the attributes of the states in the state-space), which could for example be the \"states\" matrix from above, the current state s from the state-space (with its attributes), and the current action, which takes the form of a \"difference vector. For example, dx = 0, dy = 1 means: Move to the south. 
dx = -1, dy = 0 means: Move to the left\"\"\"\ndef transition(stsp, s, di, dj):\n if (s.is_cliff or s.is_goal):\n return s\n elif (s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS)):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\ngamma = 1\nlearning_rate = 0.01\n\ndef action_to_diff_vector(action):\n if action == 0: # NORTH\n return -1, 0\n elif action == 1: # EAST\n return 0, 1\n elif action == 2: # SOUTH\n return 1, 0\n elif action == 3: # WEST\n return 0, -1\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action] +\\\n learning_rate * (reward(state, next_state) + gamma * next_state.q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action] +\\\n learning_rate * (reward(state, next_state) + gamma * next_state_q_value - state.q_values[action])\n\nN_STEPS = 10000\nMETHOD = 'BOTH'\nEPSILONS = [0.05, 0.1, 0.25]\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 0.00001\n epsilon = _epsilon\n\n episode_rewards = []\n mistakes_array = [] # array which tracks error from convergence on each step\n for i in range(N_STEPS):\n # select a random starting state\n current_state = states[N_ROWS-1][0]\n\n # iterate until reaching a terminal state\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n\n if random() < 
epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action)\n\n # print(current_state, next_state, action_to_verbose(next_action), di, dj)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n\n '''\n if (i % 100 == 0):\n print(i)\n '''\n mistakes_array.append(check_accuracy(states))\n\n return np.array(mistakes_array), states, episode_rewards\n\ndef check_accuracy(states):\n correct_result = np.array([\n [-3, -2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 ],\n [-2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ],\n [-1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ],\n [0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ],\n [1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],\n ])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values))\n\n return mistakes_delta\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append(r'SARSA $\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append(r'Q-learning $\\epsilon={}$'.format(mistake_q_learning[0]))\n\n plt.grid(which='y')\n plt.legend(legend)\n\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n # plt.show()\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in 
range(N_ROWS)])\n if PLOTS > 2:\n ax = ax[PLOTS % 3, 1]\n else:\n ax = ax[PLOTS, 0]\n ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n # fig, ax = plt.subplots()\n ax.set_xticks(np.arange(N_COLUMNS))\n ax.set_yticks(np.arange(N_ROWS))\n ax.set_xticklabels([i for i in range(N_COLUMNS)])\n ax.set_yticklabels([i for i in range(N_ROWS)])\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)),\n ha=\"center\", va=\"center\", color=\"w\")\n\n fig.tight_layout()\n ax.set_title(\"{}; $\\epsilon={}$\".format(method, epsilon))\n for i in range(N_ROWS):\n str_ = \"\"\n for j in range(N_COLUMNS):\n str_ += str(int(final_grid[i][j])) + \", \"\n PLOTS += 1\n # plt.savefig('CLIFF_WALKING: {}-{}-{}.png'.format(N_STEPS, epsilon, method))\n # plt.show()\n\ndef display_optimal_policy(states, method, epsilon):\n\n print(\"{}; ε = {}\".format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\nif METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:\n print('invalidt method. 
must be Q_LEARNING or SARSA or both')\n import sys; sys.exit()\n\nmistakes_q_learning = []\nmistakes_sarsa = []\nPLOTS = 0\nfig, axes = plt.subplots(3, 2)\nrewards = []\nfor epsilon in EPSILONS:\n if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':\n _mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon)\n plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)\n mistakes_q_learning.append((epsilon, _mistakes_q_learning))\n rewards.append(('Q_LEARNING', epsilon, episode_rewards))\n PLOTS += 1\n\nfor epsilon in EPSILONS:\n if METHOD == 'SARSA' or METHOD == 'BOTH':\n _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon)\n plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)\n mistakes_sarsa.append((epsilon, _mistakes_sarsa))\n rewards.append(('SARSA', epsilon, episode_rewards))\n PLOTS += 1\n\n\nplt.savefig('all_runs.png')\nplt.show()\n# for i, j in [(0, 3), (1, 4), (2, 5)]:\nfor reward in rewards:\n # plt.plot(rewards[i][2], 'o', label='{} ε = {} '.format(rewards[i][0], rewards[i][1]))\n # plt.plot(rewards[j][2], 'o', label='{} ε = {} '.format(rewards[j][0], rewards[j][1]))\n plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))\n plt.xlabel('Episodes')\n plt.ylabel('Sum of rewards during episode')\nplt.legend()\nplt.show()\nplt.savefig('episode_rewards.png')\n\nplot_errors(mistakes_sarsa, mistakes_q_learning)\n",
"step-ids": [
14,
16,
20,
21,
22
]
}
|
[
14,
16,
20,
21,
22
] |
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from prettytable import PrettyTable
from time import sleep
from customization import *
import urllib.request,json
# Configure a headless Chrome instance shared by every scrape in this module.
chrome_options=webdriver.ChromeOptions()
chrome_options.add_argument("--headless")  # run Chrome without opening a visible window
chrome_options.add_argument("--incognito")  # fresh session: no cached cookies or logins
chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])  # silence DevTools console noise on Windows
chromeBrowser = webdriver.Chrome(chromePath, options=chrome_options)  # chromePath is star-imported from customization
def bio_shortener(bio):
    """Hard-wrap *bio* into 30-character lines, each terminated by a newline.

    Used to keep biography text from blowing out the PrettyTable column
    width.  Fixes two quirks of the previous loop-based version: a stray
    blank line when len(bio) was an exact multiple of 30, and a lone
    newline being returned for an empty string (now returns '').
    """
    # Slice the text into fixed 30-character chunks, then terminate each.
    chunks = [bio[i:i + 30] for i in range(0, len(bio), 30)]
    return ''.join(chunk + '\n' for chunk in chunks)
def nb_checker(nb):
    """Return ``nb.text``, or None when *nb* is the sentinel string 'None'.

    The original else-branch was the bare expression ``nb`` — a no-op
    statement — so the function fell through and returned None
    implicitly.  This keeps that behavior but makes the return explicit.
    """
    if nb != 'None':
        return nb.text
    return None
def quick_search(username):
    """Scrape public profile data for *username* from Instagram,
    Stack Overflow and GitHub and print a side-by-side comparison table.

    Relies on the module-level headless ``chromeBrowser`` and on the
    ``i_email`` / ``i_password`` credentials (presumably star-imported
    from ``customization`` — confirm).  Blocks on ``input()`` at the end
    so the table stays on screen.  In the table, 'X' means "not exposed
    by that service"; scraped fields default to the string 'None'.
    """
    print("Collecting username information...")
    # ---- Instagram: log in, dismiss the post-login prompt, scrape ----
    insta_url="https://instagram.com/"+username+"/"
    chromeBrowser.get(insta_url)
    WebDriverWait(chromeBrowser,5).until(lambda d: d.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input'))
    chromeBrowser.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input').send_keys(i_email)
    chromeBrowser.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input').send_keys(i_password)
    chromeBrowser.find_element_by_xpath('//*[@id="loginForm"]/div[1]/div[3]/button').click()
    WebDriverWait(chromeBrowser,10).until(lambda d: d.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/div/button'))
    chromeBrowser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/div/button').click()
    # Bare except: any scraping failure is treated as "field not present".
    try:
        instaName=chromeBrowser.find_element_by_class_name('rhpdm').text
    except:
        instaName="None"
    try:
        instaBio=chromeBrowser.find_element_by_xpath('/html/body/div[1]/section/main/div/header/section/div[2]/span').text
    except:
        instaBio="None"
    # NOTE(review): the NameError branch looks unreachable — a failed
    # find_element raises a selenium exception, not NameError. Confirm intent.
    try:
        instaPersonalSite=chromeBrowser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/header/section/div[2]/a[1]').text
    except NameError:
        instaPersonalSite=chromeBrowser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/header/section/div[2]/a').text
    except:
        instaPersonalSite='None'
    # ---- Stack Overflow: search the user directory for the name ----
    sleep(1)
    chromeBrowser.get('https://stackoverflow.com/users/')
    WebDriverWait(chromeBrowser, 10).until(lambda d: d.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))
    chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)
    sleep(1)
    # `placeholder` is a throwaway: it is assigned but never read; the
    # try/except bodies just need some statement.
    try:
        Name=chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')
        if str(Name.text.lower())==username.lower():
            placeholder=True
    except:
        placeholder=False
    try:
        sofLocation=chromeBrowser.find_element_by_class_name('user-location').text
    except:
        sofLocation='None'
    try:
        sofUser_tag = chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text
    except:
        sofUser_tag='None'
    # Open the first matching profile so the biography panel is loaded.
    try:
        chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a').click()
        WebDriverWait(chromeBrowser, 10).until(lambda d: d.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'))
    except:
        placeholder=True
    try:
        sofBio=chromeBrowser.find_element_by_xpath('//*[@id="user-card"]/div/div[2]/div/div[1]/div/div[2]').text
    except:
        sofBio='None'
    # ---- GitHub: public REST API, no authentication ----
    githubUrl = "https://api.github.com/users/" + username
    # NOTE(review): if this request fails, the git* variables are never
    # assigned and the pt.add_row() calls below raise NameError.
    try:
        with urllib.request.urlopen(githubUrl) as url:
            githubData = json.loads(url.read().decode())
            gitName=str(githubData['name'])
            gitCompany=str(githubData['company'])
            gitBlog=str(githubData['blog'])
            gitEmail=str(githubData['email'])
            gitBio=str(githubData['bio'])
            gitTwitter=str(githubData['twitter_username'])
            gitLocation=str(githubData['location'])
    except:
        placeholder=True
    # ---- Render the comparison table ----
    pt = PrettyTable(
        ['               ', '     Instagram     ', '     StackOverflow     ', '     GitHub     '])
    pt.add_row(["Name", instaName,"X", gitName])
    pt.add_row(["Email", "X","X",gitEmail])
    pt.add_row(["Company","X","X", gitCompany])
    pt.add_row(["Personal Site", instaPersonalSite,"X", gitBlog])
    pt.add_row(["Location", "X", sofLocation, gitLocation])
    pt.add_row(["Twitter", "X", "X", gitTwitter])
    pt.add_row(["Tags", "X", sofUser_tag, "X"])
    pt.add_row(["Biography", bio_shortener(instaBio), bio_shortener(sofBio), bio_shortener(gitBio)])
    print(pt)
    input()  # keep the console window open until the user presses Enter
|
normal
|
{
"blob_id": "e1c902ef340a0a5538b41a03cc93686e0dd31672",
"index": 8788,
"step-1": "<mask token>\n\n\ndef bio_shortener(bio):\n lines = []\n x = len(bio) / 30\n y = 0\n Status = True\n while Status:\n y = y + 1\n lines.append(bio[0:30])\n lines.append('\\n')\n bio = bio[30:]\n if y == int(x) + 1:\n Status = False\n A = ''.join(lines)\n return A\n\n\ndef nb_checker(nb):\n if nb != 'None':\n return nb.text\n else:\n nb\n\n\ndef quick_search(username):\n print('Collecting username information...')\n insta_url = 'https://instagram.com/' + username + '/'\n chromeBrowser.get(insta_url)\n WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath\n ('//*[@id=\"loginForm\"]/div/div[1]/div/label/input'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(i_email)\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(i_password\n )\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div[1]/div[3]/button').click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\n try:\n instaName = chromeBrowser.find_element_by_class_name('rhpdm').text\n except:\n instaName = 'None'\n try:\n instaBio = chromeBrowser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/header/section/div[2]/span'\n ).text\n except:\n instaBio = 'None'\n try:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a[1]'\n ).text\n except NameError:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a'\n ).text\n except:\n instaPersonalSite = 'None'\n sleep(1)\n chromeBrowser.get('https://stackoverflow.com/users/')\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n 
'/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)\n sleep(1)\n try:\n Name = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')\n if str(Name.text.lower()) == username.lower():\n placeholder = True\n except:\n placeholder = False\n try:\n sofLocation = chromeBrowser.find_element_by_class_name('user-location'\n ).text\n except:\n sofLocation = 'None'\n try:\n sofUser_tag = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text\n except:\n sofUser_tag = 'None'\n try:\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a'\n ).click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'\n ))\n except:\n placeholder = True\n try:\n sofBio = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"user-card\"]/div/div[2]/div/div[1]/div/div[2]').text\n except:\n sofBio = 'None'\n githubUrl = 'https://api.github.com/users/' + username\n try:\n with urllib.request.urlopen(githubUrl) as url:\n githubData = json.loads(url.read().decode())\n gitName = str(githubData['name'])\n gitCompany = str(githubData['company'])\n gitBlog = str(githubData['blog'])\n gitEmail = str(githubData['email'])\n gitBio = str(githubData['bio'])\n gitTwitter = str(githubData['twitter_username'])\n gitLocation = str(githubData['location'])\n except:\n placeholder = True\n pt = PrettyTable([' ', ' Instagram ',\n ' StackOverflow ', ' GitHub '])\n pt.add_row(['Name', instaName, 'X', gitName])\n pt.add_row(['Email', 'X', 'X', gitEmail])\n pt.add_row(['Company', 'X', 'X', gitCompany])\n pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog])\n pt.add_row(['Location', 'X', sofLocation, gitLocation])\n pt.add_row(['Twitter', 'X', 'X', 
gitTwitter])\n pt.add_row(['Tags', 'X', sofUser_tag, 'X'])\n pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio),\n bio_shortener(gitBio)])\n print(pt)\n input()\n",
"step-2": "<mask token>\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--incognito')\nchrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])\n<mask token>\n\n\ndef bio_shortener(bio):\n lines = []\n x = len(bio) / 30\n y = 0\n Status = True\n while Status:\n y = y + 1\n lines.append(bio[0:30])\n lines.append('\\n')\n bio = bio[30:]\n if y == int(x) + 1:\n Status = False\n A = ''.join(lines)\n return A\n\n\ndef nb_checker(nb):\n if nb != 'None':\n return nb.text\n else:\n nb\n\n\ndef quick_search(username):\n print('Collecting username information...')\n insta_url = 'https://instagram.com/' + username + '/'\n chromeBrowser.get(insta_url)\n WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath\n ('//*[@id=\"loginForm\"]/div/div[1]/div/label/input'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(i_email)\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(i_password\n )\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div[1]/div[3]/button').click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\n try:\n instaName = chromeBrowser.find_element_by_class_name('rhpdm').text\n except:\n instaName = 'None'\n try:\n instaBio = chromeBrowser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/header/section/div[2]/span'\n ).text\n except:\n instaBio = 'None'\n try:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a[1]'\n ).text\n except NameError:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a'\n ).text\n except:\n 
instaPersonalSite = 'None'\n sleep(1)\n chromeBrowser.get('https://stackoverflow.com/users/')\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)\n sleep(1)\n try:\n Name = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')\n if str(Name.text.lower()) == username.lower():\n placeholder = True\n except:\n placeholder = False\n try:\n sofLocation = chromeBrowser.find_element_by_class_name('user-location'\n ).text\n except:\n sofLocation = 'None'\n try:\n sofUser_tag = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text\n except:\n sofUser_tag = 'None'\n try:\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a'\n ).click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'\n ))\n except:\n placeholder = True\n try:\n sofBio = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"user-card\"]/div/div[2]/div/div[1]/div/div[2]').text\n except:\n sofBio = 'None'\n githubUrl = 'https://api.github.com/users/' + username\n try:\n with urllib.request.urlopen(githubUrl) as url:\n githubData = json.loads(url.read().decode())\n gitName = str(githubData['name'])\n gitCompany = str(githubData['company'])\n gitBlog = str(githubData['blog'])\n gitEmail = str(githubData['email'])\n gitBio = str(githubData['bio'])\n gitTwitter = str(githubData['twitter_username'])\n gitLocation = str(githubData['location'])\n except:\n placeholder = True\n pt = PrettyTable([' ', ' Instagram ',\n ' StackOverflow ', ' GitHub '])\n pt.add_row(['Name', instaName, 'X', gitName])\n pt.add_row(['Email', 'X', 'X', gitEmail])\n pt.add_row(['Company', 'X', 'X', 
gitCompany])\n pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog])\n pt.add_row(['Location', 'X', sofLocation, gitLocation])\n pt.add_row(['Twitter', 'X', 'X', gitTwitter])\n pt.add_row(['Tags', 'X', sofUser_tag, 'X'])\n pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio),\n bio_shortener(gitBio)])\n print(pt)\n input()\n",
"step-3": "<mask token>\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--incognito')\nchrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])\nchromeBrowser = webdriver.Chrome(chromePath, options=chrome_options)\n\n\ndef bio_shortener(bio):\n lines = []\n x = len(bio) / 30\n y = 0\n Status = True\n while Status:\n y = y + 1\n lines.append(bio[0:30])\n lines.append('\\n')\n bio = bio[30:]\n if y == int(x) + 1:\n Status = False\n A = ''.join(lines)\n return A\n\n\ndef nb_checker(nb):\n if nb != 'None':\n return nb.text\n else:\n nb\n\n\ndef quick_search(username):\n print('Collecting username information...')\n insta_url = 'https://instagram.com/' + username + '/'\n chromeBrowser.get(insta_url)\n WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath\n ('//*[@id=\"loginForm\"]/div/div[1]/div/label/input'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(i_email)\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(i_password\n )\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div[1]/div[3]/button').click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\n try:\n instaName = chromeBrowser.find_element_by_class_name('rhpdm').text\n except:\n instaName = 'None'\n try:\n instaBio = chromeBrowser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/header/section/div[2]/span'\n ).text\n except:\n instaBio = 'None'\n try:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a[1]'\n ).text\n except NameError:\n instaPersonalSite = 
chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a'\n ).text\n except:\n instaPersonalSite = 'None'\n sleep(1)\n chromeBrowser.get('https://stackoverflow.com/users/')\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)\n sleep(1)\n try:\n Name = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')\n if str(Name.text.lower()) == username.lower():\n placeholder = True\n except:\n placeholder = False\n try:\n sofLocation = chromeBrowser.find_element_by_class_name('user-location'\n ).text\n except:\n sofLocation = 'None'\n try:\n sofUser_tag = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text\n except:\n sofUser_tag = 'None'\n try:\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a'\n ).click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'\n ))\n except:\n placeholder = True\n try:\n sofBio = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"user-card\"]/div/div[2]/div/div[1]/div/div[2]').text\n except:\n sofBio = 'None'\n githubUrl = 'https://api.github.com/users/' + username\n try:\n with urllib.request.urlopen(githubUrl) as url:\n githubData = json.loads(url.read().decode())\n gitName = str(githubData['name'])\n gitCompany = str(githubData['company'])\n gitBlog = str(githubData['blog'])\n gitEmail = str(githubData['email'])\n gitBio = str(githubData['bio'])\n gitTwitter = str(githubData['twitter_username'])\n gitLocation = str(githubData['location'])\n except:\n placeholder = True\n pt = PrettyTable([' ', ' Instagram ',\n ' StackOverflow ', ' GitHub '])\n 
pt.add_row(['Name', instaName, 'X', gitName])\n pt.add_row(['Email', 'X', 'X', gitEmail])\n pt.add_row(['Company', 'X', 'X', gitCompany])\n pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog])\n pt.add_row(['Location', 'X', sofLocation, gitLocation])\n pt.add_row(['Twitter', 'X', 'X', gitTwitter])\n pt.add_row(['Tags', 'X', sofUser_tag, 'X'])\n pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio),\n bio_shortener(gitBio)])\n print(pt)\n input()\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom prettytable import PrettyTable\nfrom time import sleep\nfrom customization import *\nimport urllib.request, json\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--incognito')\nchrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])\nchromeBrowser = webdriver.Chrome(chromePath, options=chrome_options)\n\n\ndef bio_shortener(bio):\n lines = []\n x = len(bio) / 30\n y = 0\n Status = True\n while Status:\n y = y + 1\n lines.append(bio[0:30])\n lines.append('\\n')\n bio = bio[30:]\n if y == int(x) + 1:\n Status = False\n A = ''.join(lines)\n return A\n\n\ndef nb_checker(nb):\n if nb != 'None':\n return nb.text\n else:\n nb\n\n\ndef quick_search(username):\n print('Collecting username information...')\n insta_url = 'https://instagram.com/' + username + '/'\n chromeBrowser.get(insta_url)\n WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath\n ('//*[@id=\"loginForm\"]/div/div[1]/div/label/input'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(i_email)\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(i_password\n )\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div[1]/div[3]/button').click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\n try:\n instaName = chromeBrowser.find_element_by_class_name('rhpdm').text\n except:\n instaName = 'None'\n try:\n instaBio = chromeBrowser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/header/section/div[2]/span'\n ).text\n except:\n instaBio = 'None'\n try:\n instaPersonalSite 
= chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a[1]'\n ).text\n except NameError:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a'\n ).text\n except:\n instaPersonalSite = 'None'\n sleep(1)\n chromeBrowser.get('https://stackoverflow.com/users/')\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)\n sleep(1)\n try:\n Name = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')\n if str(Name.text.lower()) == username.lower():\n placeholder = True\n except:\n placeholder = False\n try:\n sofLocation = chromeBrowser.find_element_by_class_name('user-location'\n ).text\n except:\n sofLocation = 'None'\n try:\n sofUser_tag = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text\n except:\n sofUser_tag = 'None'\n try:\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a'\n ).click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'\n ))\n except:\n placeholder = True\n try:\n sofBio = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"user-card\"]/div/div[2]/div/div[1]/div/div[2]').text\n except:\n sofBio = 'None'\n githubUrl = 'https://api.github.com/users/' + username\n try:\n with urllib.request.urlopen(githubUrl) as url:\n githubData = json.loads(url.read().decode())\n gitName = str(githubData['name'])\n gitCompany = str(githubData['company'])\n gitBlog = str(githubData['blog'])\n gitEmail = str(githubData['email'])\n gitBio = str(githubData['bio'])\n gitTwitter = 
str(githubData['twitter_username'])\n gitLocation = str(githubData['location'])\n except:\n placeholder = True\n pt = PrettyTable([' ', ' Instagram ',\n ' StackOverflow ', ' GitHub '])\n pt.add_row(['Name', instaName, 'X', gitName])\n pt.add_row(['Email', 'X', 'X', gitEmail])\n pt.add_row(['Company', 'X', 'X', gitCompany])\n pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog])\n pt.add_row(['Location', 'X', sofLocation, gitLocation])\n pt.add_row(['Twitter', 'X', 'X', gitTwitter])\n pt.add_row(['Tags', 'X', sofUser_tag, 'X'])\n pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio),\n bio_shortener(gitBio)])\n print(pt)\n input()\n",
"step-5": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom prettytable import PrettyTable\nfrom time import sleep\nfrom customization import *\n\nimport urllib.request,json\nchrome_options=webdriver.ChromeOptions()\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--incognito\")\nchrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])\nchromeBrowser = webdriver.Chrome(chromePath, options=chrome_options)\ndef bio_shortener(bio):\n lines=[]\n x=len(bio)/30\n y=0\n Status=True\n while Status:\n y=y+1\n lines.append(bio[0:30])\n lines.append(\"\\n\")\n bio=bio[30:]\n if y==int(x)+1:\n Status=False\n\n A=''.join(lines)\n return A\n\ndef nb_checker(nb):\n if nb!='None':\n return nb.text\n else:\n nb\n\n\ndef quick_search(username):\n print(\"Collecting username information...\")\n insta_url=\"https://instagram.com/\"+username+\"/\"\n chromeBrowser.get(insta_url)\n WebDriverWait(chromeBrowser,5).until(lambda d: d.find_element_by_xpath('//*[@id=\"loginForm\"]/div/div[1]/div/label/input'))\n chromeBrowser.find_element_by_xpath('//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(i_email)\n chromeBrowser.find_element_by_xpath('//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(i_password)\n chromeBrowser.find_element_by_xpath('//*[@id=\"loginForm\"]/div[1]/div[3]/button').click()\n WebDriverWait(chromeBrowser,10).until(lambda d: d.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div/div/button'))\n chromeBrowser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\n try:\n instaName=chromeBrowser.find_element_by_class_name('rhpdm').text\n except:\n instaName=\"None\"\n try:\n instaBio=chromeBrowser.find_element_by_xpath('/html/body/div[1]/section/main/div/header/section/div[2]/span').text\n except:\n instaBio=\"None\"\n try:\n 
instaPersonalSite=chromeBrowser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a[1]').text\n except NameError:\n instaPersonalSite=chromeBrowser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a').text\n except:\n instaPersonalSite='None'\n\n sleep(1)\n chromeBrowser.get('https://stackoverflow.com/users/')\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))\n chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)\n sleep(1)\n try:\n Name=chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')\n if str(Name.text.lower())==username.lower():\n placeholder=True\n except:\n placeholder=False\n try:\n sofLocation=chromeBrowser.find_element_by_class_name('user-location').text\n except:\n sofLocation='None'\n try:\n sofUser_tag = chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text\n except:\n sofUser_tag='None'\n try:\n chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a').click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'))\n except:\n placeholder=True\n try:\n sofBio=chromeBrowser.find_element_by_xpath('//*[@id=\"user-card\"]/div/div[2]/div/div[1]/div/div[2]').text\n except:\n sofBio='None'\n\n githubUrl = \"https://api.github.com/users/\" + username\n\n try:\n with urllib.request.urlopen(githubUrl) as url:\n githubData = json.loads(url.read().decode())\n gitName=str(githubData['name'])\n gitCompany=str(githubData['company'])\n gitBlog=str(githubData['blog'])\n gitEmail=str(githubData['email'])\n gitBio=str(githubData['bio'])\n gitTwitter=str(githubData['twitter_username'])\n gitLocation=str(githubData['location'])\n except:\n 
placeholder=True\n\n pt = PrettyTable(\n [' ', ' Instagram ', ' StackOverflow ', ' GitHub '])\n pt.add_row([\"Name\", instaName,\"X\", gitName])\n pt.add_row([\"Email\", \"X\",\"X\",gitEmail])\n pt.add_row([\"Company\",\"X\",\"X\", gitCompany])\n pt.add_row([\"Personal Site\", instaPersonalSite,\"X\", gitBlog])\n pt.add_row([\"Location\", \"X\", sofLocation, gitLocation])\n pt.add_row([\"Twitter\", \"X\", \"X\", gitTwitter])\n pt.add_row([\"Tags\", \"X\", sofUser_tag, \"X\"])\n pt.add_row([\"Biography\", bio_shortener(instaBio), bio_shortener(sofBio), bio_shortener(gitBio)])\n print(pt)\n input()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Read N, then N "position height" integer pairs, ordered by position.
count = int(input())
pillars = sorted([list(map(int, input().split())) for _ in range(count)])

area = 0

# Left-to-right sweep: whenever the height steps up, add the rectangle
# under the previous height between the previous and current positions.
left_x, left_h = pillars[0]
for x, h in pillars:
    if left_h < h:
        area += (x - left_x) * left_h
        left_x, left_h = x, h

# Right-to-left sweep: symmetric accumulation for the descending side.
right_x, right_h = pillars[-1]
for x, h in reversed(pillars):
    if h > right_h:
        area += (right_x - x) * right_h
        right_x, right_h = x, h

# Remaining span between where the two sweeps stopped (inclusive width).
area += (right_x - left_x + 1) * right_h
print(area)
|
normal
|
{
"blob_id": "62dab85b7ab5fdae8117827b2f56bccf99615cb7",
"index": 7341,
"step-1": "<mask token>\n",
"step-2": "<mask token>\na.sort()\n<mask token>\nfor l, h in a:\n if h0 < h:\n s += (l - l0) * h0\n l0, h0 = l, h\n<mask token>\nfor l, h in a[::-1]:\n if h > h1:\n s += (l1 - l) * h1\n l1, h1 = l, h\ns += (l1 - l0 + 1) * h1\nprint(s)\n",
"step-3": "a = [[*map(int, input().split())] for _ in range(int(input()))]\na.sort()\ns = 0\nl0, h0 = a[0]\nfor l, h in a:\n if h0 < h:\n s += (l - l0) * h0\n l0, h0 = l, h\nl1, h1 = a[-1]\nfor l, h in a[::-1]:\n if h > h1:\n s += (l1 - l) * h1\n l1, h1 = l, h\ns += (l1 - l0 + 1) * h1\nprint(s)\n",
"step-4": "a=[[*map(int,input().split())]for _ in range(int(input()))]\na.sort()\ns=0\nl0,h0=a[0]\nfor l,h in a:\n if h0<h:s+=(l-l0)*h0;l0,h0=l,h\nl1,h1=a[-1]\nfor l,h in a[::-1]:\n if h>h1:s+=(l1-l)*h1;l1,h1=l,h\ns+=(l1-l0+1)*h1\nprint(s)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
config -- config manipulator module for share
@author: shimarin
@copyright: 2014 Walbrix Corporation. All rights reserved.
@license: proprietary
'''
import json,argparse
import oscar,groonga
def parser_setup(parser):
    """Attach this module's CLI arguments to *parser* and route
    execution to run(): a base directory plus 0-2 free operations."""
    parser.add_argument('base_dir')
    parser.add_argument('operations', nargs='*')
    parser.set_defaults(func=run)
def get(base_dir, config_name = None):
    """Read configuration from the Config table.

    With *config_name* given, return that entry's decoded value (None
    when absent); otherwise return a dict mapping every key to its
    decoded value.
    """
    with oscar.context(base_dir) as context:
        with context.command("select") as command:
            command.add_argument("table", "Config")
            if config_name:
                command.add_argument("filter", "_key == \"%s\"" % command.escape(config_name))
            # Skip the two leading result-metadata entries; the rest are records.
            records = json.loads(command.execute())[0][2:]
            if config_name:
                return json.loads(records[0][2]) if records else None
            return dict((record[1], json.loads(record[2])) for record in records)
def put(base_dir, config_name, value):
    """Store *value* (JSON-encoded) under *config_name* in the Config table."""
    with oscar.context(base_dir, oscar.min_free_blocks) as context:
        record = {"_key": config_name, "value": oscar.to_json(value)}
        groonga.load(context, "Config", record)
def put_all(base_dir, configs):
    """Store every (name, value) pair from the *configs* dict in one load."""
    with oscar.context(base_dir, oscar.min_free_blocks) as context:
        records = [{"_key": name, "value": oscar.to_json(val)}
                   for name, val in configs.items()]
        groonga.load(context, "Config", records)
def show_one(base_dir, config_name):
with oscar.context(base_dir) as context:
print groonga.get(context, "Config", config_name)
def set_one(base_dir, config_name, value):
    """Store *value* under *config_name* in the Config table.

    Bug fix: the original loaded the literal strings "config_name" and
    "value" instead of the parameters, so every call wrote the same
    bogus record.  The value is now JSON-encoded via oscar.to_json for
    consistency with put() / put_all().
    """
    with oscar.context(base_dir, oscar.min_free_blocks) as context:
        groonga.load(context, "Config", {"_key": config_name, "value": oscar.to_json(value)})
def run(args):
if len(args.operations) == 0:
print get(args.base_dir)
elif len(args.operations) == 1:
print get(args.base_dir, args.operations[0])
elif len(args.operations) == 2:
put(args.base_dir, args.operations[0], json.loads(args.operations[1]))
else:
raise Exception("Invalid number of arguments")
if __name__ == "__main__":
    # Stand-alone invocation: build the parser, parse argv, dispatch.
    cli_parser = argparse.ArgumentParser()
    parser_setup(cli_parser)
    cli_args = cli_parser.parse_args()
    cli_args.func(cli_args)
|
normal
|
{
"blob_id": "8b4590cf2d8c040b6ab31c63baff0d83ab818641",
"index": 5423,
"step-1": "'''\nconfig -- config manipulator module for share\n\n@author: shimarin\n\n@copyright: 2014 Walbrix Corporation. All rights reserved.\n\n@license: proprietary\n'''\n\nimport json,argparse\nimport oscar,groonga\n\ndef parser_setup(parser):\n parser.add_argument(\"base_dir\")\n parser.add_argument(\"operations\", nargs=\"*\")\n parser.set_defaults(func=run)\n\n\ndef get(base_dir, config_name = None):\n with oscar.context(base_dir) as context:\n with context.command(\"select\") as command:\n command.add_argument(\"table\", \"Config\")\n if config_name: command.add_argument(\"filter\", \"_key == \\\"%s\\\"\" % command.escape(config_name))\n rows = json.loads(command.execute())[0][2:]\n if config_name:\n return json.loads(rows[0][2]) if len(rows) > 0 else None\n #else\n result = {}\n for row in rows:\n result[row[1]] = json.loads(row[2])\n return result\n\ndef put(base_dir, config_name, value):\n with oscar.context(base_dir, oscar.min_free_blocks) as context:\n groonga.load(context, \"Config\", {\"_key\":config_name,\"value\":oscar.to_json(value)})\n\ndef put_all(base_dir, configs):\n with oscar.context(base_dir, oscar.min_free_blocks) as context:\n groonga.load(context, \"Config\", map(lambda (x,y):{\"_key\":x,\"value\":oscar.to_json(y)}, configs.items()))\n\ndef show_one(base_dir, config_name):\n with oscar.context(base_dir) as context:\n print groonga.get(context, \"Config\", config_name)\n\ndef set_one(base_dir, config_name, value):\n with oscar.context(base_dir, oscar.min_free_blocks) as context:\n groonga.load(context, \"Config\", {\"_key\":\"config_name\",\"value\":\"value\"})\n\ndef run(args):\n if len(args.operations) == 0:\n print get(args.base_dir)\n elif len(args.operations) == 1:\n print get(args.base_dir, args.operations[0])\n elif len(args.operations) == 2:\n put(args.base_dir, args.operations[0], json.loads(args.operations[1]))\n else:\n raise Exception(\"Invalid number of arguments\")\n\nif __name__ == \"__main__\":\n parser = 
argparse.ArgumentParser()\n parser_setup(parser)\n args = parser.parse_args()\n args.func(args)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# program name: an2_colour.py
# no optional arguments: Uses Wine data to display information about the relationship of
# various attributes with colour and hue
# The script narrates each step on stdout with '>'-prefixed banners.
print('========================================================================================')
print('========================================================================================')
print('> start of program an2_colour.py')
print('> import libraries')
import argparse
import os.path as op
import csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from numpy.polynomial.polynomial import polyfit
print('> define convert_type function')
def convert_type(data_value):
    """Coerce *data_value* to int, then float; fall back to the raw value.

    Mirrors the usual CSV-cell heuristic: '3' -> 3, '3.5' -> 3.5,
    'abc' -> 'abc' unchanged.
    """
    for caster in (int, float):
        try:
            return caster(data_value)
        except ValueError:
            continue
    return data_value
print("> define get_delim function")
def get_delim(sourcefile1):
    """Detect the field delimiter used in *sourcefile1*.

    Returns ',' when a comma appears in the file contents (beyond the
    first character), otherwise ' ' (space).

    Fixes vs. original: the file handle is now closed via a context
    manager (it was leaked), and the unreachable print after the two
    return statements has been removed.
    """
    print('> executing get_delim function')
    with open(sourcefile1, 'r') as data:
        my_read_data = data.read()
    # NOTE(review): find(',') > 0 means a comma at index 0 would NOT be
    # detected — preserved as-is since the data files start with values.
    if my_read_data.find(',') > 0:
        print('    delimiter: comma')
        return ','
    print('    delimiter: space')
    return ' '
print(' ')
def lines_to_dict(lines, header=False):
    """Transpose parsed rows into a dict keyed by wine-attribute name.

    Each key maps to the column's values in row order.  *header* is
    accepted for signature compatibility but is not used.
    """
    print('> executing lines_to_dict')
    # Short attribute names for the 14 columns of the UCI Wine data set.
    column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',
                     'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']
    data_dict = {}
    for idx, column in enumerate(column_titles):
        data_dict[column] = [row[idx] for row in lines]
    return data_dict
def parse_file(data_file, dlm, debug=False):
    """Read *data_file* as delimited text and return typed rows.

    Parameters
    ----------
    data_file : path of the file to parse
    dlm : single-character delimiter (e.g. from get_delim)
    debug : when True, stop after the first few rows

    Returns a list of rows; each row is a list of int/float/str values
    produced by convert_type.  Empty rows are skipped.
    """
    print('> executing parse_file')
    # Raise explicitly instead of assert(): asserts are stripped under -O.
    if not op.isfile(data_file):
        raise FileNotFoundError('no such data file: %s' % data_file)
    lines = []
    with open(data_file, 'r') as fhandle:
        csv_reader = csv.reader(fhandle, delimiter=dlm)
        count = 0
        for line in csv_reader:
            if debug and count > 2:
                break
            count += 1
            newline = [convert_type(value) for value in line]
            if newline:
                lines += [newline]
    print('> view a few lines')
    print(' ')
    for line in lines[0:2]:
        print(line)
    print(' ')
    return lines
# class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',
# 'od','proline
def plot_data3(dd, col1, label1,
               col2a, col2b,
               label2a, label2b, n,
               debug=False):
    """Scatter-plot two attributes against *col1* on twin y-axes.

    dd        : dict of column-name -> list of values (see lines_to_dict)
    col1      : key providing the shared x-axis values
    label1    : x-axis label (also used in the title)
    col2a/b   : keys plotted on the left / right y-axes
    label2a/b : labels for the left / right y-axes
    n         : string suffix for the saved file 'an2_colour<n>.png'
    debug     : accepted but unused in this function

    Side effects: saves a PNG and opens an interactive window via plt.show().
    """
    df = pd.DataFrame.from_dict(dd)
    # Plain float arrays are required by np.polyfit/np.linspace below.
    x = np.fromiter(dd[col1], dtype=float)
    y1 = np.fromiter(dd[col2a], dtype=float)
    y2 = np.fromiter(dd[col2b], dtype=float)
    # print(df)
    fig, ax1 = plt.subplots()
    plt.title(label1 + ' by ' + label2a + ' and ' + label2b)
    clra = 'indigo'
    ax1.set_xlabel(label1)
    ax1.set_ylabel(label2a, color=clra)  # left-hand y-axis
    ax1.scatter(df[col1], df[col2a], color=clra, marker = '^')
    # Linear (degree-1) least-squares trend line over the x range.
    xp = np.linspace(np.amin(x), np.amax(x), 100)  # only works for numpy arrays
    weights = np.polyfit(x, y1, 1)
    model = np.poly1d(weights)
    plt.plot(xp, model(xp), '-', c=clra)
    ax1.tick_params(axis='y', labelcolor=clra)
    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
    clrb = 'darkgreen'
    ax2.set_ylabel(label2b, color=clrb)  # right-hand y-axis; x-label handled by ax1
    # ax2.plot(df[col1], df[col2b], color=color)
    ax2.scatter(df[col1], df[col2b], color= clrb)
    ax2.tick_params(axis='y', labelcolor=clrb)
    # Second trend line, fitted to the right-axis series.
    xp = np.linspace(np.amin(x), np.amax(x), 100)  # only works for numpy arrays
    weights = np.polyfit(x, y2, 1)
    model = np.poly1d(weights)
    plt.plot(xp, model(xp), '-', c=clrb)
    ax1.tick_params(axis='y', labelcolor=clra)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.savefig('an2_colour' + n + '.png')
    plt.show()
# Cases where there is a possible correlation with colour intensity or hue.
# color intensity:
# check against : alc, flav, od, proline
# hue:
# check against: ma, tphen, flav, pac, od
def main():
    """Run the analysis: load wine.data, reshape it, and draw both figures."""
    data_file = "wine.data"
    delimiter = get_delim(data_file)
    parsed_rows = parse_file(data_file, delimiter)
    data_dictionary = lines_to_dict(parsed_rows)
    plot_data3(data_dictionary, 'ci', 'Colour Intensity',
               'alc', 'flav', 'Alcohol', 'Flavonoids', '1')
    plot_data3(data_dictionary, 'hue', 'Hue',
               'pac', 'od', 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')


if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "594479c22cada665dcdc76737085ce342d7d5faf",
"index": 1480,
"step-1": "<mask token>\n\n\ndef convert_type(data_value):\n try:\n return int(data_value)\n except ValueError:\n try:\n return float(data_value)\n except ValueError:\n return data_value\n\n\n<mask token>\n\n\ndef get_delim(sourcefile1):\n print('> executing get_delim function')\n data = open(sourcefile1, 'r')\n my_read_data = data.read()\n if my_read_data.find(',') > 0:\n print(' delimiter: comma')\n return ','\n else:\n print(' delimiter: space')\n return ' '\n print(' ')\n\n\ndef lines_to_dict(lines, header=False):\n print('> executing lines_to_dict')\n column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',\n 'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']\n data_dict = {}\n for idx, column in enumerate(column_titles):\n data_dict[column] = []\n for row in lines:\n data_dict[column] += [row[idx]]\n return data_dict\n\n\n<mask token>\n\n\ndef plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False\n ):\n df = pd.DataFrame.from_dict(dd)\n x = np.fromiter(dd[col1], dtype=float)\n y1 = np.fromiter(dd[col2a], dtype=float)\n y2 = np.fromiter(dd[col2b], dtype=float)\n fig, ax1 = plt.subplots()\n plt.title(label1 + ' by ' + label2a + ' and ' + label2b)\n clra = 'indigo'\n ax1.set_xlabel(label1)\n ax1.set_ylabel(label2a, color=clra)\n ax1.scatter(df[col1], df[col2a], color=clra, marker='^')\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y1, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clra)\n ax1.tick_params(axis='y', labelcolor=clra)\n ax2 = ax1.twinx()\n clrb = 'darkgreen'\n ax2.set_ylabel(label2b, color=clrb)\n ax2.scatter(df[col1], df[col2b], color=clrb)\n ax2.tick_params(axis='y', labelcolor=clrb)\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y2, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clrb)\n ax1.tick_params(axis='y', labelcolor=clra)\n fig.tight_layout()\n plt.savefig('an2_colour' + n + '.png')\n plt.show()\n\n\ndef 
main():\n data_file = 'wine.data'\n dlm = get_delim(data_file)\n my_data = parse_file(data_file, dlm)\n data_dictionary = lines_to_dict(my_data)\n plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',\n 'Alcohol', 'Flavonoids', '1')\n plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',\n 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef convert_type(data_value):\n try:\n return int(data_value)\n except ValueError:\n try:\n return float(data_value)\n except ValueError:\n return data_value\n\n\n<mask token>\n\n\ndef get_delim(sourcefile1):\n print('> executing get_delim function')\n data = open(sourcefile1, 'r')\n my_read_data = data.read()\n if my_read_data.find(',') > 0:\n print(' delimiter: comma')\n return ','\n else:\n print(' delimiter: space')\n return ' '\n print(' ')\n\n\ndef lines_to_dict(lines, header=False):\n print('> executing lines_to_dict')\n column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',\n 'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']\n data_dict = {}\n for idx, column in enumerate(column_titles):\n data_dict[column] = []\n for row in lines:\n data_dict[column] += [row[idx]]\n return data_dict\n\n\ndef parse_file(data_file, dlm, debug=False):\n print('> executing parse_file')\n assert op.isfile(data_file)\n with open(data_file, 'r') as fhandle:\n csv_reader = csv.reader(fhandle, delimiter=dlm)\n lines = []\n if debug:\n count = 0\n for line in csv_reader:\n if debug:\n if count > 2:\n break\n count += 1\n newline = []\n for value in line:\n newline += [convert_type(value)]\n if len(newline) > 0:\n lines += [newline]\n print('> view a few lines')\n print(' ')\n for line in lines[0:2]:\n print(line)\n print(' ')\n return lines\n\n\ndef plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False\n ):\n df = pd.DataFrame.from_dict(dd)\n x = np.fromiter(dd[col1], dtype=float)\n y1 = np.fromiter(dd[col2a], dtype=float)\n y2 = np.fromiter(dd[col2b], dtype=float)\n fig, ax1 = plt.subplots()\n plt.title(label1 + ' by ' + label2a + ' and ' + label2b)\n clra = 'indigo'\n ax1.set_xlabel(label1)\n ax1.set_ylabel(label2a, color=clra)\n ax1.scatter(df[col1], df[col2a], color=clra, marker='^')\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y1, 1)\n model = np.poly1d(weights)\n plt.plot(xp, 
model(xp), '-', c=clra)\n ax1.tick_params(axis='y', labelcolor=clra)\n ax2 = ax1.twinx()\n clrb = 'darkgreen'\n ax2.set_ylabel(label2b, color=clrb)\n ax2.scatter(df[col1], df[col2b], color=clrb)\n ax2.tick_params(axis='y', labelcolor=clrb)\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y2, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clrb)\n ax1.tick_params(axis='y', labelcolor=clra)\n fig.tight_layout()\n plt.savefig('an2_colour' + n + '.png')\n plt.show()\n\n\ndef main():\n data_file = 'wine.data'\n dlm = get_delim(data_file)\n my_data = parse_file(data_file, dlm)\n data_dictionary = lines_to_dict(my_data)\n plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',\n 'Alcohol', 'Flavonoids', '1')\n plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',\n 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')\n\n\n<mask token>\n",
"step-3": "print(\n '========================================================================================'\n )\nprint(\n '========================================================================================'\n )\nprint('> start of program an2_colour.py')\nprint('> import libraries')\n<mask token>\nprint('> define convert_type function')\n\n\ndef convert_type(data_value):\n try:\n return int(data_value)\n except ValueError:\n try:\n return float(data_value)\n except ValueError:\n return data_value\n\n\nprint('> define get_delim function')\n\n\ndef get_delim(sourcefile1):\n print('> executing get_delim function')\n data = open(sourcefile1, 'r')\n my_read_data = data.read()\n if my_read_data.find(',') > 0:\n print(' delimiter: comma')\n return ','\n else:\n print(' delimiter: space')\n return ' '\n print(' ')\n\n\ndef lines_to_dict(lines, header=False):\n print('> executing lines_to_dict')\n column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',\n 'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']\n data_dict = {}\n for idx, column in enumerate(column_titles):\n data_dict[column] = []\n for row in lines:\n data_dict[column] += [row[idx]]\n return data_dict\n\n\ndef parse_file(data_file, dlm, debug=False):\n print('> executing parse_file')\n assert op.isfile(data_file)\n with open(data_file, 'r') as fhandle:\n csv_reader = csv.reader(fhandle, delimiter=dlm)\n lines = []\n if debug:\n count = 0\n for line in csv_reader:\n if debug:\n if count > 2:\n break\n count += 1\n newline = []\n for value in line:\n newline += [convert_type(value)]\n if len(newline) > 0:\n lines += [newline]\n print('> view a few lines')\n print(' ')\n for line in lines[0:2]:\n print(line)\n print(' ')\n return lines\n\n\ndef plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False\n ):\n df = pd.DataFrame.from_dict(dd)\n x = np.fromiter(dd[col1], dtype=float)\n y1 = np.fromiter(dd[col2a], dtype=float)\n y2 = np.fromiter(dd[col2b], dtype=float)\n fig, 
ax1 = plt.subplots()\n plt.title(label1 + ' by ' + label2a + ' and ' + label2b)\n clra = 'indigo'\n ax1.set_xlabel(label1)\n ax1.set_ylabel(label2a, color=clra)\n ax1.scatter(df[col1], df[col2a], color=clra, marker='^')\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y1, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clra)\n ax1.tick_params(axis='y', labelcolor=clra)\n ax2 = ax1.twinx()\n clrb = 'darkgreen'\n ax2.set_ylabel(label2b, color=clrb)\n ax2.scatter(df[col1], df[col2b], color=clrb)\n ax2.tick_params(axis='y', labelcolor=clrb)\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y2, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clrb)\n ax1.tick_params(axis='y', labelcolor=clra)\n fig.tight_layout()\n plt.savefig('an2_colour' + n + '.png')\n plt.show()\n\n\ndef main():\n data_file = 'wine.data'\n dlm = get_delim(data_file)\n my_data = parse_file(data_file, dlm)\n data_dictionary = lines_to_dict(my_data)\n plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',\n 'Alcohol', 'Flavonoids', '1')\n plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',\n 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "print(\n '========================================================================================'\n )\nprint(\n '========================================================================================'\n )\nprint('> start of program an2_colour.py')\nprint('> import libraries')\nimport argparse\nimport os.path as op\nimport csv\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom numpy.polynomial.polynomial import polyfit\nprint('> define convert_type function')\n\n\ndef convert_type(data_value):\n try:\n return int(data_value)\n except ValueError:\n try:\n return float(data_value)\n except ValueError:\n return data_value\n\n\nprint('> define get_delim function')\n\n\ndef get_delim(sourcefile1):\n print('> executing get_delim function')\n data = open(sourcefile1, 'r')\n my_read_data = data.read()\n if my_read_data.find(',') > 0:\n print(' delimiter: comma')\n return ','\n else:\n print(' delimiter: space')\n return ' '\n print(' ')\n\n\ndef lines_to_dict(lines, header=False):\n print('> executing lines_to_dict')\n column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',\n 'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']\n data_dict = {}\n for idx, column in enumerate(column_titles):\n data_dict[column] = []\n for row in lines:\n data_dict[column] += [row[idx]]\n return data_dict\n\n\ndef parse_file(data_file, dlm, debug=False):\n print('> executing parse_file')\n assert op.isfile(data_file)\n with open(data_file, 'r') as fhandle:\n csv_reader = csv.reader(fhandle, delimiter=dlm)\n lines = []\n if debug:\n count = 0\n for line in csv_reader:\n if debug:\n if count > 2:\n break\n count += 1\n newline = []\n for value in line:\n newline += [convert_type(value)]\n if len(newline) > 0:\n lines += [newline]\n print('> view a few lines')\n print(' ')\n for line in lines[0:2]:\n print(line)\n print(' ')\n return lines\n\n\ndef plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False\n ):\n df = 
pd.DataFrame.from_dict(dd)\n x = np.fromiter(dd[col1], dtype=float)\n y1 = np.fromiter(dd[col2a], dtype=float)\n y2 = np.fromiter(dd[col2b], dtype=float)\n fig, ax1 = plt.subplots()\n plt.title(label1 + ' by ' + label2a + ' and ' + label2b)\n clra = 'indigo'\n ax1.set_xlabel(label1)\n ax1.set_ylabel(label2a, color=clra)\n ax1.scatter(df[col1], df[col2a], color=clra, marker='^')\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y1, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clra)\n ax1.tick_params(axis='y', labelcolor=clra)\n ax2 = ax1.twinx()\n clrb = 'darkgreen'\n ax2.set_ylabel(label2b, color=clrb)\n ax2.scatter(df[col1], df[col2b], color=clrb)\n ax2.tick_params(axis='y', labelcolor=clrb)\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y2, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clrb)\n ax1.tick_params(axis='y', labelcolor=clra)\n fig.tight_layout()\n plt.savefig('an2_colour' + n + '.png')\n plt.show()\n\n\ndef main():\n data_file = 'wine.data'\n dlm = get_delim(data_file)\n my_data = parse_file(data_file, dlm)\n data_dictionary = lines_to_dict(my_data)\n plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',\n 'Alcohol', 'Flavonoids', '1')\n plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',\n 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# program name: an2_colour.py\n\n# no optional arguments: Uses Wine data to display information about the relationship of \n# various attributes with colour and hue \n\nprint('========================================================================================')\nprint('========================================================================================')\n\nprint('> start of program an2_colour.py')\nprint('> import libraries')\n\nimport argparse\nimport os.path as op\nimport csv\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom numpy.polynomial.polynomial import polyfit\n\nprint('> define convert_type function')\ndef convert_type(data_value):\n try:\n return int(data_value)\n except ValueError:\n try:\n return float(data_value)\n except ValueError:\n return data_value\n\nprint(\"> define get_delim function\")\ndef get_delim(sourcefile1):\n print('> executing get_delim function')\n data = open(sourcefile1, 'r') \n my_read_data = data.read()\n if my_read_data.find(',') > 0:\n print(' delimiter: comma')\n return ','\n else:\n print(' delimiter: space')\n return ' ' \n print(' ')\n\ndef lines_to_dict(lines, header=False):\n print('> executing lines_to_dict')\n # column_titles = ['Class','Alcohol','Malic acid','Ash','Alcalinity of ash','Magnesium','Total phenols','Flavanoids','Nonflavanoid phenols','Proanthocyanins','Color intensity','Hue',\n # 'OD280/OD315 of diluted wines','Proline']\n column_titles = ['class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',\n 'od','proline']\n \n data_dict = {}\n for idx, column in enumerate(column_titles):\n data_dict[column] = []\n for row in lines:\n data_dict[column] += [row[idx]]\n return data_dict\n\ndef parse_file(data_file, dlm, debug=False): # took delimiter out\n print('> executing parse_file')\n # Verify the file exists\n assert(op.isfile(data_file))\n\n # open it as a csv \n with open(data_file, 'r') as fhandle:\n csv_reader = csv.reader(fhandle, 
delimiter=dlm)\n # Add each line in the file to a list\n lines = []\n if debug:\n count = 0\n for line in csv_reader:\n if debug:\n if count > 2:\n break\n count += 1\n newline = []\n for value in line:\n newline += [convert_type(value)]\n if len(newline) > 0:\n lines += [newline]\n\n print('> view a few lines')\n print(' ')\n for line in lines[0:2]:\n print(line)\n print(' ')\n # Return all the contents of our file\n return lines\n\n\n# class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',\n# 'od','proline\n\n \ndef plot_data3(dd, col1, label1, \n col2a, col2b,\n label2a, label2b, n,\n debug=False):\n df = pd.DataFrame.from_dict(dd) \n x = np.fromiter(dd[col1], dtype=float) # need these for the lines below\n y1 = np.fromiter(dd[col2a], dtype=float)\n y2 = np.fromiter(dd[col2b], dtype=float)\n\n # print(df) \n fig, ax1 = plt.subplots()\n plt.title(label1 + ' by ' + label2a + ' and ' + label2b)\n\n clra = 'indigo'\n ax1.set_xlabel(label1)\n ax1.set_ylabel(label2a, color=clra) # left side\n\n ax1.scatter(df[col1], df[col2a], color=clra, marker = '^')\n\n xp = np.linspace(np.amin(x), np.amax(x), 100) #only works for numpy arrays\n weights = np.polyfit(x, y1, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clra)\n ax1.tick_params(axis='y', labelcolor=clra)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n clrb = 'darkgreen'\n ax2.set_ylabel(label2b, color=clrb) # we already handled the x-label with ax1\n # ax2.plot(df[col1], df[col2b], color=color)\n ax2.scatter(df[col1], df[col2b], color= clrb)\n ax2.tick_params(axis='y', labelcolor=clrb)\n\n xp = np.linspace(np.amin(x), np.amax(x), 100) #only works for numpy arrays\n weights = np.polyfit(x, y2, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clrb)\n ax1.tick_params(axis='y', labelcolor=clra)\n\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n plt.savefig('an2_colour' + n + '.png')\n plt.show()\n\n# Cases 
where there is a possible correlation with colour intensity or hue. \n# color intensity:\n# check against : alc, flav, od, proline\n# hue:\n# check against: ma, tphen, flav, pac, od\n\ndef main():\n\n data_file = \"wine.data\"\n dlm = get_delim(data_file) \n my_data = parse_file(data_file, dlm)\n data_dictionary = lines_to_dict(my_data)\n #print(data_dictionary)\n\n plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav', 'Alcohol', 'Flavonoids', '1')\n plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od', 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# Read n (number of rounds) and k (flat additive gain), one per line.
n = int(input())
k = int(input())
# Starting from 1, each round take whichever is smaller: doubling or adding k.
best = 1
for _ in range(n):
    best = min(best * 2, best + k)
print(best)
|
normal
|
{
"blob_id": "bb730606c7357eeb605292d5b9c05e8e8a797ea2",
"index": 5461,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(n):\n ans = min(ans * 2, ans + k)\nprint(ans)\n",
"step-3": "n, k = [int(input()) for _ in range(2)]\nans = 1\nfor _ in range(n):\n ans = min(ans * 2, ans + k)\nprint(ans)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Spelling bee NYT puzzle solver
with open('words.txt') as words_fh:
    # Normalise the lexicon: strip whitespace, lowercase, deduplicate.
    lexicon = {line.strip().lower() for line in words_fh}

# NOTE: Could add a CLI to allow users to input this. Manual edits are the way for now
MANDATORY_LETTER = 'l'
LETTERS = set('tievpx') | {MANDATORY_LETTER}

# A word qualifies when it uses only the puzzle letters, contains the
# mandatory letter, and is at least four characters long.
valid_words = [
    word
    for word in lexicon
    if set(word) <= LETTERS and MANDATORY_LETTER in word and len(word) >= 4
]
sorted_valid_words = sorted(valid_words, key=len)
print(sorted_valid_words)
|
normal
|
{
"blob_id": "aacd5d671090c3305a53d62c3c6c25d4c033f42d",
"index": 6420,
"step-1": "<mask token>\n",
"step-2": "with open('words.txt') as words_fh:\n lexicon = set(list(map(lambda x: x.strip().lower(), words_fh.readlines())))\n<mask token>\nprint(sorted_valid_words)\n",
"step-3": "with open('words.txt') as words_fh:\n lexicon = set(list(map(lambda x: x.strip().lower(), words_fh.readlines())))\nMANDATORY_LETTER = 'l'\nLETTERS = set(['t', 'i', 'e', 'v', 'p', 'x'] + [MANDATORY_LETTER])\nvalid_words = [word for word in lexicon if set(word).issubset(LETTERS) and \n MANDATORY_LETTER in set(word) and len(word) >= 4]\nsorted_valid_words = sorted(valid_words, key=lambda x: len(x))\nprint(sorted_valid_words)\n",
"step-4": "# Spelling bee NYT puzzle solver\r\n\r\nwith open('words.txt') as words_fh:\r\n # Converts strips and lowercases lexicon (space seperated txt file)\r\n # Use set to remove duplicates (decasing)\r\n\tlexicon = set(list(map(lambda x: x.strip().lower(), words_fh.readlines())))\r\n\r\n# NOTE: Could add a CLI to allow users to input this. Manual edits are the way for now\r\nMANDATORY_LETTER = 'l'\r\nLETTERS = set(['t','i','e','v','p','x'] + [MANDATORY_LETTER])\r\n\r\n# Search for valid words \r\nvalid_words = [word for word in lexicon if set(word).issubset(LETTERS) and MANDATORY_LETTER in set(word) and len(word) >= 4]\r\nsorted_valid_words = sorted(valid_words, key=lambda x: len(x))\r\nprint(sorted_valid_words)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Visit 2.12.3 log file
#
# Machine-generated VisIt session log: opens test.vtk, draws a Pseudocolor
# plot of 'scalars', saves a PNG, then restores a recorded 3D view.
# Relies on the VisIt-provided `visit` module and global Version().
ScriptVersion = "2.12.3"
if ScriptVersion != Version():
    # Parenthesised so this warning runs under both Python 2 and Python 3
    # (the original bare `print` statement was Python-2 only).
    print("This script is for VisIt %s. It may not work with version %s" % (ScriptVersion, Version()))
visit.ShowAllWindows()
visit.ShowAllWindows()
visit.OpenDatabase("test.vtk", 0)
# The UpdateDBPluginInfo RPC is not supported in the VisIt module so it will not be logged.
visit.AddPlot("Pseudocolor", "scalars", 1, 1)
visit.DrawPlots()
SaveWindowAtts = visit.SaveWindowAttributes()
SaveWindowAtts.outputToCurrentDirectory = 1
SaveWindowAtts.outputDirectory = "."
SaveWindowAtts.fileName = "visit"
SaveWindowAtts.family = 1
SaveWindowAtts.format = SaveWindowAtts.PNG  # BMP, CURVE, JPEG, OBJ, PNG, POSTSCRIPT, POVRAY, PPM, RGB, STL, TIFF, ULTRA, VTK, PLY
SaveWindowAtts.width = 1024
SaveWindowAtts.height = 1024
SaveWindowAtts.screenCapture = 0
SaveWindowAtts.saveTiled = 0
SaveWindowAtts.quality = 80
SaveWindowAtts.progressive = 0
SaveWindowAtts.binary = 0
SaveWindowAtts.stereo = 0
SaveWindowAtts.compression = SaveWindowAtts.PackBits  # None, PackBits, Jpeg, Deflate
SaveWindowAtts.forceMerge = 0
SaveWindowAtts.resConstraint = SaveWindowAtts.ScreenProportions  # NoConstraint, EqualWidthHeight, ScreenProportions
SaveWindowAtts.advancedMultiWindowSave = 0
visit.SetSaveWindowAttributes(SaveWindowAtts)
visit.SaveWindow()
# Begin spontaneous state
View3DAtts = visit.View3DAttributes()
View3DAtts.viewNormal = (0.264045, 0.220135, 0.939053)
View3DAtts.focus = (1, 1, 1)
View3DAtts.viewUp = (0.100817, 0.961974, -0.253856)
View3DAtts.viewAngle = 30
View3DAtts.parallelScale = 1.73205
View3DAtts.nearPlane = -3.4641
View3DAtts.farPlane = 3.4641
View3DAtts.imagePan = (0, 0)
View3DAtts.imageZoom = 1
View3DAtts.perspective = 1
View3DAtts.eyeAngle = 2
View3DAtts.centerOfRotationSet = 0
View3DAtts.centerOfRotation = (1, 1, 1)
View3DAtts.axis3DScaleFlag = 0
View3DAtts.axis3DScales = (1, 1, 1)
View3DAtts.shear = (0, 0, 1)
View3DAtts.windowValid = 1
visit.SetView3D(View3DAtts)
# End spontaneous state
|
normal
|
{
"blob_id": "6d0cfc9d5bbc45bfa356c45a7cdb9f4822b03e0a",
"index": 2983,
"step-1": "# Visit 2.12.3 log file\nScriptVersion = \"2.12.3\"\nif ScriptVersion != Version():\n print \"This script is for VisIt %s. It may not work with version %s\" % (ScriptVersion, Version())\nvisit.ShowAllWindows()\nvisit.ShowAllWindows()\nvisit.OpenDatabase(\"test.vtk\", 0)\n# The UpdateDBPluginInfo RPC is not supported in the VisIt module so it will not be logged.\nvisit.AddPlot(\"Pseudocolor\", \"scalars\", 1, 1)\nvisit.DrawPlots()\nSaveWindowAtts = visit.SaveWindowAttributes()\nSaveWindowAtts.outputToCurrentDirectory = 1\nSaveWindowAtts.outputDirectory = \".\"\nSaveWindowAtts.fileName = \"visit\"\nSaveWindowAtts.family = 1\nSaveWindowAtts.format = SaveWindowAtts.PNG # BMP, CURVE, JPEG, OBJ, PNG, POSTSCRIPT, POVRAY, PPM, RGB, STL, TIFF, ULTRA, VTK, PLY\nSaveWindowAtts.width = 1024\nSaveWindowAtts.height = 1024\nSaveWindowAtts.screenCapture = 0\nSaveWindowAtts.saveTiled = 0\nSaveWindowAtts.quality = 80\nSaveWindowAtts.progressive = 0\nSaveWindowAtts.binary = 0\nSaveWindowAtts.stereo = 0\nSaveWindowAtts.compression = SaveWindowAtts.PackBits # None, PackBits, Jpeg, Deflate\nSaveWindowAtts.forceMerge = 0\nSaveWindowAtts.resConstraint = SaveWindowAtts.ScreenProportions # NoConstraint, EqualWidthHeight, ScreenProportions\nSaveWindowAtts.advancedMultiWindowSave = 0\nvisit.SetSaveWindowAttributes(SaveWindowAtts)\nvisit.SaveWindow()\n# Begin spontaneous state\nView3DAtts = visit.View3DAttributes()\nView3DAtts.viewNormal = (0.264045, 0.220135, 0.939053)\nView3DAtts.focus = (1, 1, 1)\nView3DAtts.viewUp = (0.100817, 0.961974, -0.253856)\nView3DAtts.viewAngle = 30\nView3DAtts.parallelScale = 1.73205\nView3DAtts.nearPlane = -3.4641\nView3DAtts.farPlane = 3.4641\nView3DAtts.imagePan = (0, 0)\nView3DAtts.imageZoom = 1\nView3DAtts.perspective = 1\nView3DAtts.eyeAngle = 2\nView3DAtts.centerOfRotationSet = 0\nView3DAtts.centerOfRotation = (1, 1, 1)\nView3DAtts.axis3DScaleFlag = 0\nView3DAtts.axis3DScales = (1, 1, 1)\nView3DAtts.shear = (0, 0, 1)\nView3DAtts.windowValid = 
1\nvisit.SetView3D(View3DAtts)\n# End spontaneous state\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from xai.brain.wordbase.verbs._essay import _ESSAY


class _ESSAYED(_ESSAY):
    """Word entry for the inflected form "essayed" of the verb "essay"."""

    def __init__(self):
        _ESSAY.__init__(self)
        self.name = "ESSAYED"
        self.specie = 'verbs'
        self.basic = "essay"
        self.jsondata = {}
|
normal
|
{
"blob_id": "dc2cbbaca3c35f76ac09c93a2e8ad13eb0bdfce6",
"index": 4086,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass _ESSAYED(_ESSAY):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass _ESSAYED(_ESSAY):\n\n def __init__(self):\n _ESSAY.__init__(self)\n self.name = 'ESSAYED'\n self.specie = 'verbs'\n self.basic = 'essay'\n self.jsondata = {}\n",
"step-4": "from xai.brain.wordbase.verbs._essay import _ESSAY\n\n\nclass _ESSAYED(_ESSAY):\n\n def __init__(self):\n _ESSAY.__init__(self)\n self.name = 'ESSAYED'\n self.specie = 'verbs'\n self.basic = 'essay'\n self.jsondata = {}\n",
"step-5": "\n\nfrom xai.brain.wordbase.verbs._essay import _ESSAY\n\n#calss header\nclass _ESSAYED(_ESSAY, ):\n\tdef __init__(self,): \n\t\t_ESSAY.__init__(self)\n\t\tself.name = \"ESSAYED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"essay\"\n\t\tself.jsondata = {}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import bootcamp_utils
import numba
@numba.jit(nopython=True)
def backtrack_steps():
    """
    Compute the number of steps it takes a 1d random walker starting
    at zero to get to +1.
    """
    position = 0
    step_count = 0
    # Take +/-1 steps with equal probability until +1 is first reached.
    while position < 1:
        position += 2 * np.random.randint(0, 2) - 1
        step_count += 1
    return step_count
# Stepping time: each simulated step corresponds to tau seconds.
tau = 0.5  # seconds
# Number of backtrack durations to sample.
n_samples = 10000
# Pre-allocated array of backtrack times (in steps, converted below).
t_bt = np.empty(n_samples)
# Draw one first-passage sample per slot.
for i in range(n_samples):
    t_bt[i] = backtrack_steps()
# Convert step counts to seconds.
t_bt *= tau
plt.figure(1)
# NOTE(review): 'normed' was removed in matplotlib 3.x in favour of
# 'density' — confirm the installed matplotlib version supports it.
_ = plt.hist(t_bt, bins=100, normed=True)
plt.xlabel('time (s)')
plt.ylabel('PDF')
def ecdf(data):
    """Return (sorted values, cumulative fractions) for an empirical CDF."""
    ordered = np.sort(data)
    fractions = np.arange(1, len(ordered) + 1) / len(ordered)
    return ordered, fractions
# Generate x, y values of the empirical CDF of the sampled backtrack times.
x, y = ecdf(t_bt)

plt.figure(2)
# Plot the ECDF on a log x-axis (backtrack times span many decades).
plt.semilogx(x, y, '.', markersize=10)
# Clean up plot
plt.margins(y=0.02)
plt.xlabel('time (s)')
plt.ylabel('ECDF')

plt.figure(3)
# Plot the complementary CDF (1 - ECDF) on log-log axes.
plt.loglog(x, 1 - y, '.')
# Overlay the 1/sqrt(t) reference curve for comparison with the tail.
t_smooth = np.logspace(0.5, 8, 100)
plt.loglog(t_smooth, 1 / np.sqrt(t_smooth))
# Label axes
plt.xlabel('time (s)')
plt.ylabel('CCDF')

plt.show()
|
normal
|
{
"blob_id": "00a2992af78f9edadd3f4cbc7d073c1f74fcd9a2",
"index": 2810,
"step-1": "<mask token>\n\n\[email protected](nopython=True)\ndef backtrack_steps():\n \"\"\"\n Compute the number of steps it takes a 1d random walker starting\n at zero to get to +1.\n \"\"\"\n x = 0\n n_steps = 0\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n return n_steps\n\n\n<mask token>\n\n\ndef ecdf(data):\n return np.sort(data), np.arange(1, len(data) + 1) / len(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsns.set()\n<mask token>\n\n\[email protected](nopython=True)\ndef backtrack_steps():\n \"\"\"\n Compute the number of steps it takes a 1d random walker starting\n at zero to get to +1.\n \"\"\"\n x = 0\n n_steps = 0\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n return n_steps\n\n\n<mask token>\nfor i in range(n_samples):\n t_bt[i] = backtrack_steps()\nt_bt *= tau\nplt.figure(1)\n<mask token>\nplt.xlabel('time (s)')\nplt.ylabel('PDF')\n\n\ndef ecdf(data):\n return np.sort(data), np.arange(1, len(data) + 1) / len(data)\n\n\n<mask token>\nplt.figure(2)\nplt.semilogx(x, y, '.', markersize=10)\nplt.margins(y=0.02)\nplt.xlabel('time (s)')\nplt.ylabel('ECDF')\nplt.figure(3)\nplt.loglog(x, 1 - y, '.')\n<mask token>\nplt.loglog(t_smooth, 1 / np.sqrt(t_smooth))\nplt.xlabel('time (s)')\nplt.ylabel('CCDF')\nplt.show()\n",
"step-3": "<mask token>\nsns.set()\n<mask token>\n\n\[email protected](nopython=True)\ndef backtrack_steps():\n \"\"\"\n Compute the number of steps it takes a 1d random walker starting\n at zero to get to +1.\n \"\"\"\n x = 0\n n_steps = 0\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n return n_steps\n\n\ntau = 0.5\nn_samples = 10000\nt_bt = np.empty(n_samples)\nfor i in range(n_samples):\n t_bt[i] = backtrack_steps()\nt_bt *= tau\nplt.figure(1)\n_ = plt.hist(t_bt, bins=100, normed=True)\nplt.xlabel('time (s)')\nplt.ylabel('PDF')\n\n\ndef ecdf(data):\n return np.sort(data), np.arange(1, len(data) + 1) / len(data)\n\n\nx, y = ecdf(t_bt)\nplt.figure(2)\nplt.semilogx(x, y, '.', markersize=10)\nplt.margins(y=0.02)\nplt.xlabel('time (s)')\nplt.ylabel('ECDF')\nplt.figure(3)\nplt.loglog(x, 1 - y, '.')\nt_smooth = np.logspace(0.5, 8, 100)\nplt.loglog(t_smooth, 1 / np.sqrt(t_smooth))\nplt.xlabel('time (s)')\nplt.ylabel('CCDF')\nplt.show()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nimport bootcamp_utils\nimport numba\n\n\[email protected](nopython=True)\ndef backtrack_steps():\n \"\"\"\n Compute the number of steps it takes a 1d random walker starting\n at zero to get to +1.\n \"\"\"\n x = 0\n n_steps = 0\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n return n_steps\n\n\ntau = 0.5\nn_samples = 10000\nt_bt = np.empty(n_samples)\nfor i in range(n_samples):\n t_bt[i] = backtrack_steps()\nt_bt *= tau\nplt.figure(1)\n_ = plt.hist(t_bt, bins=100, normed=True)\nplt.xlabel('time (s)')\nplt.ylabel('PDF')\n\n\ndef ecdf(data):\n return np.sort(data), np.arange(1, len(data) + 1) / len(data)\n\n\nx, y = ecdf(t_bt)\nplt.figure(2)\nplt.semilogx(x, y, '.', markersize=10)\nplt.margins(y=0.02)\nplt.xlabel('time (s)')\nplt.ylabel('ECDF')\nplt.figure(3)\nplt.loglog(x, 1 - y, '.')\nt_smooth = np.logspace(0.5, 8, 100)\nplt.loglog(t_smooth, 1 / np.sqrt(t_smooth))\nplt.xlabel('time (s)')\nplt.ylabel('CCDF')\nplt.show()\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nimport bootcamp_utils\nimport numba\n\n\n\[email protected](nopython=True)\ndef backtrack_steps():\n \"\"\"\n Compute the number of steps it takes a 1d random walker starting\n at zero to get to +1.\n \"\"\"\n\n # Initialize position and number of steps\n x = 0\n n_steps = 0\n\n # Walk until we get to positive 1\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n\n return n_steps\n\n\n# Stepping time\ntau = 0.5 # seconds\n\n# Specify number of samples\nn_samples = 10000\n\n# Array of backtrack times\nt_bt = np.empty(n_samples)\n\n# Generate the samples\nfor i in range(n_samples):\n t_bt[i] = backtrack_steps()\n\n# Convert to seconds\nt_bt *= tau\n\nplt.figure(1)\n_ = plt.hist(t_bt, bins=100, normed=True)\nplt.xlabel('time (s)')\nplt.ylabel('PDF')\n\n\ndef ecdf(data):\n return np.sort(data), np.arange(1, len(data)+1) / len(data)\n\n# Generate x, y values\nx, y = ecdf(t_bt)\n\nplt.figure(2)\n# Plot CDF from random numbers\nplt.semilogx(x, y, '.', markersize=10)\n\n# Clean up plot\nplt.margins(y=0.02)\nplt.xlabel('time (s)')\nplt.ylabel('ECDF')\n\n\nplt.figure(3)\n# Plot the CCDF\nplt.loglog(x, 1 - y, '.')\n\n# Plot the asymptotic power law\nt_smooth = np.logspace(0.5, 8, 100)\nplt.loglog(t_smooth, 1 / np.sqrt(t_smooth))\n\n# Label axes\nplt.xlabel('time (s)')\nplt.ylabel('CCDF')\n\nplt.show()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import smtplib
import subprocess
import time
class NotifyError(Exception):
def __init__(self, message):
self.message = message
class Notification(object):
def __init__(self, config, dry_run):
self.dry_run = dry_run
self.notifications = {}
def submit(self, recipient, message):
if recipient not in self.notifications:
self.notifications[recipient] = []
self.notifications[recipient].append(message)
def notify_all(self):
for recip in self.notifications:
if len(self.notifications[recip]) > 0:
self.notify(recip, '\r\n\r\n-------------------\r\n\r\n'.join(self.notifications[recip]))
time.sleep(5)
self.notifications[recip] = []
def notify(self, recipient, message):
raise NotImplementedError('Need to subclass Notification')
def connect(self):
raise NotImplementedError('Need to subclass Notification')
def close(self):
raise NotImplementedError('Need to subclass Notification')
class SendMail(Notification):
def __init__(self, config, dry_run):
super().__init__(config, dry_run)
self.address = config.sendmail.address
self.contact_info = config.sendmail.contact_info
self.message_template = '\r\n'.join(['From: '+self.address,
'To: {}',
'Subject: ['+config.name+'] Notifications',
'',
'Greetings Human {},',
'',
'{}'
'',
'',
'Beep boop,',
config.name + ' Bot'])
def notify(self, recipient, message):
# -i flag: do NOT treat bare dot as EOF
cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.contact_info[recipient]['address']]
msg = self.message_template.format(self.contact_info[recipient]['address'], self.contact_info[recipient]['name'], message)
proc = subprocess.Popen(cmd, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate(input=msg.encode('utf-8'))
#TODO handle errors
#print(f"ret: {proc.returncode}")
#print("stdout:" + str(out))
#print("stderr:" + str(err))
def connect(self):
pass
def close(self):
pass
class SMTP(Notification):
def __init__(self, config, dry_run):
super().__init__(config, dry_run)
self.hostname = config.smtp.hostname
self.username = config.smtp.username
self.passwd = config.smtp.passwd
self.address = config.smtp.address
self.contact_info = config.smtp.contact_info
self.connected = False
self.message_template = '\r\n'.join(['From: '+self.address,
'To: {}',
'Subject: ['+config.name+'] Notifications',
'',
'Greetings Human {},',
'',
'{}'
'',
'',
'Beep boop,',
config.name + ' Bot'])
#TODO deal with smtplib exceptions
def connect(self):
self.server = smtplib.SMTP(self.hostname)
self.server.ehlo()
self.server.starttls()
self.server.login(self.username, self.passwd)
self.connected = True
#TODO implement saving messages to disk with timestamp if send fails
#TODO deal with smtplib exceptions
def notify(self, recipient, message):
if not self.connected:
raise NotifyError('Not connected to SMTP server; cannot send notifications')
self.server.sendmail(self.address,
self.contact_info[recipient]['address'],
self.message_template.format(self.contact_info[recipient]['address'], self.contact_info[recipient]['name'], message)
)
#TODO deal with smtplib exceptions
def close(self):
if self.connected:
self.server.quit()
self.connected = False
|
normal
|
{
"blob_id": "01849a6bf5ce5eb75c549af28312f61711ad2494",
"index": 4425,
"step-1": "<mask token>\n\n\nclass Notification(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SendMail(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.address = config.sendmail.address\n self.contact_info = config.sendmail.contact_info\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def notify(self, recipient, message):\n cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.\n contact_info[recipient]['address']]\n msg = self.message_template.format(self.contact_info[recipient][\n 'address'], self.contact_info[recipient]['name'], message)\n proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg.encode('utf-8'))\n\n def connect(self):\n pass\n\n def close(self):\n pass\n\n\nclass SMTP(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.hostname = config.smtp.hostname\n self.username = config.smtp.username\n self.passwd = config.smtp.passwd\n self.address = config.smtp.address\n self.contact_info = config.smtp.contact_info\n self.connected = False\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def connect(self):\n self.server = smtplib.SMTP(self.hostname)\n self.server.ehlo()\n self.server.starttls()\n self.server.login(self.username, self.passwd)\n self.connected = True\n\n def notify(self, recipient, message):\n if not self.connected:\n raise NotifyError(\n 'Not connected to SMTP server; cannot send notifications')\n self.server.sendmail(self.address, self.contact_info[recipient][\n 
'address'], self.message_template.format(self.contact_info[\n recipient]['address'], self.contact_info[recipient]['name'],\n message))\n\n def close(self):\n if self.connected:\n self.server.quit()\n self.connected = False\n",
"step-2": "<mask token>\n\n\nclass Notification(object):\n\n def __init__(self, config, dry_run):\n self.dry_run = dry_run\n self.notifications = {}\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SendMail(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.address = config.sendmail.address\n self.contact_info = config.sendmail.contact_info\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def notify(self, recipient, message):\n cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.\n contact_info[recipient]['address']]\n msg = self.message_template.format(self.contact_info[recipient][\n 'address'], self.contact_info[recipient]['name'], message)\n proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg.encode('utf-8'))\n\n def connect(self):\n pass\n\n def close(self):\n pass\n\n\nclass SMTP(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.hostname = config.smtp.hostname\n self.username = config.smtp.username\n self.passwd = config.smtp.passwd\n self.address = config.smtp.address\n self.contact_info = config.smtp.contact_info\n self.connected = False\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def connect(self):\n self.server = smtplib.SMTP(self.hostname)\n self.server.ehlo()\n self.server.starttls()\n self.server.login(self.username, self.passwd)\n self.connected = True\n\n def notify(self, recipient, message):\n if not self.connected:\n raise NotifyError(\n 'Not connected to SMTP server; cannot send 
notifications')\n self.server.sendmail(self.address, self.contact_info[recipient][\n 'address'], self.message_template.format(self.contact_info[\n recipient]['address'], self.contact_info[recipient]['name'],\n message))\n\n def close(self):\n if self.connected:\n self.server.quit()\n self.connected = False\n",
"step-3": "<mask token>\n\n\nclass Notification(object):\n\n def __init__(self, config, dry_run):\n self.dry_run = dry_run\n self.notifications = {}\n <mask token>\n <mask token>\n\n def notify(self, recipient, message):\n raise NotImplementedError('Need to subclass Notification')\n\n def connect(self):\n raise NotImplementedError('Need to subclass Notification')\n\n def close(self):\n raise NotImplementedError('Need to subclass Notification')\n\n\nclass SendMail(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.address = config.sendmail.address\n self.contact_info = config.sendmail.contact_info\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def notify(self, recipient, message):\n cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.\n contact_info[recipient]['address']]\n msg = self.message_template.format(self.contact_info[recipient][\n 'address'], self.contact_info[recipient]['name'], message)\n proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg.encode('utf-8'))\n\n def connect(self):\n pass\n\n def close(self):\n pass\n\n\nclass SMTP(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.hostname = config.smtp.hostname\n self.username = config.smtp.username\n self.passwd = config.smtp.passwd\n self.address = config.smtp.address\n self.contact_info = config.smtp.contact_info\n self.connected = False\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def connect(self):\n self.server = smtplib.SMTP(self.hostname)\n self.server.ehlo()\n 
self.server.starttls()\n self.server.login(self.username, self.passwd)\n self.connected = True\n\n def notify(self, recipient, message):\n if not self.connected:\n raise NotifyError(\n 'Not connected to SMTP server; cannot send notifications')\n self.server.sendmail(self.address, self.contact_info[recipient][\n 'address'], self.message_template.format(self.contact_info[\n recipient]['address'], self.contact_info[recipient]['name'],\n message))\n\n def close(self):\n if self.connected:\n self.server.quit()\n self.connected = False\n",
"step-4": "import smtplib\nimport subprocess\nimport time\n\n\nclass NotifyError(Exception):\n\n def __init__(self, message):\n self.message = message\n\n\nclass Notification(object):\n\n def __init__(self, config, dry_run):\n self.dry_run = dry_run\n self.notifications = {}\n\n def submit(self, recipient, message):\n if recipient not in self.notifications:\n self.notifications[recipient] = []\n self.notifications[recipient].append(message)\n\n def notify_all(self):\n for recip in self.notifications:\n if len(self.notifications[recip]) > 0:\n self.notify(recip, '\\r\\n\\r\\n-------------------\\r\\n\\r\\n'.\n join(self.notifications[recip]))\n time.sleep(5)\n self.notifications[recip] = []\n\n def notify(self, recipient, message):\n raise NotImplementedError('Need to subclass Notification')\n\n def connect(self):\n raise NotImplementedError('Need to subclass Notification')\n\n def close(self):\n raise NotImplementedError('Need to subclass Notification')\n\n\nclass SendMail(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.address = config.sendmail.address\n self.contact_info = config.sendmail.contact_info\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def notify(self, recipient, message):\n cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.\n contact_info[recipient]['address']]\n msg = self.message_template.format(self.contact_info[recipient][\n 'address'], self.contact_info[recipient]['name'], message)\n proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg.encode('utf-8'))\n\n def connect(self):\n pass\n\n def close(self):\n pass\n\n\nclass SMTP(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n 
self.hostname = config.smtp.hostname\n self.username = config.smtp.username\n self.passwd = config.smtp.passwd\n self.address = config.smtp.address\n self.contact_info = config.smtp.contact_info\n self.connected = False\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def connect(self):\n self.server = smtplib.SMTP(self.hostname)\n self.server.ehlo()\n self.server.starttls()\n self.server.login(self.username, self.passwd)\n self.connected = True\n\n def notify(self, recipient, message):\n if not self.connected:\n raise NotifyError(\n 'Not connected to SMTP server; cannot send notifications')\n self.server.sendmail(self.address, self.contact_info[recipient][\n 'address'], self.message_template.format(self.contact_info[\n recipient]['address'], self.contact_info[recipient]['name'],\n message))\n\n def close(self):\n if self.connected:\n self.server.quit()\n self.connected = False\n",
"step-5": "import smtplib\nimport subprocess\nimport time\n\nclass NotifyError(Exception):\n def __init__(self, message):\n self.message = message\n\nclass Notification(object):\n def __init__(self, config, dry_run):\n self.dry_run = dry_run\n self.notifications = {}\n\n def submit(self, recipient, message):\n if recipient not in self.notifications:\n self.notifications[recipient] = []\n self.notifications[recipient].append(message)\n\n def notify_all(self):\n for recip in self.notifications:\n if len(self.notifications[recip]) > 0:\n self.notify(recip, '\\r\\n\\r\\n-------------------\\r\\n\\r\\n'.join(self.notifications[recip]))\n time.sleep(5)\n self.notifications[recip] = []\n\n def notify(self, recipient, message):\n raise NotImplementedError('Need to subclass Notification')\n\n def connect(self):\n raise NotImplementedError('Need to subclass Notification')\n \n def close(self):\n raise NotImplementedError('Need to subclass Notification')\n\n\nclass SendMail(Notification):\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.address = config.sendmail.address\n self.contact_info = config.sendmail.contact_info\n self.message_template = '\\r\\n'.join(['From: '+self.address,\n 'To: {}',\n 'Subject: ['+config.name+'] Notifications',\n '',\n 'Greetings Human {},',\n '',\n '{}'\n '',\n '',\n 'Beep boop,',\n config.name + ' Bot'])\n\n def notify(self, recipient, message):\n # -i flag: do NOT treat bare dot as EOF\n cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.contact_info[recipient]['address']]\n msg = self.message_template.format(self.contact_info[recipient]['address'], self.contact_info[recipient]['name'], message)\n proc = subprocess.Popen(cmd, shell=False,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg.encode('utf-8'))\n #TODO handle errors\n #print(f\"ret: {proc.returncode}\")\n #print(\"stdout:\" + str(out))\n #print(\"stderr:\" + str(err))\n\n def 
connect(self):\n pass\n\n def close(self):\n pass\n\nclass SMTP(Notification):\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.hostname = config.smtp.hostname\n self.username = config.smtp.username\n self.passwd = config.smtp.passwd\n self.address = config.smtp.address\n self.contact_info = config.smtp.contact_info\n self.connected = False\n self.message_template = '\\r\\n'.join(['From: '+self.address,\n 'To: {}',\n 'Subject: ['+config.name+'] Notifications',\n '',\n 'Greetings Human {},',\n '',\n '{}'\n '',\n '',\n 'Beep boop,',\n config.name + ' Bot'])\n\n #TODO deal with smtplib exceptions\n def connect(self):\n self.server = smtplib.SMTP(self.hostname)\n self.server.ehlo()\n self.server.starttls()\n self.server.login(self.username, self.passwd)\n self.connected = True\n\n #TODO implement saving messages to disk with timestamp if send fails\n #TODO deal with smtplib exceptions\n def notify(self, recipient, message):\n if not self.connected:\n raise NotifyError('Not connected to SMTP server; cannot send notifications')\n self.server.sendmail(self.address, \n\t\t\t\tself.contact_info[recipient]['address'], \n\t\t\t\tself.message_template.format(self.contact_info[recipient]['address'], self.contact_info[recipient]['name'], message)\n )\n\n #TODO deal with smtplib exceptions\n def close(self):\n if self.connected: \n self.server.quit()\n self.connected = False\n\n\n\n",
"step-ids": [
11,
12,
15,
20,
21
]
}
|
[
11,
12,
15,
20,
21
] |
import cv2
import sys
# Load the Haar cascades
face_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_frontalface_default.xml')
eyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')
capture = cv2.VideoCapture(0)
_, image = capture.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
capture.release()
cv2.destroyAllWindows()
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) >= 1:
sys.stdout.write("1")
else:
sys.stdout.write("0")
|
normal
|
{
"blob_id": "4d707e23f66e8b6bea05a5901d3d8e459247c6c1",
"index": 3840,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncapture.release()\ncv2.destroyAllWindows()\n<mask token>\nif len(faces) >= 1:\n sys.stdout.write('1')\nelse:\n sys.stdout.write('0')\n",
"step-3": "<mask token>\nface_cascade = cv2.CascadeClassifier(\n './haar_cascades/haarcascade_frontalface_default.xml')\neyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')\ncapture = cv2.VideoCapture(0)\n_, image = capture.read()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncapture.release()\ncv2.destroyAllWindows()\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\nif len(faces) >= 1:\n sys.stdout.write('1')\nelse:\n sys.stdout.write('0')\n",
"step-4": "import cv2\nimport sys\nface_cascade = cv2.CascadeClassifier(\n './haar_cascades/haarcascade_frontalface_default.xml')\neyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')\ncapture = cv2.VideoCapture(0)\n_, image = capture.read()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncapture.release()\ncv2.destroyAllWindows()\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\nif len(faces) >= 1:\n sys.stdout.write('1')\nelse:\n sys.stdout.write('0')\n",
"step-5": "import cv2\nimport sys\n\n# Load the Haar cascades\nface_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_frontalface_default.xml')\neyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')\n\ncapture = cv2.VideoCapture(0)\n_, image = capture.read()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\ncapture.release()\ncv2.destroyAllWindows()\n\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\nif len(faces) >= 1:\n sys.stdout.write(\"1\")\nelse:\n sys.stdout.write(\"0\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from math import log
result = []
formula = int(input("For exit press 0\nChoose the formula #1 #2 #3: "))
while (formula >= 0) and (formula <= 3):
a = float(input("Enter a:"))
min_x = float(input("Enter minx:"))
max_x = float(input("Enter maxx:"))
step = int(input("Enter steps:"))
x = min_x
if formula == 1:
d = (-45*a**2+26*a*x+7*x**2)
if d !=0:
for i in range(step):
while x <= max_x:
G = ((-7*(4*a**2+15*a*x-4*x**2))/d)
result.append(G)
print("x=%.3f \tG=%.3f" % (float(x), G))
x += (max_x-min_x)/(step-1)
break
else:
print("Err")
elif formula == 2:
for i in range(step):
while x <= max_x:
F = (2**(40*(a**2)-107*a*x+63*(x**2)))
result.append(F)
print("x=%.3f \tF=%.3f" % (float(x), F))
x += (max_x-min_x)/(step-1)
break
elif formula == 3:
for i in range(step):
while x <= max_x:
Y = log(a**2-2*a*x+3*x**2+1)
result.append(Y)
print("x=%.3f \tY=%.3f" % (float(x), Y))
x += (max_x-min_x)/(step-1)
break
else:
print("Err")
print("Max.res. = ", max(result))
print("Min.res. = ", min(result))
|
normal
|
{
"blob_id": "44c4a1f4b32b45fd95eb8b0a42a718d05d967e04",
"index": 2536,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile formula >= 0 and formula <= 3:\n a = float(input('Enter a:'))\n min_x = float(input('Enter minx:'))\n max_x = float(input('Enter maxx:'))\n step = int(input('Enter steps:'))\n x = min_x\n if formula == 1:\n d = -45 * a ** 2 + 26 * a * x + 7 * x ** 2\n if d != 0:\n for i in range(step):\n while x <= max_x:\n G = -7 * (4 * a ** 2 + 15 * a * x - 4 * x ** 2) / d\n result.append(G)\n print('x=%.3f \\tG=%.3f' % (float(x), G))\n x += (max_x - min_x) / (step - 1)\n break\n else:\n print('Err')\n elif formula == 2:\n for i in range(step):\n while x <= max_x:\n F = 2 ** (40 * a ** 2 - 107 * a * x + 63 * x ** 2)\n result.append(F)\n print('x=%.3f \\tF=%.3f' % (float(x), F))\n x += (max_x - min_x) / (step - 1)\n break\n elif formula == 3:\n for i in range(step):\n while x <= max_x:\n Y = log(a ** 2 - 2 * a * x + 3 * x ** 2 + 1)\n result.append(Y)\n print('x=%.3f \\tY=%.3f' % (float(x), Y))\n x += (max_x - min_x) / (step - 1)\n break\nelse:\n print('Err')\nprint('Max.res. = ', max(result))\nprint('Min.res. = ', min(result))\n",
"step-3": "<mask token>\nresult = []\nformula = int(input(\"\"\"For exit press 0\nChoose the formula #1 #2 #3: \"\"\"))\nwhile formula >= 0 and formula <= 3:\n a = float(input('Enter a:'))\n min_x = float(input('Enter minx:'))\n max_x = float(input('Enter maxx:'))\n step = int(input('Enter steps:'))\n x = min_x\n if formula == 1:\n d = -45 * a ** 2 + 26 * a * x + 7 * x ** 2\n if d != 0:\n for i in range(step):\n while x <= max_x:\n G = -7 * (4 * a ** 2 + 15 * a * x - 4 * x ** 2) / d\n result.append(G)\n print('x=%.3f \\tG=%.3f' % (float(x), G))\n x += (max_x - min_x) / (step - 1)\n break\n else:\n print('Err')\n elif formula == 2:\n for i in range(step):\n while x <= max_x:\n F = 2 ** (40 * a ** 2 - 107 * a * x + 63 * x ** 2)\n result.append(F)\n print('x=%.3f \\tF=%.3f' % (float(x), F))\n x += (max_x - min_x) / (step - 1)\n break\n elif formula == 3:\n for i in range(step):\n while x <= max_x:\n Y = log(a ** 2 - 2 * a * x + 3 * x ** 2 + 1)\n result.append(Y)\n print('x=%.3f \\tY=%.3f' % (float(x), Y))\n x += (max_x - min_x) / (step - 1)\n break\nelse:\n print('Err')\nprint('Max.res. = ', max(result))\nprint('Min.res. = ', min(result))\n",
"step-4": "from math import log\nresult = []\nformula = int(input(\"\"\"For exit press 0\nChoose the formula #1 #2 #3: \"\"\"))\nwhile formula >= 0 and formula <= 3:\n a = float(input('Enter a:'))\n min_x = float(input('Enter minx:'))\n max_x = float(input('Enter maxx:'))\n step = int(input('Enter steps:'))\n x = min_x\n if formula == 1:\n d = -45 * a ** 2 + 26 * a * x + 7 * x ** 2\n if d != 0:\n for i in range(step):\n while x <= max_x:\n G = -7 * (4 * a ** 2 + 15 * a * x - 4 * x ** 2) / d\n result.append(G)\n print('x=%.3f \\tG=%.3f' % (float(x), G))\n x += (max_x - min_x) / (step - 1)\n break\n else:\n print('Err')\n elif formula == 2:\n for i in range(step):\n while x <= max_x:\n F = 2 ** (40 * a ** 2 - 107 * a * x + 63 * x ** 2)\n result.append(F)\n print('x=%.3f \\tF=%.3f' % (float(x), F))\n x += (max_x - min_x) / (step - 1)\n break\n elif formula == 3:\n for i in range(step):\n while x <= max_x:\n Y = log(a ** 2 - 2 * a * x + 3 * x ** 2 + 1)\n result.append(Y)\n print('x=%.3f \\tY=%.3f' % (float(x), Y))\n x += (max_x - min_x) / (step - 1)\n break\nelse:\n print('Err')\nprint('Max.res. = ', max(result))\nprint('Min.res. = ', min(result))\n",
"step-5": "from math import log\n\nresult = []\n\nformula = int(input(\"For exit press 0\\nChoose the formula #1 #2 #3: \"))\nwhile (formula >= 0) and (formula <= 3):\n a = float(input(\"Enter a:\"))\n min_x = float(input(\"Enter minx:\"))\n max_x = float(input(\"Enter maxx:\"))\n step = int(input(\"Enter steps:\"))\n x = min_x\n\n if formula == 1:\n d = (-45*a**2+26*a*x+7*x**2)\n if d !=0:\n for i in range(step):\n while x <= max_x:\n G = ((-7*(4*a**2+15*a*x-4*x**2))/d)\n result.append(G)\n print(\"x=%.3f \\tG=%.3f\" % (float(x), G))\n x += (max_x-min_x)/(step-1)\n break\n else:\n print(\"Err\")\n\n elif formula == 2:\n for i in range(step):\n while x <= max_x:\n F = (2**(40*(a**2)-107*a*x+63*(x**2)))\n result.append(F)\n print(\"x=%.3f \\tF=%.3f\" % (float(x), F))\n x += (max_x-min_x)/(step-1)\n break\n\n elif formula == 3:\n for i in range(step):\n while x <= max_x:\n Y = log(a**2-2*a*x+3*x**2+1)\n result.append(Y)\n print(\"x=%.3f \\tY=%.3f\" % (float(x), Y))\n x += (max_x-min_x)/(step-1)\n break\nelse:\n print(\"Err\")\nprint(\"Max.res. = \", max(result))\nprint(\"Min.res. = \", min(result))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 15 15:36:38 2021
@author: mav24
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import QuantileTransformer, StandardScaler, PowerTransformer, MaxAbsScaler
from sklearn.cross_decomposition import PLSRegression
from sklearn.ensemble import ExtraTreesRegressor, IsolationForest, GradientBoostingRegressor
from sklearn.metrics import r2_score as r2
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.model_selection import cross_val_score, KFold, GridSearchCV, train_test_split
"""
Reading the training data
"""
path = '/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'
data = pd.read_excel(path)
#data.drop(columns=['Unnamed: 0', 'diesel', 'station wagon'], inplace=True)
drop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders', 'encoded origin']
data.drop(columns=drop, inplace=True)
# Scaling the data Standar sceler
X = data.drop(columns='mpg')
Y = data['mpg']
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
"""
# Outliers Detection
iso = IsolationForest(contamination=0.05)
yhat = iso.fit_predict(X_scaled)
mask = yhat != -1
X_scaled, Y = X_scaled[mask, :], Y[mask]
"""
# Splitting the training data to train and test
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.20, random_state=13)
# Training and prediction
model = ExtraTreesRegressor()
#model = GradientBoostingRegressor()
model.fit(X_train, Y_train)
pred_test = model.predict(X_test)
print('With Standar Scaler')
print(f'The R2 accuracy is: {r2(Y_test, pred_test)}')
print(f'The mean square error is: {mse(Y_test, pred_test)}')
print(f'Mean absolute error is: {mae(Y_test, pred_test)}')
model_for_cross = ExtraTreesRegressor()
#model_for_cross = GradientBoostingRegressor()
cross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring='neg_root_mean_squared_error')
print(f'Cross validation is: {cross_val} \n and mean: {np.mean(cross_val)} \n and std:{np.std(cross_val)}')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
pipe = Pipeline(steps=[('scaler', StandardScaler()),
('extr', ExtraTreesRegressor(n_jobs=3))])
param_grid = {'extr__n_estimators':[100],
#'extr__criterion':['squared_error', 'mse', 'mae'],
'extr__max_depth':[None, 10, 20, 50, 100, 200, len(X_train)],
#'extr__min_samples_split':[1,2,3,5,10],
#'extr__min_samples_leaf':[1,2,3,5,10],
'extr__max_features':['auto', 'sqrt', 'log2'],
#'extr__max_leaf_nodes':[None, 1,2,3,4,5],
}
grid = GridSearchCV(pipe, param_grid, scoring='r2')
grid.fit(X_train, Y_train)
print(f'Best estimators for ExtraTreesRegressor: {grid.best_estimator_}')
print(f'Best score is: {grid.best_score_}')
"""
"""
# Scaling the data PowerTransformer
X = data.drop(columns='mpg')
Y = data['mpg']
scaler = PowerTransformer()
X_scaled = scaler.fit_transform(X)
# Splitting the training data to train and test
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.20, random_state=13)
# Training and prediction
model = ExtraTreesRegressor()
model.fit(X_train, Y_train)
pred_test = model.predict(X_test)
print('With PowerTransformer')
print(f'The R2 accuracy is: {r2(Y_test, pred_test)}')
print(f'The mean square error is: {mse(Y_test, pred_test)}')
print(f'Mean absolute error is: {mae(Y_test, pred_test)}')
"""
"""
Validate the model to unseen data
"""
#path_val = '/home/mav24/Documents/Development/Regeneration/Project/Data/vavlidation_data.xlsx'
#data_val = pd.read_excel(path_val)
|
normal
|
{
"blob_id": "4a17db6b65e1615b0d519581b3e63bc34ad16093",
"index": 1288,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndata.drop(columns=drop, inplace=True)\n<mask token>\nmodel.fit(X_train, Y_train)\n<mask token>\nprint('With Standar Scaler')\nprint(f'The R2 accuracy is: {r2(Y_test, pred_test)}')\nprint(f'The mean square error is: {mse(Y_test, pred_test)}')\nprint(f'Mean absolute error is: {mae(Y_test, pred_test)}')\n<mask token>\nprint(\n f\"\"\"Cross validation is: {cross_val} \n and mean: {np.mean(cross_val)} \n and std:{np.std(cross_val)}\"\"\"\n )\n<mask token>\n",
"step-3": "<mask token>\npath = (\n '/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'\n )\ndata = pd.read_excel(path)\ndrop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders',\n 'encoded origin']\ndata.drop(columns=drop, inplace=True)\nX = data.drop(columns='mpg')\nY = data['mpg']\nscaler = StandardScaler()\nX_scaled = scaler.fit_transform(X)\n<mask token>\nX_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=\n 0.2, random_state=13)\nmodel = ExtraTreesRegressor()\nmodel.fit(X_train, Y_train)\npred_test = model.predict(X_test)\nprint('With Standar Scaler')\nprint(f'The R2 accuracy is: {r2(Y_test, pred_test)}')\nprint(f'The mean square error is: {mse(Y_test, pred_test)}')\nprint(f'Mean absolute error is: {mae(Y_test, pred_test)}')\nmodel_for_cross = ExtraTreesRegressor()\ncross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring=\n 'neg_root_mean_squared_error')\nprint(\n f\"\"\"Cross validation is: {cross_val} \n and mean: {np.mean(cross_val)} \n and std:{np.std(cross_val)}\"\"\"\n )\n<mask token>\n",
"step-4": "<mask token>\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import QuantileTransformer, StandardScaler, PowerTransformer, MaxAbsScaler\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.ensemble import ExtraTreesRegressor, IsolationForest, GradientBoostingRegressor\nfrom sklearn.metrics import r2_score as r2\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.model_selection import cross_val_score, KFold, GridSearchCV, train_test_split\n<mask token>\npath = (\n '/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'\n )\ndata = pd.read_excel(path)\ndrop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders',\n 'encoded origin']\ndata.drop(columns=drop, inplace=True)\nX = data.drop(columns='mpg')\nY = data['mpg']\nscaler = StandardScaler()\nX_scaled = scaler.fit_transform(X)\n<mask token>\nX_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=\n 0.2, random_state=13)\nmodel = ExtraTreesRegressor()\nmodel.fit(X_train, Y_train)\npred_test = model.predict(X_test)\nprint('With Standar Scaler')\nprint(f'The R2 accuracy is: {r2(Y_test, pred_test)}')\nprint(f'The mean square error is: {mse(Y_test, pred_test)}')\nprint(f'Mean absolute error is: {mae(Y_test, pred_test)}')\nmodel_for_cross = ExtraTreesRegressor()\ncross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring=\n 'neg_root_mean_squared_error')\nprint(\n f\"\"\"Cross validation is: {cross_val} \n and mean: {np.mean(cross_val)} \n and std:{np.std(cross_val)}\"\"\"\n )\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 15 15:36:38 2021\n\n@author: mav24\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.preprocessing import QuantileTransformer, StandardScaler, PowerTransformer, MaxAbsScaler\n\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.ensemble import ExtraTreesRegressor, IsolationForest, GradientBoostingRegressor\nfrom sklearn.metrics import r2_score as r2\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import mean_absolute_error as mae\n\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.model_selection import cross_val_score, KFold, GridSearchCV, train_test_split\n\n\n\"\"\"\nReading the training data\n\"\"\"\npath = '/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'\ndata = pd.read_excel(path)\n\n#data.drop(columns=['Unnamed: 0', 'diesel', 'station wagon'], inplace=True)\ndrop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders', 'encoded origin']\ndata.drop(columns=drop, inplace=True)\n\n\n# Scaling the data Standar sceler\nX = data.drop(columns='mpg')\nY = data['mpg']\nscaler = StandardScaler()\nX_scaled = scaler.fit_transform(X)\n\n\"\"\"\n# Outliers Detection\niso = IsolationForest(contamination=0.05)\nyhat = iso.fit_predict(X_scaled)\n\nmask = yhat != -1\nX_scaled, Y = X_scaled[mask, :], Y[mask]\n\"\"\"\n\n# Splitting the training data to train and test\nX_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.20, random_state=13)\n\n\n\n# Training and prediction\nmodel = ExtraTreesRegressor()\n#model = GradientBoostingRegressor()\nmodel.fit(X_train, Y_train)\npred_test = model.predict(X_test)\n\nprint('With Standar Scaler')\nprint(f'The R2 accuracy is: {r2(Y_test, pred_test)}')\nprint(f'The mean square error is: {mse(Y_test, pred_test)}')\nprint(f'Mean absolute error is: {mae(Y_test, pred_test)}')\n\n\n\n\n\nmodel_for_cross = 
ExtraTreesRegressor()\n#model_for_cross = GradientBoostingRegressor()\ncross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring='neg_root_mean_squared_error')\nprint(f'Cross validation is: {cross_val} \\n and mean: {np.mean(cross_val)} \\n and std:{np.std(cross_val)}')\n\n\n\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\npipe = Pipeline(steps=[('scaler', StandardScaler()),\n ('extr', ExtraTreesRegressor(n_jobs=3))])\n\nparam_grid = {'extr__n_estimators':[100],\n #'extr__criterion':['squared_error', 'mse', 'mae'],\n 'extr__max_depth':[None, 10, 20, 50, 100, 200, len(X_train)],\n #'extr__min_samples_split':[1,2,3,5,10],\n #'extr__min_samples_leaf':[1,2,3,5,10],\n 'extr__max_features':['auto', 'sqrt', 'log2'],\n #'extr__max_leaf_nodes':[None, 1,2,3,4,5],\n }\n\ngrid = GridSearchCV(pipe, param_grid, scoring='r2')\ngrid.fit(X_train, Y_train)\nprint(f'Best estimators for ExtraTreesRegressor: {grid.best_estimator_}')\nprint(f'Best score is: {grid.best_score_}')\n\"\"\"\n\n\n\"\"\"\n\n# Scaling the data PowerTransformer\nX = data.drop(columns='mpg')\nY = data['mpg']\nscaler = PowerTransformer()\nX_scaled = scaler.fit_transform(X)\n\n\n# Splitting the training data to train and test\nX_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.20, random_state=13)\n\n\n\n# Training and prediction\nmodel = ExtraTreesRegressor()\nmodel.fit(X_train, Y_train)\npred_test = model.predict(X_test)\n\n\nprint('With PowerTransformer')\nprint(f'The R2 accuracy is: {r2(Y_test, pred_test)}')\nprint(f'The mean square error is: {mse(Y_test, pred_test)}')\nprint(f'Mean absolute error is: {mae(Y_test, pred_test)}')\n\n\"\"\"\n\n\"\"\"\nValidate the model to unseen data\n\"\"\"\n\n#path_val = '/home/mav24/Documents/Development/Regeneration/Project/Data/vavlidation_data.xlsx'\n#data_val = pd.read_excel(path_val)\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import logging
from datetime import datetime
import torch
from naruto_skills.training_checker import TrainingChecker
from data_for_train import is_question as my_dataset
from model_def.lstm_attention import LSTMAttention
from utils import pytorch_utils
from train.new_trainer import TrainingLoop, TrainingLogger, EvaluateLogger, Evaluator
def input2_text(first_input, *params):
    """Map an index-encoded input batch back to documents via the dataset
    vocabulary (my_dataset.voc.idx2docs).

    Extra positional arguments are accepted and ignored so the function
    matches the transform-callback signature expected by the logger.
    """
    vocabulary = my_dataset.voc
    return vocabulary.idx2docs(first_input)
def target2_text(first_input, *params):
    """Identity transform for targets.

    Extra positional arguments are accepted and ignored so the function
    matches the transform-callback signature expected by the logger.
    """
    return first_input
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    # Training hyper-parameters and reporting intervals (in steps).
    BATCH_SIZE = 128
    NUM_EPOCHS = 500
    NUM_WORKERS = 0          # NOTE(review): defined but never used below -- confirm intended
    PRINT_EVERY = 100        # measure/log training metrics every N steps
    PREDICT_EVERY = 500      # dump sample predictions every N steps
    EVAL_EVERY = 500         # run the evaluator every N steps
    PRE_TRAINED_MODEL = ''   # path to a checkpoint to resume from; '' = train from scratch

    # Build the data loaders (size=None presumably means "use the full split" -- TODO confirm).
    my_dataset.bootstrap()
    train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)
    eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)
    logging.info('There will be %s steps for training', NUM_EPOCHS * len(train_loader))

    # Two-class classifier sized to the dataset vocabulary.
    model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2)
    model.train()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    logging.info('Model architecture: \n%s', model)
    logging.info('Total trainable parameters: %s', pytorch_utils.count_parameters(model))

    init_step = 0
    # Restore model weights and optimizer state when resuming from a checkpoint.
    # NOTE(review): init_step is restored but never passed to the training loop -- confirm intended.
    if PRE_TRAINED_MODEL != '':
        checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        model.optimizer.load_state_dict(checkpoint['optimizer'])
        init_step = checkpoint.get('step', 0)

        logging.info('Load pre-trained model from %s successfully', PRE_TRAINED_MODEL)

    # Checkpoints and logs are grouped by model class name and a per-run timestamp id.
    root_dir = '/source/main/train/output/'
    exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')

    path_checkpoints = os.path.join(root_dir, 'saved_models', model.__class__.__name__, exp_id)
    training_checker = TrainingChecker(model, root_dir=path_checkpoints, init_score=-10000)

    path_logging = os.path.join(root_dir, 'logging', model.__class__.__name__, exp_id)
    # Train-side logger uses the two transforms above to render inputs/targets as text.
    train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY, predict_interval=PREDICT_EVERY,
                                  path_to_file=path_logging + '_train', input_transform=input2_text,
                                  output_transform=target2_text)

    eval_logger = EvaluateLogger(path_logging + '_validate')
    evaluator = Evaluator(model, eval_loader, device, EVAL_EVERY, eval_logger, training_checker)

    # Run the full training loop; evaluation and checkpointing happen inside it.
    training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS, train_logger, evaluator)
    training_loop.run()
|
normal
|
{
"blob_id": "77884dd72f5efe91fccad27e6328c4ad34378be2",
"index": 6953,
"step-1": "<mask token>\n\n\ndef target2_text(first_input, *params):\n return first_input\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef input2_text(first_input, *params):\n return my_dataset.voc.idx2docs(first_input)\n\n\ndef target2_text(first_input, *params):\n return first_input\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef input2_text(first_input, *params):\n return my_dataset.voc.idx2docs(first_input)\n\n\ndef target2_text(first_input, *params):\n return first_input\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n BATCH_SIZE = 128\n NUM_EPOCHS = 500\n NUM_WORKERS = 0\n PRINT_EVERY = 100\n PREDICT_EVERY = 500\n EVAL_EVERY = 500\n PRE_TRAINED_MODEL = ''\n my_dataset.bootstrap()\n train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)\n eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)\n logging.info('There will be %s steps for training', NUM_EPOCHS * len(\n train_loader))\n model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2\n )\n model.train()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model.to(device)\n logging.info('Model architecture: \\n%s', model)\n logging.info('Total trainable parameters: %s', pytorch_utils.\n count_parameters(model))\n init_step = 0\n if PRE_TRAINED_MODEL != '':\n checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)\n model.load_state_dict(checkpoint['model_state_dict'])\n model.optimizer.load_state_dict(checkpoint['optimizer'])\n init_step = checkpoint.get('step', 0)\n logging.info('Load pre-trained model from %s successfully',\n PRE_TRAINED_MODEL)\n root_dir = '/source/main/train/output/'\n exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')\n path_checkpoints = os.path.join(root_dir, 'saved_models', model.\n __class__.__name__, exp_id)\n training_checker = TrainingChecker(model, root_dir=path_checkpoints,\n init_score=-10000)\n path_logging = os.path.join(root_dir, 'logging', model.__class__.\n __name__, exp_id)\n train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY,\n predict_interval=PREDICT_EVERY, path_to_file=path_logging +\n '_train', input_transform=input2_text, output_transform=target2_text)\n eval_logger = EvaluateLogger(path_logging + '_validate')\n evaluator = 
Evaluator(model, eval_loader, device, EVAL_EVERY,\n eval_logger, training_checker)\n training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS,\n train_logger, evaluator)\n training_loop.run()\n",
"step-4": "import os\nimport logging\nfrom datetime import datetime\nimport torch\nfrom naruto_skills.training_checker import TrainingChecker\nfrom data_for_train import is_question as my_dataset\nfrom model_def.lstm_attention import LSTMAttention\nfrom utils import pytorch_utils\nfrom train.new_trainer import TrainingLoop, TrainingLogger, EvaluateLogger, Evaluator\n\n\ndef input2_text(first_input, *params):\n return my_dataset.voc.idx2docs(first_input)\n\n\ndef target2_text(first_input, *params):\n return first_input\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n BATCH_SIZE = 128\n NUM_EPOCHS = 500\n NUM_WORKERS = 0\n PRINT_EVERY = 100\n PREDICT_EVERY = 500\n EVAL_EVERY = 500\n PRE_TRAINED_MODEL = ''\n my_dataset.bootstrap()\n train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)\n eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)\n logging.info('There will be %s steps for training', NUM_EPOCHS * len(\n train_loader))\n model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2\n )\n model.train()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model.to(device)\n logging.info('Model architecture: \\n%s', model)\n logging.info('Total trainable parameters: %s', pytorch_utils.\n count_parameters(model))\n init_step = 0\n if PRE_TRAINED_MODEL != '':\n checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)\n model.load_state_dict(checkpoint['model_state_dict'])\n model.optimizer.load_state_dict(checkpoint['optimizer'])\n init_step = checkpoint.get('step', 0)\n logging.info('Load pre-trained model from %s successfully',\n PRE_TRAINED_MODEL)\n root_dir = '/source/main/train/output/'\n exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')\n path_checkpoints = os.path.join(root_dir, 'saved_models', model.\n __class__.__name__, exp_id)\n training_checker = TrainingChecker(model, root_dir=path_checkpoints,\n init_score=-10000)\n path_logging = 
os.path.join(root_dir, 'logging', model.__class__.\n __name__, exp_id)\n train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY,\n predict_interval=PREDICT_EVERY, path_to_file=path_logging +\n '_train', input_transform=input2_text, output_transform=target2_text)\n eval_logger = EvaluateLogger(path_logging + '_validate')\n evaluator = Evaluator(model, eval_loader, device, EVAL_EVERY,\n eval_logger, training_checker)\n training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS,\n train_logger, evaluator)\n training_loop.run()\n",
"step-5": "import os\nimport logging\nfrom datetime import datetime\n\nimport torch\nfrom naruto_skills.training_checker import TrainingChecker\n\nfrom data_for_train import is_question as my_dataset\nfrom model_def.lstm_attention import LSTMAttention\nfrom utils import pytorch_utils\nfrom train.new_trainer import TrainingLoop, TrainingLogger, EvaluateLogger, Evaluator\n\n\ndef input2_text(first_input, *params):\n return my_dataset.voc.idx2docs(first_input)\n\n\ndef target2_text(first_input, *params):\n return first_input\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n BATCH_SIZE = 128\n NUM_EPOCHS = 500\n NUM_WORKERS = 0\n PRINT_EVERY = 100\n PREDICT_EVERY = 500\n EVAL_EVERY = 500\n PRE_TRAINED_MODEL = ''\n\n my_dataset.bootstrap()\n train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)\n eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)\n logging.info('There will be %s steps for training', NUM_EPOCHS * len(train_loader))\n model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2)\n model.train()\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n logging.info('Model architecture: \\n%s', model)\n logging.info('Total trainable parameters: %s', pytorch_utils.count_parameters(model))\n\n init_step = 0\n # Restore model\n if PRE_TRAINED_MODEL != '':\n checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)\n model.load_state_dict(checkpoint['model_state_dict'])\n model.optimizer.load_state_dict(checkpoint['optimizer'])\n init_step = checkpoint.get('step', 0)\n\n logging.info('Load pre-trained model from %s successfully', PRE_TRAINED_MODEL)\n\n root_dir = '/source/main/train/output/'\n exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')\n\n path_checkpoints = os.path.join(root_dir, 'saved_models', model.__class__.__name__, exp_id)\n training_checker = TrainingChecker(model, root_dir=path_checkpoints, 
init_score=-10000)\n\n path_logging = os.path.join(root_dir, 'logging', model.__class__.__name__, exp_id)\n train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY, predict_interval=PREDICT_EVERY,\n path_to_file=path_logging + '_train', input_transform=input2_text,\n output_transform=target2_text)\n\n eval_logger = EvaluateLogger(path_logging + '_validate')\n evaluator = Evaluator(model, eval_loader, device, EVAL_EVERY, eval_logger, training_checker)\n\n training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS, train_logger, evaluator)\n training_loop.run()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import matplotlib.pyplot as plt
import numpy as np
steps = 10
num_tests = 100

# Average the per-run message counts: 'txt.txt' holds num_tests integer
# samples (one per line) for each node count in 10, 20, ..., 100.
res = []
with open('txt.txt', 'r') as f:
    lines = f.readlines()
    cursor = 0
    for _ in range(10, 110, 10):
        total = sum(int(lines[cursor + k]) for k in range(num_tests))
        cursor += num_tests
        res.append(total / num_tests)

x = list(range(10, 110, steps))
y = res

# Overlay a quadratic least-squares fit on the averaged points.
coeffs = np.polyfit(x, res, 2)
best_fit = np.poly1d(coeffs)
plt.plot(x, y, 'o')
plt.plot(x, best_fit(x), label="Best fit 2 degree polynomial")

plt.title("#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)")
plt.xlabel("Number of nodes in fully connected graph")
plt.ylabel("Number of messages")
plt.legend()
plt.savefig("Messages.svg")

plt.clf()

steps = 10
num_tests = 10

# Average wall-clock times: every run occupies 3 lines in 'txt2.txt'; the
# second whitespace-separated field of a run's first line is the time.
res = []
with open('txt2.txt', 'r') as f:
    lines = f.readlines()
    cursor = 0
    for procs in range(1, 13):
        times = []
        for _ in range(10, 110, 10):
            total = 0
            for _run in range(num_tests):
                total += float(lines[cursor].split()[1])
                cursor += 3
            times.append(total / num_tests)
        res.append(times)

x = list(range(10, 110, steps))
y = res

plt.title("Time taken vs. number of cores used (Averaged over 10 runs)")
plt.xlabel("Number of nodes in fully connected graph")
plt.ylabel("Time taken (in seconds)")

# One curve per selected core count (res is 1-indexed by core count).
for procs in [1, 2, 4, 8, 12]:
    plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')

plt.legend()
plt.savefig("Time.svg")
|
normal
|
{
"blob_id": "176ffac7ad47f5c43a24acc664631f8353ec5100",
"index": 967,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg / num_tests)\n<mask token>\nplt.plot(x, y, 'o')\nplt.plot(x, p(x), label='Best fit 2 degree polynomial')\nplt.title('#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Number of messages')\nplt.legend()\nplt.savefig('Messages.svg')\nplt.clf()\n<mask token>\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1, 13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp / num_tests)\n res.append(times)\n<mask token>\nplt.title('Time taken vs. number of cores used (Averaged over 10 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Time taken (in seconds)')\nfor procs in [1, 2, 4, 8, 12]:\n plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')\nplt.legend()\nplt.savefig('Time.svg')\n",
"step-3": "<mask token>\nsteps = 10\nnum_tests = 100\nres = []\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg / num_tests)\nx = list(range(10, 110, steps))\ny = res\nz = np.polyfit(x, res, 2)\np = np.poly1d(z)\nplt.plot(x, y, 'o')\nplt.plot(x, p(x), label='Best fit 2 degree polynomial')\nplt.title('#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Number of messages')\nplt.legend()\nplt.savefig('Messages.svg')\nplt.clf()\nsteps = 10\nnum_tests = 10\nres = []\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1, 13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp / num_tests)\n res.append(times)\nx = list(range(10, 110, steps))\ny = res\nplt.title('Time taken vs. number of cores used (Averaged over 10 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Time taken (in seconds)')\nfor procs in [1, 2, 4, 8, 12]:\n plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')\nplt.legend()\nplt.savefig('Time.svg')\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nsteps = 10\nnum_tests = 100\nres = []\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg / num_tests)\nx = list(range(10, 110, steps))\ny = res\nz = np.polyfit(x, res, 2)\np = np.poly1d(z)\nplt.plot(x, y, 'o')\nplt.plot(x, p(x), label='Best fit 2 degree polynomial')\nplt.title('#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Number of messages')\nplt.legend()\nplt.savefig('Messages.svg')\nplt.clf()\nsteps = 10\nnum_tests = 10\nres = []\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1, 13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp / num_tests)\n res.append(times)\nx = list(range(10, 110, steps))\ny = res\nplt.title('Time taken vs. number of cores used (Averaged over 10 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Time taken (in seconds)')\nfor procs in [1, 2, 4, 8, 12]:\n plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')\nplt.legend()\nplt.savefig('Time.svg')\n",
"step-5": "import matplotlib.pyplot as plt\nimport numpy as np\n\nsteps = 10\nnum_tests = 100\n\nres = []\n\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg/num_tests)\n\nx = list(range(10, 110, steps))\ny = res\n\nz = np.polyfit(x, res, 2)\n# print(z)\np = np.poly1d(z)\nplt.plot(x, y, 'o')\nplt.plot(x, p(x),label = \"Best fit 2 degree polynomial\")\n\nplt.title(\"#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)\")\nplt.xlabel(\"Number of nodes in fully connected graph\")\nplt.ylabel(\"Number of messages\")\n\nplt.legend()\n# plt.show()\n\nplt.savefig(\"Messages.svg\")\n\nplt.clf()\nsteps = 10\nnum_tests = 10\n\nres = []\n\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1,13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp/num_tests)\n res.append(times)\n\nx = list(range(10, 110, steps))\ny = res\n\n# z = np.polyfit(x, res, 2)\n# print(z)\n# p = np.poly1d(z)\n# plt.plot(x, y, 'o')\n# plt.plot(x, p(x),label = \"Best fit 2 degree polynomial\")\n\nplt.title(\"Time taken vs. number of cores used (Averaged over 10 runs)\")\nplt.xlabel(\"Number of nodes in fully connected graph\")\nplt.ylabel(\"Time taken (in seconds)\")\n\n# for procs in range(1,13):\nfor procs in [1,2,4,8,12]:\n plt.plot(x,res[procs-1],label = str((procs))+' Cores')\n\nplt.legend()\n# plt.show()\n\nplt.savefig(\"Time.svg\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import template
register = template.Library()


@register.filter(name='range')
def filter_range(start, end=None):
    """Expose Python's range() to templates.

    ``{{ n|range }}`` yields range(n); ``{{ a|range:b }}`` yields range(a, b).
    """
    return range(start) if end is None else range(start, end)
|
normal
|
{
"blob_id": "f733885eed5d1cbf6e49db0997655ad627c9d795",
"index": 599,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](name='range')\ndef filter_range(start, end=None):\n if end is None:\n return range(start)\n else:\n return range(start, end)\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\[email protected](name='range')\ndef filter_range(start, end=None):\n if end is None:\n return range(start)\n else:\n return range(start, end)\n",
"step-4": "from django import template\nregister = template.Library()\n\n\[email protected](name='range')\ndef filter_range(start, end=None):\n if end is None:\n return range(start)\n else:\n return range(start, end)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
import random
#import matplotlib.pyplot as plt
import numpy as np
import time
class Waterfilling:
    """
    Max-min fair ("water-filling") rate allocation.

    Inputs: routes is a (num_flows x num_links) matrix, nonzero where a
    flow traverses a link; c is a (num_links x 1) capacity vector.

    After construction:
      x      -- (num_flows x 1) optimal per-flow rates
      r      -- (num_links x 1) link fair-share rates; float('inf') when the
                sum of flow allocations at a link is below its capacity
      level  -- number of water-filling levels found
      levels -- per-link level at which each link became a bottleneck
    routes and c must be initialized before calling this.
    """

    def __init__(self, routes, c, log, prec_library):
        #log = True
        #print "Waterfilling"
        #print mpmath.mp

        (self.num_flows, self.num_links) = routes.shape
        # Level (outer-loop round) at which each link saturated; inf = never.
        self.levels = np.ones((self.num_links, 1)) * float('inf')
        self.prec_library = prec_library

        eps = prec_library.eps1              # numerical tolerance from the caller's precision helper
        weights = np.ones((self.num_flows,1))  # 1 while a flow is active, 0 once frozen
        #print("weights", weights.shape, weights)
        #print("routes", routes.shape, routes)
        #self.r = np.ones((self.num_links,1)) * mpf_inf
        #self.x = np.ones((self.num_flows,1)) * mpf_inf

        x = np.zeros((self.num_flows,1))     # per-flow rates, grown level by level
        active_flows = np.ones((self.num_flows, 1), dtype=bool)

        rem_cap = c #np.ones((self.num_links, 1)) * prec_library.mpf_one
        # for i in range(self.num_links):
        #     rem_cap[i] = prec_library.mpf(c[i,0])

        self.max_level = 0
        num_active_flows = np.count_nonzero(active_flows, axis=0)
        #print(num_active_flows,"flows left")

        # Each round saturates (at least) one bottleneck link and freezes
        # the flows crossing it at their fair share.
        while num_active_flows > 0:

            # number of rem flows on all links
            link_weights = np.dot(routes.T, weights)
            assert(rem_cap.shape == link_weights.shape)
            try:
                fair_shares = np.where(link_weights>0, rem_cap/link_weights, float('inf'))
            except:
                # NOTE(review): bare except silently swallows errors; if the
                # division raises, fair_shares keeps its previous value -- confirm intended.
                pass
            #print("link_weights", link_weights)
            #print("rem_cap", rem_cap)
            #print("fair_shares", fair_shares)
            # NOTE(review): reshape() returns a new array that is discarded
            # here, so this statement has no effect.
            fair_shares.reshape(self.num_links, 1)
            bl = np.argmin(fair_shares)      # bottleneck link = smallest fair share
            #print ("bl",type(bl),bl)
            inc = float(fair_shares[bl, 0])  # rate increment granted to every active flow
            assert(inc < float('inf'))

            # increase level, only when link with smallest fair share rate
            # has a rate larger than last one, handles the following example
            # two links, each cap 10.0, each has one flow, and none in common
            # each link identified in different iterations of this loop
            if self.max_level == 0 or inc > eps: self.max_level += 1

            # Grant the increment only to still-active flows.
            x = np.where(active_flows, x + inc * weights, x)

            if log:
                print "In round",self.max_level,\
                    " link", bl, "has smallest fair share", inc, "b/s",\
                    "Next rate increase is", inc, " (type ", type(inc), ") cuz of bl ",\
                    bl, " with rem_cap ", rem_cap[bl,0], " b/s",\
                    "and ", link_weights[bl,0] , " of the total ",\
                    num_active_flows, " remaining flows"

            rem_cap = rem_cap - inc * link_weights
            # NOTE(review): -1e7 looks like a typo for -1e-7; as written only a
            # hugely negative remaining capacity triggers the warning below.
            neg_cap = list(np.where(rem_cap < -1e7)[0]) # for each (aka only) column
            if (len(neg_cap) > 0):
                print >> sys.stderr, "warning! in watefilling hp links with neg. rem_cap ", neg_cap
            # Freeze every flow that crosses the bottleneck link.
            bf = np.where(routes[:,bl] > 0)[0]
            active_flows[bf] = 0
            num_active_flows = np.count_nonzero(active_flows, axis=0)
            #print(num_active_flows,"flows left")
            weights[bf] = 0
            self.levels[bl] = self.max_level

        # get max. rate at each link
        r = np.ones((self.num_links,1)) * float('inf')
        for e in range(self.num_links):
            flows = np.nonzero(routes[:, e])[0]
            if len(flows) > 0:
                sum_demands = sum(x[flows])[0]
                cap = c[e,0]
                diff = abs(sum_demands - cap)
                # A link is a bottleneck when its flows (numerically) fill its capacity.
                if (sum_demands > cap or diff < eps):
                    r[e] = max(x[flows])
                    # NOTE(review): this print ignores the `log` flag -- confirm intended.
                    print "link",e,"has rate", r[e]
        self.level = self.max_level
        self.x = x
        self.r = r
        # Links with a finite fair-share rate are the bottlenecks.
        self.bottleneck_links_arr = np.where(self.r < float('inf'))[0]
        # Initialized empty; not filled in this constructor.
        self.bottleneck_links = {}
        self.non_bottleneck_links = {}
        self.sat_flows = {}
        self.unsat_flows = {}
# class Eps:
# def __init__(self):
# self.eps1 = 1e-7
# pass
# def main():
# for num_flows in [10, 100, 1000, 10000]:
# start = time.time()
# routes = np.ones((num_flows, 2))
# routes[:, 1] = 0
# routes[0:2, 1] = 1
# routes[0, 0] = 0
# c = np.ones((2,1))
# wf = Waterfilling(routes, c, True, Eps())
# stop = time.time()
# elapsed = stop - start
# print("num_flows", num_flows, "elapsed", elapsed,"s")
# #print wf.x
# #print wf.r
# #print wf.level
# pass
# main()
|
normal
|
{
"blob_id": "93e534e8d425510b59310dcbfc5bca9cc32f245e",
"index": 9798,
"step-1": "import sys\nimport random\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\nclass Waterfilling:\n \"\"\"\n initializes x and r with optimal flow allocations\n and link fair share rates for traffic matrix routes and link\n capacities c, and level with number of levels\n after running the waterfilling algorithm. note\n that if sum of flow allocations at a link is less than capacity\n then fair share of link is float('inf').\n not that routes and c must be initialized before calling this.\n \"\"\" \n\n def __init__(self, routes, c, log, prec_library):\n #log = True\n #print \"Waterfilling\"\n #print mpmath.mp\n \n (self.num_flows, self.num_links) = routes.shape\n self.levels = np.ones((self.num_links, 1)) * float('inf')\n self.prec_library = prec_library\n \n eps = prec_library.eps1\n weights = np.ones((self.num_flows,1))\n #print(\"weights\", weights.shape, weights)\n #print(\"routes\", routes.shape, routes)\n #self.r = np.ones((self.num_links,1)) * mpf_inf\n #self.x = np.ones((self.num_flows,1)) * mpf_inf \n\n x = np.zeros((self.num_flows,1))\n active_flows = np.ones((self.num_flows, 1), dtype=bool)\n\n \n rem_cap = c #np.ones((self.num_links, 1)) * prec_library.mpf_one\n # for i in range(self.num_links):\n # rem_cap[i] = prec_library.mpf(c[i,0])\n\n\n self.max_level = 0\n num_active_flows = np.count_nonzero(active_flows, axis=0)\n #print(num_active_flows,\"flows left\")\n\n while num_active_flows > 0:\n \n # number of rem flows on all links\n link_weights = np.dot(routes.T, weights)\n assert(rem_cap.shape == link_weights.shape)\n try:\n fair_shares = np.where(link_weights>0, rem_cap/link_weights, float('inf'))\n except:\n pass\n #print(\"link_weights\", link_weights)\n #print(\"rem_cap\", rem_cap)\n #print(\"fair_shares\", fair_shares)\n fair_shares.reshape(self.num_links, 1)\n bl = np.argmin(fair_shares)\n #print (\"bl\",type(bl),bl)\n inc = float(fair_shares[bl, 0])\n assert(inc < float('inf'))\n\n # increase level, only when link 
with smallest fair share rate\n # has a rate larger than last one, handles the following example\n # two links, each cap 10.0, each has one flow, and none in common\n # each link identified in different iterations of this loop\n if self.max_level == 0 or inc > eps: self.max_level += 1\n x = np.where(active_flows, x + inc * weights, x)\n\n if log:\n print \"In round\",self.max_level,\\\n \" link\", bl, \"has smallest fair share\", inc, \"b/s\",\\\n \"Next rate increase is\", inc, \" (type \", type(inc), \") cuz of bl \",\\\n bl, \" with rem_cap \", rem_cap[bl,0], \" b/s\",\\\n \"and \", link_weights[bl,0] , \" of the total \",\\\n num_active_flows, \" remaining flows\"\n rem_cap = rem_cap - inc * link_weights\n neg_cap = list(np.where(rem_cap < -1e7)[0]) # for each (aka only) column \n if (len(neg_cap) > 0):\n print >> sys.stderr, \"warning! in watefilling hp links with neg. rem_cap \", neg_cap\n bf = np.where(routes[:,bl] > 0)[0]\n active_flows[bf] = 0\n num_active_flows = np.count_nonzero(active_flows, axis=0)\n #print(num_active_flows,\"flows left\")\n weights[bf] = 0\n self.levels[bl] = self.max_level\n \n # get max. 
rate at each link\n r = np.ones((self.num_links,1)) * float('inf')\n for e in range(self.num_links):\n flows = np.nonzero(routes[:, e])[0]\n if len(flows) > 0:\n sum_demands = sum(x[flows])[0]\n cap = c[e,0]\n diff = abs(sum_demands - cap)\n if (sum_demands > cap or diff < eps):\n r[e] = max(x[flows])\n print \"link\",e,\"has rate\", r[e]\n\n self.level = self.max_level\n self.x = x\n self.r = r\n\n self.bottleneck_links_arr = np.where(self.r < float('inf'))[0]\n self.bottleneck_links = {}\n self.non_bottleneck_links = {}\n\n self.sat_flows = {}\n self.unsat_flows = {}\n\n# class Eps:\n# def __init__(self):\n# self.eps1 = 1e-7\n# pass\n\n# def main():\n# for num_flows in [10, 100, 1000, 10000]:\n# start = time.time()\n# routes = np.ones((num_flows, 2))\n# routes[:, 1] = 0\n# routes[0:2, 1] = 1\n# routes[0, 0] = 0\n# c = np.ones((2,1))\n \n# wf = Waterfilling(routes, c, True, Eps())\n# stop = time.time()\n# elapsed = stop - start\n# print(\"num_flows\", num_flows, \"elapsed\", elapsed,\"s\")\n# #print wf.x\n# #print wf.r\n# #print wf.level\n# pass\n\n# main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
no = int(input("Enter a number: "))
no = str(no)
rev = no[::-1]
if no==rev:
print(f"{no}--->{rev} Input is a palindrome")
else:
print(f"{no}--->{rev} Input is not a palindrome")
|
normal
|
{
"blob_id": "020a41e7d3cc3f5adf3a38a6852dac6037595372",
"index": 2043,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif no == rev:\n print(f'{no}--->{rev} Input is a palindrome')\nelse:\n print(f'{no}--->{rev} Input is not a palindrome')\n",
"step-3": "no = int(input('Enter a number: '))\nno = str(no)\nrev = no[::-1]\nif no == rev:\n print(f'{no}--->{rev} Input is a palindrome')\nelse:\n print(f'{no}--->{rev} Input is not a palindrome')\n",
"step-4": "no = int(input(\"Enter a number: \"))\nno = str(no)\nrev = no[::-1]\nif no==rev:\n print(f\"{no}--->{rev} Input is a palindrome\")\nelse:\n print(f\"{no}--->{rev} Input is not a palindrome\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Copyright (c) 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.i18n import _
from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common
class VirtualArray(common.CoprHDResource):
# Commonly used URIs for the 'varrays' module
URI_VIRTUALARRAY = '/vdc/varrays'
URI_VIRTUALARRAY_BY_VDC_ID = '/vdc/varrays?vdc-id={0}'
URI_VIRTUALARRAY_URI = '/vdc/varrays/{0}'
def varray_query(self, name):
"""Returns the UID of the varray specified by the name."""
if common.is_uri(name):
return name
uris = self.varray_list()
for uri in uris:
varray = self.varray_show(uri)
if varray and varray['name'] == name:
return varray['id']
raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR,
(_("varray %s: not found") % name))
def varray_list(self, vdcname=None):
"""Returns all the varrays in a vdc.
:param vdcname: Name of the Virtual Data Center
:returns: JSON payload of varray list
"""
vdcrestapi = None
if vdcname is not None:
vdcrestapi = VirtualArray.URI_VIRTUALARRAY_BY_VDC_ID.format(
vdcname)
else:
vdcrestapi = VirtualArray.URI_VIRTUALARRAY
(s, h) = common.service_json_request(
self.ipaddr, self.port, "GET",
vdcrestapi, None)
o = common.json_decode(s)
returnlst = []
for item in o['varray']:
returnlst.append(item['id'])
return returnlst
def varray_show(self, label):
"""Makes REST API call to retrieve varray details based on name."""
uri = self.varray_query(label)
(s, h) = common.service_json_request(
self.ipaddr, self.port, "GET",
VirtualArray.URI_VIRTUALARRAY_URI.format(uri),
None)
o = common.json_decode(s)
if 'inactive' in o and o['inactive'] is True:
return None
else:
return o
|
normal
|
{
"blob_id": "2d48a343ca7f0f8ba7de8b520aad71d774d9b4ba",
"index": 9302,
"step-1": "<mask token>\n\n\nclass VirtualArray(common.CoprHDResource):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def varray_list(self, vdcname=None):\n \"\"\"Returns all the varrays in a vdc.\n\n :param vdcname: Name of the Virtual Data Center\n :returns: JSON payload of varray list\n \"\"\"\n vdcrestapi = None\n if vdcname is not None:\n vdcrestapi = VirtualArray.URI_VIRTUALARRAY_BY_VDC_ID.format(vdcname\n )\n else:\n vdcrestapi = VirtualArray.URI_VIRTUALARRAY\n s, h = common.service_json_request(self.ipaddr, self.port, 'GET',\n vdcrestapi, None)\n o = common.json_decode(s)\n returnlst = []\n for item in o['varray']:\n returnlst.append(item['id'])\n return returnlst\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass VirtualArray(common.CoprHDResource):\n <mask token>\n <mask token>\n <mask token>\n\n def varray_query(self, name):\n \"\"\"Returns the UID of the varray specified by the name.\"\"\"\n if common.is_uri(name):\n return name\n uris = self.varray_list()\n for uri in uris:\n varray = self.varray_show(uri)\n if varray and varray['name'] == name:\n return varray['id']\n raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, _(\n 'varray %s: not found') % name)\n\n def varray_list(self, vdcname=None):\n \"\"\"Returns all the varrays in a vdc.\n\n :param vdcname: Name of the Virtual Data Center\n :returns: JSON payload of varray list\n \"\"\"\n vdcrestapi = None\n if vdcname is not None:\n vdcrestapi = VirtualArray.URI_VIRTUALARRAY_BY_VDC_ID.format(vdcname\n )\n else:\n vdcrestapi = VirtualArray.URI_VIRTUALARRAY\n s, h = common.service_json_request(self.ipaddr, self.port, 'GET',\n vdcrestapi, None)\n o = common.json_decode(s)\n returnlst = []\n for item in o['varray']:\n returnlst.append(item['id'])\n return returnlst\n\n def varray_show(self, label):\n \"\"\"Makes REST API call to retrieve varray details based on name.\"\"\"\n uri = self.varray_query(label)\n s, h = common.service_json_request(self.ipaddr, self.port, 'GET',\n VirtualArray.URI_VIRTUALARRAY_URI.format(uri), None)\n o = common.json_decode(s)\n if 'inactive' in o and o['inactive'] is True:\n return None\n else:\n return o\n",
"step-3": "<mask token>\n\n\nclass VirtualArray(common.CoprHDResource):\n URI_VIRTUALARRAY = '/vdc/varrays'\n URI_VIRTUALARRAY_BY_VDC_ID = '/vdc/varrays?vdc-id={0}'\n URI_VIRTUALARRAY_URI = '/vdc/varrays/{0}'\n\n def varray_query(self, name):\n \"\"\"Returns the UID of the varray specified by the name.\"\"\"\n if common.is_uri(name):\n return name\n uris = self.varray_list()\n for uri in uris:\n varray = self.varray_show(uri)\n if varray and varray['name'] == name:\n return varray['id']\n raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, _(\n 'varray %s: not found') % name)\n\n def varray_list(self, vdcname=None):\n \"\"\"Returns all the varrays in a vdc.\n\n :param vdcname: Name of the Virtual Data Center\n :returns: JSON payload of varray list\n \"\"\"\n vdcrestapi = None\n if vdcname is not None:\n vdcrestapi = VirtualArray.URI_VIRTUALARRAY_BY_VDC_ID.format(vdcname\n )\n else:\n vdcrestapi = VirtualArray.URI_VIRTUALARRAY\n s, h = common.service_json_request(self.ipaddr, self.port, 'GET',\n vdcrestapi, None)\n o = common.json_decode(s)\n returnlst = []\n for item in o['varray']:\n returnlst.append(item['id'])\n return returnlst\n\n def varray_show(self, label):\n \"\"\"Makes REST API call to retrieve varray details based on name.\"\"\"\n uri = self.varray_query(label)\n s, h = common.service_json_request(self.ipaddr, self.port, 'GET',\n VirtualArray.URI_VIRTUALARRAY_URI.format(uri), None)\n o = common.json_decode(s)\n if 'inactive' in o and o['inactive'] is True:\n return None\n else:\n return o\n",
"step-4": "from cinder.i18n import _\nfrom cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common\n\n\nclass VirtualArray(common.CoprHDResource):\n URI_VIRTUALARRAY = '/vdc/varrays'\n URI_VIRTUALARRAY_BY_VDC_ID = '/vdc/varrays?vdc-id={0}'\n URI_VIRTUALARRAY_URI = '/vdc/varrays/{0}'\n\n def varray_query(self, name):\n \"\"\"Returns the UID of the varray specified by the name.\"\"\"\n if common.is_uri(name):\n return name\n uris = self.varray_list()\n for uri in uris:\n varray = self.varray_show(uri)\n if varray and varray['name'] == name:\n return varray['id']\n raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, _(\n 'varray %s: not found') % name)\n\n def varray_list(self, vdcname=None):\n \"\"\"Returns all the varrays in a vdc.\n\n :param vdcname: Name of the Virtual Data Center\n :returns: JSON payload of varray list\n \"\"\"\n vdcrestapi = None\n if vdcname is not None:\n vdcrestapi = VirtualArray.URI_VIRTUALARRAY_BY_VDC_ID.format(vdcname\n )\n else:\n vdcrestapi = VirtualArray.URI_VIRTUALARRAY\n s, h = common.service_json_request(self.ipaddr, self.port, 'GET',\n vdcrestapi, None)\n o = common.json_decode(s)\n returnlst = []\n for item in o['varray']:\n returnlst.append(item['id'])\n return returnlst\n\n def varray_show(self, label):\n \"\"\"Makes REST API call to retrieve varray details based on name.\"\"\"\n uri = self.varray_query(label)\n s, h = common.service_json_request(self.ipaddr, self.port, 'GET',\n VirtualArray.URI_VIRTUALARRAY_URI.format(uri), None)\n o = common.json_decode(s)\n if 'inactive' in o and o['inactive'] is True:\n return None\n else:\n return o\n",
"step-5": "# Copyright (c) 2016 EMC Corporation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom cinder.i18n import _\nfrom cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common\n\n\nclass VirtualArray(common.CoprHDResource):\n\n # Commonly used URIs for the 'varrays' module\n URI_VIRTUALARRAY = '/vdc/varrays'\n URI_VIRTUALARRAY_BY_VDC_ID = '/vdc/varrays?vdc-id={0}'\n URI_VIRTUALARRAY_URI = '/vdc/varrays/{0}'\n\n def varray_query(self, name):\n \"\"\"Returns the UID of the varray specified by the name.\"\"\"\n if common.is_uri(name):\n return name\n\n uris = self.varray_list()\n\n for uri in uris:\n varray = self.varray_show(uri)\n if varray and varray['name'] == name:\n return varray['id']\n\n raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR,\n (_(\"varray %s: not found\") % name))\n\n def varray_list(self, vdcname=None):\n \"\"\"Returns all the varrays in a vdc.\n\n :param vdcname: Name of the Virtual Data Center\n :returns: JSON payload of varray list\n \"\"\"\n vdcrestapi = None\n if vdcname is not None:\n vdcrestapi = VirtualArray.URI_VIRTUALARRAY_BY_VDC_ID.format(\n vdcname)\n else:\n vdcrestapi = VirtualArray.URI_VIRTUALARRAY\n (s, h) = common.service_json_request(\n self.ipaddr, self.port, \"GET\",\n vdcrestapi, None)\n\n o = common.json_decode(s)\n\n returnlst = []\n for item in o['varray']:\n returnlst.append(item['id'])\n\n return returnlst\n\n def varray_show(self, label):\n \"\"\"Makes REST API 
call to retrieve varray details based on name.\"\"\"\n uri = self.varray_query(label)\n\n (s, h) = common.service_json_request(\n self.ipaddr, self.port, \"GET\",\n VirtualArray.URI_VIRTUALARRAY_URI.format(uri),\n None)\n\n o = common.json_decode(s)\n if 'inactive' in o and o['inactive'] is True:\n return None\n else:\n return o\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
version = (2, 5, 8)
version_string = ".".join(str(v) for v in version)
release_date = "2015.12.27"
|
normal
|
{
"blob_id": "28077af0759e062078f7b9d1f7bbbb93c62835cb",
"index": 5063,
"step-1": "<mask token>\n",
"step-2": "version = 2, 5, 8\nversion_string = '.'.join(str(v) for v in version)\nrelease_date = '2015.12.27'\n",
"step-3": "version = (2, 5, 8)\nversion_string = \".\".join(str(v) for v in version)\n\nrelease_date = \"2015.12.27\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import tkinter as tk
from pickplace import PickPlace
import sys
import math
from tkinter import messagebox
import os
DEBUG = False
class GerberCanvas:
file_gto = False
file_gtp = False
units = 0
units_string = ('i', 'm')
"""
my canvas
"""
def __init__(self, frame):
self.x_format = ''
self.y_format = ''
self.units = ''
self.quadrant_mode = 0
self.file_commands = ''
self.file_gtp_commands = ''
self.gerber_file_name = ''
self.AD_commands = {} # dict to hold aperture commands
self.current_aperture = ''
self.x = '0'
self.y = '0'
self.i = '0'
self.j = '0'
self.last_x = ''
self.last_y = ''
self.start_x = ''
self.start_y = ''
self.direction = 0
self.graphics_mode = 0
self.scaled = False
self.bounding_box_size = ()
self._canvas_frame = frame
self.create_canvas()
def create_canvas(self):
self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
self.my_canvas.pack(expand=True, fill='both')
if sys.platform == 'linux':
self.my_canvas.bind('<Button-4>', self.__scale_image_up)
self.my_canvas.bind('<Button-5>', self.__scale_image_down)
else:
self.my_canvas.bind('<MouseWheel>', self.__scale_image)
# fixme fix the scrollbars so that they work correctly
self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.my_canvas.yview)
self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.HORIZONTAL, command=self.my_canvas.xview)
self.x_scrollbar.pack(fill='x', anchor='s')
# Set this only if using in Linux
if sys.platform == 'linux':
self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set, yscrollcommand=self.y_scrollbar.set)
self.__part_selected = 0
def load_gerber(self, path, file):
"""load gerber file
:param path: path to the file
:param file: file name to use
"""
try:
# file_path = askopenfilename(title='Open Top Silk Screen File', filetypes=[('GTO files', '*.GTO')],
# initialdir='')
all_ids = self.my_canvas.find_all()
# delete the current image if one exist.
if all_ids:
try:
for item in all_ids:
print(item)
self.my_canvas.delete(item)
except tk.TclError:
messagebox.showerror('Error', tk.TclError)
if path:
self.file_gto = True
try:
with open(os.path.join(path, file), 'r') as gerber_file:
self.file_commands = gerber_file.read().splitlines()
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
# self._parse_file(gerber_file.read())
self.__parse_file(self.file_commands)
self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')
self.gerber_file_name = file
self.scaled = False
# self.bounding_box_size = self.my_canvas.bbox('all')
if DEBUG:
print('Scroll region is : ', self.bounding_box_size)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTO')
finally:
self.file_gto = False
# load top pads into image
self.load_gerber_gtp(os.path.join(path, file))
self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))
# self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set, yscrollcommand=self.y_scrollbar.set)
def load_gerber_gtp(self, file_path):
self.file_gtp = True
try:
print(file_path)
new_file = 'c' + file_path[1:len(file_path)-3]+'GTP'
print('final name =', new_file)
if file_path:
try:
with open(new_file, 'r') as gerber_file:
self.file_gtp_commands = gerber_file.read().splitlines()
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_gtp_commands)
# self.scaled = False
except IOError:
messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
# print('units is ', self.units)
if 'G01' in item:
self.graphics_mode = 1 # sets Interpolation mode graphics state parameter to linear
if 'G03' in item:
self.direction = 270 # CounterClockWise
if 'G02' in item:
self.direction = 90 # ClockWise
if 'G74' in item:
self.quadrant_mode = 0 # single Quadrant mode
if 'G75' in item:
self.quadrant_mode = 1 # Multi quadrant mode
if '%AD' in item: # define the aperture
name = item[3:item.find(',')-1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start-1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D': # set the current aperture
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value: # for a rectangle
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x)/2)
# todo send this to a function to get size
elif 'C,' in value: # for a circle
print(value)
self.current_aperture = self.__get_circle_diameter(value)
elif 'O,' in value: # for a ob-round
pass
elif 'P,' in value: # for a polygon
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
# This is the Flash command. Create a flash of the object.
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.units_string[GerberCanvas.units],
str(y0) + GerberCanvas.units_string[GerberCanvas.units],
str(x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.units],
outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
# the D02 command is the move to command.
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
# if ('D01' in item) and (('I' not in item) and ('J' not in item)): # draw a line
if ('D01' in item) and (('I' not in item) and ('J' not in item)):
if self.file_gto: # draw a line
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x, ',', self.y)
self.my_canvas.create_line(self.start_x+'i', self.start_y+'i', self.x+'i', self.y+'i',
width=self.current_aperture+'i')
self.start_x = self.x
self.start_y = self.y
# this Draws a circle.
if 'D01' and 'I' and 'J' in item: # draw a circle/arc
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item) # test
if self.quadrant_mode: # This draws circles or arcs
if (self.start_x == self.x) and (self.start_y == self.y): # This draws circles
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x - radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y - radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_x + radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y + radius) + GerberCanvas.units_string[GerberCanvas.units],
outline='black', width=self.current_aperture)
except UnboundLocalError():
messagebox.showwarning('Warning', 'Something went wrong.')
break
else: # This draws arcs
# self.evaluate_arc_command(item)
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) - cp_y, float(self.x) - cp_x))
# radius = math.degrees(self.__get_extent(radius))
try:
self.my_canvas.create_arc(str(cp_x + radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y + radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_x - radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y - radius) + GerberCanvas.units_string[GerberCanvas.units],
style=tk.ARC, width=self.current_aperture, start=start_angle,
extent=end_angle-start_angle, outline='black')
# self.my_canvas.create_arc('0', '0', '100', '100', style='arc', start=90, extent=180,
# outline='purple')
except UnboundLocalError():
messagebox.showwarning('Warning', 'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x+1:len(value)]
print(width, length)
return width, length
def __get_extent(self, radius):
distance = self.__distance(float(self.start_x), float(self.start_y), float(self.x), float(self.y))
if DEBUG:
print('distance = ', distance)
number = (1-((distance**2) / (2*(radius**2))))
result = number - int(number)
return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
def __get_numbers(self, item):
found = 0
if 'I' in item and 'J' in item and found == 0:
found = 1
i_start = item.find('I')
j_start = item.find('J')
d_start = item.find('D')
i_temp = item[i_start+1:j_start]
j_temp = item[j_start+1:d_start]
j_temp = str(int(j_temp) * -1)
self.i = self.__format_number(i_temp)
self.j = self.__format_number(j_temp)
if 'X' and 'Y' in item:
found = 0
if 'X' in item and 'Y' in item and found == 0:
found = 1
x_start = item.find('X')
y_start = item.find('Y')
d_start = item.find('D')
x_temp = item[x_start+1:y_start]
y_temp = item[y_start+1:d_start]
if ('I' or 'J') in y_temp:
for i in range(1, len(y_temp)):
if y_temp[i] == 'I':
y_temp = y_temp[0:i]
break
y_temp = str(int(y_temp) * -1)
self.x = self.__format_number(x_temp)
self.y = self.__format_number(y_temp)
if 'X' in item and found == 0:
found = 1
x_start = item.find('X')
d_start = item.find('D')
x_temp = item[x_start+1:d_start]
self.x = self.__format_number(x_temp)
if 'Y' in item and found == 0:
found = 1
y_start = item.find('Y')
d_start = item.find('D')
y_temp = item[y_start + 1:d_start]
# flip my y axis
y_temp = str(int(y_temp) * -1)
self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number)-5]
if '-' in number:
return first + '.' + last
# return '-' + first + '.' + last
else:
return first + '.' + last
def high_lite_part(self, x, y, layer):
x1 = self.__format_pnp(x)
y1 = self.__format_pnp(y) * -1
last_x = float(x1) + .1
last_y = float(y1) + .1
if layer == 'TopLayer':
color = 'red'
else:
color = 'blue'
self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i',
outline=color, fill=color)
# elif layer == 'BottomLayer':
# self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', str(y1) + 'i', str(last_x) + 'i',
# str(last_y) + 'i', outline='blue', fill='blue')
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += .1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= .1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
def __parse_file_gtp(self):
# print(self.file_commands)
temp_list = self.file_commands
for item in temp_list:
# print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
self.__inch = 1
self.__mm = 0
if 'MM' in item:
self.__inch = 0
self.__mm = 1
# print('units is ', self.units)
if 'G01' in item:
self.graphics_mode = 1 # sets Interpolation mode graphics state parameter to linear
if 'G03' in item:
self.direction = 270 # CounterClockWise
if 'G02' in item:
self.direction = 90 # ClockWise
if 'G74' in item:
self.quadrant_mode = 0 # single Quadrant mode
if 'G75' in item:
self.quadrant_mode = 1 # Multi quadrant mode
if '%AD' in item: # diameter of the circle
name = item[3:item.find(',') - 1]
# print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
# print(value)
self.AD_commands[name] = value[2:len(value)]
if item[0:1] == 'D':
item = item[0:item.find('*')]
# print('I found a ', item)
for key, value in self.AD_commands.items():
if item in key:
self.current_aperture = value
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if ('D01' in item) and (('I' not in item) and ('J' not in item)): # draw a line
self.__get_numbers(item)
# print(self.start_x, ',', self.start_y, ',', self.x, ',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.start_y + 'i', self.x + 'i', self.y + 'i',
width=self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
# this Draws a circle.
if 'D01' and 'I' and 'J' in item: # draw a circle
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode: # This draws circles or arcs
if (self.start_x == self.x) and (self.start_y == self.y): # This draws circles
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
self.my_canvas.create_oval(str(cp_x - radius) + 'i', str(cp_y - radius) + 'i',
str(cp_x + radius) + 'i', str(cp_y + radius) + 'i',
outline='black', width=self.current_aperture)
else: # This draws arcs
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
# print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) - cp_y, float(self.x) - cp_x))
ext = math.degrees(self.__get_extent(radius))
self.my_canvas.create_arc(str(cp_x + radius) + 'i', str(cp_y + radius) + 'i',
str(cp_x - radius) + 'i', str(cp_y - radius) + 'i', style=tk.ARC,
width=self.current_aperture, start=start_angle,
extent=end_angle - start_angle, outline='black')
# self.my_canvas.create_arc('0', '0', '100', '100', style='arc', start=90, extent=180,
# outline='purple')
|
normal
|
{
"blob_id": "6b2f10449909d978ee294a502a376c8091af06e0",
"index": 1285,
"step-1": "<mask token>\n\n\nclass GerberCanvas:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, frame):\n self.x_format = ''\n self.y_format = ''\n self.units = ''\n self.quadrant_mode = 0\n self.file_commands = ''\n self.file_gtp_commands = ''\n self.gerber_file_name = ''\n self.AD_commands = {}\n self.current_aperture = ''\n self.x = '0'\n self.y = '0'\n self.i = '0'\n self.j = '0'\n self.last_x = ''\n self.last_y = ''\n self.start_x = ''\n self.start_y = ''\n self.direction = 0\n self.graphics_mode = 0\n self.scaled = False\n self.bounding_box_size = ()\n self._canvas_frame = frame\n self.create_canvas()\n\n def create_canvas(self):\n self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')\n self.my_canvas.pack(expand=True, fill='both')\n if sys.platform == 'linux':\n self.my_canvas.bind('<Button-4>', self.__scale_image_up)\n self.my_canvas.bind('<Button-5>', self.__scale_image_down)\n else:\n self.my_canvas.bind('<MouseWheel>', self.__scale_image)\n self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.\n my_canvas.yview)\n self.y_scrollbar.pack(expand=True, fill='y', anchor='e')\n self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.\n HORIZONTAL, command=self.my_canvas.xview)\n self.x_scrollbar.pack(fill='x', anchor='s')\n if sys.platform == 'linux':\n self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,\n yscrollcommand=self.y_scrollbar.set)\n self.__part_selected = 0\n <mask token>\n\n def load_gerber_gtp(self, file_path):\n self.file_gtp = True\n try:\n print(file_path)\n new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'\n print('final name =', new_file)\n if file_path:\n try:\n with open(new_file, 'r') as gerber_file:\n self.file_gtp_commands = gerber_file.read().splitlines(\n )\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_gtp_commands)\n except IOError:\n messagebox.showerror('File Error', 'File did not 
open, GTP')\n\n def __parse_file(self, commands):\n if DEBUG:\n print(self.file_commands)\n temp_list = commands\n for item in temp_list:\n if DEBUG:\n print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n GerberCanvas.units = 0\n if 'MM' in item:\n GerberCanvas.units = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n if DEBUG:\n print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n if DEBUG:\n print(value)\n self.AD_commands[name] = value\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n if DEBUG:\n print('I found a ', item)\n for key, value in self.AD_commands.items():\n self.current_ad_command = key\n if item in key:\n if 'R,' in value:\n print(value)\n x, y = self.__get_rectangle_size(value)\n self.rect_x = x\n self.rect_y = y\n print('Half of x is: ', float(self.rect_x) / 2)\n elif 'C,' in value:\n print(value)\n self.current_aperture = self.__get_circle_diameter(\n value)\n elif 'O,' in value:\n pass\n elif 'P,' in value:\n pass\n elif 'TARGET' in value:\n pass\n elif 'THERMAL' in value:\n pass\n if 'D03' in item:\n if DEBUG:\n print('current key is = ', self.current_ad_command)\n print(self.AD_commands[self.current_ad_command])\n if 'R,' in self.AD_commands[self.current_ad_command]:\n if DEBUG:\n print('draw a rectangle')\n x0 = float(self.start_x) - float(self.rect_x) / 2\n y0 = float(self.start_y) + float(self.rect_y) / 2\n x1 = float(self.start_x) + float(self.rect_x) / 2\n y1 = float(self.start_y) - float(self.rect_y) / 2\n self.my_canvas.create_rectangle(str(x0) + GerberCanvas.\n units_string[GerberCanvas.units], str(y0) +\n GerberCanvas.units_string[GerberCanvas.units], 
str(\n x1) + GerberCanvas.units_string[GerberCanvas.units],\n str(y1) + GerberCanvas.units_string[GerberCanvas.\n units], outline='white', fill='black')\n if 'C,' in self.AD_commands[self.current_ad_command]:\n print('draw a circle')\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n if self.file_gto:\n self.__get_numbers(item)\n if DEBUG:\n print(self.start_x, ',', self.start_y, ',', self.x,\n ',', self.y)\n self.my_canvas.create_line(self.start_x + 'i', self.\n start_y + 'i', self.x + 'i', self.y + 'i', width=\n self.current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n if self.file_gto:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n try:\n self.my_canvas.create_oval(str(cp_x -\n radius) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], outline='black', width=self.\n current_aperture)\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n break\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if DEBUG:\n print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n else:\n radius = 
0.0\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(\n self.start_y) - cp_y, float(self.start_x) -\n cp_x))\n end_angle = math.degrees(math.atan2(float(self.\n y) - cp_y, float(self.x) - cp_x))\n try:\n self.my_canvas.create_arc(str(cp_x + radius\n ) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], style=tk.ARC, width=self.\n current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline\n ='black')\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n\n @staticmethod\n def __get_circle_diameter(value):\n return value[3:len(value)]\n\n @staticmethod\n def __get_rectangle_size(value):\n print(value)\n find_x = value.find('X'[0:len(value)])\n width = value[2:find_x]\n length = value[find_x + 1:len(value)]\n print(width, length)\n return width, length\n\n def __get_extent(self, radius):\n distance = self.__distance(float(self.start_x), float(self.start_y),\n float(self.x), float(self.y))\n if DEBUG:\n print('distance = ', distance)\n number = 1 - distance ** 2 / (2 * radius ** 2)\n result = number - int(number)\n return math.acos(result)\n\n @staticmethod\n def __distance(start_x, start_y, end_x, end_y):\n \"\"\"calculate distance between two points\n :param start_x\n :param start_y\n :param end_x\n :param end_y\n \"\"\"\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance\n\n def __set_direction(self):\n if self.x == self.start_x:\n if self.y < self.start_y:\n self.direction = 90\n else:\n self.direction = 270\n if self.y == self.start_y:\n if self.x < self.start_x:\n self.direction = 0\n else:\n self.direction = 180\n\n def __get_numbers(self, item):\n found = 0\n if 'I' in item and 'J' in item and found == 0:\n found = 1\n i_start = 
item.find('I')\n j_start = item.find('J')\n d_start = item.find('D')\n i_temp = item[i_start + 1:j_start]\n j_temp = item[j_start + 1:d_start]\n j_temp = str(int(j_temp) * -1)\n self.i = self.__format_number(i_temp)\n self.j = self.__format_number(j_temp)\n if 'X' and 'Y' in item:\n found = 0\n if 'X' in item and 'Y' in item and found == 0:\n found = 1\n x_start = item.find('X')\n y_start = item.find('Y')\n d_start = item.find('D')\n x_temp = item[x_start + 1:y_start]\n y_temp = item[y_start + 1:d_start]\n if ('I' or 'J') in y_temp:\n for i in range(1, len(y_temp)):\n if y_temp[i] == 'I':\n y_temp = y_temp[0:i]\n break\n y_temp = str(int(y_temp) * -1)\n self.x = self.__format_number(x_temp)\n self.y = self.__format_number(y_temp)\n if 'X' in item and found == 0:\n found = 1\n x_start = item.find('X')\n d_start = item.find('D')\n x_temp = item[x_start + 1:d_start]\n self.x = self.__format_number(x_temp)\n if 'Y' in item and found == 0:\n found = 1\n y_start = item.find('Y')\n d_start = item.find('D')\n y_temp = item[y_start + 1:d_start]\n y_temp = str(int(y_temp) * -1)\n self.y = self.__format_number(y_temp)\n\n def __format_number(self, number):\n how_long = len(number)\n if how_long <= int(self.x_format[1]):\n if '-' in number:\n temp = number[1:len(number)]\n return '-.' + temp.zfill(int(self.x_format[1]))\n else:\n return '.' + number.zfill(int(self.x_format[1]))\n elif how_long > int(self.x_format[1]):\n last = number[-5:len(number)]\n first = number[0:len(number) - 5]\n if '-' in number:\n return first + '.' + last\n else:\n return first + '.' 
+ last\n <mask token>\n\n def delete_current_highlight(self):\n if self.__part_selected:\n self.my_canvas.delete(self.__part_selected)\n\n def __scale_image_up(self, event=None):\n self.scale_factor = 1\n self.scale_factor += 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image_down(self, event=None):\n self.scale_factor = 1\n self.scale_factor -= 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n if PickPlace.is_file_loaded:\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image(self, event=None):\n if event.delta >= 120:\n self.__scale_image_up()\n elif event.delta <= -120:\n self.__scale_image_down()\n self.scaled = True\n\n @staticmethod\n def __format_pnp(number):\n move1 = float(number) / 10\n move2 = move1 / 10\n final = move2 / 10\n return final\n\n def __parse_file_gtp(self):\n temp_list = self.file_commands\n for item in temp_list:\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n self.__inch = 1\n self.__mm = 0\n if 'MM' in item:\n self.__inch = 0\n self.__mm = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n self.AD_commands[name] = value[2:len(value)]\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n for key, value in self.AD_commands.items():\n if item in key:\n self.current_aperture = value\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = 
self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n self.__get_numbers(item)\n self.my_canvas.create_line(self.start_x + 'i', self.start_y +\n 'i', self.x + 'i', self.y + 'i', width=self.\n current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n self.my_canvas.create_oval(str(cp_x - radius) + 'i',\n str(cp_y - radius) + 'i', str(cp_x + radius) +\n 'i', str(cp_y + radius) + 'i', outline='black',\n width=self.current_aperture)\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.\n start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) -\n cp_y, float(self.x) - cp_x))\n ext = math.degrees(self.__get_extent(radius))\n self.my_canvas.create_arc(str(cp_x + radius) + 'i',\n str(cp_y + radius) + 'i', str(cp_x - radius) +\n 'i', str(cp_y - radius) + 'i', style=tk.ARC,\n width=self.current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline='black')\n",
"step-2": "<mask token>\n\n\nclass GerberCanvas:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, frame):\n self.x_format = ''\n self.y_format = ''\n self.units = ''\n self.quadrant_mode = 0\n self.file_commands = ''\n self.file_gtp_commands = ''\n self.gerber_file_name = ''\n self.AD_commands = {}\n self.current_aperture = ''\n self.x = '0'\n self.y = '0'\n self.i = '0'\n self.j = '0'\n self.last_x = ''\n self.last_y = ''\n self.start_x = ''\n self.start_y = ''\n self.direction = 0\n self.graphics_mode = 0\n self.scaled = False\n self.bounding_box_size = ()\n self._canvas_frame = frame\n self.create_canvas()\n\n def create_canvas(self):\n self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')\n self.my_canvas.pack(expand=True, fill='both')\n if sys.platform == 'linux':\n self.my_canvas.bind('<Button-4>', self.__scale_image_up)\n self.my_canvas.bind('<Button-5>', self.__scale_image_down)\n else:\n self.my_canvas.bind('<MouseWheel>', self.__scale_image)\n self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.\n my_canvas.yview)\n self.y_scrollbar.pack(expand=True, fill='y', anchor='e')\n self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.\n HORIZONTAL, command=self.my_canvas.xview)\n self.x_scrollbar.pack(fill='x', anchor='s')\n if sys.platform == 'linux':\n self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,\n yscrollcommand=self.y_scrollbar.set)\n self.__part_selected = 0\n <mask token>\n\n def load_gerber_gtp(self, file_path):\n self.file_gtp = True\n try:\n print(file_path)\n new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'\n print('final name =', new_file)\n if file_path:\n try:\n with open(new_file, 'r') as gerber_file:\n self.file_gtp_commands = gerber_file.read().splitlines(\n )\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_gtp_commands)\n except IOError:\n messagebox.showerror('File Error', 'File did not 
open, GTP')\n\n def __parse_file(self, commands):\n if DEBUG:\n print(self.file_commands)\n temp_list = commands\n for item in temp_list:\n if DEBUG:\n print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n GerberCanvas.units = 0\n if 'MM' in item:\n GerberCanvas.units = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n if DEBUG:\n print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n if DEBUG:\n print(value)\n self.AD_commands[name] = value\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n if DEBUG:\n print('I found a ', item)\n for key, value in self.AD_commands.items():\n self.current_ad_command = key\n if item in key:\n if 'R,' in value:\n print(value)\n x, y = self.__get_rectangle_size(value)\n self.rect_x = x\n self.rect_y = y\n print('Half of x is: ', float(self.rect_x) / 2)\n elif 'C,' in value:\n print(value)\n self.current_aperture = self.__get_circle_diameter(\n value)\n elif 'O,' in value:\n pass\n elif 'P,' in value:\n pass\n elif 'TARGET' in value:\n pass\n elif 'THERMAL' in value:\n pass\n if 'D03' in item:\n if DEBUG:\n print('current key is = ', self.current_ad_command)\n print(self.AD_commands[self.current_ad_command])\n if 'R,' in self.AD_commands[self.current_ad_command]:\n if DEBUG:\n print('draw a rectangle')\n x0 = float(self.start_x) - float(self.rect_x) / 2\n y0 = float(self.start_y) + float(self.rect_y) / 2\n x1 = float(self.start_x) + float(self.rect_x) / 2\n y1 = float(self.start_y) - float(self.rect_y) / 2\n self.my_canvas.create_rectangle(str(x0) + GerberCanvas.\n units_string[GerberCanvas.units], str(y0) +\n GerberCanvas.units_string[GerberCanvas.units], 
str(\n x1) + GerberCanvas.units_string[GerberCanvas.units],\n str(y1) + GerberCanvas.units_string[GerberCanvas.\n units], outline='white', fill='black')\n if 'C,' in self.AD_commands[self.current_ad_command]:\n print('draw a circle')\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n if self.file_gto:\n self.__get_numbers(item)\n if DEBUG:\n print(self.start_x, ',', self.start_y, ',', self.x,\n ',', self.y)\n self.my_canvas.create_line(self.start_x + 'i', self.\n start_y + 'i', self.x + 'i', self.y + 'i', width=\n self.current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n if self.file_gto:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n try:\n self.my_canvas.create_oval(str(cp_x -\n radius) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], outline='black', width=self.\n current_aperture)\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n break\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if DEBUG:\n print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n else:\n radius = 
0.0\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(\n self.start_y) - cp_y, float(self.start_x) -\n cp_x))\n end_angle = math.degrees(math.atan2(float(self.\n y) - cp_y, float(self.x) - cp_x))\n try:\n self.my_canvas.create_arc(str(cp_x + radius\n ) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], style=tk.ARC, width=self.\n current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline\n ='black')\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n\n @staticmethod\n def __get_circle_diameter(value):\n return value[3:len(value)]\n\n @staticmethod\n def __get_rectangle_size(value):\n print(value)\n find_x = value.find('X'[0:len(value)])\n width = value[2:find_x]\n length = value[find_x + 1:len(value)]\n print(width, length)\n return width, length\n\n def __get_extent(self, radius):\n distance = self.__distance(float(self.start_x), float(self.start_y),\n float(self.x), float(self.y))\n if DEBUG:\n print('distance = ', distance)\n number = 1 - distance ** 2 / (2 * radius ** 2)\n result = number - int(number)\n return math.acos(result)\n\n @staticmethod\n def __distance(start_x, start_y, end_x, end_y):\n \"\"\"calculate distance between two points\n :param start_x\n :param start_y\n :param end_x\n :param end_y\n \"\"\"\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance\n\n def __set_direction(self):\n if self.x == self.start_x:\n if self.y < self.start_y:\n self.direction = 90\n else:\n self.direction = 270\n if self.y == self.start_y:\n if self.x < self.start_x:\n self.direction = 0\n else:\n self.direction = 180\n\n def __get_numbers(self, item):\n found = 0\n if 'I' in item and 'J' in item and found == 0:\n found = 1\n i_start = 
item.find('I')\n j_start = item.find('J')\n d_start = item.find('D')\n i_temp = item[i_start + 1:j_start]\n j_temp = item[j_start + 1:d_start]\n j_temp = str(int(j_temp) * -1)\n self.i = self.__format_number(i_temp)\n self.j = self.__format_number(j_temp)\n if 'X' and 'Y' in item:\n found = 0\n if 'X' in item and 'Y' in item and found == 0:\n found = 1\n x_start = item.find('X')\n y_start = item.find('Y')\n d_start = item.find('D')\n x_temp = item[x_start + 1:y_start]\n y_temp = item[y_start + 1:d_start]\n if ('I' or 'J') in y_temp:\n for i in range(1, len(y_temp)):\n if y_temp[i] == 'I':\n y_temp = y_temp[0:i]\n break\n y_temp = str(int(y_temp) * -1)\n self.x = self.__format_number(x_temp)\n self.y = self.__format_number(y_temp)\n if 'X' in item and found == 0:\n found = 1\n x_start = item.find('X')\n d_start = item.find('D')\n x_temp = item[x_start + 1:d_start]\n self.x = self.__format_number(x_temp)\n if 'Y' in item and found == 0:\n found = 1\n y_start = item.find('Y')\n d_start = item.find('D')\n y_temp = item[y_start + 1:d_start]\n y_temp = str(int(y_temp) * -1)\n self.y = self.__format_number(y_temp)\n\n def __format_number(self, number):\n how_long = len(number)\n if how_long <= int(self.x_format[1]):\n if '-' in number:\n temp = number[1:len(number)]\n return '-.' + temp.zfill(int(self.x_format[1]))\n else:\n return '.' + number.zfill(int(self.x_format[1]))\n elif how_long > int(self.x_format[1]):\n last = number[-5:len(number)]\n first = number[0:len(number) - 5]\n if '-' in number:\n return first + '.' + last\n else:\n return first + '.' 
+ last\n\n def high_lite_part(self, x, y, layer):\n x1 = self.__format_pnp(x)\n y1 = self.__format_pnp(y) * -1\n last_x = float(x1) + 0.1\n last_y = float(y1) + 0.1\n if layer == 'TopLayer':\n color = 'red'\n else:\n color = 'blue'\n self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', \n str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=\n color, fill=color)\n\n def delete_current_highlight(self):\n if self.__part_selected:\n self.my_canvas.delete(self.__part_selected)\n\n def __scale_image_up(self, event=None):\n self.scale_factor = 1\n self.scale_factor += 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image_down(self, event=None):\n self.scale_factor = 1\n self.scale_factor -= 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n if PickPlace.is_file_loaded:\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image(self, event=None):\n if event.delta >= 120:\n self.__scale_image_up()\n elif event.delta <= -120:\n self.__scale_image_down()\n self.scaled = True\n\n @staticmethod\n def __format_pnp(number):\n move1 = float(number) / 10\n move2 = move1 / 10\n final = move2 / 10\n return final\n\n def __parse_file_gtp(self):\n temp_list = self.file_commands\n for item in temp_list:\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n self.__inch = 1\n self.__mm = 0\n if 'MM' in item:\n self.__inch = 0\n self.__mm = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n 
self.AD_commands[name] = value[2:len(value)]\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n for key, value in self.AD_commands.items():\n if item in key:\n self.current_aperture = value\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n self.__get_numbers(item)\n self.my_canvas.create_line(self.start_x + 'i', self.start_y +\n 'i', self.x + 'i', self.y + 'i', width=self.\n current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n self.my_canvas.create_oval(str(cp_x - radius) + 'i',\n str(cp_y - radius) + 'i', str(cp_x + radius) +\n 'i', str(cp_y + radius) + 'i', outline='black',\n width=self.current_aperture)\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.\n start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) -\n cp_y, float(self.x) - cp_x))\n ext = math.degrees(self.__get_extent(radius))\n self.my_canvas.create_arc(str(cp_x + radius) + 'i',\n str(cp_y + radius) + 'i', str(cp_x - radius) +\n 'i', str(cp_y - radius) + 'i', style=tk.ARC,\n width=self.current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline='black')\n",
"step-3": "<mask token>\n\n\nclass GerberCanvas:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, frame):\n self.x_format = ''\n self.y_format = ''\n self.units = ''\n self.quadrant_mode = 0\n self.file_commands = ''\n self.file_gtp_commands = ''\n self.gerber_file_name = ''\n self.AD_commands = {}\n self.current_aperture = ''\n self.x = '0'\n self.y = '0'\n self.i = '0'\n self.j = '0'\n self.last_x = ''\n self.last_y = ''\n self.start_x = ''\n self.start_y = ''\n self.direction = 0\n self.graphics_mode = 0\n self.scaled = False\n self.bounding_box_size = ()\n self._canvas_frame = frame\n self.create_canvas()\n\n def create_canvas(self):\n self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')\n self.my_canvas.pack(expand=True, fill='both')\n if sys.platform == 'linux':\n self.my_canvas.bind('<Button-4>', self.__scale_image_up)\n self.my_canvas.bind('<Button-5>', self.__scale_image_down)\n else:\n self.my_canvas.bind('<MouseWheel>', self.__scale_image)\n self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.\n my_canvas.yview)\n self.y_scrollbar.pack(expand=True, fill='y', anchor='e')\n self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.\n HORIZONTAL, command=self.my_canvas.xview)\n self.x_scrollbar.pack(fill='x', anchor='s')\n if sys.platform == 'linux':\n self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,\n yscrollcommand=self.y_scrollbar.set)\n self.__part_selected = 0\n\n def load_gerber(self, path, file):\n \"\"\"load gerber file\n :param path: path to the file\n :param file: file name to use\n \"\"\"\n try:\n all_ids = self.my_canvas.find_all()\n if all_ids:\n try:\n for item in all_ids:\n print(item)\n self.my_canvas.delete(item)\n except tk.TclError:\n messagebox.showerror('Error', tk.TclError)\n if path:\n self.file_gto = True\n try:\n with open(os.path.join(path, file), 'r') as gerber_file:\n self.file_commands = gerber_file.read().splitlines()\n except TypeError:\n 
messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_commands)\n self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')\n self.gerber_file_name = file\n self.scaled = False\n if DEBUG:\n print('Scroll region is : ', self.bounding_box_size)\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTO')\n finally:\n self.file_gto = False\n self.load_gerber_gtp(os.path.join(path, file))\n self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))\n\n def load_gerber_gtp(self, file_path):\n self.file_gtp = True\n try:\n print(file_path)\n new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'\n print('final name =', new_file)\n if file_path:\n try:\n with open(new_file, 'r') as gerber_file:\n self.file_gtp_commands = gerber_file.read().splitlines(\n )\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_gtp_commands)\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTP')\n\n def __parse_file(self, commands):\n if DEBUG:\n print(self.file_commands)\n temp_list = commands\n for item in temp_list:\n if DEBUG:\n print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n GerberCanvas.units = 0\n if 'MM' in item:\n GerberCanvas.units = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n if DEBUG:\n print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n if DEBUG:\n print(value)\n self.AD_commands[name] = value\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n if DEBUG:\n print('I found a ', item)\n for key, value in self.AD_commands.items():\n 
self.current_ad_command = key\n if item in key:\n if 'R,' in value:\n print(value)\n x, y = self.__get_rectangle_size(value)\n self.rect_x = x\n self.rect_y = y\n print('Half of x is: ', float(self.rect_x) / 2)\n elif 'C,' in value:\n print(value)\n self.current_aperture = self.__get_circle_diameter(\n value)\n elif 'O,' in value:\n pass\n elif 'P,' in value:\n pass\n elif 'TARGET' in value:\n pass\n elif 'THERMAL' in value:\n pass\n if 'D03' in item:\n if DEBUG:\n print('current key is = ', self.current_ad_command)\n print(self.AD_commands[self.current_ad_command])\n if 'R,' in self.AD_commands[self.current_ad_command]:\n if DEBUG:\n print('draw a rectangle')\n x0 = float(self.start_x) - float(self.rect_x) / 2\n y0 = float(self.start_y) + float(self.rect_y) / 2\n x1 = float(self.start_x) + float(self.rect_x) / 2\n y1 = float(self.start_y) - float(self.rect_y) / 2\n self.my_canvas.create_rectangle(str(x0) + GerberCanvas.\n units_string[GerberCanvas.units], str(y0) +\n GerberCanvas.units_string[GerberCanvas.units], str(\n x1) + GerberCanvas.units_string[GerberCanvas.units],\n str(y1) + GerberCanvas.units_string[GerberCanvas.\n units], outline='white', fill='black')\n if 'C,' in self.AD_commands[self.current_ad_command]:\n print('draw a circle')\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n if self.file_gto:\n self.__get_numbers(item)\n if DEBUG:\n print(self.start_x, ',', self.start_y, ',', self.x,\n ',', self.y)\n self.my_canvas.create_line(self.start_x + 'i', self.\n start_y + 'i', self.x + 'i', self.y + 'i', width=\n self.current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n if self.file_gto:\n self.start_x = self.x\n self.start_y = self.y\n 
self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n try:\n self.my_canvas.create_oval(str(cp_x -\n radius) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], outline='black', width=self.\n current_aperture)\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n break\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if DEBUG:\n print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n else:\n radius = 0.0\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(\n self.start_y) - cp_y, float(self.start_x) -\n cp_x))\n end_angle = math.degrees(math.atan2(float(self.\n y) - cp_y, float(self.x) - cp_x))\n try:\n self.my_canvas.create_arc(str(cp_x + radius\n ) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], style=tk.ARC, width=self.\n current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline\n ='black')\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n\n @staticmethod\n def __get_circle_diameter(value):\n return value[3:len(value)]\n\n @staticmethod\n def __get_rectangle_size(value):\n print(value)\n find_x = value.find('X'[0:len(value)])\n width = 
value[2:find_x]\n length = value[find_x + 1:len(value)]\n print(width, length)\n return width, length\n\n def __get_extent(self, radius):\n distance = self.__distance(float(self.start_x), float(self.start_y),\n float(self.x), float(self.y))\n if DEBUG:\n print('distance = ', distance)\n number = 1 - distance ** 2 / (2 * radius ** 2)\n result = number - int(number)\n return math.acos(result)\n\n @staticmethod\n def __distance(start_x, start_y, end_x, end_y):\n \"\"\"calculate distance between two points\n :param start_x\n :param start_y\n :param end_x\n :param end_y\n \"\"\"\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance\n\n def __set_direction(self):\n if self.x == self.start_x:\n if self.y < self.start_y:\n self.direction = 90\n else:\n self.direction = 270\n if self.y == self.start_y:\n if self.x < self.start_x:\n self.direction = 0\n else:\n self.direction = 180\n\n def __get_numbers(self, item):\n found = 0\n if 'I' in item and 'J' in item and found == 0:\n found = 1\n i_start = item.find('I')\n j_start = item.find('J')\n d_start = item.find('D')\n i_temp = item[i_start + 1:j_start]\n j_temp = item[j_start + 1:d_start]\n j_temp = str(int(j_temp) * -1)\n self.i = self.__format_number(i_temp)\n self.j = self.__format_number(j_temp)\n if 'X' and 'Y' in item:\n found = 0\n if 'X' in item and 'Y' in item and found == 0:\n found = 1\n x_start = item.find('X')\n y_start = item.find('Y')\n d_start = item.find('D')\n x_temp = item[x_start + 1:y_start]\n y_temp = item[y_start + 1:d_start]\n if ('I' or 'J') in y_temp:\n for i in range(1, len(y_temp)):\n if y_temp[i] == 'I':\n y_temp = y_temp[0:i]\n break\n y_temp = str(int(y_temp) * -1)\n self.x = self.__format_number(x_temp)\n self.y = self.__format_number(y_temp)\n if 'X' in item and found == 0:\n found = 1\n x_start = item.find('X')\n d_start = item.find('D')\n x_temp = item[x_start + 1:d_start]\n self.x = self.__format_number(x_temp)\n if 'Y' in item and found == 0:\n found = 
1\n y_start = item.find('Y')\n d_start = item.find('D')\n y_temp = item[y_start + 1:d_start]\n y_temp = str(int(y_temp) * -1)\n self.y = self.__format_number(y_temp)\n\n def __format_number(self, number):\n how_long = len(number)\n if how_long <= int(self.x_format[1]):\n if '-' in number:\n temp = number[1:len(number)]\n return '-.' + temp.zfill(int(self.x_format[1]))\n else:\n return '.' + number.zfill(int(self.x_format[1]))\n elif how_long > int(self.x_format[1]):\n last = number[-5:len(number)]\n first = number[0:len(number) - 5]\n if '-' in number:\n return first + '.' + last\n else:\n return first + '.' + last\n\n def high_lite_part(self, x, y, layer):\n x1 = self.__format_pnp(x)\n y1 = self.__format_pnp(y) * -1\n last_x = float(x1) + 0.1\n last_y = float(y1) + 0.1\n if layer == 'TopLayer':\n color = 'red'\n else:\n color = 'blue'\n self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', \n str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=\n color, fill=color)\n\n def delete_current_highlight(self):\n if self.__part_selected:\n self.my_canvas.delete(self.__part_selected)\n\n def __scale_image_up(self, event=None):\n self.scale_factor = 1\n self.scale_factor += 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image_down(self, event=None):\n self.scale_factor = 1\n self.scale_factor -= 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n if PickPlace.is_file_loaded:\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image(self, event=None):\n if event.delta >= 120:\n self.__scale_image_up()\n elif event.delta <= -120:\n self.__scale_image_down()\n self.scaled = True\n\n @staticmethod\n def __format_pnp(number):\n move1 = float(number) / 10\n move2 = move1 / 10\n final = move2 / 10\n return final\n\n def __parse_file_gtp(self):\n temp_list = self.file_commands\n for 
item in temp_list:\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n self.__inch = 1\n self.__mm = 0\n if 'MM' in item:\n self.__inch = 0\n self.__mm = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n self.AD_commands[name] = value[2:len(value)]\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n for key, value in self.AD_commands.items():\n if item in key:\n self.current_aperture = value\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n self.__get_numbers(item)\n self.my_canvas.create_line(self.start_x + 'i', self.start_y +\n 'i', self.x + 'i', self.y + 'i', width=self.\n current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n self.my_canvas.create_oval(str(cp_x - radius) + 'i',\n str(cp_y - radius) + 'i', str(cp_x + radius) +\n 'i', str(cp_y + radius) + 'i', outline='black',\n width=self.current_aperture)\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if 
float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.\n start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) -\n cp_y, float(self.x) - cp_x))\n ext = math.degrees(self.__get_extent(radius))\n self.my_canvas.create_arc(str(cp_x + radius) + 'i',\n str(cp_y + radius) + 'i', str(cp_x - radius) +\n 'i', str(cp_y - radius) + 'i', style=tk.ARC,\n width=self.current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline='black')\n",
"step-4": "<mask token>\nDEBUG = False\n\n\nclass GerberCanvas:\n file_gto = False\n file_gtp = False\n units = 0\n units_string = 'i', 'm'\n \"\"\"\n my canvas\n \"\"\"\n\n def __init__(self, frame):\n self.x_format = ''\n self.y_format = ''\n self.units = ''\n self.quadrant_mode = 0\n self.file_commands = ''\n self.file_gtp_commands = ''\n self.gerber_file_name = ''\n self.AD_commands = {}\n self.current_aperture = ''\n self.x = '0'\n self.y = '0'\n self.i = '0'\n self.j = '0'\n self.last_x = ''\n self.last_y = ''\n self.start_x = ''\n self.start_y = ''\n self.direction = 0\n self.graphics_mode = 0\n self.scaled = False\n self.bounding_box_size = ()\n self._canvas_frame = frame\n self.create_canvas()\n\n def create_canvas(self):\n self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')\n self.my_canvas.pack(expand=True, fill='both')\n if sys.platform == 'linux':\n self.my_canvas.bind('<Button-4>', self.__scale_image_up)\n self.my_canvas.bind('<Button-5>', self.__scale_image_down)\n else:\n self.my_canvas.bind('<MouseWheel>', self.__scale_image)\n self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.\n my_canvas.yview)\n self.y_scrollbar.pack(expand=True, fill='y', anchor='e')\n self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.\n HORIZONTAL, command=self.my_canvas.xview)\n self.x_scrollbar.pack(fill='x', anchor='s')\n if sys.platform == 'linux':\n self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,\n yscrollcommand=self.y_scrollbar.set)\n self.__part_selected = 0\n\n def load_gerber(self, path, file):\n \"\"\"load gerber file\n :param path: path to the file\n :param file: file name to use\n \"\"\"\n try:\n all_ids = self.my_canvas.find_all()\n if all_ids:\n try:\n for item in all_ids:\n print(item)\n self.my_canvas.delete(item)\n except tk.TclError:\n messagebox.showerror('Error', tk.TclError)\n if path:\n self.file_gto = True\n try:\n with open(os.path.join(path, file), 'r') as gerber_file:\n self.file_commands = 
gerber_file.read().splitlines()\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_commands)\n self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')\n self.gerber_file_name = file\n self.scaled = False\n if DEBUG:\n print('Scroll region is : ', self.bounding_box_size)\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTO')\n finally:\n self.file_gto = False\n self.load_gerber_gtp(os.path.join(path, file))\n self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))\n\n def load_gerber_gtp(self, file_path):\n self.file_gtp = True\n try:\n print(file_path)\n new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'\n print('final name =', new_file)\n if file_path:\n try:\n with open(new_file, 'r') as gerber_file:\n self.file_gtp_commands = gerber_file.read().splitlines(\n )\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_gtp_commands)\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTP')\n\n def __parse_file(self, commands):\n if DEBUG:\n print(self.file_commands)\n temp_list = commands\n for item in temp_list:\n if DEBUG:\n print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n GerberCanvas.units = 0\n if 'MM' in item:\n GerberCanvas.units = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n if DEBUG:\n print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n if DEBUG:\n print(value)\n self.AD_commands[name] = value\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n if DEBUG:\n print('I found a ', item)\n for 
key, value in self.AD_commands.items():\n self.current_ad_command = key\n if item in key:\n if 'R,' in value:\n print(value)\n x, y = self.__get_rectangle_size(value)\n self.rect_x = x\n self.rect_y = y\n print('Half of x is: ', float(self.rect_x) / 2)\n elif 'C,' in value:\n print(value)\n self.current_aperture = self.__get_circle_diameter(\n value)\n elif 'O,' in value:\n pass\n elif 'P,' in value:\n pass\n elif 'TARGET' in value:\n pass\n elif 'THERMAL' in value:\n pass\n if 'D03' in item:\n if DEBUG:\n print('current key is = ', self.current_ad_command)\n print(self.AD_commands[self.current_ad_command])\n if 'R,' in self.AD_commands[self.current_ad_command]:\n if DEBUG:\n print('draw a rectangle')\n x0 = float(self.start_x) - float(self.rect_x) / 2\n y0 = float(self.start_y) + float(self.rect_y) / 2\n x1 = float(self.start_x) + float(self.rect_x) / 2\n y1 = float(self.start_y) - float(self.rect_y) / 2\n self.my_canvas.create_rectangle(str(x0) + GerberCanvas.\n units_string[GerberCanvas.units], str(y0) +\n GerberCanvas.units_string[GerberCanvas.units], str(\n x1) + GerberCanvas.units_string[GerberCanvas.units],\n str(y1) + GerberCanvas.units_string[GerberCanvas.\n units], outline='white', fill='black')\n if 'C,' in self.AD_commands[self.current_ad_command]:\n print('draw a circle')\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n if self.file_gto:\n self.__get_numbers(item)\n if DEBUG:\n print(self.start_x, ',', self.start_y, ',', self.x,\n ',', self.y)\n self.my_canvas.create_line(self.start_x + 'i', self.\n start_y + 'i', self.x + 'i', self.y + 'i', width=\n self.current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n if self.file_gto:\n 
self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n try:\n self.my_canvas.create_oval(str(cp_x -\n radius) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], outline='black', width=self.\n current_aperture)\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n break\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if DEBUG:\n print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n else:\n radius = 0.0\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(\n self.start_y) - cp_y, float(self.start_x) -\n cp_x))\n end_angle = math.degrees(math.atan2(float(self.\n y) - cp_y, float(self.x) - cp_x))\n try:\n self.my_canvas.create_arc(str(cp_x + radius\n ) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], style=tk.ARC, width=self.\n current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline\n ='black')\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n\n @staticmethod\n def __get_circle_diameter(value):\n return value[3:len(value)]\n\n @staticmethod\n def __get_rectangle_size(value):\n print(value)\n find_x = 
value.find('X'[0:len(value)])\n width = value[2:find_x]\n length = value[find_x + 1:len(value)]\n print(width, length)\n return width, length\n\n def __get_extent(self, radius):\n distance = self.__distance(float(self.start_x), float(self.start_y),\n float(self.x), float(self.y))\n if DEBUG:\n print('distance = ', distance)\n number = 1 - distance ** 2 / (2 * radius ** 2)\n result = number - int(number)\n return math.acos(result)\n\n @staticmethod\n def __distance(start_x, start_y, end_x, end_y):\n \"\"\"calculate distance between two points\n :param start_x\n :param start_y\n :param end_x\n :param end_y\n \"\"\"\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance\n\n def __set_direction(self):\n if self.x == self.start_x:\n if self.y < self.start_y:\n self.direction = 90\n else:\n self.direction = 270\n if self.y == self.start_y:\n if self.x < self.start_x:\n self.direction = 0\n else:\n self.direction = 180\n\n def __get_numbers(self, item):\n found = 0\n if 'I' in item and 'J' in item and found == 0:\n found = 1\n i_start = item.find('I')\n j_start = item.find('J')\n d_start = item.find('D')\n i_temp = item[i_start + 1:j_start]\n j_temp = item[j_start + 1:d_start]\n j_temp = str(int(j_temp) * -1)\n self.i = self.__format_number(i_temp)\n self.j = self.__format_number(j_temp)\n if 'X' and 'Y' in item:\n found = 0\n if 'X' in item and 'Y' in item and found == 0:\n found = 1\n x_start = item.find('X')\n y_start = item.find('Y')\n d_start = item.find('D')\n x_temp = item[x_start + 1:y_start]\n y_temp = item[y_start + 1:d_start]\n if ('I' or 'J') in y_temp:\n for i in range(1, len(y_temp)):\n if y_temp[i] == 'I':\n y_temp = y_temp[0:i]\n break\n y_temp = str(int(y_temp) * -1)\n self.x = self.__format_number(x_temp)\n self.y = self.__format_number(y_temp)\n if 'X' in item and found == 0:\n found = 1\n x_start = item.find('X')\n d_start = item.find('D')\n x_temp = item[x_start + 1:d_start]\n self.x = self.__format_number(x_temp)\n 
if 'Y' in item and found == 0:\n found = 1\n y_start = item.find('Y')\n d_start = item.find('D')\n y_temp = item[y_start + 1:d_start]\n y_temp = str(int(y_temp) * -1)\n self.y = self.__format_number(y_temp)\n\n def __format_number(self, number):\n how_long = len(number)\n if how_long <= int(self.x_format[1]):\n if '-' in number:\n temp = number[1:len(number)]\n return '-.' + temp.zfill(int(self.x_format[1]))\n else:\n return '.' + number.zfill(int(self.x_format[1]))\n elif how_long > int(self.x_format[1]):\n last = number[-5:len(number)]\n first = number[0:len(number) - 5]\n if '-' in number:\n return first + '.' + last\n else:\n return first + '.' + last\n\n def high_lite_part(self, x, y, layer):\n x1 = self.__format_pnp(x)\n y1 = self.__format_pnp(y) * -1\n last_x = float(x1) + 0.1\n last_y = float(y1) + 0.1\n if layer == 'TopLayer':\n color = 'red'\n else:\n color = 'blue'\n self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', \n str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=\n color, fill=color)\n\n def delete_current_highlight(self):\n if self.__part_selected:\n self.my_canvas.delete(self.__part_selected)\n\n def __scale_image_up(self, event=None):\n self.scale_factor = 1\n self.scale_factor += 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image_down(self, event=None):\n self.scale_factor = 1\n self.scale_factor -= 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n if PickPlace.is_file_loaded:\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image(self, event=None):\n if event.delta >= 120:\n self.__scale_image_up()\n elif event.delta <= -120:\n self.__scale_image_down()\n self.scaled = True\n\n @staticmethod\n def __format_pnp(number):\n move1 = float(number) / 10\n move2 = move1 / 10\n final = move2 / 10\n return final\n\n def 
__parse_file_gtp(self):\n temp_list = self.file_commands\n for item in temp_list:\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n self.__inch = 1\n self.__mm = 0\n if 'MM' in item:\n self.__inch = 0\n self.__mm = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n self.AD_commands[name] = value[2:len(value)]\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n for key, value in self.AD_commands.items():\n if item in key:\n self.current_aperture = value\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n self.__get_numbers(item)\n self.my_canvas.create_line(self.start_x + 'i', self.start_y +\n 'i', self.x + 'i', self.y + 'i', width=self.\n current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n self.my_canvas.create_oval(str(cp_x - radius) + 'i',\n str(cp_y - radius) + 'i', str(cp_x + radius) +\n 'i', str(cp_y + radius) + 'i', outline='black',\n width=self.current_aperture)\n else:\n cp_x = float(self.start_x) + 
float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.\n start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) -\n cp_y, float(self.x) - cp_x))\n ext = math.degrees(self.__get_extent(radius))\n self.my_canvas.create_arc(str(cp_x + radius) + 'i',\n str(cp_y + radius) + 'i', str(cp_x - radius) +\n 'i', str(cp_y - radius) + 'i', style=tk.ARC,\n width=self.current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline='black')\n",
"step-5": "import tkinter as tk\nfrom pickplace import PickPlace\nimport sys\nimport math\nfrom tkinter import messagebox\nimport os\n\nDEBUG = False\n\n\nclass GerberCanvas:\n\n file_gto = False\n file_gtp = False\n units = 0\n units_string = ('i', 'm')\n\n \"\"\"\n my canvas\n \"\"\"\n def __init__(self, frame):\n self.x_format = ''\n self.y_format = ''\n self.units = ''\n self.quadrant_mode = 0\n self.file_commands = ''\n self.file_gtp_commands = ''\n self.gerber_file_name = ''\n self.AD_commands = {} # dict to hold aperture commands\n self.current_aperture = ''\n self.x = '0'\n self.y = '0'\n self.i = '0'\n self.j = '0'\n self.last_x = ''\n self.last_y = ''\n self.start_x = ''\n self.start_y = ''\n self.direction = 0\n self.graphics_mode = 0\n self.scaled = False\n self.bounding_box_size = ()\n self._canvas_frame = frame\n self.create_canvas()\n\n def create_canvas(self):\n self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')\n self.my_canvas.pack(expand=True, fill='both')\n if sys.platform == 'linux':\n self.my_canvas.bind('<Button-4>', self.__scale_image_up)\n self.my_canvas.bind('<Button-5>', self.__scale_image_down)\n else:\n self.my_canvas.bind('<MouseWheel>', self.__scale_image)\n\n # fixme fix the scrollbars so that they work correctly\n self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.my_canvas.yview)\n self.y_scrollbar.pack(expand=True, fill='y', anchor='e')\n\n self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.HORIZONTAL, command=self.my_canvas.xview)\n self.x_scrollbar.pack(fill='x', anchor='s')\n\n # Set this only if using in Linux\n if sys.platform == 'linux':\n self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set, yscrollcommand=self.y_scrollbar.set)\n\n self.__part_selected = 0\n\n def load_gerber(self, path, file):\n \"\"\"load gerber file\n :param path: path to the file\n :param file: file name to use\n \"\"\"\n\n try:\n # file_path = askopenfilename(title='Open Top Silk Screen File', filetypes=[('GTO 
files', '*.GTO')],\n # initialdir='')\n\n all_ids = self.my_canvas.find_all()\n # delete the current image if one exist.\n if all_ids:\n try:\n for item in all_ids:\n print(item)\n self.my_canvas.delete(item)\n except tk.TclError:\n messagebox.showerror('Error', tk.TclError)\n\n if path:\n self.file_gto = True\n try:\n with open(os.path.join(path, file), 'r') as gerber_file:\n self.file_commands = gerber_file.read().splitlines()\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n # self._parse_file(gerber_file.read())\n self.__parse_file(self.file_commands)\n self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')\n self.gerber_file_name = file\n self.scaled = False\n # self.bounding_box_size = self.my_canvas.bbox('all')\n if DEBUG:\n print('Scroll region is : ', self.bounding_box_size)\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTO')\n finally:\n self.file_gto = False\n # load top pads into image\n self.load_gerber_gtp(os.path.join(path, file))\n self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))\n # self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set, yscrollcommand=self.y_scrollbar.set)\n\n def load_gerber_gtp(self, file_path):\n self.file_gtp = True\n try:\n print(file_path)\n new_file = 'c' + file_path[1:len(file_path)-3]+'GTP'\n print('final name =', new_file)\n if file_path:\n try:\n with open(new_file, 'r') as gerber_file:\n self.file_gtp_commands = gerber_file.read().splitlines()\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_gtp_commands)\n # self.scaled = False\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTP')\n\n def __parse_file(self, commands):\n if DEBUG:\n print(self.file_commands)\n temp_list = commands\n for item in temp_list:\n if DEBUG:\n print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n\n if '%MO' in item:\n self.units = 
item[3:5]\n if 'IN' in item:\n GerberCanvas.units = 0\n if 'MM' in item:\n GerberCanvas.units = 1\n # print('units is ', self.units)\n\n if 'G01' in item:\n self.graphics_mode = 1 # sets Interpolation mode graphics state parameter to linear\n\n if 'G03' in item:\n self.direction = 270 # CounterClockWise\n\n if 'G02' in item:\n self.direction = 90 # ClockWise\n\n if 'G74' in item:\n self.quadrant_mode = 0 # single Quadrant mode\n\n if 'G75' in item:\n self.quadrant_mode = 1 # Multi quadrant mode\n\n if '%AD' in item: # define the aperture\n name = item[3:item.find(',')-1]\n if DEBUG:\n print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start-1:stop]\n if DEBUG:\n print(value)\n self.AD_commands[name] = value\n\n if item[0:1] == 'D': # set the current aperture\n item = item[0:item.find('*')]\n if DEBUG:\n print('I found a ', item)\n for key, value in self.AD_commands.items():\n self.current_ad_command = key\n if item in key:\n if 'R,' in value: # for a rectangle\n print(value)\n x, y = self.__get_rectangle_size(value)\n self.rect_x = x\n self.rect_y = y\n print('Half of x is: ', float(self.rect_x)/2)\n # todo send this to a function to get size\n elif 'C,' in value: # for a circle\n print(value)\n self.current_aperture = self.__get_circle_diameter(value)\n elif 'O,' in value: # for a ob-round\n pass\n elif 'P,' in value: # for a polygon\n pass\n elif 'TARGET' in value:\n pass\n elif 'THERMAL' in value:\n pass\n\n # This is the Flash command. 
Create a flash of the object.\n if 'D03' in item:\n if DEBUG:\n print('current key is = ', self.current_ad_command)\n print(self.AD_commands[self.current_ad_command])\n if 'R,' in self.AD_commands[self.current_ad_command]:\n if DEBUG:\n print('draw a rectangle')\n x0 = float(self.start_x) - float(self.rect_x) / 2\n y0 = float(self.start_y) + float(self.rect_y) / 2\n x1 = float(self.start_x) + float(self.rect_x) / 2\n y1 = float(self.start_y) - float(self.rect_y) / 2\n self.my_canvas.create_rectangle(str(x0) + GerberCanvas.units_string[GerberCanvas.units],\n str(y0) + GerberCanvas.units_string[GerberCanvas.units],\n str(x1) + GerberCanvas.units_string[GerberCanvas.units],\n str(y1) + GerberCanvas.units_string[GerberCanvas.units],\n outline='white', fill='black')\n if 'C,' in self.AD_commands[self.current_ad_command]:\n print('draw a circle')\n\n # the D02 command is the move to command.\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n # if ('D01' in item) and (('I' not in item) and ('J' not in item)): # draw a line\n if ('D01' in item) and (('I' not in item) and ('J' not in item)):\n if self.file_gto: # draw a line\n self.__get_numbers(item)\n if DEBUG:\n print(self.start_x, ',', self.start_y, ',', self.x, ',', self.y)\n self.my_canvas.create_line(self.start_x+'i', self.start_y+'i', self.x+'i', self.y+'i',\n width=self.current_aperture+'i')\n self.start_x = self.x\n self.start_y = self.y\n\n # this Draws a circle.\n if 'D01' and 'I' and 'J' in item: # draw a circle/arc\n if self.file_gto:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item) # test\n\n if self.quadrant_mode: # This draws circles or arcs\n if (self.start_x == self.x) and (self.start_y == self.y): # This draws circles\n cp_x = float(self.start_x) + float(self.i)\n cp_y = 
float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n try:\n self.my_canvas.create_oval(str(cp_x - radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_y - radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_x + radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_y + radius) + GerberCanvas.units_string[GerberCanvas.units],\n outline='black', width=self.current_aperture)\n except UnboundLocalError():\n messagebox.showwarning('Warning', 'Something went wrong.')\n break\n else: # This draws arcs\n # self.evaluate_arc_command(item)\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n\n if DEBUG:\n print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n else:\n radius = 0.0\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) - cp_y, float(self.x) - cp_x))\n # radius = math.degrees(self.__get_extent(radius))\n try:\n self.my_canvas.create_arc(str(cp_x + radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_y + radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_x - radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_y - radius) + GerberCanvas.units_string[GerberCanvas.units],\n style=tk.ARC, width=self.current_aperture, start=start_angle,\n extent=end_angle-start_angle, outline='black')\n # self.my_canvas.create_arc('0', '0', '100', '100', style='arc', start=90, extent=180,\n # outline='purple')\n except UnboundLocalError():\n messagebox.showwarning('Warning', 'Something went wrong.')\n\n @staticmethod\n def __get_circle_diameter(value):\n return value[3:len(value)]\n\n @staticmethod\n def __get_rectangle_size(value):\n print(value)\n find_x = value.find('X'[0:len(value)])\n width = 
value[2:find_x]\n length = value[find_x+1:len(value)]\n print(width, length)\n return width, length\n\n def __get_extent(self, radius):\n distance = self.__distance(float(self.start_x), float(self.start_y), float(self.x), float(self.y))\n if DEBUG:\n print('distance = ', distance)\n number = (1-((distance**2) / (2*(radius**2))))\n result = number - int(number)\n return math.acos(result)\n\n @staticmethod\n def __distance(start_x, start_y, end_x, end_y):\n \"\"\"calculate distance between two points\n :param start_x\n :param start_y\n :param end_x\n :param end_y\n \"\"\"\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance\n\n def __set_direction(self):\n if self.x == self.start_x:\n if self.y < self.start_y:\n self.direction = 90\n else:\n self.direction = 270\n if self.y == self.start_y:\n if self.x < self.start_x:\n self.direction = 0\n else:\n self.direction = 180\n\n def __get_numbers(self, item):\n found = 0\n\n if 'I' in item and 'J' in item and found == 0:\n found = 1\n i_start = item.find('I')\n j_start = item.find('J')\n d_start = item.find('D')\n\n i_temp = item[i_start+1:j_start]\n j_temp = item[j_start+1:d_start]\n j_temp = str(int(j_temp) * -1)\n\n self.i = self.__format_number(i_temp)\n self.j = self.__format_number(j_temp)\n\n if 'X' and 'Y' in item:\n found = 0\n\n if 'X' in item and 'Y' in item and found == 0:\n found = 1\n x_start = item.find('X')\n y_start = item.find('Y')\n d_start = item.find('D')\n\n x_temp = item[x_start+1:y_start]\n y_temp = item[y_start+1:d_start]\n if ('I' or 'J') in y_temp:\n for i in range(1, len(y_temp)):\n if y_temp[i] == 'I':\n y_temp = y_temp[0:i]\n break\n y_temp = str(int(y_temp) * -1)\n\n self.x = self.__format_number(x_temp)\n self.y = self.__format_number(y_temp)\n\n if 'X' in item and found == 0:\n found = 1\n x_start = item.find('X')\n d_start = item.find('D')\n\n x_temp = item[x_start+1:d_start]\n\n self.x = self.__format_number(x_temp)\n\n if 'Y' in item and found == 0:\n 
found = 1\n y_start = item.find('Y')\n d_start = item.find('D')\n\n y_temp = item[y_start + 1:d_start]\n # flip my y axis\n y_temp = str(int(y_temp) * -1)\n\n self.y = self.__format_number(y_temp)\n\n def __format_number(self, number):\n how_long = len(number)\n\n if how_long <= int(self.x_format[1]):\n if '-' in number:\n temp = number[1:len(number)]\n return '-.' + temp.zfill(int(self.x_format[1]))\n else:\n return '.' + number.zfill(int(self.x_format[1]))\n elif how_long > int(self.x_format[1]):\n last = number[-5:len(number)]\n first = number[0:len(number)-5]\n if '-' in number:\n return first + '.' + last\n # return '-' + first + '.' + last\n else:\n return first + '.' + last\n\n def high_lite_part(self, x, y, layer):\n x1 = self.__format_pnp(x)\n y1 = self.__format_pnp(y) * -1\n last_x = float(x1) + .1\n last_y = float(y1) + .1\n if layer == 'TopLayer':\n color = 'red'\n else:\n color = 'blue'\n self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i',\n outline=color, fill=color)\n # elif layer == 'BottomLayer':\n # self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', str(y1) + 'i', str(last_x) + 'i',\n # str(last_y) + 'i', outline='blue', fill='blue')\n\n def delete_current_highlight(self):\n if self.__part_selected:\n self.my_canvas.delete(self.__part_selected)\n\n def __scale_image_up(self, event=None):\n self.scale_factor = 1\n self.scale_factor += .1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image_down(self, event=None):\n self.scale_factor = 1\n self.scale_factor -= .1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n if PickPlace.is_file_loaded:\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image(self, event=None):\n if event.delta >= 120:\n self.__scale_image_up()\n elif event.delta <= -120:\n 
self.__scale_image_down()\n self.scaled = True\n\n @staticmethod\n def __format_pnp(number):\n move1 = float(number) / 10\n move2 = move1 / 10\n final = move2 / 10\n return final\n\n def __parse_file_gtp(self):\n # print(self.file_commands)\n temp_list = self.file_commands\n for item in temp_list:\n # print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n self.__inch = 1\n self.__mm = 0\n if 'MM' in item:\n self.__inch = 0\n self.__mm = 1\n # print('units is ', self.units)\n if 'G01' in item:\n self.graphics_mode = 1 # sets Interpolation mode graphics state parameter to linear\n if 'G03' in item:\n self.direction = 270 # CounterClockWise\n if 'G02' in item:\n self.direction = 90 # ClockWise\n if 'G74' in item:\n self.quadrant_mode = 0 # single Quadrant mode\n if 'G75' in item:\n self.quadrant_mode = 1 # Multi quadrant mode\n if '%AD' in item: # diameter of the circle\n name = item[3:item.find(',') - 1]\n # print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n # print(value)\n self.AD_commands[name] = value[2:len(value)]\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n # print('I found a ', item)\n for key, value in self.AD_commands.items():\n if item in key:\n self.current_aperture = value\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if ('D01' in item) and (('I' not in item) and ('J' not in item)): # draw a line\n self.__get_numbers(item)\n # print(self.start_x, ',', self.start_y, ',', self.x, ',', self.y)\n self.my_canvas.create_line(self.start_x + 'i', self.start_y + 'i', self.x + 'i', self.y + 'i',\n width=self.current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n # this Draws a 
circle.\n if 'D01' and 'I' and 'J' in item: # draw a circle\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n\n if self.quadrant_mode: # This draws circles or arcs\n if (self.start_x == self.x) and (self.start_y == self.y): # This draws circles\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n self.my_canvas.create_oval(str(cp_x - radius) + 'i', str(cp_y - radius) + 'i',\n str(cp_x + radius) + 'i', str(cp_y + radius) + 'i',\n outline='black', width=self.current_aperture)\n\n else: # This draws arcs\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n # print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) - cp_y, float(self.x) - cp_x))\n ext = math.degrees(self.__get_extent(radius))\n self.my_canvas.create_arc(str(cp_x + radius) + 'i', str(cp_y + radius) + 'i',\n str(cp_x - radius) + 'i', str(cp_y - radius) + 'i', style=tk.ARC,\n width=self.current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline='black')\n # self.my_canvas.create_arc('0', '0', '100', '100', style='arc', start=90, extent=180,\n # outline='purple')\n\n",
"step-ids": [
18,
19,
20,
23,
25
]
}
|
[
18,
19,
20,
23,
25
] |
from base import *
try:
from .prod_local import *
except:
pass
# we currently don't have an interface that allows an administrator
# to create a repository for another user. Until we have added this
# capability, allow users to create repos.
ELEMENTARY_ALLOW_REPO_CREATION = True
|
normal
|
{
"blob_id": "709271b98fc2b40c763522c54488be36968f02d8",
"index": 346,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from .prod_local import *\nexcept:\n pass\n<mask token>\n",
"step-3": "<mask token>\ntry:\n from .prod_local import *\nexcept:\n pass\nELEMENTARY_ALLOW_REPO_CREATION = True\n",
"step-4": "from base import *\ntry:\n from .prod_local import *\nexcept:\n pass\nELEMENTARY_ALLOW_REPO_CREATION = True\n",
"step-5": "from base import *\n\ntry:\n from .prod_local import *\nexcept:\n pass\n\n# we currently don't have an interface that allows an administrator\n# to create a repository for another user. Until we have added this\n# capability, allow users to create repos.\nELEMENTARY_ALLOW_REPO_CREATION = True \n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import tkinter as tk
from tkinter import Tk, BOTH,RIGHT,LEFT,END
from tkinter.ttk import Frame, Label, Style,Entry
from tkinter.ttk import Frame, Button, Style
import random
import time
class StartPage(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self,bg="#d0a3d8",height=200,width=200)
tk.Label(self, text="Mini Jeu: \n P-0", font=('Helvetica', 18, "bold")).pack(side="top", fill="x", pady=5)
bt=Button(self, text="Jouer",
command=lambda: master.switch_frame(PageOne,num=True))
bt.pack(fill=BOTH,expand=True)
# tk.Button(self, text="Go to page two",
# command=lambda: master.switch_frame(PageTwo)).pack()
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
# tk.Frame.configure(self,bg='blue')
# tk.Label(self, text="Page de jeu", font=('Helvetica', 18, "bold")).pack(side="top", fill=BOTH, pady=5)
frame_left=Frame(self)
self.frame_left=frame_left
frame_left.pack(fill=BOTH,side=LEFT)
# add entry to this frame
self.label=tk.Label(frame_left , text="", font=('Helvetica', 10), fg='red')
self.label.pack()
self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)
self.bagniere_bleu.pack(side='top',anchor='c')
self.bagniere_bleu.create_rectangle(0,3,50,0,fill='blue')
self.Nombre_1=Entry(frame_left)
self.Nombre_1.pack(side='top',anchor='w')
# bagnier pour differencier les couleurs
self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)
self.bagniere_bleu.pack(side='top',anchor='c')
self.bagniere_bleu.create_rectangle(0,3,50,0,fill='red')
self.Nombre_2=Entry(frame_left)
self.Nombre_2.pack(side='top',anchor='w')
tk.Button(frame_left, text="Go back to start page",
command=lambda: master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle=tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
# self.update_clock()
self.master=master
self.commencer_un_jeu()
def create_circle(self,r, canvasName,color): #center coordinates, radius
x=random.randint(20,300)
y=random.randint(20,250)
x0 = x - r
y0 = y - r
x1 = x + r
y1 = y + r
return canvasName.create_oval(x0, y0, x1, y1,fill=color)
def create_ret(self,canvas):
return canvas.create_rectangle(0,500,500,0,fill="#fdffdb")
def update_clock(self):
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000,self.update_clock)
def commencer_un_jeu(self):
self.fin=True
try :
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0,END)
self.Nombre_1.delete(0,END)
except:
pass
self.bt_valider=tk.Button(self.frame_left,text='valider', command=lambda: self.fin_du_jeu())
self. bt_valider.pack(side='top',anchor='w')
self.debut=time.time()
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle=tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1=random.randint(1,10)
self.nombre_j2=random.randint(1,10)
for _ in range(self.nombre_j2):
self.create_circle(20,self.rectangle,'red')
for _ in range(self.nombre_j1):
self.create_circle(20,self.rectangle,'blue')
def fin_du_jeu(self):
self.fin=False
if(int(self.Nombre_1.get())==self.nombre_j1 ) and (int(self.Nombre_2.get())==self.nombre_j2):
#jeu gagné
self.bt_valider.destroy()
self.rejouer=Button(self.frame_left, text="Rejouer",
command=lambda: self.commencer_un_jeu())
self.rejouer.pack(side='top',fill='x')
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200,150,fill="darkblue",font="Times 20 italic bold",
text="Victoire")
else:
self.bt_valider.destroy()
self.rejouer=Button(self.frame_left, text="Rejouer",
command=lambda: self.commencer_un_jeu())
self.rejouer.pack(side='top',fill='x')
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200,150,fill="darkblue",font="Times 20 italic bold",
text="Defaite")
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self,frame_game):
self.after(1000,frame_game.update_clock)
def switch_frame(self, frame_class,num=False):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
# try:
# if num:
# print(frame_class)
# self.timer(frame_class)
# except:
# print("le frame n'est pas le bon")
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self,bg='red')
tk.Label(self, text="Page two", font=('Helvetica', 18, "bold")).pack(side="top", fill="x", pady=5)
tk.Button(self, text="Go back to start page",
command=lambda: master.switch_frame(StartPage)).pack()
if __name__ == "__main__":
app = SampleApp()
app.geometry('800x800')
app.mainloop()
|
normal
|
{
"blob_id": "4e6401672d4762b444bb679e4cc39ada04193a26",
"index": 1882,
"step-1": "<mask token>\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n <mask token>\n <mask token>\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n 
self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n 
tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n <mask token>\n\n def create_ret(self, canvas):\n return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = 
time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n 
def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass StartPage(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='#d0a3d8', height=200, width=200)\n tk.Label(self, text='Mini Jeu: \\n P-0', font=('Helvetica', 18, 'bold')\n ).pack(side='top', fill='x', pady=5)\n bt = Button(self, text='Jouer', command=lambda : master.\n switch_frame(PageOne, num=True))\n bt.pack(fill=BOTH, expand=True)\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n\n def create_circle(self, r, canvasName, color):\n x = random.randint(20, 300)\n y = random.randint(20, 250)\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n return canvasName.create_oval(x0, y0, x1, y1, fill=color)\n\n def create_ret(self, canvas):\n return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = 
time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', 
time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\n<mask token>\n",
"step-4": "import tkinter as tk\nfrom tkinter import Tk, BOTH, RIGHT, LEFT, END\nfrom tkinter.ttk import Frame, Label, Style, Entry\nfrom tkinter.ttk import Frame, Button, Style\nimport random\nimport time\n\n\nclass StartPage(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='#d0a3d8', height=200, width=200)\n tk.Label(self, text='Mini Jeu: \\n P-0', font=('Helvetica', 18, 'bold')\n ).pack(side='top', fill='x', pady=5)\n bt = Button(self, text='Jouer', command=lambda : master.\n switch_frame(PageOne, num=True))\n bt.pack(fill=BOTH, expand=True)\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n\n def create_circle(self, r, canvasName, color):\n x = random.randint(20, 300)\n y = random.randint(20, 250)\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n return canvasName.create_oval(x0, y0, x1, y1, fill=color)\n\n def 
create_ret(self, canvas):\n return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', 
command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\nif __name__ == '__main__':\n app = SampleApp()\n app.geometry('800x800')\n app.mainloop()\n",
"step-5": "\nimport tkinter as tk\nfrom tkinter import Tk, BOTH,RIGHT,LEFT,END\nfrom tkinter.ttk import Frame, Label, Style,Entry\nfrom tkinter.ttk import Frame, Button, Style\nimport random\nimport time\n\nclass StartPage(tk.Frame):\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n \n tk.Frame.configure(self,bg=\"#d0a3d8\",height=200,width=200)\n\n tk.Label(self, text=\"Mini Jeu: \\n P-0\", font=('Helvetica', 18, \"bold\")).pack(side=\"top\", fill=\"x\", pady=5)\n bt=Button(self, text=\"Jouer\",\n command=lambda: master.switch_frame(PageOne,num=True))\n bt.pack(fill=BOTH,expand=True)\n\n \n # tk.Button(self, text=\"Go to page two\",\n # command=lambda: master.switch_frame(PageTwo)).pack()\n\nclass PageOne(tk.Frame):\n def __init__(self, master):\n \n\n tk.Frame.__init__(self, master)\n # tk.Frame.configure(self,bg='blue')\n # tk.Label(self, text=\"Page de jeu\", font=('Helvetica', 18, \"bold\")).pack(side=\"top\", fill=BOTH, pady=5)\n \n frame_left=Frame(self)\n self.frame_left=frame_left\n frame_left.pack(fill=BOTH,side=LEFT)\n\n\n # add entry to this frame \n self.label=tk.Label(frame_left , text=\"\", font=('Helvetica', 10), fg='red')\n self.label.pack()\n\n self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)\n self.bagniere_bleu.pack(side='top',anchor='c')\n self.bagniere_bleu.create_rectangle(0,3,50,0,fill='blue')\n\n \n\n self.Nombre_1=Entry(frame_left)\n self.Nombre_1.pack(side='top',anchor='w')\n\n# bagnier pour differencier les couleurs\n self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)\n self.bagniere_bleu.pack(side='top',anchor='c')\n self.bagniere_bleu.create_rectangle(0,3,50,0,fill='red')\n\n\n self.Nombre_2=Entry(frame_left)\n self.Nombre_2.pack(side='top',anchor='w')\n\n tk.Button(frame_left, text=\"Go back to start page\",\n command=lambda: master.switch_frame(StartPage)).pack(side='bottom')\n\n \n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle=tk.Canvas(self.frame1)\n 
self.rectangle.pack()\n self.create_ret(self.rectangle)\n \n # self.update_clock()\n self.master=master\n self.commencer_un_jeu()\n\n \n def create_circle(self,r, canvasName,color): #center coordinates, radius\n x=random.randint(20,300)\n y=random.randint(20,250)\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n return canvasName.create_oval(x0, y0, x1, y1,fill=color)\n def create_ret(self,canvas):\n return canvas.create_rectangle(0,500,500,0,fill=\"#fdffdb\")\n\n\n\n def update_clock(self):\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000,self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin=True\n try :\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0,END)\n self.Nombre_1.delete(0,END)\n\n except:\n pass\n\n\n self.bt_valider=tk.Button(self.frame_left,text='valider', command=lambda: self.fin_du_jeu())\n self. 
bt_valider.pack(side='top',anchor='w')\n\n self.debut=time.time()\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n \n\n self.rectangle.destroy()\n self.rectangle=tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n\n self.nombre_j1=random.randint(1,10)\n self.nombre_j2=random.randint(1,10)\n for _ in range(self.nombre_j2):\n self.create_circle(20,self.rectangle,'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20,self.rectangle,'blue')\n def fin_du_jeu(self):\n self.fin=False\n if(int(self.Nombre_1.get())==self.nombre_j1 ) and (int(self.Nombre_2.get())==self.nombre_j2):\n #jeu gagné\n \n self.bt_valider.destroy()\n self.rejouer=Button(self.frame_left, text=\"Rejouer\",\n command=lambda: self.commencer_un_jeu())\n \n self.rejouer.pack(side='top',fill='x')\n\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200,150,fill=\"darkblue\",font=\"Times 20 italic bold\",\n text=\"Victoire\")\n else:\n\n \n self.bt_valider.destroy()\n self.rejouer=Button(self.frame_left, text=\"Rejouer\",\n command=lambda: self.commencer_un_jeu())\n\n self.rejouer.pack(side='top',fill='x')\n\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200,150,fill=\"darkblue\",font=\"Times 20 italic bold\",\n text=\"Defaite\")\n\n\n \n\n \n\n\n\n\n \nclass SampleApp(tk.Tk):\n def __init__(self):\n\n tk.Tk.__init__(self)\n \n self._frame = None\n self.switch_frame(StartPage)\n \n\n def timer(self,frame_game):\n self.after(1000,frame_game.update_clock)\n\n\n def switch_frame(self, frame_class,num=False):\n 
new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n # try:\n \n # if num:\n # print(frame_class)\n # self.timer(frame_class) \n # except:\n # print(\"le frame n'est pas le bon\")\n\n\n\n\n\n\n\nclass PageTwo(tk.Frame):\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self,bg='red')\n tk.Label(self, text=\"Page two\", font=('Helvetica', 18, \"bold\")).pack(side=\"top\", fill=\"x\", pady=5)\n tk.Button(self, text=\"Go back to start page\",\n command=lambda: master.switch_frame(StartPage)).pack()\n\nif __name__ == \"__main__\":\n app = SampleApp()\n app.geometry('800x800')\n app.mainloop()",
"step-ids": [
11,
12,
15,
17,
18
]
}
|
[
11,
12,
15,
17,
18
] |
#!/usr/bin/env python
#
# Copyright 2017-2021 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from sqlalchemy.exc import *
from Pegasus.db.admin.admin_loader import *
from Pegasus.db.admin.versions.base_version import BaseVersion
from Pegasus.db.schema import *
# Schema version this migration module targets.
DB_VERSION = 8
# Module-level logger, named after this module for filtering in handlers.
log = logging.getLogger(__name__)
class Version(BaseVersion):
    """Schema migration that brings the database to version 8.

    Version 8 adds a nullable ``reason`` TEXT column to the
    ``master_workflowstate`` table.
    """

    def __init__(self, connection):
        super().__init__(connection)

    def update(self, force=False):
        """Upgrade the schema to version 8.

        :param force: unused; accepted for signature compatibility with
            the other migration versions.
        :raises Exception: if the ALTER TABLE fails for any reason other
            than the column already existing; the transaction is rolled
            back first.
        """
        log.info("Updating to version %s" % DB_VERSION)
        try:
            log.info("Updating master_workflowstate...")
            self.db.execute("ALTER TABLE master_workflowstate ADD reason TEXT NULL")
        except (OperationalError, ProgrammingError):
            # Best-effort: the column already exists (e.g. a partially
            # applied migration), so there is nothing to do.
            pass
        except Exception as e:
            self.db.rollback()
            log.exception(e)
            # Chain the original exception (PEP 3134) so the causal
            # traceback is preserved for callers; the raised type stays
            # Exception for backward compatibility.
            raise Exception(e) from e

        self.db.commit()

    def downgrade(self, force=False):
        """Downgrade is a no-op: ``reason`` accepts NULL values, so older
        code runs against the version-8 schema unchanged."""
|
normal
|
{
"blob_id": "12fd4e3bfb6821205a9b65b4d236b4158ec4ef1e",
"index": 7345,
"step-1": "<mask token>\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n\n def downgrade(self, force=False):\n \"\"\"Downgrade is not necessary as reason accepts NULL values\"\"\"\n",
"step-3": "<mask token>\nDB_VERSION = 8\nlog = logging.getLogger(__name__)\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n\n def downgrade(self, force=False):\n \"\"\"Downgrade is not necessary as reason accepts NULL values\"\"\"\n",
"step-4": "import logging\nfrom sqlalchemy.exc import *\nfrom Pegasus.db.admin.admin_loader import *\nfrom Pegasus.db.admin.versions.base_version import BaseVersion\nfrom Pegasus.db.schema import *\nDB_VERSION = 8\nlog = logging.getLogger(__name__)\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n\n def downgrade(self, force=False):\n \"\"\"Downgrade is not necessary as reason accepts NULL values\"\"\"\n",
"step-5": "#!/usr/bin/env python\n#\n# Copyright 2017-2021 University Of Southern California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport logging\n\nfrom sqlalchemy.exc import *\n\nfrom Pegasus.db.admin.admin_loader import *\nfrom Pegasus.db.admin.versions.base_version import BaseVersion\nfrom Pegasus.db.schema import *\n\nDB_VERSION = 8\n\nlog = logging.getLogger(__name__)\n\n\nclass Version(BaseVersion):\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info(\"Updating to version %s\" % DB_VERSION)\n try:\n log.info(\"Updating master_workflowstate...\")\n self.db.execute(\"ALTER TABLE master_workflowstate ADD reason TEXT NULL\")\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n\n self.db.commit()\n\n def downgrade(self, force=False):\n \"Downgrade is not necessary as reason accepts NULL values\"\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.