code (string, lengths 13 to 1.2M) | order_type (string, 1 class) | original_example (dict) | step_ids (list, lengths 1 to 5)
---|---|---|---|
import tkinter
import webbrowser
ventana = tkinter.Tk()
ventana.geometry("1920x1080")
def test():
webbrowser.open_new_tab('Test.html')
boton1 = tkinter.Button(ventana,text ="WEB", width = 10, height=5, command = test );
boton2 = tkinter.Button(ventana,text ="boton2", width = 10, height=5);
boton3 = tkinter.Button(ventana,text ="boton3", width = 10, height=5);
boton1.grid(row = 3, column = 0)
boton2.grid(row = 4, column = 0)
boton3.grid(row = 5, column = 0)
ventana.mainloop()
|
normal
|
{
"blob_id": "8bf330dc7bee65ac9478722233477ebe5d0286c2",
"index": 1102,
"step-1": "<mask token>\n\n\ndef test():\n webbrowser.open_new_tab('Test.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\nventana.geometry('1920x1080')\n\n\ndef test():\n webbrowser.open_new_tab('Test.html')\n\n\n<mask token>\nboton1.grid(row=3, column=0)\nboton2.grid(row=4, column=0)\nboton3.grid(row=5, column=0)\nventana.mainloop()\n",
"step-3": "<mask token>\nventana = tkinter.Tk()\nventana.geometry('1920x1080')\n\n\ndef test():\n webbrowser.open_new_tab('Test.html')\n\n\nboton1 = tkinter.Button(ventana, text='WEB', width=10, height=5, command=test)\nboton2 = tkinter.Button(ventana, text='boton2', width=10, height=5)\nboton3 = tkinter.Button(ventana, text='boton3', width=10, height=5)\nboton1.grid(row=3, column=0)\nboton2.grid(row=4, column=0)\nboton3.grid(row=5, column=0)\nventana.mainloop()\n",
"step-4": "import tkinter\nimport webbrowser\nventana = tkinter.Tk()\nventana.geometry('1920x1080')\n\n\ndef test():\n webbrowser.open_new_tab('Test.html')\n\n\nboton1 = tkinter.Button(ventana, text='WEB', width=10, height=5, command=test)\nboton2 = tkinter.Button(ventana, text='boton2', width=10, height=5)\nboton3 = tkinter.Button(ventana, text='boton3', width=10, height=5)\nboton1.grid(row=3, column=0)\nboton2.grid(row=4, column=0)\nboton3.grid(row=5, column=0)\nventana.mainloop()\n",
"step-5": "import tkinter\r\nimport webbrowser\r\nventana = tkinter.Tk()\r\nventana.geometry(\"1920x1080\")\r\n\r\ndef test():\r\n webbrowser.open_new_tab('Test.html')\r\n\r\nboton1 = tkinter.Button(ventana,text =\"WEB\", width = 10, height=5, command = test );\r\nboton2 = tkinter.Button(ventana,text =\"boton2\", width = 10, height=5);\r\nboton3 = tkinter.Button(ventana,text =\"boton3\", width = 10, height=5);\r\n\r\n\r\nboton1.grid(row = 3, column = 0)\r\nboton2.grid(row = 4, column = 0)\r\nboton3.grid(row = 5, column = 0)\r\n\r\nventana.mainloop()\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import gym
import random
import numpy as np
import statistics
from collections import Counter
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
#setup the Cartpole environment
env = gym.make("CartPole-v0")
env.reset()
#----------Explore CartPole-------------#
#exploring the observations, rewards, actions
def explore_cartpole():
for i_episode in range(2):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
print("Action: ", action, "Rewards", reward)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
#explore_cartpole()
#----------Collect Training Data-------------#
#collect data from successful games by running x games
#successful would be say, lasting more than 100 frames
num_games = 20000
num_episodes = 201 #game would end at 200 episodes
min_score = 75
def initial_games():
train_data = []
train_scores = []
#running our initial set of games
for _ in range(num_games):
game_data = []
prev_obs = []
score = 0
#running the game, frame by frame
for _ in range(num_episodes):
#choosing actions: randomly
action = random.randrange(0,2)
observation, reward, done, info = env.step(action)
if len(prev_obs) > 0:
game_data.append([prev_obs, action])
prev_obs = observation
score += reward
if done:
#print("Score was: ", score)
break
#if the score was above the threshold
#we will save the game in our training data
#hence training on the better games
if score >= min_score :
train_scores.append(score)
#converting the data into one-hot output
for i in game_data:
if i[1] == 0:
output = [1, 0]
else:
output = [0, 1]
train_data.append([i[0], output])
env.reset()
return train_data
#----------Build the FC NN model-------------#
#building a simple multi-layer fully connected model
#this model can be generally used to play games like cartpole
#would try training the model on other games in OpenAI environment
def nn_model(input_size):
network = input_data(shape=[None, input_size, 1], name='input')
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=1e-3, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(network, tensorboard_dir='log')
return model
#----------Train the model-------------#
def train_model(train_data, model=False):
x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][0]),1)
y = [i[1] for i in train_data]
if not model:
model = nn_model(input_size = len(x[0]))
model.fit({'input': x}, {'targets': y}, n_epoch = 5, snapshot_step=500,
show_metric = True, run_id = 'openai_learning')
return model
train_data = initial_games()
#print("Size of training data",len(train_data))
model = train_model(train_data)
#----------Predict actions for the games-------------#
num_final_games = 10
target_episodes = 201
all_rewards = []
all_actions = []
for _ in range(num_final_games):
total_score = 0
prev_obs = []
env.reset()
for _ in range(target_episodes):
#env.render()
#instead of randomly choosing the action, predict the actions
if len(prev_obs) == 0:
action = random.randrange(0,2)
else:
action = np.argmax(model.predict(prev_obs.reshape(-1,len(prev_obs),1))[0])
all_actions.append(action)
#let's run the game
observation, reward, done, info = env.step(action)
prev_obs = observation
total_score += reward
if done:
break
all_rewards.append(total_score)
#----------Print results-------------#
print('Average reward:',np.mean(all_rewards), '+-', np.std(all_rewards))
print('Max reward:', max(all_rewards))
|
normal
|
{
"blob_id": "7789e54acc02fe0277ff80ce14efbcdc4ee6e7f1",
"index": 8009,
"step-1": "<mask token>\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\n<mask token>\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\n<mask token>\n",
"step-2": "<mask token>\nenv.reset()\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\n<mask token>\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\ndef nn_model(input_size):\n network = input_data(shape=[None, input_size, 1], name='input')\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n return model\n\n\ndef train_model(train_data, model=False):\n x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][\n 0]), 1)\n y = [i[1] for i in train_data]\n if not model:\n model = nn_model(input_size=len(x[0]))\n model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,\n show_metric=True, run_id='openai_learning')\n return model\n\n\n<mask token>\nfor _ in range(num_final_games):\n total_score = 0\n prev_obs = []\n env.reset()\n for _ in range(target_episodes):\n if len(prev_obs) == 0:\n action = random.randrange(0, 2)\n else:\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(\n prev_obs), 1))[0])\n all_actions.append(action)\n observation, reward, done, info = env.step(action)\n prev_obs = observation\n total_score += reward\n if done:\n break\n all_rewards.append(total_score)\nprint('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n",
"step-3": "<mask token>\nenv = gym.make('CartPole-v0')\nenv.reset()\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\nnum_games = 20000\nnum_episodes = 201\nmin_score = 75\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\ndef nn_model(input_size):\n network = input_data(shape=[None, input_size, 1], name='input')\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n return model\n\n\ndef train_model(train_data, model=False):\n x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][\n 0]), 1)\n y = [i[1] for i in train_data]\n if not model:\n model = nn_model(input_size=len(x[0]))\n model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,\n show_metric=True, run_id='openai_learning')\n return model\n\n\ntrain_data = initial_games()\nmodel = train_model(train_data)\nnum_final_games = 10\ntarget_episodes = 201\nall_rewards = []\nall_actions = []\nfor _ in range(num_final_games):\n total_score = 0\n prev_obs = []\n env.reset()\n for _ in range(target_episodes):\n if len(prev_obs) == 0:\n action = random.randrange(0, 2)\n else:\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(\n prev_obs), 1))[0])\n all_actions.append(action)\n observation, reward, done, info = env.step(action)\n prev_obs = observation\n total_score += reward\n if done:\n break\n all_rewards.append(total_score)\nprint('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n",
"step-4": "import gym\nimport random\nimport numpy as np\nimport statistics\nfrom collections import Counter\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\nenv = gym.make('CartPole-v0')\nenv.reset()\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\nnum_games = 20000\nnum_episodes = 201\nmin_score = 75\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\ndef nn_model(input_size):\n network = input_data(shape=[None, input_size, 1], name='input')\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n return model\n\n\ndef train_model(train_data, model=False):\n x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][\n 0]), 1)\n y = [i[1] for i in train_data]\n if not model:\n model = nn_model(input_size=len(x[0]))\n model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,\n show_metric=True, run_id='openai_learning')\n return model\n\n\ntrain_data = initial_games()\nmodel = train_model(train_data)\nnum_final_games = 10\ntarget_episodes = 201\nall_rewards = []\nall_actions = []\nfor _ in range(num_final_games):\n total_score = 0\n prev_obs = []\n env.reset()\n for _ in range(target_episodes):\n if len(prev_obs) == 0:\n action = random.randrange(0, 2)\n else:\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(\n prev_obs), 1))[0])\n all_actions.append(action)\n observation, reward, done, info = env.step(action)\n prev_obs = observation\n total_score += reward\n if done:\n break\n all_rewards.append(total_score)\nprint('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n",
"step-5": "import gym\nimport random \nimport numpy as np\nimport statistics\nfrom collections import Counter\n\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\n\n#setup the Cartpole environment\nenv = gym.make(\"CartPole-v0\")\nenv.reset()\n\n\n#----------Explore CartPole-------------#\n#exploring the observations, rewards, actions\ndef explore_cartpole():\n\tfor i_episode in range(2):\n\t observation = env.reset()\n\t for t in range(100):\n\t env.render()\n\t print(observation)\n\t action = env.action_space.sample()\n\t observation, reward, done, info = env.step(action)\n\t print(\"Action: \", action, \"Rewards\", reward)\n\t if done:\n\t print(\"Episode finished after {} timesteps\".format(t+1))\n\t break\n\n#explore_cartpole() \n\n#----------Collect Training Data-------------#\n#collect data from successful games by running x games\n#successful would be say, lasting more than 100 frames\nnum_games = 20000\nnum_episodes = 201 #game would end at 200 episodes\nmin_score = 75\n\ndef initial_games():\n\n\ttrain_data = []\n\ttrain_scores = []\n\n\t#running our initial set of games\n\tfor _ in range(num_games):\n\t\tgame_data = []\n\t\tprev_obs = []\n\t\tscore = 0\n\n\t\t#running the game, frame by frame\n\t\tfor _ in range(num_episodes):\n\t\t\t#choosing actions: randomly\n\t\t\taction = random.randrange(0,2)\n\t\t\tobservation, reward, done, info = env.step(action)\n\n\t\t\tif len(prev_obs) > 0: \n\t\t\t\tgame_data.append([prev_obs, action])\n\n\t\t\tprev_obs = observation\n\t\t\tscore += reward\n\n\t\t\tif done:\n\t\t\t\t#print(\"Score was: \", score)\n\t\t\t\tbreak\n\n\t\t#if the score was above the threshold\n\t\t#we will save the game in our training data\n\t\t#hence training on the better games\n\t\tif score >= min_score :\n\t\t\ttrain_scores.append(score)\n\t\t\t#converting the data into one-hot output\t\t\n\t\t\tfor i in game_data:\t\t\t\n\t\t\t\tif i[1] == 0:\n\t\t\t\t\toutput = [1, 0]\n\t\t\t\telse:\n\t\t\t\t\toutput = [0, 1]\n\t\t\t\t\n\t\t\t\ttrain_data.append([i[0], output])\n\n\t\tenv.reset()\n\n\treturn train_data\n\n\n#----------Build the FC NN model-------------#\n#building a simple multi-layer fully connected model\n#this model can be generally used to play games like cartpole\n#would try training the model on other games in OpenAI environment\n\ndef nn_model(input_size):\n\n network = input_data(shape=[None, input_size, 1], name='input')\n\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=1e-3, loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n\n return model\n\n\n\n#----------Train the model-------------#\ndef train_model(train_data, model=False):\n\n\tx = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][0]),1)\n\ty = [i[1] for i in train_data]\n\n\tif not model:\n\t\tmodel = nn_model(input_size = len(x[0]))\n\n\tmodel.fit({'input': x}, {'targets': y}, n_epoch = 5, 
snapshot_step=500, \n\t\tshow_metric = True, run_id = 'openai_learning')\n\treturn model\n\ntrain_data = initial_games()\n#print(\"Size of training data\",len(train_data))\n\nmodel = train_model(train_data)\n\n#----------Predict actions for the games-------------#\nnum_final_games = 10\ntarget_episodes = 201\nall_rewards = []\nall_actions = []\n\nfor _ in range(num_final_games):\n\ttotal_score = 0\n\tprev_obs = []\n\tenv.reset()\n\n\tfor _ in range(target_episodes):\n\n\t\t#env.render()\n\n\t\t#instead of randomly choosing the action, predict the actions\n\t\tif len(prev_obs) == 0:\n\t\t\taction = random.randrange(0,2)\n\t\telse:\n\t\t\taction = np.argmax(model.predict(prev_obs.reshape(-1,len(prev_obs),1))[0])\n\t\t\n\t\tall_actions.append(action)\n\n\t\t#let's run the game\n\t\tobservation, reward, done, info = env.step(action)\n\t\t\n\t\tprev_obs = observation\n\t\ttotal_score += reward\n\n\t\tif done: \n\t\t\tbreak\n\n\tall_rewards.append(total_score)\n\n#----------Print results-------------#\nprint('Average reward:',np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
import time,random,os
from tkinter import *
def restart():
root.destroy()
os.startfile(r"data\programs\game with tkinter.py")
def disableButton():
global l,restartButton,start
b1.config(state="disabled")
b2.config(state="disabled")
b3.config(state="disabled")
b4.config(state="disabled")
b5.config(state="disabled")
b6.config(state="disabled")
b7.config(state="disabled")
b8.config(state="disabled")
b9.config(state="disabled")
start.config(state="disabled")
restartButton.config(state="normal",command=restart,text=" --->press to restart<--- ")
def funForB1():
global notPresentList,element,l,start
ans = notPresentList[0] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB2():
global notPresentList,element,l
ans = notPresentList[1] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB3():
global notPresentList,element,l
ans = notPresentList[2] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB4():
global notPresentList,element,l
ans = notPresentList[3] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB5():
global notPresentList,element,l
ans = notPresentList[4] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB6():
global notPresentList,element,l
ans = notPresentList[5] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB7():
global notPresentList,element,l
ans = notPresentList[6] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB8():
global notPresentList,element,l
ans = notPresentList[7] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB9():
global notPresentList,element,l
ans = notPresentList[8] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def present():
with open(r"data\database\present.txt", "r") as file:
content = file.read().split("\n")
presentList = [
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)]
]
element = presentList[random.randint(0,8)]
return (presentList,element)
def notPresent():
global buttonList,start
with open(r"data\database\notpresent.txt","r") as file:
content = file.read().split("\n")
notPresentList = [
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
]
start.config(state="normal")
obj = present()
presentList,element = obj[0],obj[1]
for i in range(9):
buttonList[i].config(text = presentList[i], state="disabled")
notPresentList.insert(random.randint(0,9),element)
return (notPresentList,element)
def start():
global buttonList,start,notPresentList,element
start.config(state="disabled")
for i in range(9):
buttonList[i].config(text = notPresentList[i], state="normal")
# main
root =Tk()
root.title("Memory Game")
root.geometry("400x500")
root.resizable(0,0)
root.config(bg="white")
image1 = PhotoImage(file=r"data\img\smiley.png")
image2 = PhotoImage(file=r"data\img\pleading.png")
start = Button(root, bg="black", fg="white", text="-->Start<--", font="comicsansms 15 bold", command=start, relief="raised",state="normal", bd=2)
start.place(x=150,y=110)
frameMain = Frame(root, relief="flat", bd=1, background="white", width=400, height=417)
frameMain.place(x=10, y=150)
image=PhotoImage(file=r"data\img\emoji.png")
l=Label(root,image=image ,font="comicsansms 15 bold", fg="black", bg="white")
l.place(x=180,y=5)
b1=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB1)
b2=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB2)
b3=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB3)
b4=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB4)
b5=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB5)
b6=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB6)
b7=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB7)
b8=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB8)
b9=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB9)
b1.place(x=10,y=16)
b2.place(x=150,y=16)
b3.place(x=290,y=16)
b4.place(x=10,y=110)
b5.place(x=150,y=110)
b6.place(x=290,y=110)
b7.place(x=10,y=204)
b8.place(x=150,y=204)
b9.place(x=290,y=204)
buttonList = [b1,b2,b3,b4,b5,b6,b7,b8,b9]
restartButton = Button(root, bg="teal", fg="white", text="!!! Remember these items !!!", font="comicsansms 15 bold", relief="raised",state="disabled",disabledforeground="white")
restartButton.place(x=60,y=460)
obj = notPresent()
notPresentList,element = obj[0],obj[1]
root.mainloop()
|
normal
|
{
"blob_id": "e70c5c9a62faa4c501c0f103ce0a0a419aaf4301",
"index": 2096,
"step-1": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\n<mask token>\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\n<mask token>\n\n\ndef funForB2():\n global notPresentList, element, l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\ndef funForB1():\n global notPresentList, element, l, start\n ans = notPresentList[0] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB2():\n global notPresentList, element, l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\ndef start():\n global buttonList, start, notPresentList, element\n start.config(state='disabled')\n for i in range(9):\n 
buttonList[i].config(text=notPresentList[i], state='normal')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\ndef funForB1():\n global notPresentList, element, l, start\n ans = notPresentList[0] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB2():\n global notPresentList, element, l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\ndef start():\n global buttonList, start, notPresentList, element\n start.config(state='disabled')\n for i in range(9):\n 
buttonList[i].config(text=notPresentList[i], state='normal')\n\n\n<mask token>\nroot.title('Memory Game')\nroot.geometry('400x500')\nroot.resizable(0, 0)\nroot.config(bg='white')\n<mask token>\nstart.place(x=150, y=110)\n<mask token>\nframeMain.place(x=10, y=150)\n<mask token>\nl.place(x=180, y=5)\n<mask token>\nb1.place(x=10, y=16)\nb2.place(x=150, y=16)\nb3.place(x=290, y=16)\nb4.place(x=10, y=110)\nb5.place(x=150, y=110)\nb6.place(x=290, y=110)\nb7.place(x=10, y=204)\nb8.place(x=150, y=204)\nb9.place(x=290, y=204)\n<mask token>\nrestartButton.place(x=60, y=460)\n<mask token>\nroot.mainloop()\n",
"step-5": "import time,random,os\nfrom tkinter import *\n\ndef restart():\n root.destroy()\n os.startfile(r\"data\\programs\\game with tkinter.py\")\n \ndef disableButton():\n global l,restartButton,start\n b1.config(state=\"disabled\")\n b2.config(state=\"disabled\")\n b3.config(state=\"disabled\")\n b4.config(state=\"disabled\")\n b5.config(state=\"disabled\")\n b6.config(state=\"disabled\")\n b7.config(state=\"disabled\")\n b8.config(state=\"disabled\")\n b9.config(state=\"disabled\")\n start.config(state=\"disabled\")\n restartButton.config(state=\"normal\",command=restart,text=\" --->press to restart<--- \")\n \ndef funForB1():\n global notPresentList,element,l,start\n ans = notPresentList[0] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB2():\n global notPresentList,element,l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB3():\n global notPresentList,element,l\n ans = notPresentList[2] == element\n if ans:\n \n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB4():\n global notPresentList,element,l\n ans = notPresentList[3] == element\n if ans:\n\n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB5():\n global notPresentList,element,l\n ans = notPresentList[4] == element\n if ans:\n\n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB6():\n global notPresentList,element,l\n ans = notPresentList[5] == element\n if ans:\n\n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB7():\n global notPresentList,element,l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB8():\n global notPresentList,element,l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB9():\n global notPresentList,element,l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open(r\"data\\database\\present.txt\", \"r\") as file:\n content = file.read().split(\"\\n\")\n presentList = [\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)]\n ]\n \n element = presentList[random.randint(0,8)]\n return (presentList,element)\n\ndef notPresent():\n global buttonList,start\n with open(r\"data\\database\\notpresent.txt\",\"r\") as file:\n content = file.read().split(\"\\n\")\n notPresentList = [\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n ]\n start.config(state=\"normal\")\n obj = present()\n presentList,element = obj[0],obj[1]\n for i in range(9):\n buttonList[i].config(text = presentList[i], state=\"disabled\")\n notPresentList.insert(random.randint(0,9),element)\n\n return (notPresentList,element)\n\ndef start():\n global buttonList,start,notPresentList,element\n 
start.config(state=\"disabled\")\n\n for i in range(9):\n buttonList[i].config(text = notPresentList[i], state=\"normal\")\n\n \n \n\n \n# main\n\nroot =Tk()\nroot.title(\"Memory Game\")\nroot.geometry(\"400x500\")\nroot.resizable(0,0)\nroot.config(bg=\"white\")\n\nimage1 = PhotoImage(file=r\"data\\img\\smiley.png\")\nimage2 = PhotoImage(file=r\"data\\img\\pleading.png\")\n\n\nstart = Button(root, bg=\"black\", fg=\"white\", text=\"-->Start<--\", font=\"comicsansms 15 bold\", command=start, relief=\"raised\",state=\"normal\", bd=2)\nstart.place(x=150,y=110)\n\n\n\nframeMain = Frame(root, relief=\"flat\", bd=1, background=\"white\", width=400, height=417)\nframeMain.place(x=10, y=150)\n\n\nimage=PhotoImage(file=r\"data\\img\\emoji.png\")\nl=Label(root,image=image ,font=\"comicsansms 15 bold\", fg=\"black\", bg=\"white\")\nl.place(x=180,y=5)\n\nb1=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB1)\nb2=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB2)\nb3=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB3)\nb4=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB4)\nb5=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB5)\nb6=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB6)\nb7=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB7)\nb8=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB8)\nb9=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB9)\n\n\nb1.place(x=10,y=16)\nb2.place(x=150,y=16)\nb3.place(x=290,y=16)\nb4.place(x=10,y=110)\nb5.place(x=150,y=110)\nb6.place(x=290,y=110)\nb7.place(x=10,y=204)\nb8.place(x=150,y=204)\nb9.place(x=290,y=204)\n\nbuttonList = [b1,b2,b3,b4,b5,b6,b7,b8,b9]\n\n\nrestartButton = Button(root, bg=\"teal\", fg=\"white\", text=\"!!! Remember these items !!!\", font=\"comicsansms 15 bold\", relief=\"raised\",state=\"disabled\",disabledforeground=\"white\")\nrestartButton.place(x=60,y=460)\nobj = notPresent()\nnotPresentList,element = obj[0],obj[1]\n\nroot.mainloop()\n",
"step-ids": [
11,
12,
14,
15,
18
]
}
|
[
11,
12,
14,
15,
18
] |
from django.contrib import admin
from django.urls import path, include
from accounts import views
urlpatterns = [
path('google/login', views.google_login),
path('google/callback/', views.google_callback),
path('accounts/google/login/finish/', views.GoogleLogin.as_view(), name = 'google_login_todjango'),
]
|
normal
|
{
"blob_id": "68319663aad13b562e56b8ee25f25c7b548417df",
"index": 4739,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('google/login', views.google_login), path(\n 'google/callback/', views.google_callback), path(\n 'accounts/google/login/finish/', views.GoogleLogin.as_view(), name=\n 'google_login_todjango')]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path, include\nfrom accounts import views\nurlpatterns = [path('google/login', views.google_login), path(\n 'google/callback/', views.google_callback), path(\n 'accounts/google/login/finish/', views.GoogleLogin.as_view(), name=\n 'google_login_todjango')]\n",
"step-4": "from django.contrib import admin\nfrom django.urls import path, include\n\nfrom accounts import views\n\nurlpatterns = [\n path('google/login', views.google_login),\n path('google/callback/', views.google_callback),\n path('accounts/google/login/finish/', views.GoogleLogin.as_view(), name = 'google_login_todjango'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import re
import datetime as dt
from datetime import datetime
import time
import random
import json
import sys
import requests
import os
import pickle
import cv2
import numpy as np
import cPickle
import multiprocessing as mp
import math
root = "/datasets/sagarj/instaSample6000/"
# post_dir = root + "/"
videos_dir = root + "videos/"
#frame_dir = root + "AestheticSamples/"
sample_dir = root + "finesamples/"
sampledLog = "../Logs/instaLongSampling.txt"
def sampleVideo(videoPath , facesPath , postID , rate):
cap = cv2.VideoCapture(videoPath)
#print videoPath
totFrames = 0
i = 0
framesRead = 0
framesSaved = 0
frameRate = cap.get(cv2.cv.CV_CAP_PROP_FPS)
if math.isnan(frameRate):
frameRate = int(24 * rate)
frameRate = int(frameRate*rate)
if frameRate == 0:
frameRate = int(24 * rate)
while True:
ret, frame = cap.read()
if ret:
framesRead += 1
procs = []
totFrames += 1
cv2.waitKey(20)
if totFrames%frameRate == 0:
i = int(totFrames/frameRate)
framesSaved +=1
imageName = facesPath + "/" + str(postID) + "+" + str(i) + ".jpg"
cv2.imwrite( imageName , frame)
logline = str(postID) + "," + imageName
#print logline
logfile = open(sampledLog, 'a+')
cPickle.dump(logline , logfile);
logfile.close()
else:
print "Done processing Post: %s with %d frames Read and %d saved at %d FPS"%(postID,framesRead,framesSaved,frameRate)
return framesSaved
# def readJson(path):
# f = open(path)
# data = json.loads(f.read())
# return data
# def getPosts(postsDir):
# crawledPosts = os.listdir(postsDir)
# posts = []
# for post in crawledPosts:
# record = readJson(postsDir + post)
# p = record['data']
# if isinstance(p,dict):
# posts.append(p['records'][0])
# return posts
# def getMappingDict(postList):
# mapping = dict()
# for p in postList:
# postId = p['postId']
# vidName = p['videoUrl'].split('/')[5].split('?')[0]
# mapping[postId] = vidName
# return mapping
if __name__ == '__main__':
#postList = getPosts(post_dir)
#mappingDict = getMappingDict(postList)
vidList = os.listdir(videos_dir)
for k in vidList:
postID = k.split('.')[0]
#sampledNumbers = sampleVideo(videos_dir+mappingDict[k] ,frame_dir , postID , 1)
sampledNumbers = sampleVideo(videos_dir+k ,sample_dir , postID , 1)
|
normal
|
{
"blob_id": "ac978accc821600ad8def04b9c7423fbe6759e43",
"index": 6203,
"step-1": "import re\nimport datetime as dt\nfrom datetime import datetime\nimport time\nimport random\nimport json\nimport sys\nimport requests\nimport os\nimport pickle\nimport cv2\nimport numpy as np\nimport cPickle\nimport multiprocessing as mp\nimport math\n\nroot = \"/datasets/sagarj/instaSample6000/\"\n\n# post_dir = root + \"/\"\nvideos_dir = root + \"videos/\"\n#frame_dir = root + \"AestheticSamples/\"\nsample_dir = root + \"finesamples/\"\n\nsampledLog = \"../Logs/instaLongSampling.txt\"\n\n\ndef sampleVideo(videoPath , facesPath , postID , rate):\n cap = cv2.VideoCapture(videoPath)\n #print videoPath\n totFrames = 0\n i = 0\n framesRead = 0\n framesSaved = 0\n frameRate = cap.get(cv2.cv.CV_CAP_PROP_FPS)\n\n if math.isnan(frameRate):\n frameRate = int(24 * rate)\n frameRate = int(frameRate*rate)\n if frameRate == 0:\n frameRate = int(24 * rate)\n while True:\n ret, frame = cap.read()\n if ret:\n framesRead += 1\n procs = []\n totFrames += 1\n cv2.waitKey(20)\n if totFrames%frameRate == 0:\n i = int(totFrames/frameRate)\n framesSaved +=1\n imageName = facesPath + \"/\" + str(postID) + \"+\" + str(i) + \".jpg\"\n cv2.imwrite( imageName , frame)\n logline = str(postID) + \",\" + imageName\n #print logline\n logfile = open(sampledLog, 'a+')\n cPickle.dump(logline , logfile);\n logfile.close()\n \n else:\n print \"Done processing Post: %s with %d frames Read and %d saved at %d FPS\"%(postID,framesRead,framesSaved,frameRate)\n return framesSaved\n\n# def readJson(path):\n# f = open(path)\n# data = json.loads(f.read())\n# return data\n\n# def getPosts(postsDir):\n# crawledPosts = os.listdir(postsDir)\n# posts = []\n# for post in crawledPosts:\n# record = readJson(postsDir + post)\n# p = record['data']\n# if isinstance(p,dict):\n# posts.append(p['records'][0])\n# return posts\n\n# def getMappingDict(postList):\n# mapping = dict()\n# for p in postList:\n# postId = p['postId']\n# vidName = p['videoUrl'].split('/')[5].split('?')[0]\n# mapping[postId] = vidName\n# return mapping\n\nif __name__ == '__main__':\n \n #postList = getPosts(post_dir)\n #mappingDict = getMappingDict(postList)\n vidList = os.listdir(videos_dir)\n \n for k in vidList: \n postID = k.split('.')[0]\n #sampledNumbers = sampleVideo(videos_dir+mappingDict[k] ,frame_dir , postID , 1)\n sampledNumbers = sampleVideo(videos_dir+k ,sample_dir , postID , 1)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#! /usr/bin/env python
import os
import glob
import math
from array import array
import sys
import time
import subprocess
import ROOT
mass=[600,700,800,900,1000]
cprime=[01,02,03,05,07,10]
BRnew=[00,01,02,03,04,05]
for i in range(len(mass)):
for j in range(len(cprime)):
for k in range(len(BRnew)):
command="hadd -f cards_combo/higgsCombinehwwlvj_pval_exp_ggH%03d_combo_%02d_%02d_unbin.ProfileLikelihood.mH%03d.root cards_combo/higgsCombinehwwlvj_pval_exp_ggH%03d_combo_%02d_%02d_unbin_*"%(mass[i],cprime[j],BRnew[k],mass[i],mass[i],cprime[j],BRnew[k]);
os.system(command);
|
normal
|
{
"blob_id": "a9e5d4d48f96974da772f47a4c20ebc96bc31d85",
"index": 8740,
"step-1": "#! /usr/bin/env python\nimport os\nimport glob\nimport math\nfrom array import array\nimport sys\nimport time\nimport subprocess\nimport ROOT\n\nmass=[600,700,800,900,1000]\ncprime=[01,02,03,05,07,10]\nBRnew=[00,01,02,03,04,05]\n\nfor i in range(len(mass)):\n for j in range(len(cprime)):\n for k in range(len(BRnew)):\n\n command=\"hadd -f cards_combo/higgsCombinehwwlvj_pval_exp_ggH%03d_combo_%02d_%02d_unbin.ProfileLikelihood.mH%03d.root cards_combo/higgsCombinehwwlvj_pval_exp_ggH%03d_combo_%02d_%02d_unbin_*\"%(mass[i],cprime[j],BRnew[k],mass[i],mass[i],cprime[j],BRnew[k]);\n os.system(command);\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from model.area import AreaModel
from flask_restful import Resource, reqparse
from flask_jwt import jwt_required
class Area(Resource):
pareser = reqparse.RequestParser()
pareser.add_argument('name',
type = str,
required = True,
help = 'Area name is required')
@jwt_required()
def get(self, name):
area = AreaModel.search_area_byname(name)
if area:
return area.json(), 200
else:
return {'message': 'Area not found'}, 404
@jwt_required()
def put(self, name):
area = AreaModel.search_area_byname(name)
if area:
return {'message': 'Aread already exists'}, 404
else:
area = AreaModel(name)
area.save_to_db()
return area.json()
@jwt_required()
def delete(self,name):
area = AreaModel.search_area_byname(name)
if area:
area.delete()
return {'message':"Area with name '{}' deleted".format(name)}, 204
else:
return {'message': 'Wrong area name provided'}, 404
class AreaList(Resource):
@jwt_required()
def get(self):
return(list[map(lambda x: x.json() for x in StoreMode.query.all())])
|
normal
|
{
"blob_id": "4dcc0261abdb783c60471736567faf7db8b56190",
"index": 9548,
"step-1": "<mask token>\n\n\nclass Area(Resource):\n <mask token>\n pareser.add_argument('name', type=str, required=True, help=\n 'Area name is required')\n\n @jwt_required()\n def get(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n return area.json(), 200\n else:\n return {'message': 'Area not found'}, 404\n\n @jwt_required()\n def put(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n return {'message': 'Aread already exists'}, 404\n else:\n area = AreaModel(name)\n area.save_to_db()\n return area.json()\n <mask token>\n\n\nclass AreaList(Resource):\n\n @jwt_required()\n def get(self):\n return list[map(lambda x: x.json() for x in StoreMode.query.all())]\n",
"step-2": "<mask token>\n\n\nclass Area(Resource):\n <mask token>\n pareser.add_argument('name', type=str, required=True, help=\n 'Area name is required')\n\n @jwt_required()\n def get(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n return area.json(), 200\n else:\n return {'message': 'Area not found'}, 404\n\n @jwt_required()\n def put(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n return {'message': 'Aread already exists'}, 404\n else:\n area = AreaModel(name)\n area.save_to_db()\n return area.json()\n\n @jwt_required()\n def delete(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n area.delete()\n return {'message': \"Area with name '{}' deleted\".format(name)}, 204\n else:\n return {'message': 'Wrong area name provided'}, 404\n\n\nclass AreaList(Resource):\n\n @jwt_required()\n def get(self):\n return list[map(lambda x: x.json() for x in StoreMode.query.all())]\n",
"step-3": "<mask token>\n\n\nclass Area(Resource):\n pareser = reqparse.RequestParser()\n pareser.add_argument('name', type=str, required=True, help=\n 'Area name is required')\n\n @jwt_required()\n def get(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n return area.json(), 200\n else:\n return {'message': 'Area not found'}, 404\n\n @jwt_required()\n def put(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n return {'message': 'Aread already exists'}, 404\n else:\n area = AreaModel(name)\n area.save_to_db()\n return area.json()\n\n @jwt_required()\n def delete(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n area.delete()\n return {'message': \"Area with name '{}' deleted\".format(name)}, 204\n else:\n return {'message': 'Wrong area name provided'}, 404\n\n\nclass AreaList(Resource):\n\n @jwt_required()\n def get(self):\n return list[map(lambda x: x.json() for x in StoreMode.query.all())]\n",
"step-4": "from model.area import AreaModel\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\n\n\nclass Area(Resource):\n pareser = reqparse.RequestParser()\n pareser.add_argument('name', type=str, required=True, help=\n 'Area name is required')\n\n @jwt_required()\n def get(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n return area.json(), 200\n else:\n return {'message': 'Area not found'}, 404\n\n @jwt_required()\n def put(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n return {'message': 'Aread already exists'}, 404\n else:\n area = AreaModel(name)\n area.save_to_db()\n return area.json()\n\n @jwt_required()\n def delete(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n area.delete()\n return {'message': \"Area with name '{}' deleted\".format(name)}, 204\n else:\n return {'message': 'Wrong area name provided'}, 404\n\n\nclass AreaList(Resource):\n\n @jwt_required()\n def get(self):\n return list[map(lambda x: x.json() for x in StoreMode.query.all())]\n",
"step-5": "from model.area import AreaModel\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\n\nclass Area(Resource):\n pareser = reqparse.RequestParser()\n pareser.add_argument('name', \n type = str,\n required = True,\n help = 'Area name is required')\n\n @jwt_required()\n def get(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n return area.json(), 200\n else:\n return {'message': 'Area not found'}, 404\n \n @jwt_required()\n def put(self, name):\n area = AreaModel.search_area_byname(name)\n if area:\n return {'message': 'Aread already exists'}, 404\n else:\n area = AreaModel(name)\n area.save_to_db()\n return area.json()\n\n @jwt_required()\n def delete(self,name):\n area = AreaModel.search_area_byname(name)\n if area:\n area.delete()\n return {'message':\"Area with name '{}' deleted\".format(name)}, 204\n else:\n return {'message': 'Wrong area name provided'}, 404\n\nclass AreaList(Resource):\n @jwt_required()\n def get(self):\n return(list[map(lambda x: x.json() for x in StoreMode.query.all())])",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
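A minimal sketch of wiring these resources into an app. The module path, URL patterns, and secret key are assumptions — the file above only defines the resources — and the JWT auth callbacks are left as a comment since they live elsewhere in such projects.

from flask import Flask
from flask_restful import Api
from resources.area import Area, AreaList  # module path is an assumption

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'     # placeholder
api = Api(app)

# flask_jwt setup would go here, e.g.:
# from flask_jwt import JWT
# jwt = JWT(app, authenticate, identity)   # authenticate/identity defined elsewhere

api.add_resource(Area, '/area/<string:name>')
api.add_resource(AreaList, '/areas')

if __name__ == '__main__':
    app.run(debug=True)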
#!/usr/bin/python
#
# Author: Johnson Kachikaran ([email protected])
# Date: 7th August 2016
# Google Drive API:
# https://developers.google.com/drive/v3/reference/
# https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/
"""
Includes functions to integrate with a user's Google drive. The results and implementation is based on the API
provided by the Google Drive API:
https://developers.google.com/drive/v3/reference/
https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/
"""
import io
import os
import threading
from googleapiclient.http import MediaIoBaseDownload
from colorker.security import CredentialManager
from colorker.settings import STORAGE
def list_files(query=None, order_by=None, files=False, user_settings=None):
drive_service = CredentialManager.get_client_drive_service(user_settings)
response = drive_service.files().list(
orderBy=order_by, q=query, pageSize=1000,
fields='nextPageToken, files(id, name, mimeType, fileExtension, parents)').execute(num_retries=3)
result, resources, names, parents = [], [], {}, {}
for drive_file in response.get('files', []):
names[str(drive_file['id'])] = str(drive_file['name'])
parents[str(drive_file['id'])] = drive_file.get('parents', [])
resources.append({'id': drive_file['id'], 'name': drive_file['name'],
'parents': [str(parent) for parent in drive_file.get('parents', [])],
'mimeType': drive_file['mimeType']})
while response.get('nextPageToken', None):
drive_files = drive_service.files()
response = drive_files.list(orderBy=order_by, q=query, pageSize=1000, pageToken=response['nextPageToken'],
fields='nextPageToken, files(id, name, mimeType, fileExtension, parents)').execute(num_retries=3)
for drive_file in response.get('files', []):
names[str(drive_file['id'])] = str(drive_file['name'])
parents[str(drive_file['id'])] = drive_file.get('parents', [])
resources.append({'id': drive_file['id'], 'name': drive_file['name'],
'parents': [str(parent) for parent in drive_file.get('parents', [])],
'mimeType': drive_file['mimeType']})
    for resource in resources:
        if resource['parents']:
            for parent in resource['parents']:
                path = str(names.get(parent, '')) + str('/') + str(resource['name'])
                while parents.get(parent, []):
                    parent = str(parents[parent][0])
                    path = str(names.get(parent, '')) + str('/') + path
                resource['name'] = path
                # append unless files-only mode is on and this is a folder
                if not files or resource['mimeType'] != 'application/vnd.google-apps.folder':
                    result.append(resource)
        elif not files or resource['mimeType'] != 'application/vnd.google-apps.folder':
            result.append(resource)
return result
def get_metadata(file_id, user_settings=None):
"""
Obtains the metadata of a file
:param str file_id: the identifier of the file whose metadata is needed
:param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
If one is not provided, then this method must be invoked by an EngineThread
which defines the settings
:return: metadata of the file including id, mimeType, size, parents, kind, fileExtension, and webContentLink
"""
drive_service = CredentialManager.get_client_drive_service(user_settings)
files_service = drive_service.files().get(
fileId=file_id, fields='id, mimeType, size, parents, kind, name, fileExtension, webContentLink')
return files_service.execute(num_retries=3)
def get_file_contents(file_id, meta_err=False, user_settings=None):
"""
Obtains the contents of a file as a list of dictionaries. File type of the requested file must be a csv or a
Google fusion table.
:param str file_id: the identifier of the file whose content is needed
:param bool meta_err: optional, internal use only
:param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
If one is not provided, then this method must be invoked by an EngineThread
which defines the settings
:return: list of dictionaries where each dictionary is a row in the file
:rtype: list
"""
metadata = get_metadata(file_id, user_settings)
if (metadata.get('fileExtension', None) == 'csv' or metadata.get('mimeType', None) == 'text/csv') and metadata.get(
'webContentLink', None):
drive_service = CredentialManager.get_client_drive_service(user_settings)
if user_settings is None:
user_settings = threading.current_thread().settings
temp_dir_path = user_settings.get(STORAGE.TEMPORARY.LOCAL, None)
if not os.path.exists(temp_dir_path):
os.makedirs(temp_dir_path)
file_path = temp_dir_path + str(file_id) + ".csv"
if not os.path.exists(file_path):
request = drive_service.files().get_media(fileId=file_id)
fh = io.FileIO(file_path, mode='wb')
downloader = MediaIoBaseDownload(fh, request, chunksize=1024 * 1024)
done = False
while done is False:
status, done = downloader.next_chunk()
fh.close()
header, rows = [], []
with open(file_path, 'rb') as csv_file:
for line in csv_file.readlines():
if not header:
header = [str(heading).strip() for heading in str(line).split(',')]
else:
row = line.split(',')
row_dict = {}
for index, column in enumerate(row):
row_dict[header[index]] = str(column).strip()
rows.append(row_dict)
return rows
elif metadata.get('mimeType', None) == 'application/vnd.google-apps.fusiontable':
ft_service = CredentialManager.get_client_fusion_table_service(user_settings)
query = ft_service.query()
table = query.sql(sql='SELECT * FROM ' + str(file_id), hdrs=False).execute(num_retries=3)
result_rows = []
columns = [str(column) for column in table['columns']]
rows = table['rows']
for row in rows:
result_row = {}
for index, cell in enumerate(row):
result_row[columns[index]] = str(cell) if isinstance(cell, unicode) else cell
result_rows.append(result_row)
return result_rows
elif meta_err:
raise Exception('Unsupported file type for the file - ' + str(metadata['name'] + '.'))
return []
|
normal
|
{
"blob_id": "033719313f92aaf3c62eb1b07a9aa08f13c7bb6e",
"index": 2600,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_metadata(file_id, user_settings=None):\n \"\"\"\n Obtains the metadata of a file\n\n :param str file_id: the identifier of the file whose metadata is needed\n :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.\n If one is not provided, then this method must be invoked by an EngineThread\n which defines the settings\n :return: metadata of the file including id, mimeType, size, parents, kind, fileExtension, and webContentLink\n \"\"\"\n drive_service = CredentialManager.get_client_drive_service(user_settings)\n files_service = drive_service.files().get(fileId=file_id, fields=\n 'id, mimeType, size, parents, kind, name, fileExtension, webContentLink'\n )\n return files_service.execute(num_retries=3)\n\n\ndef get_file_contents(file_id, meta_err=False, user_settings=None):\n \"\"\"\n Obtains the contents of a file as a list of dictionaries. File type of the requested file must be a csv or a\n Google fusion table.\n\n :param str file_id: the identifier of the file whose content is needed\n :param bool meta_err: optional, internal use only\n :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.\n If one is not provided, then this method must be invoked by an EngineThread\n which defines the settings\n\n :return: list of dictionaries where each dictionary is a row in the file\n :rtype: list\n \"\"\"\n metadata = get_metadata(file_id, user_settings)\n if (metadata.get('fileExtension', None) == 'csv' or metadata.get(\n 'mimeType', None) == 'text/csv') and metadata.get('webContentLink',\n None):\n drive_service = CredentialManager.get_client_drive_service(\n user_settings)\n if user_settings is None:\n user_settings = threading.current_thread().settings\n temp_dir_path = user_settings.get(STORAGE.TEMPORARY.LOCAL, None)\n if not os.path.exists(temp_dir_path):\n os.makedirs(temp_dir_path)\n file_path = temp_dir_path + str(file_id) + '.csv'\n if not os.path.exists(file_path):\n request = drive_service.files().get_media(fileId=file_id)\n fh = io.FileIO(file_path, mode='wb')\n downloader = MediaIoBaseDownload(fh, request, chunksize=1024 * 1024\n )\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n fh.close()\n header, rows = [], []\n with open(file_path, 'rb') as csv_file:\n for line in csv_file.readlines():\n if not header:\n header = [str(heading).strip() for heading in str(line)\n .split(',')]\n else:\n row = line.split(',')\n row_dict = {}\n for index, column in enumerate(row):\n row_dict[header[index]] = str(column).strip()\n rows.append(row_dict)\n return rows\n elif metadata.get('mimeType', None\n ) == 'application/vnd.google-apps.fusiontable':\n ft_service = CredentialManager.get_client_fusion_table_service(\n user_settings)\n query = ft_service.query()\n table = query.sql(sql='SELECT * FROM ' + str(file_id), hdrs=False\n ).execute(num_retries=3)\n result_rows = []\n columns = [str(column) for column in table['columns']]\n rows = table['rows']\n for row in rows:\n result_row = {}\n for index, cell in enumerate(row):\n result_row[columns[index]] = str(cell) if isinstance(cell,\n unicode) else cell\n result_rows.append(result_row)\n return result_rows\n elif meta_err:\n raise Exception('Unsupported file type for the file - ' + str(\n metadata['name'] + '.'))\n return []\n",
"step-3": "<mask token>\n\n\ndef list_files(query=None, order_by=None, files=False, user_settings=None):\n drive_service = CredentialManager.get_client_drive_service(user_settings)\n response = drive_service.files().list(orderBy=order_by, q=query,\n pageSize=1000, fields=\n 'nextPageToken, files(id, name, mimeType, fileExtension, parents)'\n ).execute(num_retries=3)\n result, resources, names, parents = [], [], {}, {}\n for drive_file in response.get('files', []):\n names[str(drive_file['id'])] = str(drive_file['name'])\n parents[str(drive_file['id'])] = drive_file.get('parents', [])\n resources.append({'id': drive_file['id'], 'name': drive_file['name'\n ], 'parents': [str(parent) for parent in drive_file.get(\n 'parents', [])], 'mimeType': drive_file['mimeType']})\n while response.get('nextPageToken', None):\n drive_files = drive_service.files()\n response = drive_files.list(orderBy=order_by, q=query, pageSize=\n 1000, pageToken=response['nextPageToken'], fields=\n 'nextPageToken, files(id, name, mimeType, fileExtension, parents)'\n ).execute(num_retries=3)\n for drive_file in response.get('files', []):\n names[str(drive_file['id'])] = str(drive_file['name'])\n parents[str(drive_file['id'])] = drive_file.get('parents', [])\n resources.append({'id': drive_file['id'], 'name': drive_file[\n 'name'], 'parents': [str(parent) for parent in drive_file.\n get('parents', [])], 'mimeType': drive_file['mimeType']})\n for resource in resources:\n if resource['parents']:\n for parent in resource['parents']:\n path = str(names.get(parent, '')) + str('/') + str(resource\n ['name'])\n while parents.get(parent, []):\n parent = str(parents[parent][0])\n path = str(names.get(parent, '')) + str('/') + path\n resource['name'] = path\n if files:\n if resource['mimeType'\n ] != 'application/vnd.google-apps.folder':\n result.append(resource)\n else:\n result.append(resource)\n elif files:\n if resource['mimeType'] != 'application/vnd.google-apps.folder':\n result.append(resource)\n else:\n result.append(resource)\n return result\n\n\ndef get_metadata(file_id, user_settings=None):\n \"\"\"\n Obtains the metadata of a file\n\n :param str file_id: the identifier of the file whose metadata is needed\n :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.\n If one is not provided, then this method must be invoked by an EngineThread\n which defines the settings\n :return: metadata of the file including id, mimeType, size, parents, kind, fileExtension, and webContentLink\n \"\"\"\n drive_service = CredentialManager.get_client_drive_service(user_settings)\n files_service = drive_service.files().get(fileId=file_id, fields=\n 'id, mimeType, size, parents, kind, name, fileExtension, webContentLink'\n )\n return files_service.execute(num_retries=3)\n\n\ndef get_file_contents(file_id, meta_err=False, user_settings=None):\n \"\"\"\n Obtains the contents of a file as a list of dictionaries. 
File type of the requested file must be a csv or a\n Google fusion table.\n\n :param str file_id: the identifier of the file whose content is needed\n :param bool meta_err: optional, internal use only\n :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.\n If one is not provided, then this method must be invoked by an EngineThread\n which defines the settings\n\n :return: list of dictionaries where each dictionary is a row in the file\n :rtype: list\n \"\"\"\n metadata = get_metadata(file_id, user_settings)\n if (metadata.get('fileExtension', None) == 'csv' or metadata.get(\n 'mimeType', None) == 'text/csv') and metadata.get('webContentLink',\n None):\n drive_service = CredentialManager.get_client_drive_service(\n user_settings)\n if user_settings is None:\n user_settings = threading.current_thread().settings\n temp_dir_path = user_settings.get(STORAGE.TEMPORARY.LOCAL, None)\n if not os.path.exists(temp_dir_path):\n os.makedirs(temp_dir_path)\n file_path = temp_dir_path + str(file_id) + '.csv'\n if not os.path.exists(file_path):\n request = drive_service.files().get_media(fileId=file_id)\n fh = io.FileIO(file_path, mode='wb')\n downloader = MediaIoBaseDownload(fh, request, chunksize=1024 * 1024\n )\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n fh.close()\n header, rows = [], []\n with open(file_path, 'rb') as csv_file:\n for line in csv_file.readlines():\n if not header:\n header = [str(heading).strip() for heading in str(line)\n .split(',')]\n else:\n row = line.split(',')\n row_dict = {}\n for index, column in enumerate(row):\n row_dict[header[index]] = str(column).strip()\n rows.append(row_dict)\n return rows\n elif metadata.get('mimeType', None\n ) == 'application/vnd.google-apps.fusiontable':\n ft_service = CredentialManager.get_client_fusion_table_service(\n user_settings)\n query = ft_service.query()\n table = query.sql(sql='SELECT * FROM ' + str(file_id), hdrs=False\n ).execute(num_retries=3)\n result_rows = []\n columns = [str(column) for column in table['columns']]\n rows = table['rows']\n for row in rows:\n result_row = {}\n for index, cell in enumerate(row):\n result_row[columns[index]] = str(cell) if isinstance(cell,\n unicode) else cell\n result_rows.append(result_row)\n return result_rows\n elif meta_err:\n raise Exception('Unsupported file type for the file - ' + str(\n metadata['name'] + '.'))\n return []\n",
"step-4": "<mask token>\nimport io\nimport os\nimport threading\nfrom googleapiclient.http import MediaIoBaseDownload\nfrom colorker.security import CredentialManager\nfrom colorker.settings import STORAGE\n\n\ndef list_files(query=None, order_by=None, files=False, user_settings=None):\n drive_service = CredentialManager.get_client_drive_service(user_settings)\n response = drive_service.files().list(orderBy=order_by, q=query,\n pageSize=1000, fields=\n 'nextPageToken, files(id, name, mimeType, fileExtension, parents)'\n ).execute(num_retries=3)\n result, resources, names, parents = [], [], {}, {}\n for drive_file in response.get('files', []):\n names[str(drive_file['id'])] = str(drive_file['name'])\n parents[str(drive_file['id'])] = drive_file.get('parents', [])\n resources.append({'id': drive_file['id'], 'name': drive_file['name'\n ], 'parents': [str(parent) for parent in drive_file.get(\n 'parents', [])], 'mimeType': drive_file['mimeType']})\n while response.get('nextPageToken', None):\n drive_files = drive_service.files()\n response = drive_files.list(orderBy=order_by, q=query, pageSize=\n 1000, pageToken=response['nextPageToken'], fields=\n 'nextPageToken, files(id, name, mimeType, fileExtension, parents)'\n ).execute(num_retries=3)\n for drive_file in response.get('files', []):\n names[str(drive_file['id'])] = str(drive_file['name'])\n parents[str(drive_file['id'])] = drive_file.get('parents', [])\n resources.append({'id': drive_file['id'], 'name': drive_file[\n 'name'], 'parents': [str(parent) for parent in drive_file.\n get('parents', [])], 'mimeType': drive_file['mimeType']})\n for resource in resources:\n if resource['parents']:\n for parent in resource['parents']:\n path = str(names.get(parent, '')) + str('/') + str(resource\n ['name'])\n while parents.get(parent, []):\n parent = str(parents[parent][0])\n path = str(names.get(parent, '')) + str('/') + path\n resource['name'] = path\n if files:\n if resource['mimeType'\n ] != 'application/vnd.google-apps.folder':\n result.append(resource)\n else:\n result.append(resource)\n elif files:\n if resource['mimeType'] != 'application/vnd.google-apps.folder':\n result.append(resource)\n else:\n result.append(resource)\n return result\n\n\ndef get_metadata(file_id, user_settings=None):\n \"\"\"\n Obtains the metadata of a file\n\n :param str file_id: the identifier of the file whose metadata is needed\n :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.\n If one is not provided, then this method must be invoked by an EngineThread\n which defines the settings\n :return: metadata of the file including id, mimeType, size, parents, kind, fileExtension, and webContentLink\n \"\"\"\n drive_service = CredentialManager.get_client_drive_service(user_settings)\n files_service = drive_service.files().get(fileId=file_id, fields=\n 'id, mimeType, size, parents, kind, name, fileExtension, webContentLink'\n )\n return files_service.execute(num_retries=3)\n\n\ndef get_file_contents(file_id, meta_err=False, user_settings=None):\n \"\"\"\n Obtains the contents of a file as a list of dictionaries. 
File type of the requested file must be a csv or a\n Google fusion table.\n\n :param str file_id: the identifier of the file whose content is needed\n :param bool meta_err: optional, internal use only\n :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.\n If one is not provided, then this method must be invoked by an EngineThread\n which defines the settings\n\n :return: list of dictionaries where each dictionary is a row in the file\n :rtype: list\n \"\"\"\n metadata = get_metadata(file_id, user_settings)\n if (metadata.get('fileExtension', None) == 'csv' or metadata.get(\n 'mimeType', None) == 'text/csv') and metadata.get('webContentLink',\n None):\n drive_service = CredentialManager.get_client_drive_service(\n user_settings)\n if user_settings is None:\n user_settings = threading.current_thread().settings\n temp_dir_path = user_settings.get(STORAGE.TEMPORARY.LOCAL, None)\n if not os.path.exists(temp_dir_path):\n os.makedirs(temp_dir_path)\n file_path = temp_dir_path + str(file_id) + '.csv'\n if not os.path.exists(file_path):\n request = drive_service.files().get_media(fileId=file_id)\n fh = io.FileIO(file_path, mode='wb')\n downloader = MediaIoBaseDownload(fh, request, chunksize=1024 * 1024\n )\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n fh.close()\n header, rows = [], []\n with open(file_path, 'rb') as csv_file:\n for line in csv_file.readlines():\n if not header:\n header = [str(heading).strip() for heading in str(line)\n .split(',')]\n else:\n row = line.split(',')\n row_dict = {}\n for index, column in enumerate(row):\n row_dict[header[index]] = str(column).strip()\n rows.append(row_dict)\n return rows\n elif metadata.get('mimeType', None\n ) == 'application/vnd.google-apps.fusiontable':\n ft_service = CredentialManager.get_client_fusion_table_service(\n user_settings)\n query = ft_service.query()\n table = query.sql(sql='SELECT * FROM ' + str(file_id), hdrs=False\n ).execute(num_retries=3)\n result_rows = []\n columns = [str(column) for column in table['columns']]\n rows = table['rows']\n for row in rows:\n result_row = {}\n for index, cell in enumerate(row):\n result_row[columns[index]] = str(cell) if isinstance(cell,\n unicode) else cell\n result_rows.append(result_row)\n return result_rows\n elif meta_err:\n raise Exception('Unsupported file type for the file - ' + str(\n metadata['name'] + '.'))\n return []\n",
"step-5": "#!/usr/bin/python\r\n#\r\n# Author: Johnson Kachikaran ([email protected])\r\n# Date: 7th August 2016\r\n# Google Drive API:\r\n# https://developers.google.com/drive/v3/reference/\r\n# https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/\r\n\"\"\"\r\nIncludes functions to integrate with a user's Google drive. The results and implementation is based on the API\r\nprovided by the Google Drive API:\r\n\r\nhttps://developers.google.com/drive/v3/reference/\r\n\r\nhttps://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/\r\n\"\"\"\r\nimport io\r\nimport os\r\nimport threading\r\n\r\nfrom googleapiclient.http import MediaIoBaseDownload\r\n\r\nfrom colorker.security import CredentialManager\r\nfrom colorker.settings import STORAGE\r\n\r\n\r\ndef list_files(query=None, order_by=None, files=False, user_settings=None):\r\n drive_service = CredentialManager.get_client_drive_service(user_settings)\r\n response = drive_service.files().list(\r\n orderBy=order_by, q=query, pageSize=1000,\r\n fields='nextPageToken, files(id, name, mimeType, fileExtension, parents)').execute(num_retries=3)\r\n result, resources, names, parents = [], [], {}, {}\r\n for drive_file in response.get('files', []):\r\n names[str(drive_file['id'])] = str(drive_file['name'])\r\n parents[str(drive_file['id'])] = drive_file.get('parents', [])\r\n resources.append({'id': drive_file['id'], 'name': drive_file['name'],\r\n 'parents': [str(parent) for parent in drive_file.get('parents', [])],\r\n 'mimeType': drive_file['mimeType']})\r\n while response.get('nextPageToken', None):\r\n drive_files = drive_service.files()\r\n response = drive_files.list(orderBy=order_by, q=query, pageSize=1000, pageToken=response['nextPageToken'],\r\n fields='nextPageToken, files(id, name, mimeType, fileExtension, parents)').execute(num_retries=3)\r\n for drive_file in response.get('files', []):\r\n names[str(drive_file['id'])] = str(drive_file['name'])\r\n parents[str(drive_file['id'])] = drive_file.get('parents', [])\r\n resources.append({'id': drive_file['id'], 'name': drive_file['name'],\r\n 'parents': [str(parent) for parent in drive_file.get('parents', [])],\r\n 'mimeType': drive_file['mimeType']})\r\n for resource in resources:\r\n if resource['parents']:\r\n for parent in resource['parents']:\r\n path = str(names.get(parent, '')) + str('/') + str(resource['name'])\r\n while parents.get(parent, []):\r\n parent = str(parents[parent][0])\r\n path = str(names.get(parent, '')) + str('/') + path\r\n resource['name'] = path\r\n if files:\r\n if resource['mimeType'] != 'application/vnd.google-apps.folder':\r\n result.append(resource)\r\n else:\r\n result.append(resource)\r\n else:\r\n if files:\r\n if resource['mimeType'] != 'application/vnd.google-apps.folder':\r\n result.append(resource)\r\n else:\r\n result.append(resource)\r\n return result\r\n\r\n\r\ndef get_metadata(file_id, user_settings=None):\r\n \"\"\"\r\n Obtains the metadata of a file\r\n\r\n :param str file_id: the identifier of the file whose metadata is needed\r\n :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.\r\n If one is not provided, then this method must be invoked by an EngineThread\r\n which defines the settings\r\n :return: metadata of the file including id, mimeType, size, parents, kind, fileExtension, and webContentLink\r\n \"\"\"\r\n drive_service = CredentialManager.get_client_drive_service(user_settings)\r\n files_service = 
drive_service.files().get(\r\n fileId=file_id, fields='id, mimeType, size, parents, kind, name, fileExtension, webContentLink')\r\n return files_service.execute(num_retries=3)\r\n\r\n\r\ndef get_file_contents(file_id, meta_err=False, user_settings=None):\r\n \"\"\"\r\n Obtains the contents of a file as a list of dictionaries. File type of the requested file must be a csv or a\r\n Google fusion table.\r\n\r\n :param str file_id: the identifier of the file whose content is needed\r\n :param bool meta_err: optional, internal use only\r\n :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.\r\n If one is not provided, then this method must be invoked by an EngineThread\r\n which defines the settings\r\n\r\n :return: list of dictionaries where each dictionary is a row in the file\r\n :rtype: list\r\n \"\"\"\r\n metadata = get_metadata(file_id, user_settings)\r\n if (metadata.get('fileExtension', None) == 'csv' or metadata.get('mimeType', None) == 'text/csv') and metadata.get(\r\n 'webContentLink', None):\r\n drive_service = CredentialManager.get_client_drive_service(user_settings)\r\n if user_settings is None:\r\n user_settings = threading.current_thread().settings\r\n temp_dir_path = user_settings.get(STORAGE.TEMPORARY.LOCAL, None)\r\n if not os.path.exists(temp_dir_path):\r\n os.makedirs(temp_dir_path)\r\n file_path = temp_dir_path + str(file_id) + \".csv\"\r\n if not os.path.exists(file_path):\r\n request = drive_service.files().get_media(fileId=file_id)\r\n fh = io.FileIO(file_path, mode='wb')\r\n downloader = MediaIoBaseDownload(fh, request, chunksize=1024 * 1024)\r\n done = False\r\n while done is False:\r\n status, done = downloader.next_chunk()\r\n fh.close()\r\n header, rows = [], []\r\n with open(file_path, 'rb') as csv_file:\r\n for line in csv_file.readlines():\r\n if not header:\r\n header = [str(heading).strip() for heading in str(line).split(',')]\r\n else:\r\n row = line.split(',')\r\n row_dict = {}\r\n for index, column in enumerate(row):\r\n row_dict[header[index]] = str(column).strip()\r\n rows.append(row_dict)\r\n return rows\r\n elif metadata.get('mimeType', None) == 'application/vnd.google-apps.fusiontable':\r\n ft_service = CredentialManager.get_client_fusion_table_service(user_settings)\r\n query = ft_service.query()\r\n table = query.sql(sql='SELECT * FROM ' + str(file_id), hdrs=False).execute(num_retries=3)\r\n result_rows = []\r\n columns = [str(column) for column in table['columns']]\r\n rows = table['rows']\r\n for row in rows:\r\n result_row = {}\r\n for index, cell in enumerate(row):\r\n result_row[columns[index]] = str(cell) if isinstance(cell, unicode) else cell\r\n result_rows.append(result_row)\r\n return result_rows\r\n elif meta_err:\r\n raise Exception('Unsupported file type for the file - ' + str(metadata['name'] + '.'))\r\n return []\r\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
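A hedged usage sketch of the two public helpers above. The shape of the settings dictionary is an assumption — in this codebase it would normally come from the EngineThread rather than be built by hand — and the query/ordering strings are standard Drive v3 parameters.

# Hypothetical settings dict; real keys are defined by colorker's CredentialManager.
settings = {'credentials': '...'}
csv_files = list_files(query="mimeType='text/csv'",
                       order_by='modifiedTime desc',
                       files=True,
                       user_settings=settings)
for f in csv_files:
    rows = get_file_contents(f['id'], user_settings=settings)
    print(f['name'], len(rows))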
# Start the HTML and Javascript code
print '''
<html>
<head>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["treemap"]});
google.setOnLoadCallback(drawChart);
function drawChart() {
'''
print CountBugs('path/to/repo')
# Finish the HTML and Javascript
print '''
]);
// Create and draw the visualization.
var tree = new google.visualization.TreeMap(document.getElementById('chart_div'));
tree.draw(data, {
maxDepth: 2,
minColor: 'YellowGreen',
midColor: 'LightGoldenRodYellow',
maxColor: 'Red',
headerHeight: 15,
fontColor: 'black',
showScale: true});
}
</script>
</head>
<body>
<div id="chart_div" style="width: 900px; height: 500px;"></div>
</body>
</html>
'''
|
normal
|
{
"blob_id": "0e112ecfd4ccf762234dff564dd6f3987418dedd",
"index": 1033,
"step-1": "# Start the HTML and Javascript code\nprint '''\n<html>\n <head>\n <script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>\n <script type=\"text/javascript\">\n google.load(\"visualization\", \"1\", {packages:[\"treemap\"]});\n google.setOnLoadCallback(drawChart);\n function drawChart() {\n'''\n\nprint CountBugs('path/to/repo')\n\n# Finish the HTML and Javascript\nprint '''\n ]);\n\n // Create and draw the visualization.\n var tree = new google.visualization.TreeMap(document.getElementById('chart_div'));\n tree.draw(data, {\n maxDepth: 2,\n minColor: 'YellowGreen',\n midColor: 'LightGoldenRodYellow',\n maxColor: 'Red',\n headerHeight: 15,\n fontColor: 'black',\n showScale: true});\n }\n </script>\n </head>\n\n <body>\n <div id=\"chart_div\" style=\"width: 900px; height: 500px;\"></div>\n </body>\n</html>\n'''\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
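The template above assumes `CountBugs` prints the opening of a `google.visualization.arrayToDataTable([` call whose rows the literal `]);` then closes. A hypothetical stub that would satisfy the template — the row schema (node, parent, size, color) is the TreeMap convention, and the values are made up:

def CountBugs(repo_path):
    # Hypothetical: header row plus (node, parent, size, bug-count) tuples.
    rows = [
        "var data = google.visualization.arrayToDataTable([",
        "['File', 'Parent', 'Lines', 'Bug commits'],",
        "['repo', null, 0, 0],",
        "['repo/foo.py', 'repo', 120, 3],",
    ]
    return '\n'.join(rows)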
'''
vetor = ["pairs of nails joined by a thread"]
The array index stands for the nails on the vertical rail, and the
integer stored at each position for the nails on the horizontal rail.
With i (vertical) and j (horizontal):
    vetor[i] = j
For two nails a (vertical) and b (horizontal), the threads cross when
    a > i and b < j, or a < i and b > j
(a and i are indices; b and j are the values stored at those positions)
-- so the answer is the number of inversions, counted via merge sort.
'''
def merge(p,n):
    global vet
    global aux
    if n <= 1:
        return 0
    c = merge(p,n//2) + merge(p+n//2,n-n//2)
    d,a,b = 0,0,n//2
    while d<n:
        if a != n//2 and (b == n or vet[p+a]<vet[p+b]):
            aux[d] = vet[p+a]
            a+=1
        else:
            aux[d] = vet[p+b]
            # taking from the right half inverts it with every element still
            # waiting in the left half: n//2 - a of them (the original's
            # n//2 + a overcounts, e.g. on input [1, 3, 2])
            c+=n//2-a
            b+=1
        d+=1
    for i in range(n):
        vet[p+i] = aux[i]
    return c

entrada = int(input())
vet = [int(x) for x in input().split()]
aux = [0]*entrada
print(merge(0,entrada))
|
normal
|
{
"blob_id": "fe081a422db6b7f10c89179beab852c6b74ec687",
"index": 2795,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef merge(p, n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)\n d, a, b = 0, 0, n // 2\n while d < n:\n if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):\n aux[d] = vet[p + a]\n a += 1\n else:\n aux[d] = vet[p + b]\n c += n // 2 + a\n b += 1\n d += 1\n for i in range(n):\n vet[p + i] = aux[i]\n return c\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef merge(p, n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)\n d, a, b = 0, 0, n // 2\n while d < n:\n if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):\n aux[d] = vet[p + a]\n a += 1\n else:\n aux[d] = vet[p + b]\n c += n // 2 + a\n b += 1\n d += 1\n for i in range(n):\n vet[p + i] = aux[i]\n return c\n\n\n<mask token>\nprint(merge(0, entrada))\n",
"step-4": "<mask token>\n\n\ndef merge(p, n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)\n d, a, b = 0, 0, n // 2\n while d < n:\n if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):\n aux[d] = vet[p + a]\n a += 1\n else:\n aux[d] = vet[p + b]\n c += n // 2 + a\n b += 1\n d += 1\n for i in range(n):\n vet[p + i] = aux[i]\n return c\n\n\nentrada = int(input())\nvet = [int(x) for x in input().split()]\naux = [0] * entrada\nprint(merge(0, entrada))\n",
"step-5": "'''\nvetor = [\"pares de pregos ligados por uma linha\"]\nindice do vetor representa os pregos na vertical, e o\ninteiro em cada pos, os pregos na horizontal.\n\ni(vertical) e j(horizontal) entao:\n vetor[i] = j\n\npregos a(vertical) e pregos b(horizontal)\n\nse a>i and b<j or a<i and b>j\n\na e i(são indices) b e j(são os elemntos salvos na pos)\n'''\n\ndef merge(p,n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p,n//2) + merge(p+n//2,n-n//2)\n d,a,b = 0,0,n//2\n while d<n:\n if a != n//2 and (b == n or vet[p+a]<vet[p+b]):\n aux[d] = vet[p+a]\n a+=1\n else:\n aux[d] = vet[p+b]\n c+=n//2+a\n b+=1\n d+=1\n for i in range(n):\n vet[p+i] = aux[i]\n return c\n\nentrada = int(input())\nvet = [int(x) for x in input().split()]\naux = [0]*entrada\nprint(merge(0,entrada))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
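A quick check of the corrected count, with the input-reading lines replaced by fixed data (three threads, fully reversed, so every pair crosses):

vet = [3, 2, 1]
aux = [0] * 3
print(merge(0, 3))   # -> 3: the pairs (3,2), (3,1), (2,1) all cross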
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Dang Kai
# @Date: 2018-10-30 15:52:57
# @Last Modified time: 2018-11-10 09:09:21
# @E-mail: [email protected]
# @Description:
from time import sleep
import sys
sys.path.append('../')
from common.encapsulation import BasePage


class IndexPage:

    def login(self, username, password):
        # Log in through the login page
        BasePage.open_url(self, self.base_url)
        BasePage.send_key(self, 'css', '#username', username)
        BasePage.send_key(self, 'css', '#password', password)
        BasePage.click_element(self, 'css', '.ant-btn')


if __name__ == '__main__':
    # The original called an undefined login_cookies(self); a minimal smoke
    # test needs a page object with a base_url (URL and credentials are
    # placeholders for illustration).
    page = IndexPage()
    page.base_url = 'http://localhost:8000/login'
    page.login('admin', 'admin123')
|
normal
|
{
"blob_id": "463f50567c9dd4b7b47a84eea715541cec5d3cb5",
"index": 2110,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass IndexPage:\n\n def login(self, username, password):\n BasePage.open_url(self, self.base_url)\n BasePage.send_key(self, 'css', '#username', username)\n BasePage.send_key(self, 'css', '#password', password)\n BasePage.click_element(self, 'css', '.ant-btn')\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('../')\n<mask token>\n\n\nclass IndexPage:\n\n def login(self, username, password):\n BasePage.open_url(self, self.base_url)\n BasePage.send_key(self, 'css', '#username', username)\n BasePage.send_key(self, 'css', '#password', password)\n BasePage.click_element(self, 'css', '.ant-btn')\n\n\nif __name__ == '__main__':\n login_cookies(self)\n",
"step-4": "from time import sleep\nimport sys\nsys.path.append('../')\nfrom common.encapsulation import BasePage\n\n\nclass IndexPage:\n\n def login(self, username, password):\n BasePage.open_url(self, self.base_url)\n BasePage.send_key(self, 'css', '#username', username)\n BasePage.send_key(self, 'css', '#password', password)\n BasePage.click_element(self, 'css', '.ant-btn')\n\n\nif __name__ == '__main__':\n login_cookies(self)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: Dang Kai\n# @Date: 2018-10-30 15:52:57\n# @Last Modified time: 2018-11-10 09:09:21\n# @E-mail: [email protected]\n# @Description:\nfrom time import sleep\nimport sys\nsys.path.append('../')\nfrom common.encapsulation import BasePage\n\n\nclass IndexPage:\n\n def login(self, username, password):\n # 登录页面\n BasePage.open_url(self,self.base_url)\n BasePage.send_key(self,'css','#username',username)\n BasePage.send_key(self,'css',\"#password\",password)\n BasePage.click_element(self,\"css\",\".ant-btn\")\n\nif __name__ == '__main__':\n login_cookies(self)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from extensions.ops.interpolate import Interpolate
from mo.front.caffe.collect_attributes import merge_attrs
from mo.front.common.partial_infer.utils import int64_array
from mo.front.extractor import FrontExtractorOp
class InterpFrontExtractor(FrontExtractorOp):
op = 'Interp'
enabled = True
@classmethod
def extract(cls, node):
proto_layer = node.pb
param = proto_layer.interp_param
update_attrs = {
'height': param.height,
'width': param.width,
'zoom_factor': param.zoom_factor,
'shrink_factor': param.shrink_factor,
}
mapping_rule = merge_attrs(param, update_attrs)
mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes': int64_array([2, 3]),
'pads_begin': param.pad_beg, 'pads_end': param.pad_end, 'align_corners': 1})
Interpolate.update_node_stat(node, mapping_rule)
return cls.enabled
|
normal
|
{
"blob_id": "ce28462621a423c6661c672cf92d7e9c91875cfa",
"index": 8247,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass InterpFrontExtractor(FrontExtractorOp):\n <mask token>\n <mask token>\n\n @classmethod\n def extract(cls, node):\n proto_layer = node.pb\n param = proto_layer.interp_param\n update_attrs = {'height': param.height, 'width': param.width,\n 'zoom_factor': param.zoom_factor, 'shrink_factor': param.\n shrink_factor}\n mapping_rule = merge_attrs(param, update_attrs)\n mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes':\n int64_array([2, 3]), 'pads_begin': param.pad_beg, 'pads_end':\n param.pad_end, 'align_corners': 1})\n Interpolate.update_node_stat(node, mapping_rule)\n return cls.enabled\n",
"step-3": "<mask token>\n\n\nclass InterpFrontExtractor(FrontExtractorOp):\n op = 'Interp'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n proto_layer = node.pb\n param = proto_layer.interp_param\n update_attrs = {'height': param.height, 'width': param.width,\n 'zoom_factor': param.zoom_factor, 'shrink_factor': param.\n shrink_factor}\n mapping_rule = merge_attrs(param, update_attrs)\n mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes':\n int64_array([2, 3]), 'pads_begin': param.pad_beg, 'pads_end':\n param.pad_end, 'align_corners': 1})\n Interpolate.update_node_stat(node, mapping_rule)\n return cls.enabled\n",
"step-4": "<mask token>\nfrom extensions.ops.interpolate import Interpolate\nfrom mo.front.caffe.collect_attributes import merge_attrs\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.front.extractor import FrontExtractorOp\n\n\nclass InterpFrontExtractor(FrontExtractorOp):\n op = 'Interp'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n proto_layer = node.pb\n param = proto_layer.interp_param\n update_attrs = {'height': param.height, 'width': param.width,\n 'zoom_factor': param.zoom_factor, 'shrink_factor': param.\n shrink_factor}\n mapping_rule = merge_attrs(param, update_attrs)\n mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes':\n int64_array([2, 3]), 'pads_begin': param.pad_beg, 'pads_end':\n param.pad_end, 'align_corners': 1})\n Interpolate.update_node_stat(node, mapping_rule)\n return cls.enabled\n",
"step-5": "\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom extensions.ops.interpolate import Interpolate\nfrom mo.front.caffe.collect_attributes import merge_attrs\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.front.extractor import FrontExtractorOp\n\n\nclass InterpFrontExtractor(FrontExtractorOp):\n op = 'Interp'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n proto_layer = node.pb\n param = proto_layer.interp_param\n\n update_attrs = {\n 'height': param.height,\n 'width': param.width,\n 'zoom_factor': param.zoom_factor,\n 'shrink_factor': param.shrink_factor,\n }\n\n mapping_rule = merge_attrs(param, update_attrs)\n mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes': int64_array([2, 3]),\n 'pads_begin': param.pad_beg, 'pads_end': param.pad_end, 'align_corners': 1})\n Interpolate.update_node_stat(node, mapping_rule)\n return cls.enabled\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
thisdict = {"brand": "ford", "model": "Mustang", "year": 1964}
model = thisdict["model"]      # indexing raises KeyError if the key is missing
print("model:", model)

thisdict = {"brand": "ford", "model": "Mustang", "year": 1964}
model = thisdict.get("model")  # .get is called, not indexed; returns None if missing
print("model:", model)
|
normal
|
{
"blob_id": "3d854c83488eeafa035ccf5d333eeeae63505255",
"index": 6908,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('model:', module)\n<mask token>\nprint('model:', module)\n",
"step-3": "thisdict = {'brand': 'ford', 'model': 'Mustang', 'year': 1964}\nmodule = thisdict['modal']\nprint('model:', module)\nthisdict = {'brand': 'ford', 'model': 'Mustang', 'year': 1964}\nmodule = thisdict.get['modal']\nprint('model:', module)\n",
"step-4": "thisdict = {\"brand\": \"ford\", \"model\": \"Mustang\", \"year\": 1964}\nmodule = thisdict[\"modal\"]\nprint(\"model:\", module)\n\nthisdict = {\"brand\": \"ford\", \"model\": \"Mustang\", \"year\": 1964}\nmodule = thisdict.get[\"modal\"]\nprint(\"model:\", module)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
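The practical difference between the two lookups — `[]` raises on a missing key, while `.get` lets you supply a default:

car = {"brand": "ford", "model": "Mustang", "year": 1964}
print(car.get("color"))             # None: key absent, no exception
print(car.get("color", "unknown"))  # 'unknown': explicit fallback
# car["color"] would raise KeyError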
from api.serializers.cart import CartSerializer
from api.serializers.product import ProductSerializer, ProductPopular
from api.serializers.type import TypeSerializer
from api.serializers.user import UserCreationSerializer, UserSerializer
from api.serializers.history import HistorySerializer
from api.serializers.order import OrderSerializer
from api.serializers.comment import CommentSerializer
from api.serializers.reply import ReplySerializer
from api.serializers.reason import ReasonSerializer
from api.serializers.waitinglist import WaitinglistSerializer
|
normal
|
{
"blob_id": "f0ff15a2392b439a54c5ec304192117c08978755",
"index": 4930,
"step-1": "<mask token>\n",
"step-2": "from api.serializers.cart import CartSerializer\nfrom api.serializers.product import ProductSerializer, ProductPopular\nfrom api.serializers.type import TypeSerializer\nfrom api.serializers.user import UserCreationSerializer, UserSerializer\nfrom api.serializers.history import HistorySerializer\nfrom api.serializers.order import OrderSerializer\nfrom api.serializers.comment import CommentSerializer\nfrom api.serializers.reply import ReplySerializer\nfrom api.serializers.reason import ReasonSerializer\nfrom api.serializers.waitinglist import WaitinglistSerializer\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# Generated by Django 2.2.6 on 2019-12-23 16:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Pages', '0014_auto_20191223_2032'),
]
operations = [
migrations.AlterField(
model_name='dept',
name='Hospital_id',
field=models.ForeignKey(default='null', on_delete=django.db.models.deletion.CASCADE, to='Pages.Hospital'),
),
]
|
normal
|
{
"blob_id": "d09984c6e6a0ce82389dbbbade63507e9687355d",
"index": 771,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Pages', '0014_auto_20191223_2032')]\n operations = [migrations.AlterField(model_name='dept', name=\n 'Hospital_id', field=models.ForeignKey(default='null', on_delete=\n django.db.models.deletion.CASCADE, to='Pages.Hospital'))]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Pages', '0014_auto_20191223_2032')]\n operations = [migrations.AlterField(model_name='dept', name=\n 'Hospital_id', field=models.ForeignKey(default='null', on_delete=\n django.db.models.deletion.CASCADE, to='Pages.Hospital'))]\n",
"step-5": "# Generated by Django 2.2.6 on 2019-12-23 16:38\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Pages', '0014_auto_20191223_2032'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='dept',\n name='Hospital_id',\n field=models.ForeignKey(default='null', on_delete=django.db.models.deletion.CASCADE, to='Pages.Hospital'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*-coding:utf-8-*-
# Author:SemaseMing <blog.v-api.cn>
# Email: [email protected]
# Time: 2016-10-19 11:56
import gevent
def foo():
print('Running in foo')
gevent.sleep(0)
    print('Explicit context switch to foo again')
def bar():
    print('Explicit context switch to bar')
    gevent.sleep(0)
    print('Implicit context switch back to bar')
gevent.joinall([gevent.spawn(foo), gevent.spawn(bar)])
|
normal
|
{
"blob_id": "7f131e17f4fbd7d6b333a51dae557ddb07c30046",
"index": 9077,
"step-1": "<mask token>\n\n\ndef bar():\n print('Explicit context to bar')\n gevent.sleep(0)\n print('Implicit contenxt switch back to bar')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef foo():\n print('Running in foo')\n gevent.sleep(0)\n print('Explicit context switch to foo ageni')\n\n\ndef bar():\n print('Explicit context to bar')\n gevent.sleep(0)\n print('Implicit contenxt switch back to bar')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef foo():\n print('Running in foo')\n gevent.sleep(0)\n print('Explicit context switch to foo ageni')\n\n\ndef bar():\n print('Explicit context to bar')\n gevent.sleep(0)\n print('Implicit contenxt switch back to bar')\n\n\ngevent.joinall([gevent.spawn(foo), gevent.spawn(bar)])\n",
"step-4": "import gevent\n\n\ndef foo():\n print('Running in foo')\n gevent.sleep(0)\n print('Explicit context switch to foo ageni')\n\n\ndef bar():\n print('Explicit context to bar')\n gevent.sleep(0)\n print('Implicit contenxt switch back to bar')\n\n\ngevent.joinall([gevent.spawn(foo), gevent.spawn(bar)])\n",
"step-5": "#!/usr/bin/env python\n# -*-coding:utf-8-*-\n# Author:SemaseMing <blog.v-api.cn>\n# Email: [email protected]\n# Time: 2016-10-19 11:56\n\nimport gevent\n\n\ndef foo():\n print('Running in foo')\n gevent.sleep(0)\n print('Explicit context switch to foo ageni')\n\n\ndef bar():\n print('Explicit context to bar')\n gevent.sleep(0)\n print('Implicit contenxt switch back to bar')\n\ngevent.joinall([gevent.spawn(foo), gevent.spawn(bar)])",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
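Because `gevent.sleep(0)` yields to the hub, the two greenlets interleave deterministically: foo runs first (it was spawned first), yields to bar, bar yields back, and so on. Running the script prints:

Running in foo
Explicit context switch to bar
Explicit context switch to foo again
Implicit context switch back to bar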
def main():
a, b = map(int, input().split())
diff = abs(max(b, a) - min(a, b))
if diff % 2 != 0:
print("IMPOSSIBLE")
else:
bigger = max(a, b)
ans = bigger - (diff//2)
print(ans)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "f73cbc25152a63bb6552e2cd8272c67a1f4277ba",
"index": 9044,
"step-1": "<mask token>\n",
"step-2": "def main():\n a, b = map(int, input().split())\n diff = abs(max(b, a) - min(a, b))\n if diff % 2 != 0:\n print('IMPOSSIBLE')\n else:\n bigger = max(a, b)\n ans = bigger - diff // 2\n print(ans)\n\n\n<mask token>\n",
"step-3": "def main():\n a, b = map(int, input().split())\n diff = abs(max(b, a) - min(a, b))\n if diff % 2 != 0:\n print('IMPOSSIBLE')\n else:\n bigger = max(a, b)\n ans = bigger - diff // 2\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "def main():\n a, b = map(int, input().split())\n diff = abs(max(b, a) - min(a, b))\n if diff % 2 != 0:\n print(\"IMPOSSIBLE\")\n else:\n bigger = max(a, b)\n ans = bigger - (diff//2)\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
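The arithmetic behind the branch: when a and b differ by an even amount, meeting in the middle is exact, so the answer max(a, b) - |a - b| // 2 is just the midpoint (a + b) // 2; an odd difference has no integer midpoint, hence IMPOSSIBLE. A quick check:

a, b = 7, 3                          # diff = 4, even
print(max(a, b) - abs(a - b) // 2)   # 5 == (7 + 3) // 2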
# Testing
import sys, os
sys.dont_write_bytecode = True
import argparse, socket
from requestframe import RequestFrame
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--header-mutate-level", type=int, choices=range(11), nargs='?', help="Set the mutation level for the headers (0-10). Default = 5", default=5)
parser.add_argument("--body-mutate-level", type=int, choices=range(11), nargs='?', help="Set the mutation level for the body (0-10). Default = 5", default=5)
parser.add_argument("--request-mutate-level", type=int, choices=range(11), nargs='?', help="Set the mutation level for the request line (0-10). Default = 5", default=5)
parser.add_argument("--body-type", type=str, choices=['json', 'junk', 'rand'], help="Set the data generated in the request body. Default = rand", default='rand')
parser.add_argument("--num-headers", type=int, help="Sets the maximum number of headers. Default = number of available headers", default=-1)
parser.add_argument("--generate-num", type=int, help="Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1", default=1)
parser.add_argument('-v', '--version', action='version', version='HTTPFuzz Version: 1.0.1')
args = parser.parse_args()
    if args.generate_num > 1:
        try:
            os.mkdir("output")
        except OSError:
            # Only the mkdir belongs in the try: the original wrapped the whole
            # loop, so any error during generation was misreported as a mkdir
            # failure and silently swallowed by a bare except.
            exit("[-] Couldn't make the output directory. It might already exist.")
        for i in range(args.generate_num):
            with open("output/{}.txt".format(i + 1), 'w') as f:
                request_frame = RequestFrame(args)
                request_frame.generate()
                f.write(request_frame.request)
                print("[+] Wrote request to /output/{}.txt".format(i + 1))
        exit("[+] Finished creating requests")
request_frame = RequestFrame(args)
request_frame.generate()
exit(request_frame.request)
|
normal
|
{
"blob_id": "350a79d6cead6814ad48292b14a204e753dc938c",
"index": 4363,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--header-mutate-level', type=int, choices=range(11\n ), nargs='?', help=\n 'Set the mutation level for the headers (0-10). Default = 5', default=5\n )\n parser.add_argument('--body-mutate-level', type=int, choices=range(11),\n nargs='?', help=\n 'Set the mutation level for the body (0-10). Default = 5', default=5)\n parser.add_argument('--request-mutate-level', type=int, choices=range(\n 11), nargs='?', help=\n 'Set the mutation level for the request line (0-10). Default = 5',\n default=5)\n parser.add_argument('--body-type', type=str, choices=['json', 'junk',\n 'rand'], help=\n 'Set the data generated in the request body. Default = rand',\n default='rand')\n parser.add_argument('--num-headers', type=int, help=\n 'Sets the maximum number of headers. Default = number of available headers'\n , default=-1)\n parser.add_argument('--generate-num', type=int, help=\n 'Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1'\n , default=1)\n parser.add_argument('-v', '--version', action='version', version=\n 'HTTPFuzz Version: 1.0.1')\n args = parser.parse_args()\n if args.generate_num > 1:\n try:\n os.mkdir('output')\n for i in range(args.generate_num):\n with open('output/{}.txt'.format(i + 1), 'w') as f:\n request_frame = RequestFrame(args)\n request_frame.generate()\n f.write(request_frame.request)\n print('[+] Wrote request to /output/{}.txt'.format(i + 1))\n exit('[+] Finished creating requests')\n except:\n exit(\n \"[-] Couldn't make the output directory. It might already exist.\"\n )\n request_frame = RequestFrame(args)\n request_frame.generate()\n exit(request_frame.request)\n",
"step-3": "<mask token>\nsys.dont_write_bytecode = True\n<mask token>\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--header-mutate-level', type=int, choices=range(11\n ), nargs='?', help=\n 'Set the mutation level for the headers (0-10). Default = 5', default=5\n )\n parser.add_argument('--body-mutate-level', type=int, choices=range(11),\n nargs='?', help=\n 'Set the mutation level for the body (0-10). Default = 5', default=5)\n parser.add_argument('--request-mutate-level', type=int, choices=range(\n 11), nargs='?', help=\n 'Set the mutation level for the request line (0-10). Default = 5',\n default=5)\n parser.add_argument('--body-type', type=str, choices=['json', 'junk',\n 'rand'], help=\n 'Set the data generated in the request body. Default = rand',\n default='rand')\n parser.add_argument('--num-headers', type=int, help=\n 'Sets the maximum number of headers. Default = number of available headers'\n , default=-1)\n parser.add_argument('--generate-num', type=int, help=\n 'Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1'\n , default=1)\n parser.add_argument('-v', '--version', action='version', version=\n 'HTTPFuzz Version: 1.0.1')\n args = parser.parse_args()\n if args.generate_num > 1:\n try:\n os.mkdir('output')\n for i in range(args.generate_num):\n with open('output/{}.txt'.format(i + 1), 'w') as f:\n request_frame = RequestFrame(args)\n request_frame.generate()\n f.write(request_frame.request)\n print('[+] Wrote request to /output/{}.txt'.format(i + 1))\n exit('[+] Finished creating requests')\n except:\n exit(\n \"[-] Couldn't make the output directory. It might already exist.\"\n )\n request_frame = RequestFrame(args)\n request_frame.generate()\n exit(request_frame.request)\n",
"step-4": "import sys, os\nsys.dont_write_bytecode = True\nimport argparse, socket\nfrom requestframe import RequestFrame\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--header-mutate-level', type=int, choices=range(11\n ), nargs='?', help=\n 'Set the mutation level for the headers (0-10). Default = 5', default=5\n )\n parser.add_argument('--body-mutate-level', type=int, choices=range(11),\n nargs='?', help=\n 'Set the mutation level for the body (0-10). Default = 5', default=5)\n parser.add_argument('--request-mutate-level', type=int, choices=range(\n 11), nargs='?', help=\n 'Set the mutation level for the request line (0-10). Default = 5',\n default=5)\n parser.add_argument('--body-type', type=str, choices=['json', 'junk',\n 'rand'], help=\n 'Set the data generated in the request body. Default = rand',\n default='rand')\n parser.add_argument('--num-headers', type=int, help=\n 'Sets the maximum number of headers. Default = number of available headers'\n , default=-1)\n parser.add_argument('--generate-num', type=int, help=\n 'Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1'\n , default=1)\n parser.add_argument('-v', '--version', action='version', version=\n 'HTTPFuzz Version: 1.0.1')\n args = parser.parse_args()\n if args.generate_num > 1:\n try:\n os.mkdir('output')\n for i in range(args.generate_num):\n with open('output/{}.txt'.format(i + 1), 'w') as f:\n request_frame = RequestFrame(args)\n request_frame.generate()\n f.write(request_frame.request)\n print('[+] Wrote request to /output/{}.txt'.format(i + 1))\n exit('[+] Finished creating requests')\n except:\n exit(\n \"[-] Couldn't make the output directory. It might already exist.\"\n )\n request_frame = RequestFrame(args)\n request_frame.generate()\n exit(request_frame.request)\n",
"step-5": "# Testing\nimport sys, os\nsys.dont_write_bytecode = True\n\nimport argparse, socket\nfrom requestframe import RequestFrame\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--header-mutate-level\", type=int, choices=range(11), nargs='?', help=\"Set the mutation level for the headers (0-10). Default = 5\", default=5)\n parser.add_argument(\"--body-mutate-level\", type=int, choices=range(11), nargs='?', help=\"Set the mutation level for the body (0-10). Default = 5\", default=5)\n parser.add_argument(\"--request-mutate-level\", type=int, choices=range(11), nargs='?', help=\"Set the mutation level for the request line (0-10). Default = 5\", default=5)\n parser.add_argument(\"--body-type\", type=str, choices=['json', 'junk', 'rand'], help=\"Set the data generated in the request body. Default = rand\", default='rand')\n parser.add_argument(\"--num-headers\", type=int, help=\"Sets the maximum number of headers. Default = number of available headers\", default=-1)\n parser.add_argument(\"--generate-num\", type=int, help=\"Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1\", default=1)\n parser.add_argument('-v', '--version', action='version', version='HTTPFuzz Version: 1.0.1')\n args = parser.parse_args()\n if args.generate_num > 1:\n try:\n os.mkdir(\"output\")\n for i in range(args.generate_num):\n with open(\"output/{}.txt\".format(i + 1), 'w') as f:\n request_frame = RequestFrame(args)\n request_frame.generate()\n f.write(request_frame.request)\n print(\"[+] Wrote request to /output/{}.txt\".format(i + 1))\n exit(\"[+] Finished creating requests\")\n except:\n exit(\"[-] Couldn't make the output directory. It might already exist.\")\n request_frame = RequestFrame(args)\n request_frame.generate()\n exit(request_frame.request)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
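A note on the HTTPFuzz record above: its broad try/except makes the whole run abort if the output directory already exists. A minimal sketch of a more forgiving write loop, assuming the same RequestFrame interface (only its name appears in the record, so the call pattern here is an assumption):

import os

def write_requests(args, make_frame):
    # exist_ok avoids failing when output/ is already present
    os.makedirs('output', exist_ok=True)
    for i in range(args.generate_num):
        frame = make_frame(args)  # e.g. RequestFrame(args) as in the record
        frame.generate()
        with open('output/{}.txt'.format(i + 1), 'w') as f:
            f.write(frame.request)
        print('[+] Wrote request to /output/{}.txt'.format(i + 1))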
import math
import time
def calculate_time(func):
def inner_fn(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
end = time.time()
print("Time taken to execute \'{}\' function is: {} seconds".format(func.__name__, round(end - start, 2)))
return inner_fn
@calculate_time
def factorial(num):
time.sleep(2)
print(math.factorial(num))
factorial(20)
|
normal
|
{
"blob_id": "7c9c13974e1deeb55f08c9e251e8c876cedcad6b",
"index": 2484,
"step-1": "<mask token>\n\n\n@calculate_time\ndef factorial(num):\n time.sleep(2)\n print(math.factorial(num))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calculate_time(func):\n\n def inner_fn(*args, **kwargs):\n start = time.time()\n func(*args, **kwargs)\n end = time.time()\n print(\"Time taken to execute '{}' function is: {} seconds\".format(\n func.__name__, round(end - start, 2)))\n return inner_fn\n\n\n@calculate_time\ndef factorial(num):\n time.sleep(2)\n print(math.factorial(num))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calculate_time(func):\n\n def inner_fn(*args, **kwargs):\n start = time.time()\n func(*args, **kwargs)\n end = time.time()\n print(\"Time taken to execute '{}' function is: {} seconds\".format(\n func.__name__, round(end - start, 2)))\n return inner_fn\n\n\n@calculate_time\ndef factorial(num):\n time.sleep(2)\n print(math.factorial(num))\n\n\nfactorial(20)\n",
"step-4": "import math\nimport time\n\n\ndef calculate_time(func):\n\n def inner_fn(*args, **kwargs):\n start = time.time()\n func(*args, **kwargs)\n end = time.time()\n print(\"Time taken to execute '{}' function is: {} seconds\".format(\n func.__name__, round(end - start, 2)))\n return inner_fn\n\n\n@calculate_time\ndef factorial(num):\n time.sleep(2)\n print(math.factorial(num))\n\n\nfactorial(20)\n",
"step-5": "import math\r\nimport time\r\n\r\ndef calculate_time(func):\r\n\r\n def inner_fn(*args, **kwargs):\r\n start = time.time()\r\n func(*args, **kwargs)\r\n end = time.time()\r\n\r\n print(\"Time taken to execute \\'{}\\' function is: {} seconds\".format(func.__name__, round(end - start, 2)))\r\n \r\n return inner_fn\r\n\r\n@calculate_time\r\ndef factorial(num):\r\n time.sleep(2)\r\n print(math.factorial(num))\r\n\r\nfactorial(20)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
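The calculate_time record above swallows the wrapped function's return value and replaces its metadata with inner_fn's. A sketch of the same decorator hardened with functools.wraps; the return-value handling is an addition, not part of the original blob:

import functools
import time

def calculate_time(func):
    @functools.wraps(func)  # preserves func.__name__ and __doc__ on the wrapper
    def inner_fn(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)  # keep the result instead of dropping it
        end = time.time()
        print("Time taken to execute '{}' function is: {} seconds".format(
            func.__name__, round(end - start, 2)))
        return result
    return inner_fn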
from flask import Flask, jsonify, request, send_file, render_template
from flask_cors import CORS
from twilio.rest import Client
import autocomplete
from gtts import gTTS
import os
# Set up the model.
autocomplete.load()
app = Flask(__name__)
CORS(app)
# The application
@app.route("/")
def index():
return render_template("index.html")
# Create a class for custom error messages (reference: http://flask.pocoo.org/docs/0.12/patterns/apierrors/).
class InvalidUsage(Exception):
status_code = 400
# Initialize the InvalidUsage exception.
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
# Convert the exception information into a dictionary.
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
# Register the custom exception with the error handler (reference: http://flask.pocoo.org/docs/0.12/patterns/apierrors/).
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
# Converts English text to speech.
@app.route('/convert_text_to_speech', methods=['POST'])
def convert_text_to_speech():
# Check to see if the required parameters are present.
if 'text_to_convert' not in request.values.keys():
raise InvalidUsage("No text included for conversion", status_code = 400)
# Send the post request.
tts = gTTS(text=request.values['text_to_convert'], lang='en')
tts.save('converted_text.mp3')
os.system('start converted_text.mp3')
# Return the sound file.
return send_file('converted_text.mp3', mimetype='audio/mpeg')
# Get suggestions for words that the user typed in.
@app.route('/get_suggestion', methods=['GET','POST'])
def get_suggestion():
# Raise an exception if the required parameters are not specified.
if "words" not in request.values.keys():
raise InvalidUsage("No words were specified for prediction.", status_code = 400)
# Predict the next word.
text = request.values['words']
prediction = [];
if len(text.split(" ")) > 1:
prediction = autocomplete.split_predict(text, 10)
else:
prediction = autocomplete.predict_currword(text, 10)
return jsonify(prediction)
# Adds text message support to allow Don to send text messages.
@app.route('/send_text', methods=['GET', 'POST'])
def send_text():
# Raise an exception if the required parameters are not specified.
if "text" not in request.values.keys():
raise InvalidUsage("The text message was not found in the request.", status_code = 400)
if "to" not in request.values.keys():
raise InvalidUsage("The to-number was not found in the request", status_code = 400)
# Extract the required information from the request body.
text = request.values['text']
to_number = request.values['to']
# Set up the account credentials - in a production project, this would be placed in a "secrets" file.
account_sid = "ACbbd2cff98bcbbad08f76b03701a0f2d9"
auth_token = "7d786ff14c6b4572a6e8e78f8ad6aee5"
# Send the text message.
client = Client(account_sid, auth_token)
message = client.messages.create(
from_="+12267992139",
to=to_number,
body=text)
return jsonify({"to":to_number, "message":message.body, "error code":message.error_code})
|
normal
|
{
"blob_id": "8980ac4db2657d3dbd2b70b33a4d13a077d4590e",
"index": 2266,
"step-1": "<mask token>\n\n\nclass InvalidUsage(Exception):\n status_code = 400\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\[email protected](InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\n<mask token>\n\n\[email protected]('/get_suggestion', methods=['GET', 'POST'])\ndef get_suggestion():\n if 'words' not in request.values.keys():\n raise InvalidUsage('No words were specified for prediction.',\n status_code=400)\n text = request.values['words']\n prediction = []\n if len(text.split(' ')) > 1:\n prediction = autocomplete.split_predict(text, 10)\n else:\n prediction = autocomplete.predict_currword(text, 10)\n return jsonify(prediction)\n\n\[email protected]('/send_text', methods=['GET', 'POST'])\ndef send_text():\n if 'text' not in request.values.keys():\n raise InvalidUsage('The text message was not found in the request.',\n status_code=400)\n if 'to' not in request.values.keys():\n raise InvalidUsage('The to-number was not found in the request',\n status_code=400)\n text = request.values['text']\n to_number = request.values['to']\n account_sid = 'ACbbd2cff98bcbbad08f76b03701a0f2d9'\n auth_token = '7d786ff14c6b4572a6e8e78f8ad6aee5'\n client = Client(account_sid, auth_token)\n message = client.messages.create(from_='+12267992139', to=to_number,\n body=text)\n return jsonify({'to': to_number, 'message': message.body, 'error code':\n message.error_code})\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\nclass InvalidUsage(Exception):\n status_code = 400\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\[email protected](InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\[email protected]('/convert_text_to_speech', methods=['POST'])\ndef convert_text_to_speech():\n if 'text_to_convert' not in request.values.keys():\n raise InvalidUsage('No text included for conversion', status_code=400)\n tts = gTTS(text=request.values['text_to_convert'], lang='en')\n tts.save('converted_text.mp3')\n os.system('start converted_text.mp3')\n return send_file('converted_text.mp3', mimetype='audio/mpeg')\n\n\[email protected]('/get_suggestion', methods=['GET', 'POST'])\ndef get_suggestion():\n if 'words' not in request.values.keys():\n raise InvalidUsage('No words were specified for prediction.',\n status_code=400)\n text = request.values['words']\n prediction = []\n if len(text.split(' ')) > 1:\n prediction = autocomplete.split_predict(text, 10)\n else:\n prediction = autocomplete.predict_currword(text, 10)\n return jsonify(prediction)\n\n\[email protected]('/send_text', methods=['GET', 'POST'])\ndef send_text():\n if 'text' not in request.values.keys():\n raise InvalidUsage('The text message was not found in the request.',\n status_code=400)\n if 'to' not in request.values.keys():\n raise InvalidUsage('The to-number was not found in the request',\n status_code=400)\n text = request.values['text']\n to_number = request.values['to']\n account_sid = 'ACbbd2cff98bcbbad08f76b03701a0f2d9'\n auth_token = '7d786ff14c6b4572a6e8e78f8ad6aee5'\n client = Client(account_sid, auth_token)\n message = client.messages.create(from_='+12267992139', to=to_number,\n body=text)\n return jsonify({'to': to_number, 'message': message.body, 'error code':\n message.error_code})\n",
"step-3": "<mask token>\nautocomplete.load()\n<mask token>\nCORS(app)\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\nclass InvalidUsage(Exception):\n status_code = 400\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\[email protected](InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\[email protected]('/convert_text_to_speech', methods=['POST'])\ndef convert_text_to_speech():\n if 'text_to_convert' not in request.values.keys():\n raise InvalidUsage('No text included for conversion', status_code=400)\n tts = gTTS(text=request.values['text_to_convert'], lang='en')\n tts.save('converted_text.mp3')\n os.system('start converted_text.mp3')\n return send_file('converted_text.mp3', mimetype='audio/mpeg')\n\n\[email protected]('/get_suggestion', methods=['GET', 'POST'])\ndef get_suggestion():\n if 'words' not in request.values.keys():\n raise InvalidUsage('No words were specified for prediction.',\n status_code=400)\n text = request.values['words']\n prediction = []\n if len(text.split(' ')) > 1:\n prediction = autocomplete.split_predict(text, 10)\n else:\n prediction = autocomplete.predict_currword(text, 10)\n return jsonify(prediction)\n\n\[email protected]('/send_text', methods=['GET', 'POST'])\ndef send_text():\n if 'text' not in request.values.keys():\n raise InvalidUsage('The text message was not found in the request.',\n status_code=400)\n if 'to' not in request.values.keys():\n raise InvalidUsage('The to-number was not found in the request',\n status_code=400)\n text = request.values['text']\n to_number = request.values['to']\n account_sid = 'ACbbd2cff98bcbbad08f76b03701a0f2d9'\n auth_token = '7d786ff14c6b4572a6e8e78f8ad6aee5'\n client = Client(account_sid, auth_token)\n message = client.messages.create(from_='+12267992139', to=to_number,\n body=text)\n return jsonify({'to': to_number, 'message': message.body, 'error code':\n message.error_code})\n",
"step-4": "<mask token>\nautocomplete.load()\napp = Flask(__name__)\nCORS(app)\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\nclass InvalidUsage(Exception):\n status_code = 400\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\[email protected](InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\[email protected]('/convert_text_to_speech', methods=['POST'])\ndef convert_text_to_speech():\n if 'text_to_convert' not in request.values.keys():\n raise InvalidUsage('No text included for conversion', status_code=400)\n tts = gTTS(text=request.values['text_to_convert'], lang='en')\n tts.save('converted_text.mp3')\n os.system('start converted_text.mp3')\n return send_file('converted_text.mp3', mimetype='audio/mpeg')\n\n\[email protected]('/get_suggestion', methods=['GET', 'POST'])\ndef get_suggestion():\n if 'words' not in request.values.keys():\n raise InvalidUsage('No words were specified for prediction.',\n status_code=400)\n text = request.values['words']\n prediction = []\n if len(text.split(' ')) > 1:\n prediction = autocomplete.split_predict(text, 10)\n else:\n prediction = autocomplete.predict_currword(text, 10)\n return jsonify(prediction)\n\n\[email protected]('/send_text', methods=['GET', 'POST'])\ndef send_text():\n if 'text' not in request.values.keys():\n raise InvalidUsage('The text message was not found in the request.',\n status_code=400)\n if 'to' not in request.values.keys():\n raise InvalidUsage('The to-number was not found in the request',\n status_code=400)\n text = request.values['text']\n to_number = request.values['to']\n account_sid = 'ACbbd2cff98bcbbad08f76b03701a0f2d9'\n auth_token = '7d786ff14c6b4572a6e8e78f8ad6aee5'\n client = Client(account_sid, auth_token)\n message = client.messages.create(from_='+12267992139', to=to_number,\n body=text)\n return jsonify({'to': to_number, 'message': message.body, 'error code':\n message.error_code})\n",
"step-5": "from flask import Flask, jsonify, request, send_file, render_template\nfrom flask_cors import CORS\nfrom twilio.rest import Client\nimport autocomplete\nfrom gtts import gTTS\nimport os\n\n# Set up the model.\nautocomplete.load()\napp = Flask(__name__)\nCORS(app)\n\n# The application\[email protected](\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n\n# Create a class for custom error messages (reference: http://flask.pocoo.org/docs/0.12/patterns/apierrors/).\nclass InvalidUsage(Exception):\n\tstatus_code = 400\n\n\t# Initialize the InvalidUsage exception.\n\tdef __init__(self, message, status_code=None, payload=None):\n\t\tException.__init__(self)\n\t\tself.message = message\n\t\tif status_code is not None:\n\t\t\tself.status_code = status_code\n\t\tself.payload = payload\n\n\t# Convert the exception information into a dictionary.\n\tdef to_dict(self):\n\t\trv = dict(self.payload or ())\n\t\trv['message'] = self.message\n\t\treturn rv\n\n# Register the custom exception with the error handler (reference: http://flask.pocoo.org/docs/0.12/patterns/apierrors/).\[email protected](InvalidUsage)\ndef handle_invalid_usage(error):\n\tresponse = jsonify(error.to_dict())\n\tresponse.status_code = error.status_code\n\treturn response\n\n# Converts English text to speech.\[email protected]('/convert_text_to_speech', methods=['POST'])\ndef convert_text_to_speech():\n\t# Check to see if the required parameters are present.\n\tif 'text_to_convert' not in request.values.keys():\n\t\traise InvalidUsage(\"No text included for conversion\", status_code = 400)\n\t\t\n\t# Send the post request.\n\ttts = gTTS(text=request.values['text_to_convert'], lang='en')\n\ttts.save('converted_text.mp3')\n\tos.system('start converted_text.mp3')\n\t\n\t# Return the sound file.\n\treturn send_file('converted_text.mp3', mimetype='audio/mpeg')\n\n# Get suggestions for words that the user typed in.\[email protected]('/get_suggestion', methods=['GET','POST'])\ndef get_suggestion():\n\t# Raise an exception if the required parameters are not specified.\n\tif \"words\" not in request.values.keys():\n\t\traise InvalidUsage(\"No words were specified for prediction.\", status_code = 400)\n\t\n\t# Predict the next word.\n\ttext = request.values['words']\n\tprediction = [];\n\tif len(text.split(\" \")) > 1:\n\t\tprediction = autocomplete.split_predict(text, 10)\n\telse:\n\t\tprediction = autocomplete.predict_currword(text, 10)\n\t\t\n\treturn jsonify(prediction)\n\t\n# Adds text message support to allow Don to send text messages.\[email protected]('/send_text', methods=['GET', 'POST'])\ndef send_text():\n\t# Raise an exception if the required parameters are not specified.\n\tif \"text\" not in request.values.keys():\n\t\traise InvalidUsage(\"The text message was not found in the request.\", status_code = 400)\n\tif \"to\" not in request.values.keys():\n\t\traise InvalidUsage(\"The to-number was not found in the request\", status_code = 400)\n\t\n\t# Extract the required information from the request body.\n\ttext = request.values['text']\n\tto_number = request.values['to']\n\t\n\t# Set up the account credentials - in a production project, this would be placed in a \"secrets\" file.\n\taccount_sid = \"ACbbd2cff98bcbbad08f76b03701a0f2d9\"\n\tauth_token = \"7d786ff14c6b4572a6e8e78f8ad6aee5\"\n\t\n\t# Send the text message.\n\tclient = Client(account_sid, auth_token)\n\tmessage = client.messages.create(\n\t\tfrom_=\"+12267992139\",\n\t\tto=to_number,\n\t\tbody=text)\n\n\treturn jsonify({\"to\":to_number, 
\"message\":message.body, \"error code\":message.error_code})\n\t",
"step-ids": [
7,
9,
10,
11,
13
]
}
|
[
7,
9,
10,
11,
13
] |
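The Flask record above hard-codes Twilio credentials in source, which its own comment concedes belongs in a secrets store. A minimal sketch of pulling them from the environment instead; the variable names are assumptions, not part of the original blob:

import os

# read credentials at startup; raises KeyError early if either is missing
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']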
#!/usr/bin/python
#_*_ coding: utf-8 _*_
import MySQLdb as mdb
import sys
con = mdb.connect("localhost","testuser","testdB","testdb")
with con:
cur = con.cursor()
cur.execute("UPDATE Writers SET Name = %s WHERE Id = %s ",
("Guy de manupassant", "4"))
print "Number of rows updated: %d "% cur.rowcount
|
normal
|
{
"blob_id": "94a84c7143763c6b7ccea1049cdec8b7011798cd",
"index": 6569,
"step-1": "#!/usr/bin/python\n#_*_ coding: utf-8 _*_\n\nimport MySQLdb as mdb\nimport sys\n\ncon = mdb.connect(\"localhost\",\"testuser\",\"testdB\",\"testdb\")\n\nwith con:\n cur = con.cursor()\n\n cur.execute(\"UPDATE Writers SET Name = %s WHERE Id = %s \",\n (\"Guy de manupassant\", \"4\"))\n print \"Number of rows updated: %d \"% cur.rowcount\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
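The MySQLdb record above is Python 2 (bare print statements). A sketch of the same parameterized UPDATE under Python 3 with an explicit commit, assuming the mysqlclient package supplies the MySQLdb module:

import MySQLdb as mdb

con = mdb.connect("localhost", "testuser", "testdB", "testdb")
try:
    cur = con.cursor()
    cur.execute("UPDATE Writers SET Name = %s WHERE Id = %s",
                ("Guy de manupassant", "4"))
    con.commit()  # make the UPDATE visible; autocommit is off by default
    print("Number of rows updated: %d" % cur.rowcount)
finally:
    con.close()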
def chess():
row = 0
line = 0
chess1 = []
chess2 = []
for line in range(3):
x1 = (0,line)
chess1.append(x1)
for line in range(3):
x2 = (1,line)
chess2.append(x2)
print(chess1)
print(chess2)
    for x in range(len(chess1)):
        # completed as a guess: the original comparison broke off at chess1[]
        if chess2[x][1] != chess1[x][1]:
            print('row values differ at index', x)
chess()
|
normal
|
{
"blob_id": "7d0d1a53a249167edade24a4e9305c95288a8574",
"index": 4851,
"step-1": "def chess():\n row = 0\n line = 0\n chess1 = []\n chess2 = []\n for line in range(3):\n x1 = (0,line)\n chess1.append(x1)\n for line in range(3):\n x2 = (1,line)\n chess2.append(x2)\n print(chess1)\n print(chess2)\n for x in range(len(chess1))\n if chess2[x][1] != chess1[]\n \nchess()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
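The chess record above builds two rows of (row, column) pairs and compares their second elements; an equivalent sketch using zip, under the same reading of the unfinished original:

def chess():
    chess1 = [(0, line) for line in range(3)]
    chess2 = [(1, line) for line in range(3)]
    print(chess1)
    print(chess2)
    for x, (a, b) in enumerate(zip(chess1, chess2)):
        if a[1] != b[1]:  # guessed comparison, mirroring the completion above
            print('row values differ at index', x)

chess()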
# Generated by Django 2.0.5 on 2018-07-12 11:08
import assessment.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assessment', '0006_auto_20180712_1428'),
]
operations = [
migrations.AlterModelManagers(
name='season',
managers=[
('objects', assessment.models.SeasonManager()),
],
),
migrations.AlterField(
model_name='punishmentreward',
name='method',
field=models.TextField(verbose_name='روش'),
),
migrations.AlterField(
model_name='scaleanswer',
name='carried_on',
field=models.BooleanField(default=False, verbose_name='انجام\u200cشده'),
),
migrations.AlterField(
model_name='scaleanswer',
name='qualitativeAnswer',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='پاسخ کیفی'),
),
migrations.AlterField(
model_name='scaleanswer',
name='quantitativeAnswer',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='پاسخ کمی'),
),
]
|
normal
|
{
"blob_id": "adff75857a1de24267e771c599e4d89486a6ad32",
"index": 7439,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('assessment', '0006_auto_20180712_1428')]\n operations = [migrations.AlterModelManagers(name='season', managers=[(\n 'objects', assessment.models.SeasonManager())]), migrations.\n AlterField(model_name='punishmentreward', name='method', field=\n models.TextField(verbose_name='روش')), migrations.AlterField(\n model_name='scaleanswer', name='carried_on', field=models.\n BooleanField(default=False, verbose_name='انجام\\u200cشده')),\n migrations.AlterField(model_name='scaleanswer', name=\n 'qualitativeAnswer', field=models.CharField(blank=True, max_length=\n 100, null=True, verbose_name='پاسخ کیفی')), migrations.AlterField(\n model_name='scaleanswer', name='quantitativeAnswer', field=models.\n CharField(blank=True, max_length=100, null=True, verbose_name=\n 'پاسخ کمی'))]\n",
"step-4": "import assessment.models\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('assessment', '0006_auto_20180712_1428')]\n operations = [migrations.AlterModelManagers(name='season', managers=[(\n 'objects', assessment.models.SeasonManager())]), migrations.\n AlterField(model_name='punishmentreward', name='method', field=\n models.TextField(verbose_name='روش')), migrations.AlterField(\n model_name='scaleanswer', name='carried_on', field=models.\n BooleanField(default=False, verbose_name='انجام\\u200cشده')),\n migrations.AlterField(model_name='scaleanswer', name=\n 'qualitativeAnswer', field=models.CharField(blank=True, max_length=\n 100, null=True, verbose_name='پاسخ کیفی')), migrations.AlterField(\n model_name='scaleanswer', name='quantitativeAnswer', field=models.\n CharField(blank=True, max_length=100, null=True, verbose_name=\n 'پاسخ کمی'))]\n",
"step-5": "# Generated by Django 2.0.5 on 2018-07-12 11:08\n\nimport assessment.models\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('assessment', '0006_auto_20180712_1428'),\n ]\n\n operations = [\n migrations.AlterModelManagers(\n name='season',\n managers=[\n ('objects', assessment.models.SeasonManager()),\n ],\n ),\n migrations.AlterField(\n model_name='punishmentreward',\n name='method',\n field=models.TextField(verbose_name='روش'),\n ),\n migrations.AlterField(\n model_name='scaleanswer',\n name='carried_on',\n field=models.BooleanField(default=False, verbose_name='انجام\\u200cشده'),\n ),\n migrations.AlterField(\n model_name='scaleanswer',\n name='qualitativeAnswer',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='پاسخ کیفی'),\n ),\n migrations.AlterField(\n model_name='scaleanswer',\n name='quantitativeAnswer',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='پاسخ کمی'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
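Migrations like the record above are normally applied with manage.py, but the same step can be driven from Python through Django's management API. A sketch, assuming a configured project with DJANGO_SETTINGS_MODULE set:

import django
from django.core.management import call_command

django.setup()
# apply pending migrations for the assessment app only
call_command('migrate', 'assessment')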
# Generated by Django 3.2.7 on 2021-09-11 19:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cryptocurrency', '0012_rename_cancel_exists_order_cancel_exist'),
]
operations = [
migrations.AlterField(
model_name='order',
name='created_at',
field=models.IntegerField(blank=True, null=True),
),
]
|
normal
|
{
"blob_id": "de347b41cd88947690cb42e043880a80d81e2c5c",
"index": 436,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cryptocurrency',\n '0012_rename_cancel_exists_order_cancel_exist')]\n operations = [migrations.AlterField(model_name='order', name=\n 'created_at', field=models.IntegerField(blank=True, null=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cryptocurrency',\n '0012_rename_cancel_exists_order_cancel_exist')]\n operations = [migrations.AlterField(model_name='order', name=\n 'created_at', field=models.IntegerField(blank=True, null=True))]\n",
"step-5": "# Generated by Django 3.2.7 on 2021-09-11 19:38\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cryptocurrency', '0012_rename_cancel_exists_order_cancel_exist'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='created_at',\n field=models.IntegerField(blank=True, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Queue:
def __init__(self):
self.head = None
self.tail = None
class Node:
def __init__(self, data):
self.data = data
self.next = None
def isEmpty(self):
return self.head is None
def peek(self):
return self.head.data if self.head is not None else None
def add(self, data):
node = self.Node(data)
if(self.tail is not None):
self.tail.next = node
self.tail = node
if (self.head is None):
self.head = node
def remove(self):
data = self.head.data
self.head = self.head.next
if (self.head is None):
self.tail = None
return data
|
normal
|
{
"blob_id": "1aca1cf11d64374d0e0786e74c16567a4c5a1dec",
"index": 6452,
"step-1": "class Queue:\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n <mask token>\n\n def peek(self):\n return self.head.data if self.head is not None else None\n <mask token>\n <mask token>\n",
"step-2": "class Queue:\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n <mask token>\n\n def peek(self):\n return self.head.data if self.head is not None else None\n <mask token>\n\n def remove(self):\n data = self.head.data\n self.head = self.head.next\n if self.head is None:\n self.tail = None\n return data\n",
"step-3": "class Queue:\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n <mask token>\n\n def peek(self):\n return self.head.data if self.head is not None else None\n\n def add(self, data):\n node = self.Node(data)\n if self.tail is not None:\n self.tail.next = node\n self.tail = node\n if self.head is None:\n self.head = node\n\n def remove(self):\n data = self.head.data\n self.head = self.head.next\n if self.head is None:\n self.tail = None\n return data\n",
"step-4": "class Queue:\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n def isEmpty(self):\n return self.head is None\n\n def peek(self):\n return self.head.data if self.head is not None else None\n\n def add(self, data):\n node = self.Node(data)\n if self.tail is not None:\n self.tail.next = node\n self.tail = node\n if self.head is None:\n self.head = node\n\n def remove(self):\n data = self.head.data\n self.head = self.head.next\n if self.head is None:\n self.tail = None\n return data\n",
"step-5": "class Queue:\n def __init__(self):\n self.head = None\n self.tail = None\n \n class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n def isEmpty(self):\n return self.head is None\n def peek(self):\n return self.head.data if self.head is not None else None\n def add(self, data):\n node = self.Node(data)\n if(self.tail is not None):\n self.tail.next = node\n self.tail = node\n if (self.head is None):\n self.head = node\n def remove(self):\n data = self.head.data\n self.head = self.head.next\n if (self.head is None):\n self.tail = None\n return data\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
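A quick usage sketch for the Queue record above, exercising add, peek, remove and isEmpty in FIFO order:

q = Queue()
q.add(1)
q.add(2)
print(q.peek())     # 1 - head of the queue, not removed
print(q.remove())   # 1 - first in, first out
print(q.remove())   # 2 - removing the last node also resets tail
print(q.isEmpty())  # True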
import sys
def digit_sum(x):
sum = 0
while x != 0:
sum = sum + x % 10
x = x // 10
return sum
for i in sys.stdin:
test_num = int(i)
if test_num == 0:
break
count = 11
while digit_sum(test_num) != digit_sum(count * test_num):
count = count + 1
print('{}'.format(count))
|
normal
|
{
"blob_id": "0d37b6f0ea8854f9d4d4cd2ff235fa39bab7cc12",
"index": 6549,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef digit_sum(x):\n sum = 0\n while x != 0:\n sum = sum + x % 10\n x = x // 10\n return sum\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef digit_sum(x):\n sum = 0\n while x != 0:\n sum = sum + x % 10\n x = x // 10\n return sum\n\n\nfor i in sys.stdin:\n test_num = int(i)\n if test_num == 0:\n break\n count = 11\n while digit_sum(test_num) != digit_sum(count * test_num):\n count = count + 1\n print('{}'.format(count))\n",
"step-4": "import sys\n\n\ndef digit_sum(x):\n sum = 0\n while x != 0:\n sum = sum + x % 10\n x = x // 10\n return sum\n\n\nfor i in sys.stdin:\n test_num = int(i)\n if test_num == 0:\n break\n count = 11\n while digit_sum(test_num) != digit_sum(count * test_num):\n count = count + 1\n print('{}'.format(count))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
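A small check of the digit_sum record above: the loop searches upward from 11 for a multiplier that preserves the digit sum. The same search wrapped as a function (the function name is an addition):

def smallest_multiplier(n):
    count = 11
    while digit_sum(n) != digit_sum(count * n):
        count = count + 1
    return count

print(digit_sum(1998))           # 1 + 9 + 9 + 8 = 27
print(smallest_multiplier(1998)) # 11, since digit_sum(11 * 1998) = digit_sum(21978) = 27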
# -------------------------------------------
# MODULES
# -------------------------------------------
import sys
import platform
if(platform.system()== "Windows"):
dir_sep = "\\"
else:
dir_sep = "/"
import time
import os
import numpy as np
import subprocess
import math
from mathutils import Vector
try:
from CifFile import CifFile
pars_check = False
except:
print("PyCIFRW not installed, try: pip install PyCifRW")
pars_check = True
try:
import bpy
Blender_env = True
except:
print("Not in blender environment.")
# -------------------------------------------
# VARIABLES
# -------------------------------------------
# global variables
file_path = "Select a file" # path to CIF-file
draw_bonds = False # draws bonds between atoms
draw_style = "SPACE FILLING" # sets draw style
draw_quality = "MED" # sets key for qualitydic
draw_lattice = False # draws unit cell outline
atom_name = False # displays names of atoms
bond_distance = 2 # set the max distance between bound atoms
lattice_size = 0.03 # sets size of lattice borders
bond_radius = 0.05 # radius of bond
add_camera = True # render final image
atom_color = True # draw atoms in color
user_feedback = "" # feedback for the user
print_data = True
# dictionaries
# scale factors per style: [atom-size multiplier, bond-radius multiplier]
styledic = {
"SPACE FILLING" : [1,0],
"BALL AND STICK" : [0.5,0],
"STICK" : [0,1]
}
# sets detail of spheres
qualitydic = {
"MIN" : 8,
"LOW" : 16,
"MED" : 32,
"HIGH" : 64,
"MAX" : 128
}
'''
Uncomment this when no external dictionaries are found
# dictionary which couples atoms to a color
colordic = {
"O" : [1,0,0],
"Si" : [0.25,0.25,1],
"Fe" : [1,0.2,0.2],
}
# dictionary which couples atoms to a specific size
sizedic = {
"O" : 0.3,
"Si" : 0.6,
"Fe" : 1.4,
}
'''
# Read in dictionaries from external files
path = os.path.dirname(os.path.realpath(__file__))
# dictionary which couples atoms to a color
# Color scheme, in RGB percentages, following the CPK convention was extracted from https://en.wikipedia.org/wiki/CPK_coloring#Typical_assignments
# data can be changed by modifying the values in colordic.txt
with open(path+dir_sep+'colordic.txt','r') as inf:
colordic = eval(inf.read())
# dictionary which couples atoms to a specific size
# Atom data, in Ångström, was extracted from https://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page)
# data can be changed by modifying the values in sizedic.txt
with open(path+dir_sep+'sizedic.txt','r') as inf:
sizedic = eval(inf.read())
# ----------------------------------------------
# BLENDER ADD-ON
# ----------------------------------------------
# add-on info
bl_info = {
"name": "Crystallographic Drawing Tool for Blender",
"description": "Add-on for drawing crystals from CIF-files.",
"author": "Jarrit Boons",
"blender": (2, 80,0),
"location": "View3D",
"category": "Crystallography in Blender"
}
# Operator to open the file browser and select a file
class ScanFileOperator(bpy.types.Operator):
bl_idname = "error.scan_file"
bl_label = "Scan file for return"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
def execute(self, context):
global file_path
global user_feedback
user_feedback = ""
file_path = self.filepath
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def register():
bpy.types.Scene.path_to_file = bpy.props.StringProperty(
name="",
description="Path to CIF file",
default = "empty"
)
# Operator to hold CDTB-data and program execution
class Operator(bpy.types.Operator):
bl_idname = "object.cdtb_operator"
bl_label = "CDTB_operator"
bl_descriptor = "Operator for drawing crystal"
# Runs the whole program
def execute(self, context):
global pars_check
global user_feedback
if(pars_check):
user_feedback = "CiFFile module not installed"
return {'FINISHED'}
if(file_path == "Select a file"):
print("No file selected")
user_feedback = "No File selected"
else:
user_feedback = "Crystal drawn"
global draw_bonds
draw_bonds = context.scene.draw_bonds
global bond_distance
bond_distance = context.scene.bond_distance
global draw_lattice
draw_lattice = context.scene.draw_lattice
global atom_name
atom_name = context.scene.atom_name
global print_data
print_data = context.scene.print_data
global draw_style
global atom_color
draw_style = context.scene.style_selection_mode
if(draw_style=="STICK"):
draw_bonds = True
atom_color = False
else:
atom_color = True
global draw_quality
draw_quality = context.scene.quality_selection_mode
global add_camera
add_camera = context.scene.add_camera
drawCrystal(file_path)
return {'FINISHED'}
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(
name="Draw bonds",
description="Draw bonds between elements"
)
bpy.types.Scene.bond_distance = bpy.props.FloatProperty(
name="Bond distance",
description="Set max distance for bonds to occur",
default=2,
min=0.0,
max=10.0,
precision=2
)
bpy.types.Scene.atom_name = bpy.props.BoolProperty(
name="Atom names",
description="Display the name of atoms"
)
bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(
name="Draw lattice",
description="Draw unit cell outline"
)
bpy.types.Scene.print_data = bpy.props.BoolProperty(
name="Print data",
description="Print crystal data in terminal"
)
# Dropdown menu for drawing style
selection_style = [
("SPACE FILLING", "SPACE FILLING", "", 1),
("BALL AND STICK", "BALL AND STICK", "", 2),
("STICK", "STICK", "", 3),
]
bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(
items=selection_style,
name="Style"
)
# Dropdown menu for drawing quality
selection_qual = [
("MIN", "MIN", "", 1),
("LOW", "LOW", "", 2),
("MED", "MED", "", 3),
("HIGH", "HIGH", "", 4),
("MAX", "MAX", "", 5)
]
bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(
items=selection_qual,
name="Quality",
default="MED"
)
bpy.types.Scene.add_camera = bpy.props.BoolProperty(
name="Place camera",
description="Place a camera and light to make rendering possible"
)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
# Panel to display add-on in Blender environment
class Panel(bpy.types.Panel):
bl_idname = "CDTB_Panel"
bl_label = "CDTB_Panel"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
bl_category = "CDTB"
def draw(self,context):
scn = context.scene
layout = self.layout
layout.label(text = 'Input file',icon_value=112)
'''
for i in range(100):
layout.label(text = str(i),icon_value =i)
'''
box = layout.box()
row = box.row()
splitrow = row.split(factor=0.075)
left_col = splitrow.column()
right_col = splitrow.column()
left_col.operator('error.scan_file',icon_value=108,text="")
right_col.label(text=file_path.rsplit('\\', 2)[-1])
layout.label(text = 'Settings',icon_value =117)
box = layout.box()
box.prop(scn,'draw_bonds')
box.prop(scn,'bond_distance')
box.prop(scn,'draw_lattice')
box.prop(scn, 'atom_name')
box.prop(scn,'print_data')
box.prop(scn, 'style_selection_mode')
box.prop(scn, 'quality_selection_mode')
box.prop(scn, 'add_camera')
layout.separator()
splitrow = layout.split(factor=0.3)
col = splitrow.column()
col.operator('object.cdtb_operator',text="Draw Crystal")
col = splitrow.column()
col.label(text=user_feedback)
layout.separator()
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
def register():
bpy.utils.register_class(Operator)
bpy.utils.register_class(ScanFileOperator)
bpy.utils.register_class(Panel)
def unregister():
bpy.utils.unregister_class(Operator)
bpy.utils.unregister_class(Panel)
bpy.utils.unregister_class(ScanFileOperator)
#----------------------------------------------
# MAIN PROGRAM
#----------------------------------------------
class Crysdata():
def __init__(self,F,cb):
self.start = time.time()
print("Draw timer started")
self.name = F
self.cell = Cell(cb)
self.atoms = readEl(cb)
self.pos = readPos(cb)
c = self.cell
self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)
def printout(self):
print(self.name)
print()
self.cell.printout()
print()
for element in self.pos:
element.printout()
print()
for element in self.atoms:
element.printout()
print()
print("Fractional to cartesian matrix:")
print(self.ftoc)
def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):
"""
Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a
!changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates
Return the transformation matrix that converts fractional coordinates to
cartesian coordinates.
Parameters
----------
a, b, c : float
The lengths of the edges.
alpha, gamma, beta : float
The angles between the sides.
angle_in_degrees : bool
True if alpha, beta and gamma are expressed in degrees.
Returns
-------
r : array_like
The 3x3 rotation matrix. ``V_cart = np.dot(r, V_frac)``.
"""
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
cosa = np.cos(alpha)
sina = np.sin(alpha)
cosb = np.cos(beta)
sinb = np.sin(beta)
cosg = np.cos(gamma)
sing = np.sin(gamma)
volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg
volume = a*b*c*np.sqrt(volume)
r = np.zeros((3, 3))
r[0, 0] = float(a)
r[0, 1] = float(b * cosg)
r[0, 2] = float(c * cosb)
r[1, 0] = float(0)
r[1, 1] = float(b * sing)
r[1, 2] = float(c * (cosa - cosb * cosg) / sing)
r[2, 0] = float(0)
r[2, 1] = float(0)
r[2, 2] = float(volume / (a*b*sing))
return r
def drawCrystal(self):
if draw_lattice:
self.drawCell()
print("Lattice drawn after {:.3f} seconds".format((time.time()-self.start)))
self.drawAtoms()
print("Atoms drawn after {:.3f} seconds".format((time.time()-self.start)))
if(draw_bonds):
self.drawBonds()
print("Bonds drawn after {:.3f} seconds".format((time.time()-self.start)))
def drawAtoms(self):
for a in self.atoms:
a.drawObj(self.ftoc)
print("Atoms drawn:",len(self.atoms))
def drawCell(self):
cell_corners=[]
cell_edges=[]
# calculate and draw corners
for i in range(2):
for j in range(2):
for k in range(2):
bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))
activeObject = bpy.context.active_object # Set active object to variable
cell_corners.append(activeObject)
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
# draw lines
for i,j in zip([0,0,0,1,1,2,2,3,4,4,5,6],[1,2,4,3,5,3,6,7,5,6,7,7]):
cell_edges.append(self.drawLine(cell_corners[i].location,cell_corners[j].location))
# select all line and corners
for i in cell_corners:
i.select_set(action="SELECT")
for i in cell_edges:
i.select_set(action="SELECT")
# set corner in origin as active and join meshes as one object
bpy.context.view_layer.objects.active = cell_corners[0]
bpy.ops.object.join()
print("Cell box drawn")
def drawLine(self,ac,tc):
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[draw_quality],radius=lattice_size,depth = dist,location = (dx/2 + ac[0], dy/2 + ac[1], dz/2 + ac[2]))
activeObject = bpy.context.active_object
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
phi = math.atan2(dy, dx)
theta = math.acos(dz/dist)
bpy.context.object.rotation_euler[1] = theta
bpy.context.object.rotation_euler[2] = phi
return activeObject
def drawBonds(self):
cnt = 0
bpy.ops.curve.primitive_bezier_circle_add(location=(0,0,0),radius = bond_radius)
bpy.context.object.name = 'bez'
for atom in self.atoms:
for target in self.atoms:
if atom != target:
if("bond{}-{}".format(target.elid,atom.elid)in bpy.data.objects):
continue
if(atom.sym == 'H' and target.sym == 'H'):
continue
if calcDistance(self.ftoc,atom,target) <= bond_distance:
self.makeBond(atom,target)
cnt += 1
print("Atom bonds drawn:",cnt)
# This function hooks the bond to the atoms
def makeBond(self,atom,target):
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
o1 = bpy.data.objects[atom.elid]
o2 = bpy.data.objects[target.elid]
bond = self.hookCurve(o1,o2, bpy.context.scene)
bpy.context.object.data.bevel_object = bpy.data.objects["bez"]
bpy.context.object.name = "bond{}-{}".format(atom.elid,target.elid)
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [255,255,255] # change color
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
def hookCurve(self,o1, o2, scn):
curve = bpy.data.curves.new("link", 'CURVE')
curve.dimensions = '3D'
spline = curve.splines.new('BEZIER')
spline.bezier_points.add(1)
p0 = spline.bezier_points[0]
p1 = spline.bezier_points[1]
# p0.co = o1.location
p0.handle_right_type = 'VECTOR'
# p1.co = o2.location
p1.handle_left_type = 'VECTOR'
obj = bpy.data.objects.new("link", curve)
m0 = obj.modifiers.new("alpha", 'HOOK')
m0.object = o1
m1 = obj.modifiers.new("beta", 'HOOK')
m1.object = o2
bpy.context.collection.objects.link(obj)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
# Reassign the points
p0 = curve.splines[0].bezier_points[0]
p1 = curve.splines[0].bezier_points[1]
# Hook first control point to first atom
p0.select_control_point = True
p1.select_control_point = False
bpy.ops.object.hook_assign(modifier="alpha")
# Hook second control point to first atom
p0 = curve.splines[0].bezier_points[0]
p1 = curve.splines[0].bezier_points[1]
p1.select_control_point = True
p0.select_control_point = False
bpy.ops.object.hook_assign(modifier="beta")
return obj
class Cell():
def __init__(self,cb):
self.alen = float(cb["_cell_length_a"])
self.blen = float(cb["_cell_length_b"])
self.clen = float(cb["_cell_length_c"])
self.alpha = float(cb["_cell_angle_alpha"])
self.beta = float(cb["_cell_angle_beta"])
self.gamma = float(cb["_cell_angle_gamma"])
def printout(self):
print("alen:{:8} \nblen:{:8} \nclen:{:8} \nalpha:{:8} \nbeta: {:8} \ngamma:{:8}".format(self.alen,self.blen,self.clen,self.alpha,self.beta,self.gamma))
class Atom():
def __init__(self,elid,sym,xpos,ypos,zpos):
self.elid = elid
self.sym = sym
self.xpos = float(xpos)
self.ypos = float(ypos)
self.zpos = float(zpos)
def printout(self):
print("id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}".format(self.elid,self.sym,self.xpos,self.ypos,self.zpos))
def drawObj(self,ftoc):
size = sizedic[self.sym]*styledic[draw_style][0]+bond_radius*styledic[draw_style][1]
bpy.ops.mesh.primitive_uv_sphere_add(segments=qualitydic[draw_quality],ring_count=qualitydic[draw_quality]/2,size=size,location=toCarth(ftoc,[self.xpos,self.ypos,self.zpos]))
bpy.context.object.name = self.elid
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
if(atom_name):
bpy.context.object.show_name = True
if(atom_color):
bpy.context.object.active_material.diffuse_color = colordic[self.sym] # change color to dictionary color
else:
bpy.context.object.active_material.diffuse_color = [1,1,1] # change color to white
class sympos():
def __init__(self,string):
self.xsym = (string[0].split(','))[0]
self.ysym = (string[0].split(','))[1]
self.zsym = (string[0].split(','))[2]
def printout(self):
print("x:{:8} y:{:8} z:{:8}".format(self.xsym,self.ysym,self.zsym))
def readEl(cb):
elements = []
previd = []
idcnt = []
lb = cb.GetLoop("_atom_site_label")
for el in lb:
flag = False
for i in range(len(previd)):
if(el[0] == previd[i]):
flag = True
break
if(flag):
idcnt[i] += 1
else:
previd.append(el[0])
idcnt.append(0)
i = len(idcnt)-1
id_t = "{}.{}".format(el[0],idcnt[i])
elements.append(Atom(id_t,el[1],el[2],el[3],el[4]))
return elements
def readPos(cb):
positions = [];
lb = cb.GetLoop("_symmetry_equiv_pos_as_xyz")
for el in lb:
positions.append(sympos(el))
return positions
def obabel_fill_unit_cell(cif_file, p1_file):
# Convert symmetry to P1 using openbabel as subprocess
# Notation: obabel [-i<input-type>] <infilename> [-o<output-type>] -O<outfilename> [Options]
subprocess.run(['obabel', '-icif', cif_file, '-ocif', '-O', p1_file, '--fillUC', 'keepconnect'])
def calcDistance(ftoc,atom1,atom2):
ac = toCarth(ftoc,[atom1.xpos,atom1.ypos,atom1.zpos])
tc = toCarth(ftoc,[atom2.xpos,atom2.ypos,atom2.zpos])
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
return dist
def toCarth(ftoc,V_frac):
return np.dot(ftoc, V_frac)
def look_at(obj_camera, point):
loc_camera = obj_camera.matrix_world.to_translation()
direction = point - loc_camera
# point the cameras '-Z' and use its 'Y' as up
rot_quat = direction.to_track_quat('-Z', 'Y')
# assume we're using euler rotation
obj_camera.rotation_euler = rot_quat.to_euler()
def addCamera(x,y,z):
bpy.ops.object.camera_add(view_align=True, enter_editmode=False, location=(5*x,5*y,5*z))
print("camera added")
bpy.ops.object.light_add(type='SUN', view_align=False, location=(0, 0, 0))
obj_camera = bpy.data.objects["Camera"]
look_at(obj_camera, Vector([0,0,z/4]))
obj_camera.data.type = 'ORTHO'
obj_camera.data.ortho_scale = ((x+y+z))
def clearWS():
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)
# remove all previous curves
for i in bpy.data.curves:
bpy.data.curves.remove(i)
# remove all previous materials
for m in bpy.data.materials:
bpy.data.materials.remove(m)
# remove all previous camera's
for c in bpy.data.cameras:
bpy.data.cameras.remove(c)
print("Workspace cleared.")
return
def drawCrystal(file):
# Check if file is file:
S = time.time()
global user_feedback
ext = file[len(file)-4:]
if(ext.lower() != ".cif"):
print("Only cif files can be visualised")
user_feedback = "Not a cif file"
return
# Check OpenBabel installation
try:
# Convert the cif file to its P1 symmetry notation as a temporary cif file
print('Converting %s to P1' %file)
obabel_fill_unit_cell(file, "temp.CIF")
cf = CifFile("temp.CIF")
except:
print("No OpenBabel installation found, install it from http://openbabel.org/wiki/Category:Installation")
user_feedback = "OpenBabel not installed"
#cf = CifFile(file) CifFile apparently can't read in long filepaths
return
# Open and parse our cif
f = file.rsplit(dir_sep, 1)[-1]
F = f[:3]
print(f)
cb = cf.first_block()
Crystal = Crysdata(F,cb)
# Print crystal data in terminal if checked
if(print_data):
Crystal.printout()
print("Crystal data read after "+ str(time.time() - S) + " seconds")
# Draw crystal if in Blender environment
if(Blender_env):
clearWS()
Crystal.drawCrystal()
bpy.ops.object.select_all(action='DESELECT')
if(add_camera):
addCamera(Crystal.cell.alen,Crystal.cell.blen,Crystal.cell.clen)
|
normal
|
{
"blob_id": "e14319e705a3c1cdf85e0a2fe77c211e2afa9baa",
"index": 9880,
"step-1": "<mask token>\n\n\nclass Crysdata:\n\n def __init__(self, F, cb):\n self.start = time.time()\n print('Draw timer started')\n self.name = F\n self.cell = Cell(cb)\n self.atoms = readEl(cb)\n self.pos = readPos(cb)\n c = self.cell\n self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen, c.blen,\n c.clen, c.alpha, c.beta, c.gamma)\n\n def printout(self):\n print(self.name)\n print()\n self.cell.printout()\n print()\n for element in self.pos:\n element.printout()\n print()\n for element in self.atoms:\n element.printout()\n print()\n print('Fractional to cartesian matrix:')\n print(self.ftoc)\n\n def get_fractional_to_cartesian_matrix(self, a, b, c, alpha, beta, gamma):\n \"\"\"\n Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a\n\n !changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates\n\n Return the transformation matrix that converts fractional coordinates to\n cartesian coordinates.\n Parameters\n ----------\n a, b, c : float\n The lengths of the edges.\n alpha, gamma, beta : float\n The angles between the sides.\n angle_in_degrees : bool\n True if alpha, beta and gamma are expressed in degrees.\n Returns\n -------\n r : array_like\n The 3x3 rotation matrix. ``V_cart = np.dot(r, V_frac)``.\n \"\"\"\n alpha = np.deg2rad(alpha)\n beta = np.deg2rad(beta)\n gamma = np.deg2rad(gamma)\n cosa = np.cos(alpha)\n sina = np.sin(alpha)\n cosb = np.cos(beta)\n sinb = np.sin(beta)\n cosg = np.cos(gamma)\n sing = np.sin(gamma)\n volume = (1.0 - cosa ** 2.0 - cosb ** 2.0 - cosg ** 2.0 + 2.0 *\n cosa * cosb * cosg)\n volume = a * b * c * np.sqrt(volume)\n r = np.zeros((3, 3))\n r[0, 0] = float(a)\n r[0, 1] = float(b * cosg)\n r[0, 2] = float(c * cosb)\n r[1, 0] = float(0)\n r[1, 1] = float(b * sing)\n r[1, 2] = float(c * (cosa - cosb * cosg) / sing)\n r[2, 0] = float(0)\n r[2, 1] = float(0)\n r[2, 2] = float(volume / (a * b * sing))\n return r\n\n def drawCrystal(self):\n if draw_lattice:\n self.drawCell()\n print('Lattice drawn after {:.3f} seconds'.format(time.time() -\n self.start))\n self.drawAtoms()\n print('Atoms drawn after {:.3f} seconds'.format(time.time() - self.\n start))\n if draw_bonds:\n self.drawBonds()\n print('Bonds drawn after {:.3f} seconds'.format(time.time() -\n self.start))\n\n def drawAtoms(self):\n for a in self.atoms:\n a.drawObj(self.ftoc)\n print('Atoms drawn:', len(self.atoms))\n\n def drawCell(self):\n cell_corners = []\n cell_edges = []\n for i in range(2):\n for j in range(2):\n for k in range(2):\n bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,\n location=toCarth(self.ftoc, [i, j, k]))\n activeObject = bpy.context.active_object\n cell_corners.append(activeObject)\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [0, 0, 0\n ]\n for i, j in zip([0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 5, 6], [1, 2, 4, 3, \n 5, 3, 6, 7, 5, 6, 7, 7]):\n cell_edges.append(self.drawLine(cell_corners[i].location,\n cell_corners[j].location))\n for i in cell_corners:\n i.select_set(action='SELECT')\n for i in cell_edges:\n i.select_set(action='SELECT')\n bpy.context.view_layer.objects.active = cell_corners[0]\n bpy.ops.object.join()\n print('Cell box drawn')\n\n def drawLine(self, ac, tc):\n dx = tc[0] - ac[0]\n dy = tc[1] - ac[1]\n dz = tc[2] - ac[2]\n dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)\n bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[\n draw_quality], radius=lattice_size, depth=dist, location=(dx / \n 
2 + ac[0], dy / 2 + ac[1], dz / 2 + ac[2]))\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [0, 0, 0]\n phi = math.atan2(dy, dx)\n theta = math.acos(dz / dist)\n bpy.context.object.rotation_euler[1] = theta\n bpy.context.object.rotation_euler[2] = phi\n return activeObject\n\n def drawBonds(self):\n cnt = 0\n bpy.ops.curve.primitive_bezier_circle_add(location=(0, 0, 0),\n radius=bond_radius)\n bpy.context.object.name = 'bez'\n for atom in self.atoms:\n for target in self.atoms:\n if atom != target:\n if 'bond{}-{}'.format(target.elid, atom.elid\n ) in bpy.data.objects:\n continue\n if atom.sym == 'H' and target.sym == 'H':\n continue\n if calcDistance(self.ftoc, atom, target) <= bond_distance:\n self.makeBond(atom, target)\n cnt += 1\n print('Atom bonds drawn:', cnt)\n\n def makeBond(self, atom, target):\n if 'OBJECT' != bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n o1 = bpy.data.objects[atom.elid]\n o2 = bpy.data.objects[target.elid]\n bond = self.hookCurve(o1, o2, bpy.context.scene)\n bpy.context.object.data.bevel_object = bpy.data.objects['bez']\n bpy.context.object.name = 'bond{}-{}'.format(atom.elid, target.elid)\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [255, 255, 255]\n if 'OBJECT' != bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n <mask token>\n\n\nclass Cell:\n\n def __init__(self, cb):\n self.alen = float(cb['_cell_length_a'])\n self.blen = float(cb['_cell_length_b'])\n self.clen = float(cb['_cell_length_c'])\n self.alpha = float(cb['_cell_angle_alpha'])\n self.beta = float(cb['_cell_angle_beta'])\n self.gamma = float(cb['_cell_angle_gamma'])\n\n def printout(self):\n print(\n 'alen:{:8} \\nblen:{:8} \\nclen:{:8} \\nalpha:{:8} \\nbeta: {:8} \\ngamma:{:8}'\n .format(self.alen, self.blen, self.clen, self.alpha, self.beta,\n self.gamma))\n\n\nclass Atom:\n\n def __init__(self, elid, sym, xpos, ypos, zpos):\n self.elid = elid\n self.sym = sym\n self.xpos = float(xpos)\n self.ypos = float(ypos)\n self.zpos = float(zpos)\n\n def printout(self):\n print('id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}'.format(self.\n elid, self.sym, self.xpos, self.ypos, self.zpos))\n\n def drawObj(self, ftoc):\n size = sizedic[self.sym] * styledic[draw_style][0\n ] + bond_radius * styledic[draw_style][1]\n bpy.ops.mesh.primitive_uv_sphere_add(segments=qualitydic[\n draw_quality], ring_count=qualitydic[draw_quality] / 2, size=\n size, location=toCarth(ftoc, [self.xpos, self.ypos, self.zpos]))\n bpy.context.object.name = self.elid\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n if atom_name:\n bpy.context.object.show_name = True\n if atom_color:\n bpy.context.object.active_material.diffuse_color = colordic[self\n .sym]\n else:\n bpy.context.object.active_material.diffuse_color = [1, 1, 1]\n\n\nclass sympos:\n\n def __init__(self, string):\n self.xsym = string[0].split(',')[0]\n self.ysym = string[0].split(',')[1]\n self.zsym = string[0].split(',')[2]\n\n def printout(self):\n print('x:{:8} y:{:8} z:{:8}'.format(self.xsym, self.ysym, self.zsym))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ScanFileOperator(bpy.types.Operator):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Operator(bpy.types.Operator):\n bl_idname = 'object.cdtb_operator'\n bl_label = 'CDTB_operator'\n bl_descriptor = 'Operator for drawing crystal'\n\n def execute(self, context):\n global pars_check\n global user_feedback\n if pars_check:\n user_feedback = 'CiFFile module not installed'\n return {'FINISHED'}\n if file_path == 'Select a file':\n print('No file selected')\n user_feedback = 'No File selected'\n else:\n user_feedback = 'Crystal drawn'\n global draw_bonds\n draw_bonds = context.scene.draw_bonds\n global bond_distance\n bond_distance = context.scene.bond_distance\n global draw_lattice\n draw_lattice = context.scene.draw_lattice\n global atom_name\n atom_name = context.scene.atom_name\n global print_data\n print_data = context.scene.print_data\n global draw_style\n global atom_color\n draw_style = context.scene.style_selection_mode\n if draw_style == 'STICK':\n draw_bonds = True\n atom_color = False\n else:\n atom_color = True\n global draw_quality\n draw_quality = context.scene.quality_selection_mode\n global add_camera\n add_camera = context.scene.add_camera\n drawCrystal(file_path)\n return {'FINISHED'}\n\n @classmethod\n def register(cls):\n print('Registered class: %s ' % cls.bl_label)\n bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(name=\n 'Draw bonds', description='Draw bonds between elements')\n bpy.types.Scene.bond_distance = bpy.props.FloatProperty(name=\n 'Bond distance', description=\n 'Set max distance for bonds to occur', default=2, min=0.0, max=\n 10.0, precision=2)\n bpy.types.Scene.atom_name = bpy.props.BoolProperty(name=\n 'Atom names', description='Display the name of atoms')\n bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(name=\n 'Draw lattice', description='Draw unit cell outline')\n bpy.types.Scene.print_data = bpy.props.BoolProperty(name=\n 'Print data', description='Print crystal data in terminal')\n selection_style = [('SPACE FILLING', 'SPACE FILLING', '', 1), (\n 'BALL AND STICK', 'BALL AND STICK', '', 2), ('STICK', 'STICK',\n '', 3)]\n bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(items\n =selection_style, name='Style')\n selection_qual = [('MIN', 'MIN', '', 1), ('LOW', 'LOW', '', 2), (\n 'MED', 'MED', '', 3), ('HIGH', 'HIGH', '', 4), ('MAX', 'MAX',\n '', 5)]\n bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(items\n =selection_qual, name='Quality', default='MED')\n bpy.types.Scene.add_camera = bpy.props.BoolProperty(name=\n 'Place camera', description=\n 'Place a camera and light to make rendering possible')\n\n @classmethod\n def unregister(cls):\n print('Unregistered class: %s ' % cls.bl_label)\n\n\nclass Panel(bpy.types.Panel):\n bl_idname = 'CDTB_Panel'\n bl_label = 'CDTB_Panel'\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_context = 'objectmode'\n bl_category = 'CDTB'\n\n def draw(self, context):\n scn = context.scene\n layout = self.layout\n layout.label(text='Input file', icon_value=112)\n \"\"\"\n for i in range(100):\n layout.label(text = str(i),icon_value =i)\n \"\"\"\n box = layout.box()\n row = box.row()\n splitrow = row.split(factor=0.075)\n left_col = splitrow.column()\n right_col = splitrow.column()\n left_col.operator('error.scan_file', icon_value=108, text='')\n right_col.label(text=file_path.rsplit('\\\\', 2)[-1])\n layout.label(text='Settings', icon_value=117)\n box = layout.box()\n box.prop(scn, 
'draw_bonds')\n box.prop(scn, 'bond_distance')\n box.prop(scn, 'draw_lattice')\n box.prop(scn, 'atom_name')\n box.prop(scn, 'print_data')\n box.prop(scn, 'style_selection_mode')\n box.prop(scn, 'quality_selection_mode')\n box.prop(scn, 'add_camera')\n layout.separator()\n splitrow = layout.split(factor=0.3)\n col = splitrow.column()\n col.operator('object.cdtb_operator', text='Draw Crystal')\n col = splitrow.column()\n col.label(text=user_feedback)\n layout.separator()\n\n @classmethod\n def register(cls):\n print('Registered class: %s ' % cls.bl_label)\n\n @classmethod\n def unregister(cls):\n print('Unregistered class: %s ' % cls.bl_label)\n\n\n<mask token>\n\n\nclass Crysdata:\n\n def __init__(self, F, cb):\n self.start = time.time()\n print('Draw timer started')\n self.name = F\n self.cell = Cell(cb)\n self.atoms = readEl(cb)\n self.pos = readPos(cb)\n c = self.cell\n self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen, c.blen,\n c.clen, c.alpha, c.beta, c.gamma)\n\n def printout(self):\n print(self.name)\n print()\n self.cell.printout()\n print()\n for element in self.pos:\n element.printout()\n print()\n for element in self.atoms:\n element.printout()\n print()\n print('Fractional to cartesian matrix:')\n print(self.ftoc)\n\n def get_fractional_to_cartesian_matrix(self, a, b, c, alpha, beta, gamma):\n \"\"\"\n Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a\n\n !changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates\n\n Return the transformation matrix that converts fractional coordinates to\n cartesian coordinates.\n Parameters\n ----------\n a, b, c : float\n The lengths of the edges.\n alpha, gamma, beta : float\n The angles between the sides.\n angle_in_degrees : bool\n True if alpha, beta and gamma are expressed in degrees.\n Returns\n -------\n r : array_like\n The 3x3 rotation matrix. 
``V_cart = np.dot(r, V_frac)``.\n \"\"\"\n alpha = np.deg2rad(alpha)\n beta = np.deg2rad(beta)\n gamma = np.deg2rad(gamma)\n cosa = np.cos(alpha)\n sina = np.sin(alpha)\n cosb = np.cos(beta)\n sinb = np.sin(beta)\n cosg = np.cos(gamma)\n sing = np.sin(gamma)\n volume = (1.0 - cosa ** 2.0 - cosb ** 2.0 - cosg ** 2.0 + 2.0 *\n cosa * cosb * cosg)\n volume = a * b * c * np.sqrt(volume)\n r = np.zeros((3, 3))\n r[0, 0] = float(a)\n r[0, 1] = float(b * cosg)\n r[0, 2] = float(c * cosb)\n r[1, 0] = float(0)\n r[1, 1] = float(b * sing)\n r[1, 2] = float(c * (cosa - cosb * cosg) / sing)\n r[2, 0] = float(0)\n r[2, 1] = float(0)\n r[2, 2] = float(volume / (a * b * sing))\n return r\n\n def drawCrystal(self):\n if draw_lattice:\n self.drawCell()\n print('Lattice drawn after {:.3f} seconds'.format(time.time() -\n self.start))\n self.drawAtoms()\n print('Atoms drawn after {:.3f} seconds'.format(time.time() - self.\n start))\n if draw_bonds:\n self.drawBonds()\n print('Bonds drawn after {:.3f} seconds'.format(time.time() -\n self.start))\n\n def drawAtoms(self):\n for a in self.atoms:\n a.drawObj(self.ftoc)\n print('Atoms drawn:', len(self.atoms))\n\n def drawCell(self):\n cell_corners = []\n cell_edges = []\n for i in range(2):\n for j in range(2):\n for k in range(2):\n bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,\n location=toCarth(self.ftoc, [i, j, k]))\n activeObject = bpy.context.active_object\n cell_corners.append(activeObject)\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [0, 0, 0\n ]\n for i, j in zip([0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 5, 6], [1, 2, 4, 3, \n 5, 3, 6, 7, 5, 6, 7, 7]):\n cell_edges.append(self.drawLine(cell_corners[i].location,\n cell_corners[j].location))\n for i in cell_corners:\n i.select_set(action='SELECT')\n for i in cell_edges:\n i.select_set(action='SELECT')\n bpy.context.view_layer.objects.active = cell_corners[0]\n bpy.ops.object.join()\n print('Cell box drawn')\n\n def drawLine(self, ac, tc):\n dx = tc[0] - ac[0]\n dy = tc[1] - ac[1]\n dz = tc[2] - ac[2]\n dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)\n bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[\n draw_quality], radius=lattice_size, depth=dist, location=(dx / \n 2 + ac[0], dy / 2 + ac[1], dz / 2 + ac[2]))\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [0, 0, 0]\n phi = math.atan2(dy, dx)\n theta = math.acos(dz / dist)\n bpy.context.object.rotation_euler[1] = theta\n bpy.context.object.rotation_euler[2] = phi\n return activeObject\n\n def drawBonds(self):\n cnt = 0\n bpy.ops.curve.primitive_bezier_circle_add(location=(0, 0, 0),\n radius=bond_radius)\n bpy.context.object.name = 'bez'\n for atom in self.atoms:\n for target in self.atoms:\n if atom != target:\n if 'bond{}-{}'.format(target.elid, atom.elid\n ) in bpy.data.objects:\n continue\n if atom.sym == 'H' and target.sym == 'H':\n continue\n if calcDistance(self.ftoc, atom, target) <= bond_distance:\n self.makeBond(atom, target)\n cnt += 1\n print('Atom bonds drawn:', cnt)\n\n def makeBond(self, atom, target):\n if 'OBJECT' != bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n o1 = bpy.data.objects[atom.elid]\n o2 = bpy.data.objects[target.elid]\n bond = self.hookCurve(o1, o2, bpy.context.scene)\n bpy.context.object.data.bevel_object = bpy.data.objects['bez']\n bpy.context.object.name = 
'bond{}-{}'.format(atom.elid, target.elid)\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [255, 255, 255]\n if 'OBJECT' != bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n\n def hookCurve(self, o1, o2, scn):\n curve = bpy.data.curves.new('link', 'CURVE')\n curve.dimensions = '3D'\n spline = curve.splines.new('BEZIER')\n spline.bezier_points.add(1)\n p0 = spline.bezier_points[0]\n p1 = spline.bezier_points[1]\n p0.handle_right_type = 'VECTOR'\n p1.handle_left_type = 'VECTOR'\n obj = bpy.data.objects.new('link', curve)\n m0 = obj.modifiers.new('alpha', 'HOOK')\n m0.object = o1\n m1 = obj.modifiers.new('beta', 'HOOK')\n m1.object = o2\n bpy.context.collection.objects.link(obj)\n bpy.context.view_layer.objects.active = obj\n bpy.ops.object.mode_set(mode='EDIT')\n p0 = curve.splines[0].bezier_points[0]\n p1 = curve.splines[0].bezier_points[1]\n p0.select_control_point = True\n p1.select_control_point = False\n bpy.ops.object.hook_assign(modifier='alpha')\n p0 = curve.splines[0].bezier_points[0]\n p1 = curve.splines[0].bezier_points[1]\n p1.select_control_point = True\n p0.select_control_point = False\n bpy.ops.object.hook_assign(modifier='beta')\n return obj\n\n\nclass Cell:\n\n def __init__(self, cb):\n self.alen = float(cb['_cell_length_a'])\n self.blen = float(cb['_cell_length_b'])\n self.clen = float(cb['_cell_length_c'])\n self.alpha = float(cb['_cell_angle_alpha'])\n self.beta = float(cb['_cell_angle_beta'])\n self.gamma = float(cb['_cell_angle_gamma'])\n\n def printout(self):\n print(\n 'alen:{:8} \\nblen:{:8} \\nclen:{:8} \\nalpha:{:8} \\nbeta: {:8} \\ngamma:{:8}'\n .format(self.alen, self.blen, self.clen, self.alpha, self.beta,\n self.gamma))\n\n\nclass Atom:\n\n def __init__(self, elid, sym, xpos, ypos, zpos):\n self.elid = elid\n self.sym = sym\n self.xpos = float(xpos)\n self.ypos = float(ypos)\n self.zpos = float(zpos)\n\n def printout(self):\n print('id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}'.format(self.\n elid, self.sym, self.xpos, self.ypos, self.zpos))\n\n def drawObj(self, ftoc):\n size = sizedic[self.sym] * styledic[draw_style][0\n ] + bond_radius * styledic[draw_style][1]\n bpy.ops.mesh.primitive_uv_sphere_add(segments=qualitydic[\n draw_quality], ring_count=qualitydic[draw_quality] / 2, size=\n size, location=toCarth(ftoc, [self.xpos, self.ypos, self.zpos]))\n bpy.context.object.name = self.elid\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n if atom_name:\n bpy.context.object.show_name = True\n if atom_color:\n bpy.context.object.active_material.diffuse_color = colordic[self\n .sym]\n else:\n bpy.context.object.active_material.diffuse_color = [1, 1, 1]\n\n\nclass sympos:\n\n def __init__(self, string):\n self.xsym = string[0].split(',')[0]\n self.ysym = string[0].split(',')[1]\n self.zsym = string[0].split(',')[2]\n\n def printout(self):\n print('x:{:8} y:{:8} z:{:8}'.format(self.xsym, self.ysym, self.zsym))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ScanFileOperator(bpy.types.Operator):\n bl_idname = 'error.scan_file'\n bl_label = 'Scan file for return'\n filepath = bpy.props.StringProperty(subtype='FILE_PATH')\n\n def execute(self, context):\n global file_path\n global user_feedback\n user_feedback = ''\n file_path = self.filepath\n return {'FINISHED'}\n\n def invoke(self, context, event):\n context.window_manager.fileselect_add(self)\n return {'RUNNING_MODAL'}\n\n def register():\n bpy.types.Scene.path_to_file = bpy.props.StringProperty(name='',\n description='Path to CIF file', default='empty')\n\n\nclass Operator(bpy.types.Operator):\n bl_idname = 'object.cdtb_operator'\n bl_label = 'CDTB_operator'\n bl_descriptor = 'Operator for drawing crystal'\n\n def execute(self, context):\n global pars_check\n global user_feedback\n if pars_check:\n user_feedback = 'CiFFile module not installed'\n return {'FINISHED'}\n if file_path == 'Select a file':\n print('No file selected')\n user_feedback = 'No File selected'\n else:\n user_feedback = 'Crystal drawn'\n global draw_bonds\n draw_bonds = context.scene.draw_bonds\n global bond_distance\n bond_distance = context.scene.bond_distance\n global draw_lattice\n draw_lattice = context.scene.draw_lattice\n global atom_name\n atom_name = context.scene.atom_name\n global print_data\n print_data = context.scene.print_data\n global draw_style\n global atom_color\n draw_style = context.scene.style_selection_mode\n if draw_style == 'STICK':\n draw_bonds = True\n atom_color = False\n else:\n atom_color = True\n global draw_quality\n draw_quality = context.scene.quality_selection_mode\n global add_camera\n add_camera = context.scene.add_camera\n drawCrystal(file_path)\n return {'FINISHED'}\n\n @classmethod\n def register(cls):\n print('Registered class: %s ' % cls.bl_label)\n bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(name=\n 'Draw bonds', description='Draw bonds between elements')\n bpy.types.Scene.bond_distance = bpy.props.FloatProperty(name=\n 'Bond distance', description=\n 'Set max distance for bonds to occur', default=2, min=0.0, max=\n 10.0, precision=2)\n bpy.types.Scene.atom_name = bpy.props.BoolProperty(name=\n 'Atom names', description='Display the name of atoms')\n bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(name=\n 'Draw lattice', description='Draw unit cell outline')\n bpy.types.Scene.print_data = bpy.props.BoolProperty(name=\n 'Print data', description='Print crystal data in terminal')\n selection_style = [('SPACE FILLING', 'SPACE FILLING', '', 1), (\n 'BALL AND STICK', 'BALL AND STICK', '', 2), ('STICK', 'STICK',\n '', 3)]\n bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(items\n =selection_style, name='Style')\n selection_qual = [('MIN', 'MIN', '', 1), ('LOW', 'LOW', '', 2), (\n 'MED', 'MED', '', 3), ('HIGH', 'HIGH', '', 4), ('MAX', 'MAX',\n '', 5)]\n bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(items\n =selection_qual, name='Quality', default='MED')\n bpy.types.Scene.add_camera = bpy.props.BoolProperty(name=\n 'Place camera', description=\n 'Place a camera and light to make rendering possible')\n\n @classmethod\n def unregister(cls):\n print('Unregistered class: %s ' % cls.bl_label)\n\n\nclass Panel(bpy.types.Panel):\n bl_idname = 'CDTB_Panel'\n bl_label = 'CDTB_Panel'\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_context = 'objectmode'\n bl_category = 'CDTB'\n\n def draw(self, context):\n scn = context.scene\n layout = self.layout\n layout.label(text='Input file', icon_value=112)\n 
\"\"\"\n for i in range(100):\n layout.label(text = str(i),icon_value =i)\n \"\"\"\n box = layout.box()\n row = box.row()\n splitrow = row.split(factor=0.075)\n left_col = splitrow.column()\n right_col = splitrow.column()\n left_col.operator('error.scan_file', icon_value=108, text='')\n right_col.label(text=file_path.rsplit('\\\\', 2)[-1])\n layout.label(text='Settings', icon_value=117)\n box = layout.box()\n box.prop(scn, 'draw_bonds')\n box.prop(scn, 'bond_distance')\n box.prop(scn, 'draw_lattice')\n box.prop(scn, 'atom_name')\n box.prop(scn, 'print_data')\n box.prop(scn, 'style_selection_mode')\n box.prop(scn, 'quality_selection_mode')\n box.prop(scn, 'add_camera')\n layout.separator()\n splitrow = layout.split(factor=0.3)\n col = splitrow.column()\n col.operator('object.cdtb_operator', text='Draw Crystal')\n col = splitrow.column()\n col.label(text=user_feedback)\n layout.separator()\n\n @classmethod\n def register(cls):\n print('Registered class: %s ' % cls.bl_label)\n\n @classmethod\n def unregister(cls):\n print('Unregistered class: %s ' % cls.bl_label)\n\n\n<mask token>\n\n\ndef unregister():\n bpy.utils.unregister_class(Operator)\n bpy.utils.unregister_class(Panel)\n bpy.utils.unregister_class(ScanFileOperator)\n\n\nclass Crysdata:\n\n def __init__(self, F, cb):\n self.start = time.time()\n print('Draw timer started')\n self.name = F\n self.cell = Cell(cb)\n self.atoms = readEl(cb)\n self.pos = readPos(cb)\n c = self.cell\n self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen, c.blen,\n c.clen, c.alpha, c.beta, c.gamma)\n\n def printout(self):\n print(self.name)\n print()\n self.cell.printout()\n print()\n for element in self.pos:\n element.printout()\n print()\n for element in self.atoms:\n element.printout()\n print()\n print('Fractional to cartesian matrix:')\n print(self.ftoc)\n\n def get_fractional_to_cartesian_matrix(self, a, b, c, alpha, beta, gamma):\n \"\"\"\n Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a\n\n !changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates\n\n Return the transformation matrix that converts fractional coordinates to\n cartesian coordinates.\n Parameters\n ----------\n a, b, c : float\n The lengths of the edges.\n alpha, gamma, beta : float\n The angles between the sides.\n angle_in_degrees : bool\n True if alpha, beta and gamma are expressed in degrees.\n Returns\n -------\n r : array_like\n The 3x3 rotation matrix. 
``V_cart = np.dot(r, V_frac)``.\n \"\"\"\n alpha = np.deg2rad(alpha)\n beta = np.deg2rad(beta)\n gamma = np.deg2rad(gamma)\n cosa = np.cos(alpha)\n sina = np.sin(alpha)\n cosb = np.cos(beta)\n sinb = np.sin(beta)\n cosg = np.cos(gamma)\n sing = np.sin(gamma)\n volume = (1.0 - cosa ** 2.0 - cosb ** 2.0 - cosg ** 2.0 + 2.0 *\n cosa * cosb * cosg)\n volume = a * b * c * np.sqrt(volume)\n r = np.zeros((3, 3))\n r[0, 0] = float(a)\n r[0, 1] = float(b * cosg)\n r[0, 2] = float(c * cosb)\n r[1, 0] = float(0)\n r[1, 1] = float(b * sing)\n r[1, 2] = float(c * (cosa - cosb * cosg) / sing)\n r[2, 0] = float(0)\n r[2, 1] = float(0)\n r[2, 2] = float(volume / (a * b * sing))\n return r\n\n def drawCrystal(self):\n if draw_lattice:\n self.drawCell()\n print('Lattice drawn after {:.3f} seconds'.format(time.time() -\n self.start))\n self.drawAtoms()\n print('Atoms drawn after {:.3f} seconds'.format(time.time() - self.\n start))\n if draw_bonds:\n self.drawBonds()\n print('Bonds drawn after {:.3f} seconds'.format(time.time() -\n self.start))\n\n def drawAtoms(self):\n for a in self.atoms:\n a.drawObj(self.ftoc)\n print('Atoms drawn:', len(self.atoms))\n\n def drawCell(self):\n cell_corners = []\n cell_edges = []\n for i in range(2):\n for j in range(2):\n for k in range(2):\n bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,\n location=toCarth(self.ftoc, [i, j, k]))\n activeObject = bpy.context.active_object\n cell_corners.append(activeObject)\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [0, 0, 0\n ]\n for i, j in zip([0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 5, 6], [1, 2, 4, 3, \n 5, 3, 6, 7, 5, 6, 7, 7]):\n cell_edges.append(self.drawLine(cell_corners[i].location,\n cell_corners[j].location))\n for i in cell_corners:\n i.select_set(action='SELECT')\n for i in cell_edges:\n i.select_set(action='SELECT')\n bpy.context.view_layer.objects.active = cell_corners[0]\n bpy.ops.object.join()\n print('Cell box drawn')\n\n def drawLine(self, ac, tc):\n dx = tc[0] - ac[0]\n dy = tc[1] - ac[1]\n dz = tc[2] - ac[2]\n dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)\n bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[\n draw_quality], radius=lattice_size, depth=dist, location=(dx / \n 2 + ac[0], dy / 2 + ac[1], dz / 2 + ac[2]))\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [0, 0, 0]\n phi = math.atan2(dy, dx)\n theta = math.acos(dz / dist)\n bpy.context.object.rotation_euler[1] = theta\n bpy.context.object.rotation_euler[2] = phi\n return activeObject\n\n def drawBonds(self):\n cnt = 0\n bpy.ops.curve.primitive_bezier_circle_add(location=(0, 0, 0),\n radius=bond_radius)\n bpy.context.object.name = 'bez'\n for atom in self.atoms:\n for target in self.atoms:\n if atom != target:\n if 'bond{}-{}'.format(target.elid, atom.elid\n ) in bpy.data.objects:\n continue\n if atom.sym == 'H' and target.sym == 'H':\n continue\n if calcDistance(self.ftoc, atom, target) <= bond_distance:\n self.makeBond(atom, target)\n cnt += 1\n print('Atom bonds drawn:', cnt)\n\n def makeBond(self, atom, target):\n if 'OBJECT' != bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n o1 = bpy.data.objects[atom.elid]\n o2 = bpy.data.objects[target.elid]\n bond = self.hookCurve(o1, o2, bpy.context.scene)\n bpy.context.object.data.bevel_object = bpy.data.objects['bez']\n bpy.context.object.name = 
'bond{}-{}'.format(atom.elid, target.elid)\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [255, 255, 255]\n if 'OBJECT' != bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n\n def hookCurve(self, o1, o2, scn):\n curve = bpy.data.curves.new('link', 'CURVE')\n curve.dimensions = '3D'\n spline = curve.splines.new('BEZIER')\n spline.bezier_points.add(1)\n p0 = spline.bezier_points[0]\n p1 = spline.bezier_points[1]\n p0.handle_right_type = 'VECTOR'\n p1.handle_left_type = 'VECTOR'\n obj = bpy.data.objects.new('link', curve)\n m0 = obj.modifiers.new('alpha', 'HOOK')\n m0.object = o1\n m1 = obj.modifiers.new('beta', 'HOOK')\n m1.object = o2\n bpy.context.collection.objects.link(obj)\n bpy.context.view_layer.objects.active = obj\n bpy.ops.object.mode_set(mode='EDIT')\n p0 = curve.splines[0].bezier_points[0]\n p1 = curve.splines[0].bezier_points[1]\n p0.select_control_point = True\n p1.select_control_point = False\n bpy.ops.object.hook_assign(modifier='alpha')\n p0 = curve.splines[0].bezier_points[0]\n p1 = curve.splines[0].bezier_points[1]\n p1.select_control_point = True\n p0.select_control_point = False\n bpy.ops.object.hook_assign(modifier='beta')\n return obj\n\n\nclass Cell:\n\n def __init__(self, cb):\n self.alen = float(cb['_cell_length_a'])\n self.blen = float(cb['_cell_length_b'])\n self.clen = float(cb['_cell_length_c'])\n self.alpha = float(cb['_cell_angle_alpha'])\n self.beta = float(cb['_cell_angle_beta'])\n self.gamma = float(cb['_cell_angle_gamma'])\n\n def printout(self):\n print(\n 'alen:{:8} \\nblen:{:8} \\nclen:{:8} \\nalpha:{:8} \\nbeta: {:8} \\ngamma:{:8}'\n .format(self.alen, self.blen, self.clen, self.alpha, self.beta,\n self.gamma))\n\n\nclass Atom:\n\n def __init__(self, elid, sym, xpos, ypos, zpos):\n self.elid = elid\n self.sym = sym\n self.xpos = float(xpos)\n self.ypos = float(ypos)\n self.zpos = float(zpos)\n\n def printout(self):\n print('id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}'.format(self.\n elid, self.sym, self.xpos, self.ypos, self.zpos))\n\n def drawObj(self, ftoc):\n size = sizedic[self.sym] * styledic[draw_style][0\n ] + bond_radius * styledic[draw_style][1]\n bpy.ops.mesh.primitive_uv_sphere_add(segments=qualitydic[\n draw_quality], ring_count=qualitydic[draw_quality] / 2, size=\n size, location=toCarth(ftoc, [self.xpos, self.ypos, self.zpos]))\n bpy.context.object.name = self.elid\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n if atom_name:\n bpy.context.object.show_name = True\n if atom_color:\n bpy.context.object.active_material.diffuse_color = colordic[self\n .sym]\n else:\n bpy.context.object.active_material.diffuse_color = [1, 1, 1]\n\n\nclass sympos:\n\n def __init__(self, string):\n self.xsym = string[0].split(',')[0]\n self.ysym = string[0].split(',')[1]\n self.zsym = string[0].split(',')[2]\n\n def printout(self):\n print('x:{:8} y:{:8} z:{:8}'.format(self.xsym, self.ysym, self.zsym))\n\n\ndef readEl(cb):\n elements = []\n previd = []\n idcnt = []\n lb = cb.GetLoop('_atom_site_label')\n for el in lb:\n flag = False\n for i in range(len(previd)):\n if el[0] == previd[i]:\n flag = True\n break\n if flag:\n idcnt[i] += 1\n else:\n previd.append(el[0])\n idcnt.append(0)\n i = len(idcnt) - 1\n id_t = '{}.{}'.format(el[0], idcnt[i])\n elements.append(Atom(id_t, el[1], el[2], el[3], el[4]))\n return 
elements\n\n\ndef readPos(cb):\n positions = []\n lb = cb.GetLoop('_symmetry_equiv_pos_as_xyz')\n for el in lb:\n positions.append(sympos(el))\n return positions\n\n\n<mask token>\n\n\ndef addCamera(x, y, z):\n bpy.ops.object.camera_add(view_align=True, enter_editmode=False,\n location=(5 * x, 5 * y, 5 * z))\n print('camera added')\n bpy.ops.object.light_add(type='SUN', view_align=False, location=(0, 0, 0))\n obj_camera = bpy.data.objects['Camera']\n look_at(obj_camera, Vector([0, 0, z / 4]))\n obj_camera.data.type = 'ORTHO'\n obj_camera.data.ortho_scale = x + y + z\n\n\ndef clearWS():\n if 'OBJECT' != bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete(use_global=False)\n for i in bpy.data.curves:\n bpy.data.curves.remove(i)\n for m in bpy.data.materials:\n bpy.data.materials.remove(m)\n for c in bpy.data.cameras:\n bpy.data.cameras.remove(c)\n print('Workspace cleared.')\n return\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ScanFileOperator(bpy.types.Operator):\n bl_idname = 'error.scan_file'\n bl_label = 'Scan file for return'\n filepath = bpy.props.StringProperty(subtype='FILE_PATH')\n\n def execute(self, context):\n global file_path\n global user_feedback\n user_feedback = ''\n file_path = self.filepath\n return {'FINISHED'}\n\n def invoke(self, context, event):\n context.window_manager.fileselect_add(self)\n return {'RUNNING_MODAL'}\n\n def register():\n bpy.types.Scene.path_to_file = bpy.props.StringProperty(name='',\n description='Path to CIF file', default='empty')\n\n\nclass Operator(bpy.types.Operator):\n bl_idname = 'object.cdtb_operator'\n bl_label = 'CDTB_operator'\n bl_descriptor = 'Operator for drawing crystal'\n\n def execute(self, context):\n global pars_check\n global user_feedback\n if pars_check:\n user_feedback = 'CiFFile module not installed'\n return {'FINISHED'}\n if file_path == 'Select a file':\n print('No file selected')\n user_feedback = 'No File selected'\n else:\n user_feedback = 'Crystal drawn'\n global draw_bonds\n draw_bonds = context.scene.draw_bonds\n global bond_distance\n bond_distance = context.scene.bond_distance\n global draw_lattice\n draw_lattice = context.scene.draw_lattice\n global atom_name\n atom_name = context.scene.atom_name\n global print_data\n print_data = context.scene.print_data\n global draw_style\n global atom_color\n draw_style = context.scene.style_selection_mode\n if draw_style == 'STICK':\n draw_bonds = True\n atom_color = False\n else:\n atom_color = True\n global draw_quality\n draw_quality = context.scene.quality_selection_mode\n global add_camera\n add_camera = context.scene.add_camera\n drawCrystal(file_path)\n return {'FINISHED'}\n\n @classmethod\n def register(cls):\n print('Registered class: %s ' % cls.bl_label)\n bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(name=\n 'Draw bonds', description='Draw bonds between elements')\n bpy.types.Scene.bond_distance = bpy.props.FloatProperty(name=\n 'Bond distance', description=\n 'Set max distance for bonds to occur', default=2, min=0.0, max=\n 10.0, precision=2)\n bpy.types.Scene.atom_name = bpy.props.BoolProperty(name=\n 'Atom names', description='Display the name of atoms')\n bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(name=\n 'Draw lattice', description='Draw unit cell outline')\n bpy.types.Scene.print_data = bpy.props.BoolProperty(name=\n 'Print data', description='Print crystal data in terminal')\n selection_style = [('SPACE FILLING', 'SPACE FILLING', '', 1), (\n 'BALL AND STICK', 'BALL AND STICK', '', 2), ('STICK', 'STICK',\n '', 3)]\n bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(items\n =selection_style, name='Style')\n selection_qual = [('MIN', 'MIN', '', 1), ('LOW', 'LOW', '', 2), (\n 'MED', 'MED', '', 3), ('HIGH', 'HIGH', '', 4), ('MAX', 'MAX',\n '', 5)]\n bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(items\n =selection_qual, name='Quality', default='MED')\n bpy.types.Scene.add_camera = bpy.props.BoolProperty(name=\n 'Place camera', description=\n 'Place a camera and light to make rendering possible')\n\n @classmethod\n def unregister(cls):\n print('Unregistered class: %s ' % cls.bl_label)\n\n\nclass Panel(bpy.types.Panel):\n bl_idname = 'CDTB_Panel'\n bl_label = 'CDTB_Panel'\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_context = 'objectmode'\n bl_category = 'CDTB'\n\n def draw(self, context):\n scn = context.scene\n layout = self.layout\n layout.label(text='Input file', icon_value=112)\n 
\"\"\"\n for i in range(100):\n layout.label(text = str(i),icon_value =i)\n \"\"\"\n box = layout.box()\n row = box.row()\n splitrow = row.split(factor=0.075)\n left_col = splitrow.column()\n right_col = splitrow.column()\n left_col.operator('error.scan_file', icon_value=108, text='')\n right_col.label(text=file_path.rsplit('\\\\', 2)[-1])\n layout.label(text='Settings', icon_value=117)\n box = layout.box()\n box.prop(scn, 'draw_bonds')\n box.prop(scn, 'bond_distance')\n box.prop(scn, 'draw_lattice')\n box.prop(scn, 'atom_name')\n box.prop(scn, 'print_data')\n box.prop(scn, 'style_selection_mode')\n box.prop(scn, 'quality_selection_mode')\n box.prop(scn, 'add_camera')\n layout.separator()\n splitrow = layout.split(factor=0.3)\n col = splitrow.column()\n col.operator('object.cdtb_operator', text='Draw Crystal')\n col = splitrow.column()\n col.label(text=user_feedback)\n layout.separator()\n\n @classmethod\n def register(cls):\n print('Registered class: %s ' % cls.bl_label)\n\n @classmethod\n def unregister(cls):\n print('Unregistered class: %s ' % cls.bl_label)\n\n\n<mask token>\n\n\ndef unregister():\n bpy.utils.unregister_class(Operator)\n bpy.utils.unregister_class(Panel)\n bpy.utils.unregister_class(ScanFileOperator)\n\n\nclass Crysdata:\n\n def __init__(self, F, cb):\n self.start = time.time()\n print('Draw timer started')\n self.name = F\n self.cell = Cell(cb)\n self.atoms = readEl(cb)\n self.pos = readPos(cb)\n c = self.cell\n self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen, c.blen,\n c.clen, c.alpha, c.beta, c.gamma)\n\n def printout(self):\n print(self.name)\n print()\n self.cell.printout()\n print()\n for element in self.pos:\n element.printout()\n print()\n for element in self.atoms:\n element.printout()\n print()\n print('Fractional to cartesian matrix:')\n print(self.ftoc)\n\n def get_fractional_to_cartesian_matrix(self, a, b, c, alpha, beta, gamma):\n \"\"\"\n Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a\n\n !changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates\n\n Return the transformation matrix that converts fractional coordinates to\n cartesian coordinates.\n Parameters\n ----------\n a, b, c : float\n The lengths of the edges.\n alpha, gamma, beta : float\n The angles between the sides.\n angle_in_degrees : bool\n True if alpha, beta and gamma are expressed in degrees.\n Returns\n -------\n r : array_like\n The 3x3 rotation matrix. 
``V_cart = np.dot(r, V_frac)``.\n \"\"\"\n alpha = np.deg2rad(alpha)\n beta = np.deg2rad(beta)\n gamma = np.deg2rad(gamma)\n cosa = np.cos(alpha)\n sina = np.sin(alpha)\n cosb = np.cos(beta)\n sinb = np.sin(beta)\n cosg = np.cos(gamma)\n sing = np.sin(gamma)\n volume = (1.0 - cosa ** 2.0 - cosb ** 2.0 - cosg ** 2.0 + 2.0 *\n cosa * cosb * cosg)\n volume = a * b * c * np.sqrt(volume)\n r = np.zeros((3, 3))\n r[0, 0] = float(a)\n r[0, 1] = float(b * cosg)\n r[0, 2] = float(c * cosb)\n r[1, 0] = float(0)\n r[1, 1] = float(b * sing)\n r[1, 2] = float(c * (cosa - cosb * cosg) / sing)\n r[2, 0] = float(0)\n r[2, 1] = float(0)\n r[2, 2] = float(volume / (a * b * sing))\n return r\n\n def drawCrystal(self):\n if draw_lattice:\n self.drawCell()\n print('Lattice drawn after {:.3f} seconds'.format(time.time() -\n self.start))\n self.drawAtoms()\n print('Atoms drawn after {:.3f} seconds'.format(time.time() - self.\n start))\n if draw_bonds:\n self.drawBonds()\n print('Bonds drawn after {:.3f} seconds'.format(time.time() -\n self.start))\n\n def drawAtoms(self):\n for a in self.atoms:\n a.drawObj(self.ftoc)\n print('Atoms drawn:', len(self.atoms))\n\n def drawCell(self):\n cell_corners = []\n cell_edges = []\n for i in range(2):\n for j in range(2):\n for k in range(2):\n bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,\n location=toCarth(self.ftoc, [i, j, k]))\n activeObject = bpy.context.active_object\n cell_corners.append(activeObject)\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [0, 0, 0\n ]\n for i, j in zip([0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 5, 6], [1, 2, 4, 3, \n 5, 3, 6, 7, 5, 6, 7, 7]):\n cell_edges.append(self.drawLine(cell_corners[i].location,\n cell_corners[j].location))\n for i in cell_corners:\n i.select_set(action='SELECT')\n for i in cell_edges:\n i.select_set(action='SELECT')\n bpy.context.view_layer.objects.active = cell_corners[0]\n bpy.ops.object.join()\n print('Cell box drawn')\n\n def drawLine(self, ac, tc):\n dx = tc[0] - ac[0]\n dy = tc[1] - ac[1]\n dz = tc[2] - ac[2]\n dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)\n bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[\n draw_quality], radius=lattice_size, depth=dist, location=(dx / \n 2 + ac[0], dy / 2 + ac[1], dz / 2 + ac[2]))\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [0, 0, 0]\n phi = math.atan2(dy, dx)\n theta = math.acos(dz / dist)\n bpy.context.object.rotation_euler[1] = theta\n bpy.context.object.rotation_euler[2] = phi\n return activeObject\n\n def drawBonds(self):\n cnt = 0\n bpy.ops.curve.primitive_bezier_circle_add(location=(0, 0, 0),\n radius=bond_radius)\n bpy.context.object.name = 'bez'\n for atom in self.atoms:\n for target in self.atoms:\n if atom != target:\n if 'bond{}-{}'.format(target.elid, atom.elid\n ) in bpy.data.objects:\n continue\n if atom.sym == 'H' and target.sym == 'H':\n continue\n if calcDistance(self.ftoc, atom, target) <= bond_distance:\n self.makeBond(atom, target)\n cnt += 1\n print('Atom bonds drawn:', cnt)\n\n def makeBond(self, atom, target):\n if 'OBJECT' != bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n o1 = bpy.data.objects[atom.elid]\n o2 = bpy.data.objects[target.elid]\n bond = self.hookCurve(o1, o2, bpy.context.scene)\n bpy.context.object.data.bevel_object = bpy.data.objects['bez']\n bpy.context.object.name = 
'bond{}-{}'.format(atom.elid, target.elid)\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n bpy.context.object.active_material.diffuse_color = [255, 255, 255]\n if 'OBJECT' != bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n\n def hookCurve(self, o1, o2, scn):\n curve = bpy.data.curves.new('link', 'CURVE')\n curve.dimensions = '3D'\n spline = curve.splines.new('BEZIER')\n spline.bezier_points.add(1)\n p0 = spline.bezier_points[0]\n p1 = spline.bezier_points[1]\n p0.handle_right_type = 'VECTOR'\n p1.handle_left_type = 'VECTOR'\n obj = bpy.data.objects.new('link', curve)\n m0 = obj.modifiers.new('alpha', 'HOOK')\n m0.object = o1\n m1 = obj.modifiers.new('beta', 'HOOK')\n m1.object = o2\n bpy.context.collection.objects.link(obj)\n bpy.context.view_layer.objects.active = obj\n bpy.ops.object.mode_set(mode='EDIT')\n p0 = curve.splines[0].bezier_points[0]\n p1 = curve.splines[0].bezier_points[1]\n p0.select_control_point = True\n p1.select_control_point = False\n bpy.ops.object.hook_assign(modifier='alpha')\n p0 = curve.splines[0].bezier_points[0]\n p1 = curve.splines[0].bezier_points[1]\n p1.select_control_point = True\n p0.select_control_point = False\n bpy.ops.object.hook_assign(modifier='beta')\n return obj\n\n\nclass Cell:\n\n def __init__(self, cb):\n self.alen = float(cb['_cell_length_a'])\n self.blen = float(cb['_cell_length_b'])\n self.clen = float(cb['_cell_length_c'])\n self.alpha = float(cb['_cell_angle_alpha'])\n self.beta = float(cb['_cell_angle_beta'])\n self.gamma = float(cb['_cell_angle_gamma'])\n\n def printout(self):\n print(\n 'alen:{:8} \\nblen:{:8} \\nclen:{:8} \\nalpha:{:8} \\nbeta: {:8} \\ngamma:{:8}'\n .format(self.alen, self.blen, self.clen, self.alpha, self.beta,\n self.gamma))\n\n\nclass Atom:\n\n def __init__(self, elid, sym, xpos, ypos, zpos):\n self.elid = elid\n self.sym = sym\n self.xpos = float(xpos)\n self.ypos = float(ypos)\n self.zpos = float(zpos)\n\n def printout(self):\n print('id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}'.format(self.\n elid, self.sym, self.xpos, self.ypos, self.zpos))\n\n def drawObj(self, ftoc):\n size = sizedic[self.sym] * styledic[draw_style][0\n ] + bond_radius * styledic[draw_style][1]\n bpy.ops.mesh.primitive_uv_sphere_add(segments=qualitydic[\n draw_quality], ring_count=qualitydic[draw_quality] / 2, size=\n size, location=toCarth(ftoc, [self.xpos, self.ypos, self.zpos]))\n bpy.context.object.name = self.elid\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name='MaterialName')\n activeObject.data.materials.append(mat)\n if atom_name:\n bpy.context.object.show_name = True\n if atom_color:\n bpy.context.object.active_material.diffuse_color = colordic[self\n .sym]\n else:\n bpy.context.object.active_material.diffuse_color = [1, 1, 1]\n\n\nclass sympos:\n\n def __init__(self, string):\n self.xsym = string[0].split(',')[0]\n self.ysym = string[0].split(',')[1]\n self.zsym = string[0].split(',')[2]\n\n def printout(self):\n print('x:{:8} y:{:8} z:{:8}'.format(self.xsym, self.ysym, self.zsym))\n\n\ndef readEl(cb):\n elements = []\n previd = []\n idcnt = []\n lb = cb.GetLoop('_atom_site_label')\n for el in lb:\n flag = False\n for i in range(len(previd)):\n if el[0] == previd[i]:\n flag = True\n break\n if flag:\n idcnt[i] += 1\n else:\n previd.append(el[0])\n idcnt.append(0)\n i = len(idcnt) - 1\n id_t = '{}.{}'.format(el[0], idcnt[i])\n elements.append(Atom(id_t, el[1], el[2], el[3], el[4]))\n return 
elements\n\n\ndef readPos(cb):\n positions = []\n lb = cb.GetLoop('_symmetry_equiv_pos_as_xyz')\n for el in lb:\n positions.append(sympos(el))\n return positions\n\n\n<mask token>\n\n\ndef look_at(obj_camera, point):\n loc_camera = obj_camera.matrix_world.to_translation()\n direction = point - loc_camera\n rot_quat = direction.to_track_quat('-Z', 'Y')\n obj_camera.rotation_euler = rot_quat.to_euler()\n\n\ndef addCamera(x, y, z):\n bpy.ops.object.camera_add(view_align=True, enter_editmode=False,\n location=(5 * x, 5 * y, 5 * z))\n print('camera added')\n bpy.ops.object.light_add(type='SUN', view_align=False, location=(0, 0, 0))\n obj_camera = bpy.data.objects['Camera']\n look_at(obj_camera, Vector([0, 0, z / 4]))\n obj_camera.data.type = 'ORTHO'\n obj_camera.data.ortho_scale = x + y + z\n\n\ndef clearWS():\n if 'OBJECT' != bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete(use_global=False)\n for i in bpy.data.curves:\n bpy.data.curves.remove(i)\n for m in bpy.data.materials:\n bpy.data.materials.remove(m)\n for c in bpy.data.cameras:\n bpy.data.cameras.remove(c)\n print('Workspace cleared.')\n return\n\n\n<mask token>\n",
"step-5": "# -------------------------------------------\n# MODULES\n# -------------------------------------------\nimport sys\nimport platform\nif(platform.system()== \"Windows\"):\n\tdir_sep = \"\\\\\"\nelse:\n\tdir_sep = \"/\"\nimport time\nimport os\nimport numpy as np\nimport subprocess\nimport math\nfrom mathutils import Vector\ntry:\n from CifFile import CifFile\n pars_check = False\nexcept:\n print(\"PyCIFRW not installed, try: pip install PyCifRW\")\n pars_check = True\ntry:\n import bpy\n Blender_env = True\nexcept:\n print(\"Not in blender environment.\")\n\n# -------------------------------------------\n# VARIABLES\n# -------------------------------------------\n\n# global variables\nfile_path = \"Select a file\" # path to CIF-file\ndraw_bonds = False # draws bonds between atoms\ndraw_style = \"SPACE FILLING\" # sets draw style\ndraw_quality = \"MED\" # sets key for qualitydic\ndraw_lattice = False # draws unit cell outline\natom_name = False # displays names of atoms\nbond_distance = 2 # set the max distance between bound atoms\nlattice_size = 0.03 # sets size of lattice borders\nbond_radius = 0.05 # radius of bond\nadd_camera\t =\tTrue\t\t\t# render final image\natom_color\t\t=\tTrue\t\t\t# draw atoms in color\nuser_feedback = \"\" # feedback for the user\nprint_data = True\n\n\n# dictionaries\n# sets detail of spheres\nstyledic = {\n \"SPACE FILLING\" : [1,0],\n \"BALL AND STICK\" : [0.5,0],\n \"STICK\" : [0,1]\n }\n\n# sets detail of spheres\nqualitydic = {\n \"MIN\" : 8,\n \"LOW\" : 16,\n \"MED\" : 32,\n \"HIGH\" : 64,\n \"MAX\" : 128\n }\n\n'''\nUncomment this when no external dictionaries are found\n# dictionary which couples atoms to a color\ncolordic = {\n \"O\" : [1,0,0],\n \"Si\" : [0.25,0.25,1],\n \"Fe\" : [1,0.2,0.2],\n }\n\n# dictionary which couples atoms to a specific size\nsizedic = {\n \"O\" : 0.3,\n \"Si\" : 0.6,\n \"Fe\" : 1.4,\n }\n'''\n# Read in dictionaries from external files\n\n\n\npath = os.path.dirname(os.path.realpath(__file__))\n# dictionary which couples atoms to a color\n# Color scheme, in RGB percentages, following the CPK convention was extracted from https://en.wikipedia.org/wiki/CPK_coloring#Typical_assignments\n# data can be changed by modifying the values in colordic.txt\nwith open(path+dir_sep+'colordic.txt','r') as inf:\n colordic = eval(inf.read())\n\n# dictionary which couples atoms to a specific size\n# Atom data, in Ångström, was extracted from https://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page)\n# data can be changed by modifying the values in sizedic.txt\nwith open(path+dir_sep+'sizedic.txt','r') as inf:\n sizedic = eval(inf.read())\n\n\n# ----------------------------------------------\n# BLENDER ADD-ON\n# ----------------------------------------------\n\n# add-on info\nbl_info = {\n \"name\": \"Crystallographic Drawing Tool for Blender\",\n \"description\": \"Add-on for drawing crystals from CIF-files.\",\n \"author\": \"Jarrit Boons\",\n \"blender\": (2, 80,0),\n \"location\": \"View3D\",\n \"category\": \"Crystallography in Blender\"\n}\n\n\n# Operator to open the file browser and select a file\nclass ScanFileOperator(bpy.types.Operator):\n\n bl_idname = \"error.scan_file\"\n bl_label = \"Scan file for return\"\n filepath = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n\n def execute(self, context):\n\n global file_path\n global user_feedback\n user_feedback = \"\"\n file_path = self.filepath\n return {'FINISHED'}\n\n\n def invoke(self, context, event):\n\n context.window_manager.fileselect_add(self)\n return 
{'RUNNING_MODAL'}\n\n\n def register():\n\n bpy.types.Scene.path_to_file = bpy.props.StringProperty(\n name=\"\",\n description=\"Path to CIF file\",\n default = \"empty\"\n )\n\n# Operator to hold CDTB-data and program execution\nclass Operator(bpy.types.Operator):\n\n bl_idname = \"object.cdtb_operator\"\n bl_label = \"CDTB_operator\"\n bl_descriptor = \"Operator for drawing crystal\"\n\n # Runs the whole program\n def execute(self, context):\n global pars_check\n global user_feedback\n\n if(pars_check):\n user_feedback = \"CiFFile module not installed\"\n return {'FINISHED'}\n\n if(file_path == \"Select a file\"):\n print(\"No file selected\")\n user_feedback = \"No File selected\"\n else:\n user_feedback = \"Crystal drawn\"\n\n global draw_bonds\n draw_bonds = context.scene.draw_bonds\n\n global bond_distance\n bond_distance = context.scene.bond_distance\n\n global draw_lattice\n draw_lattice = context.scene.draw_lattice\n\n global atom_name\n atom_name = context.scene.atom_name\n\n global print_data\n print_data = context.scene.print_data\n\n global draw_style\n global atom_color\n draw_style = context.scene.style_selection_mode\n if(draw_style==\"STICK\"):\n draw_bonds = True\n atom_color = False\n else:\n atom_color = True\n\n global draw_quality\n draw_quality = context.scene.quality_selection_mode\n global add_camera\n add_camera = context.scene.add_camera\n drawCrystal(file_path)\n\n return {'FINISHED'}\n\n\n @classmethod\n def register(cls):\n\n print(\"Registered class: %s \" % cls.bl_label)\n bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(\n name=\"Draw bonds\",\n description=\"Draw bonds between elements\"\n )\n\n bpy.types.Scene.bond_distance = bpy.props.FloatProperty(\n name=\"Bond distance\",\n description=\"Set max distance for bonds to occur\",\n default=2,\n min=0.0,\n max=10.0,\n precision=2\n )\n\n bpy.types.Scene.atom_name = bpy.props.BoolProperty(\n name=\"Atom names\",\n description=\"Display the name of atoms\"\n )\n\n bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(\n name=\"Draw lattice\",\n description=\"Draw unit cell outline\"\n )\n\n bpy.types.Scene.print_data = bpy.props.BoolProperty(\n name=\"Print data\",\n description=\"Print crystal data in terminal\"\n )\n\n # Dropdown menu for drawing style\n selection_style = [\n (\"SPACE FILLING\", \"SPACE FILLING\", \"\", 1),\n (\"BALL AND STICK\", \"BALL AND STICK\", \"\", 2),\n (\"STICK\", \"STICK\", \"\", 3),\n ]\n\n bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(\n items=selection_style,\n name=\"Style\"\n )\n\n # Dropdown menu for drawing quality\n selection_qual = [\n (\"MIN\", \"MIN\", \"\", 1),\n (\"LOW\", \"LOW\", \"\", 2),\n (\"MED\", \"MED\", \"\", 3),\n (\"HIGH\", \"HIGH\", \"\", 4),\n (\"MAX\", \"MAX\", \"\", 5)\n ]\n\n bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(\n items=selection_qual,\n name=\"Quality\",\n default=\"MED\"\n )\n bpy.types.Scene.add_camera = bpy.props.BoolProperty(\n name=\"Place camera\",\n description=\"Place a camera and light to make rendering possible\"\n )\n\n\n @classmethod\n def unregister(cls):\n\n print(\"Unregistered class: %s \" % cls.bl_label)\n\n# Panel to display add-on in Blender environment\nclass Panel(bpy.types.Panel):\n\n bl_idname = \"CDTB_Panel\"\n bl_label = \"CDTB_Panel\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"TOOLS\"\n bl_context = \"objectmode\"\n bl_category = \"CDTB\"\n\n def draw(self,context):\n\n scn = context.scene\n layout = self.layout\n layout.label(text = 'Input file',icon_value=112)\n\n 
'''\n for i in range(100):\n layout.label(text = str(i),icon_value =i)\n '''\n\n box = layout.box()\n row = box.row()\n splitrow = row.split(factor=0.075)\n left_col = splitrow.column()\n right_col = splitrow.column()\n left_col.operator('error.scan_file',icon_value=108,text=\"\")\n right_col.label(text=file_path.rsplit('\\\\', 2)[-1])\n layout.label(text = 'Settings',icon_value =117)\n box = layout.box()\n box.prop(scn,'draw_bonds')\n box.prop(scn,'bond_distance')\n box.prop(scn,'draw_lattice')\n box.prop(scn, 'atom_name')\n box.prop(scn,'print_data')\n box.prop(scn, 'style_selection_mode')\n box.prop(scn, 'quality_selection_mode')\n box.prop(scn, 'add_camera')\n layout.separator()\n splitrow = layout.split(factor=0.3)\n col = splitrow.column()\n col.operator('object.cdtb_operator',text=\"Draw Crystal\")\n col = splitrow.column()\n col.label(text=user_feedback)\n layout.separator()\n\n\n @classmethod\n def register(cls):\n\n print(\"Registered class: %s \" % cls.bl_label)\n\n\n @classmethod\n def unregister(cls):\n\n print(\"Unregistered class: %s \" % cls.bl_label)\n\n\ndef register():\n\n bpy.utils.register_class(Operator)\n bpy.utils.register_class(ScanFileOperator)\n bpy.utils.register_class(Panel)\n\n\ndef unregister():\n\n bpy.utils.unregister_class(Operator)\n bpy.utils.unregister_class(Panel)\n bpy.utils.unregister_class(ScanFileOperator)\n\n\n#----------------------------------------------\n# MAIN PROGRAM\n#----------------------------------------------\n\n\nclass Crysdata():\n\n def __init__(self,F,cb):\n\n self.start = time.time()\n print(\"Draw timer started\")\n self.name = F\n self.cell = Cell(cb)\n self.atoms = readEl(cb)\n self.pos = readPos(cb)\n c = self.cell\n self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)\n\n\n def printout(self):\n\n print(self.name)\n print()\n self.cell.printout()\n print()\n for element in self.pos:\n element.printout()\n print()\n for element in self.atoms:\n element.printout()\n print()\n print(\"Fractional to cartesian matrix:\")\n print(self.ftoc)\n\n\n def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):\n\n \"\"\"\n Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a\n\n !changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates\n\n Return the transformation matrix that converts fractional coordinates to\n cartesian coordinates.\n Parameters\n ----------\n a, b, c : float\n The lengths of the edges.\n alpha, gamma, beta : float\n The angles between the sides.\n angle_in_degrees : bool\n True if alpha, beta and gamma are expressed in degrees.\n Returns\n -------\n r : array_like\n The 3x3 rotation matrix. 
``V_cart = np.dot(r, V_frac)``.\n \"\"\"\n\n alpha = np.deg2rad(alpha)\n beta = np.deg2rad(beta)\n gamma = np.deg2rad(gamma)\n cosa = np.cos(alpha)\n sina = np.sin(alpha)\n cosb = np.cos(beta)\n sinb = np.sin(beta)\n cosg = np.cos(gamma)\n sing = np.sin(gamma)\n volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg\n volume = a*b*c*np.sqrt(volume)\n r = np.zeros((3, 3))\n r[0, 0] = float(a)\n r[0, 1] = float(b * cosg)\n r[0, 2] = float(c * cosb)\n r[1, 0] = float(0)\n r[1, 1] = float(b * sing)\n r[1, 2] = float(c * (cosa - cosb * cosg) / sing)\n r[2, 0] = float(0)\n r[2, 1] = float(0)\n r[2, 2] = float(volume / (a*b*sing))\n return r\n\n\n def drawCrystal(self):\n\n if draw_lattice:\n self.drawCell()\n print(\"Lattice drawn after {:.3f} seconds\".format((time.time()-self.start)))\n self.drawAtoms()\n print(\"Atoms drawn after {:.3f} seconds\".format((time.time()-self.start)))\n if(draw_bonds):\n self.drawBonds()\n print(\"Bonds drawn after {:.3f} seconds\".format((time.time()-self.start)))\n\n\n def drawAtoms(self):\n\n for a in self.atoms:\n a.drawObj(self.ftoc)\n print(\"Atoms drawn:\",len(self.atoms))\n\n\n def drawCell(self):\n\n cell_corners=[]\n cell_edges=[]\n # calculate and draw corners\n for i in range(2):\n for j in range(2):\n for k in range(2):\n bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))\n activeObject = bpy.context.active_object # Set active object to variable\n cell_corners.append(activeObject)\n mat = bpy.data.materials.new(name=\"MaterialName\") # set new material to variable\n activeObject.data.materials.append(mat) # add the material to the object\n bpy.context.object.active_material.diffuse_color = [0,0,0] # change color\n # draw lines\n for i,j in zip([0,0,0,1,1,2,2,3,4,4,5,6],[1,2,4,3,5,3,6,7,5,6,7,7]):\n cell_edges.append(self.drawLine(cell_corners[i].location,cell_corners[j].location))\n # select all line and corners\n for i in cell_corners:\n i.select_set(action=\"SELECT\")\n for i in cell_edges:\n i.select_set(action=\"SELECT\")\n # set corner in origin as active and join meshes as one object\n bpy.context.view_layer.objects.active = cell_corners[0]\n bpy.ops.object.join()\n\n print(\"Cell box drawn\")\n\n\n def drawLine(self,ac,tc):\n\n dx = tc[0] - ac[0]\n dy = tc[1] - ac[1]\n dz = tc[2] - ac[2]\n dist = np.sqrt(dx**2 + dy**2 + dz**2)\n bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[draw_quality],radius=lattice_size,depth = dist,location = (dx/2 + ac[0], dy/2 + ac[1], dz/2 + ac[2]))\n activeObject = bpy.context.active_object\n mat = bpy.data.materials.new(name=\"MaterialName\") # set new material to variable\n activeObject.data.materials.append(mat) # add the material to the object\n bpy.context.object.active_material.diffuse_color = [0,0,0] # change color\n\n phi = math.atan2(dy, dx)\n theta = math.acos(dz/dist)\n\n bpy.context.object.rotation_euler[1] = theta\n bpy.context.object.rotation_euler[2] = phi\n return activeObject\n\n\n def drawBonds(self):\n\n cnt = 0\n bpy.ops.curve.primitive_bezier_circle_add(location=(0,0,0),radius = bond_radius)\n bpy.context.object.name = 'bez'\n for atom in self.atoms:\n for target in self.atoms:\n if atom != target:\n if(\"bond{}-{}\".format(target.elid,atom.elid)in bpy.data.objects):\n continue\n if(atom.sym == 'H' and target.sym == 'H'):\n continue\n if calcDistance(self.ftoc,atom,target) <= bond_distance:\n self.makeBond(atom,target)\n cnt += 1\n print(\"Atom bonds drawn:\",cnt)\n\n\n # This function hooks the bond to the atoms\n def 
makeBond(self,atom,target):\n\n if 'OBJECT'!=bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n o1 = bpy.data.objects[atom.elid]\n o2 = bpy.data.objects[target.elid]\n bond = self.hookCurve(o1,o2, bpy.context.scene)\n bpy.context.object.data.bevel_object = bpy.data.objects[\"bez\"]\n bpy.context.object.name = \"bond{}-{}\".format(atom.elid,target.elid)\n activeObject = bpy.context.active_object # Set active object to variable\n mat = bpy.data.materials.new(name=\"MaterialName\") # set new material to variable\n activeObject.data.materials.append(mat) # add the material to the object\n bpy.context.object.active_material.diffuse_color = [255,255,255] # change color\n if 'OBJECT'!=bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n\n\n def hookCurve(self,o1, o2, scn):\n\n curve = bpy.data.curves.new(\"link\", 'CURVE')\n curve.dimensions = '3D'\n spline = curve.splines.new('BEZIER')\n\n spline.bezier_points.add(1)\n p0 = spline.bezier_points[0]\n p1 = spline.bezier_points[1]\n # p0.co = o1.location\n p0.handle_right_type = 'VECTOR'\n # p1.co = o2.location\n p1.handle_left_type = 'VECTOR'\n\n\n obj = bpy.data.objects.new(\"link\", curve)\n m0 = obj.modifiers.new(\"alpha\", 'HOOK')\n m0.object = o1\n m1 = obj.modifiers.new(\"beta\", 'HOOK')\n m1.object = o2\n\n bpy.context.collection.objects.link(obj)\n bpy.context.view_layer.objects.active = obj\n\n bpy.ops.object.mode_set(mode='EDIT')\n\n # Reassign the points\n p0 = curve.splines[0].bezier_points[0]\n p1 = curve.splines[0].bezier_points[1]\n\n # Hook first control point to first atom\n p0.select_control_point = True\n p1.select_control_point = False\n bpy.ops.object.hook_assign(modifier=\"alpha\")\n\n # Hook second control point to first atom\n p0 = curve.splines[0].bezier_points[0]\n p1 = curve.splines[0].bezier_points[1]\n p1.select_control_point = True\n p0.select_control_point = False\n bpy.ops.object.hook_assign(modifier=\"beta\")\n\n return obj\n\n\nclass Cell():\n\n def __init__(self,cb):\n\n self.alen = float(cb[\"_cell_length_a\"])\n self.blen = float(cb[\"_cell_length_b\"])\n self.clen = float(cb[\"_cell_length_c\"])\n self.alpha = float(cb[\"_cell_angle_alpha\"])\n self.beta = float(cb[\"_cell_angle_beta\"])\n self.gamma = float(cb[\"_cell_angle_gamma\"])\n\n\n def printout(self):\n\n print(\"alen:{:8} \\nblen:{:8} \\nclen:{:8} \\nalpha:{:8} \\nbeta: {:8} \\ngamma:{:8}\".format(self.alen,self.blen,self.clen,self.alpha,self.beta,self.gamma))\n\n\n\n\nclass Atom():\n\n def __init__(self,elid,sym,xpos,ypos,zpos):\n\n self.elid = elid\n self.sym = sym\n self.xpos = float(xpos)\n self.ypos = float(ypos)\n self.zpos = float(zpos)\n\n\n def printout(self):\n\n print(\"id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}\".format(self.elid,self.sym,self.xpos,self.ypos,self.zpos))\n\n\n def drawObj(self,ftoc):\n size = sizedic[self.sym]*styledic[draw_style][0]+bond_radius*styledic[draw_style][1]\n bpy.ops.mesh.primitive_uv_sphere_add(segments=qualitydic[draw_quality],ring_count=qualitydic[draw_quality]/2,size=size,location=toCarth(ftoc,[self.xpos,self.ypos,self.zpos]))\n bpy.context.object.name = self.elid\n activeObject = bpy.context.active_object # Set active object to variable\n mat = bpy.data.materials.new(name=\"MaterialName\") # set new material to variable\n activeObject.data.materials.append(mat) # add the material to the object\n if(atom_name):\n bpy.context.object.show_name = True\n if(atom_color):\n bpy.context.object.active_material.diffuse_color = colordic[self.sym] # change color to dictionary color\n else:\n 
bpy.context.object.active_material.diffuse_color = [1,1,1] # change color to white\n\n\nclass sympos():\n\n def __init__(self,string):\n\n self.xsym = (string[0].split(','))[0]\n self.ysym = (string[0].split(','))[1]\n self.zsym = (string[0].split(','))[2]\n\n\n def printout(self):\n\n print(\"x:{:8} y:{:8} z:{:8}\".format(self.xsym,self.ysym,self.zsym))\n\n\n\ndef readEl(cb):\n\n elements = []\n previd = []\n idcnt = []\n lb = cb.GetLoop(\"_atom_site_label\")\n for el in lb:\n flag = False\n for i in range(len(previd)):\n if(el[0] == previd[i]):\n flag = True\n break\n if(flag):\n idcnt[i] += 1\n else:\n previd.append(el[0])\n idcnt.append(0)\n i = len(idcnt)-1\n id_t = \"{}.{}\".format(el[0],idcnt[i])\n elements.append(Atom(id_t,el[1],el[2],el[3],el[4]))\n return elements\n\n\ndef readPos(cb):\n\n positions = [];\n lb = cb.GetLoop(\"_symmetry_equiv_pos_as_xyz\")\n for el in lb:\n positions.append(sympos(el))\n return positions\n\n\ndef obabel_fill_unit_cell(cif_file, p1_file):\n\n # Convert symmetry to P1 using openbabel as subprocess\n # Notation: obabel [-i<input-type>] <infilename> [-o<output-type>] -O<outfilename> [Options]\n subprocess.run(['obabel', '-icif', cif_file, '-ocif', '-O', p1_file, '--fillUC', 'keepconnect'])\n\n\ndef calcDistance(ftoc,atom1,atom2):\n\n ac = toCarth(ftoc,[atom1.xpos,atom1.ypos,atom1.zpos])\n tc = toCarth(ftoc,[atom2.xpos,atom2.ypos,atom2.zpos])\n dx = tc[0] - ac[0]\n dy = tc[1] - ac[1]\n dz = tc[2] - ac[2]\n dist = np.sqrt(dx**2 + dy**2 + dz**2)\n return dist\n\n\ndef toCarth(ftoc,V_frac):\n\n return np.dot(ftoc, V_frac)\n\n\ndef look_at(obj_camera, point):\n\n loc_camera = obj_camera.matrix_world.to_translation()\n direction = point - loc_camera\n # point the cameras '-Z' and use its 'Y' as up\n rot_quat = direction.to_track_quat('-Z', 'Y')\n # assume we're using euler rotation\n obj_camera.rotation_euler = rot_quat.to_euler()\n\n\ndef addCamera(x,y,z):\n\n bpy.ops.object.camera_add(view_align=True, enter_editmode=False, location=(5*x,5*y,5*z))\n print(\"camera added\")\n bpy.ops.object.light_add(type='SUN', view_align=False, location=(0, 0, 0))\n obj_camera = bpy.data.objects[\"Camera\"]\n look_at(obj_camera, Vector([0,0,z/4]))\n obj_camera.data.type = 'ORTHO'\n obj_camera.data.ortho_scale = ((x+y+z))\n\n\ndef clearWS():\n\n if 'OBJECT'!=bpy.context.mode:\n bpy.ops.object.mode_set(mode='OBJECT')\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete(use_global=False)\n # remove all previous curves\n for i in bpy.data.curves:\n bpy.data.curves.remove(i)\n # remove all previous materials\n for m in bpy.data.materials:\n bpy.data.materials.remove(m)\n # remove all previous camera's\n for c in bpy.data.cameras:\n bpy.data.cameras.remove(c)\n\n print(\"Workspace cleared.\")\n return\n\n\ndef drawCrystal(file):\n # Check if file is file:\n S = time.time()\n global user_feedback\n ext = file[len(file)-4:]\n if(ext.lower() != \".cif\"):\n print(\"Only cif files can be visualised\")\n user_feedback = \"Not a cif file\"\n return\n # Check OpenBabel installation\n try:\n # Convert the cif file to its P1 symmetry notation as a temporary cif file\n print('Converting %s to P1' %file)\n obabel_fill_unit_cell(file, \"temp.CIF\")\n cf = CifFile(\"temp.CIF\")\n except:\n print(\"No OpenBabel installation found, install it from http://openbabel.org/wiki/Category:Installation\")\n user_feedback = \"OpenBabel not installed\"\n #cf = CifFile(file) CifFile apparently can't read in long filepaths\n return\n # Open and parse our cif\n f = file.rsplit(dir_sep, 
1)[-1]\n F = f[:3]\n print(f)\n cb = cf.first_block()\n Crystal = Crysdata(F,cb)\n\n # Print crystal data in terminal if checked\n if(print_data):\n Crystal.printout()\n\n print(\"Crystal data read after \"+ str(time.time() - S) + \" seconds\")\n\n # Draw crystal if in Blender environment\n if(Blender_env):\n clearWS()\n Crystal.drawCrystal()\n bpy.ops.object.select_all(action='DESELECT')\n if(add_camera):\n addCamera(Crystal.cell.alen,Crystal.cell.blen,Crystal.cell.clen)\n",
"step-ids": [
20,
32,
41,
42,
51
]
}
|
[
20,
32,
41,
42,
51
] |
#!/usr/bin/env python
from __future__ import print_function, division, unicode_literals
import os
import sys
import json
import logging
import tempfile
import itertools
import traceback
import subprocess as sp
from os.path import basename
from datetime import datetime
from argparse import ArgumentParser, FileType
PREPROC_CMDS = {
'exon': "awk '$3 == \"exon\"' {input[0]} | sort -k1,1 -k4,4n | mergeBed -i stdin | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF+1)=\"exon\";print}}' > {output}",
'gene': "awk '$3 == \"gene\"' {input[0]} | sort -k1,1 -k4,4n | mergeBed -i stdin | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF+1)=\"gene\";print}}' > {output}",
'intron': "subtractBed -a {input[0]} -b {input[1]} | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF)=\"intron\";print}}' > {output}",
'intergenic': "complementBed -i {input[0]} -g <(cut -f 1-2 {input[1]} | sort -k1,1) | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF+1)=\"intergenic\";print}}' > {output}"
}
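# Each command projects one element class to a tagged BED stream: exon and
# gene features are sorted and merged (mergeBed), introns are genes minus
# exons (subtractBed), and intergenic regions are the complement of the
# merged genes over the genome (complementBed); the element name is appended
# as the last column.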
def strfdelta(tdelta, fmt):
d = {"days": tdelta.days}
d["hours"], rem = divmod(tdelta.seconds, 3600)
d["minutes"], d["seconds"] = divmod(rem, 60)
return fmt.format(**d)
def preprocess(element, inputs=None):
'''element can be one of <gene> <exon> <intron> <intergenic>'''
log = logging.getLogger('gencov')
element_bed = tempfile.mkstemp(suffix='.bed')[1]
if not inputs:
inputs = [ args.annotation ]
else:
inputs = inputs[element]
command = PREPROC_CMDS[element].format(input=inputs, output=element_bed)
log.debug(command)
proc = sp.Popen(command, shell=True, executable='/bin/bash', stderr=sp.PIPE)
err_msg = proc.communicate()[1]
if err_msg:
raise IOError(err_msg)
log.info("%s preprocessed" % element.title())
return element_bed
def gtf_processing(genome=None, prefix='gencov'):
"""Annotation preprocessing. Provide a bed file with the
following elements:
- projected exons
- projected genes
- introns
    - intergenic regions
"""
all_bed = prefix + ".all.bed"
if not os.path.exists(all_bed) or os.stat(all_bed).st_size == 0:
log.info("Preprocessing annotation...")
features = ('exon', 'gene', 'intron', 'intergenic')
merged_exons, merged_genes = map(preprocess, features[:2])
ins = {
'intron': [merged_genes, merged_exons],
'intergenic': [merged_genes, genome]
}
intron_bed, intergenic_bed = map(preprocess, features[2:], [ins, ins])
log.info("Concatenate bed files for all elements...")
with open(all_bed, 'w') as out_bed:
cat_all(merged_exons, merged_genes, intron_bed, intergenic_bed, out_bed=out_bed)
for f in (merged_exons, merged_genes, intron_bed, intergenic_bed):
os.remove(f)
return all_bed
def cat_all(*args, **kwargs):
out_bed = kwargs.get('out_bed', sys.stdout)
for bed in args:
print(open(bed,'r').read(), end='', file=out_bed)
def get_chromosomes(genome_file):
with open(genome_file) as genome:
chrs = [l.split()[0] for l in genome]
return chrs
def process_bam(bam, all_elements, chrs=None, all_reads=False):
if not os.path.exists(bam):
raise IOError("Fail to open {0!r} for reading".format(bam))
bai = "{0}.bai".format(bam)
if chrs and not os.path.exists(bai):
log.info("Indexing {0}...".format(bam))
sp.call('samtools index {0}'.format(bam), shell=True)
log.info('Processing {0}...'.format(bam))
command = "samtools view -u"
sam_filter = 4
if not all_reads:
sam_filter += 256
command += " -F {0} {1}".format(str(sam_filter), bam)
if chrs:
command += " {0}".format(" ".join(chrs))
command = "{0} | bamToBed -i stdin -tag NH -bed12 | intersectBed -a stdin -b {1} -split -wao".format(command, all_elements)
log.debug(command)
    return sp.Popen(command, shell=True, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=1, universal_newlines=True)
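# Classification rules used below: a read overlapping a single element keeps
# that label; a mix of exonic and intronic overlaps becomes 'exonic_intronic';
# any mix that includes intergenic is binned as 'others'. Split reads
# (bed12 block count > 1) and continuous reads are tallied separately.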
def update_counts(element, tot_counts, cont_counts, split_counts, is_split):
elem='total'
tot_counts[elem] = tot_counts.get(elem,0) + 1
if is_split:
split_counts['total'] = split_counts.get('total',0) + 1
if len(element) > 1:
if len(set(element)) == 1:
elem = element[0]
else:
if 'intergenic' in element:
elem = 'others'
else:
elem = 'exonic_intronic'
else:
elem = element[0]
split_counts[elem] = split_counts.get(elem, 0) + 1
else:
cont_counts['total'] = cont_counts.get('total', 0) + 1
if len(element) > 1:
if 'intergenic' in element:
elem = 'others'
else:
elem = 'exonic_intronic'
else:
elem = element[0]
cont_counts[elem] = cont_counts.get(elem, 0) + 1
def count_features(bed, uniq=False):
# Initialize
n_skipped = {}
newRead = False # keep track of different reads
prev_rid = None # read id of the previous read
is_split = False # check if current read is a split
element = [] # list with all elements intersecting the read
cont_counts = {} # Continuous read counts
split_counts = {} # Split read counts
tot_counts = {} # Total number of reads
o = bed.stdout
log.info("Compute genomic coverage...")
# Iterate
while True:
try:
            line = next(o)
if not line:
                n_skipped['empty'] = n_skipped.get('empty', 0) + 1
continue
if 'gene' in line:
n_skipped['gene'] = n_skipped.get('gene', 0) + 1
continue
rchr, rstart, rend, rid, rflag, rstrand, rtstart, rtend, rrgb, rbcount, rbsizes, rbstarts, achr, astart, aend, ael, covg = line.strip().split("\t")
if uniq and int(rflag) != 1:
n_skipped['non-uniq'] = n_skipped.get('non-uniq', 0) + 1
continue
newRead = (rid != prev_rid)
            if newRead and prev_rid is not None:
update_counts(element, tot_counts, cont_counts, split_counts, is_split)
# Re-Initialize the counters
element = []
element.append(ael)
prev_rid = rid
is_split = int(rbcount) > 1
except StopIteration:
update_counts(element, tot_counts, cont_counts, split_counts, is_split)
break
    for k, v in n_skipped.items():
log.info("Skipped {1} {0} lines".format(k, v))
return (tot_counts, cont_counts, split_counts)
def write_output(stats, out, output_format='tsv', json_indent=4):
if not args.ID:
args.ID = basename(args.bam)
if output_format == 'tsv':
        for k, v in stats.items():
            for k1, v1 in v.items():
line_array = [args.ID, k, str(k1), str(v1)]
out.write("\t".join(line_array)+"\n")
elif output_format == 'json':
out.write('Total reads: {0}\n'.format(json.dumps(stats['total'], indent=json_indent)))
out.write('Continuous reads: {0}\n'.format(json.dumps(stats['continuous'], indent=json_indent)))
out.write('Split reads: {0}\n'.format(json.dumps(stats['split'], indent=json_indent)))
def main(args):
bn_bam = os.path.basename(args.bam).rsplit(".", 1)[0]
bn_gtf = os.path.basename(args.annotation).rsplit(".", 1)[0]
start = datetime.now()
all_elements = gtf_processing(genome=args.genome, prefix=bn_bam + "." + bn_gtf)
chrs = None if args.all_chrs else get_chromosomes(args.genome)
if args.uniq:
args.all_reads = False
bed = process_bam(args.bam, all_elements, chrs=chrs, all_reads=args.all_reads)
read_type = "UNIQ" if args.uniq else "ALL" if args.all_reads else "PRIMARY"
chroms = ", ".join(chrs) if chrs else "ALL"
log.info("Chromosomes: {0}".format(str(chroms)))
log.info("Mapped reads: {0}".format(str(read_type)))
tot, cont, split = count_features(bed, uniq=args.uniq)
stats_summary = {"total" : tot, "continuous" : cont, "split" : split}
write_output(stats_summary, args.output, output_format=args.output_format)
end = datetime.now() - start
log.info('DONE ({0})'.format(strfdelta(end, "{hours}h{minutes}m{seconds}s")))
if not args.keep:
os.remove(all_elements)
def parse_arguments(argv):
""" Parsing arguments """
parser = ArgumentParser(argv, description = "Count the number of reads in genomic regions. NOTE: SAMtools and BEDtools must be installed")
parser.add_argument("-a", "--annotation", type=str, help="gtf with all elements (genes, transcripts and exons)", required=True)
parser.add_argument("-g", "--genome", type=str, help="genome chromosome sizes", required=True)
parser.add_argument("-b", "--bam", type=str, help="bam file", required=True)
parser.add_argument("-o", "--output", type=FileType('w'), default=sys.stdout, help="output file name")
parser.add_argument("-I", "--ID", type=str, help="the ID of the experiment, from which the bam comes from")
parser.add_argument("--keep", dest='keep', help="Do not delete the temporary files generated during the run", action='store_true', default=False)
parser.add_argument("--uniq", dest='uniq', action='store_true', help="Only use uniquely mapped reads", default=False)
parser.add_argument("--loglevel", dest='loglevel', help="Set the loglevel", default="info")
parser.add_argument("--all-reads", dest='all_reads', action='store_true', help="Use all reads from the BAM file. Default: use primary alignments only ('samtools view -F 260')", default=False)
parser.add_argument("--output-format", dest='output_format', help="Set the output format", default="tsv")
parser.add_argument("--all-chromosomes", dest='all_chrs', action='store_true', help="Use all chromosomes from the BAM file header. Default: use only chromosomes in the genome index file.", default=False)
return parser.parse_args()
def setup_logger():
""" Logging setup """
log = logging.getLogger("gencov")
log.setLevel(logging.getLevelName(args.loglevel.upper()))
ch = logging.StreamHandler()
    ch.setLevel(log.level)
fmt = logging.Formatter('%(asctime)s - %(message)s', '%Y-%m-%d %H:%M:%S')
ch.setFormatter(fmt)
log.addHandler(ch)
return log
if __name__ == "__main__":
"""
Given a bam file, compute the read coverage for different genomic regions:
- exons
- introns
- exon-intron junctions
- intergenic
*** ONLY PRIMARY alignments are used ***
"""
try:
args = parse_arguments(sys.argv)
log = setup_logger()
main(args)
exit(0)
    except Exception as err:
        log.error("Error:")
        errinfo = traceback.format_exception(*sys.exc_info())
log.error("".join(errinfo))
exit(1)
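# Example invocation (hypothetical file names; samtools and bedtools must be
# on PATH):
#   python gencov.py -a gencode.v19.annotation.gtf -g hg19.chrom.sizes \
#       -b sample.bam -o sample.coverage.tsv --uniq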
|
normal
|
{
"blob_id": "ac19ae96d8262cadd43314c29198fccbc008c1b5",
"index": 6590,
"step-1": "#!/usr/bin/env python\n\nfrom __future__ import print_function, division, unicode_literals\nimport os\nimport sys\nimport json\nimport logging\nimport tempfile\nimport itertools\nimport traceback\nimport subprocess as sp\nfrom os.path import basename\nfrom datetime import datetime\nfrom argparse import ArgumentParser, FileType\n\nPREPROC_CMDS = {\n 'exon': \"awk '$3 == \\\"exon\\\"' {input[0]} | sort -k1,1 -k4,4n | mergeBed -i stdin | awk 'BEGIN{{OFS=\\\"\\\\t\\\"}}{{$(NF+1)=\\\"exon\\\";print}}' > {output}\",\n 'gene': \"awk '$3 == \\\"gene\\\"' {input[0]} | sort -k1,1 -k4,4n | mergeBed -i stdin | awk 'BEGIN{{OFS=\\\"\\\\t\\\"}}{{$(NF+1)=\\\"gene\\\";print}}' > {output}\",\n 'intron': \"subtractBed -a {input[0]} -b {input[1]} | awk 'BEGIN{{OFS=\\\"\\\\t\\\"}}{{$(NF)=\\\"intron\\\";print}}' > {output}\",\n 'intergenic': \"complementBed -i {input[0]} -g <(cut -f 1-2 {input[1]} | sort -k1,1) | awk 'BEGIN{{OFS=\\\"\\\\t\\\"}}{{$(NF+1)=\\\"intergenic\\\";print}}' > {output}\"\n}\n\ndef strfdelta(tdelta, fmt):\n d = {\"days\": tdelta.days}\n d[\"hours\"], rem = divmod(tdelta.seconds, 3600)\n d[\"minutes\"], d[\"seconds\"] = divmod(rem, 60)\n return fmt.format(**d)\n\ndef preprocess(element, inputs=None):\n '''element can be one of <gene> <exon> <intron> <intergenic>'''\n log = logging.getLogger('gencov')\n element_bed = tempfile.mkstemp(suffix='.bed')[1]\n if not inputs:\n inputs = [ args.annotation ]\n else:\n inputs = inputs[element]\n command = PREPROC_CMDS[element].format(input=inputs, output=element_bed)\n\n log.debug(command)\n proc = sp.Popen(command, shell=True, executable='/bin/bash', stderr=sp.PIPE)\n err_msg = proc.communicate()[1]\n if err_msg:\n raise IOError(err_msg)\n\n log.info(\"%s preprocessed\" % element.title())\n return element_bed\n\ndef gtf_processing(genome=None, prefix='gencov'):\n \"\"\"Annotation preprocessing. 
Provide a bed file with the\n following elements:\n\n - projected exons\n - projected genes\n - introns\n - integenic regions\n\n \"\"\"\n all_bed = prefix + \".all.bed\"\n\n if not os.path.exists(all_bed) or os.stat(all_bed).st_size == 0:\n log.info(\"Preprocessing annotation...\")\n features = ('exon', 'gene', 'intron', 'intergenic')\n merged_exons, merged_genes = map(preprocess, features[:2])\n ins = {\n 'intron': [merged_genes, merged_exons],\n 'intergenic': [merged_genes, genome]\n }\n intron_bed, intergenic_bed = map(preprocess, features[2:], [ins, ins])\n\n log.info(\"Concatenate bed files for all elements...\")\n with open(all_bed, 'w') as out_bed:\n cat_all(merged_exons, merged_genes, intron_bed, intergenic_bed, out_bed=out_bed)\n\n for f in (merged_exons, merged_genes, intron_bed, intergenic_bed):\n os.remove(f)\n\n return all_bed\n\ndef cat_all(*args, **kwargs):\n out_bed = kwargs.get('out_bed', sys.stdout)\n for bed in args:\n print(open(bed,'r').read(), end='', file=out_bed)\n\ndef get_chromosomes(genome_file):\n with open(genome_file) as genome:\n chrs = [l.split()[0] for l in genome]\n return chrs\n\ndef process_bam(bam, all_elements, chrs=None, all_reads=False):\n if not os.path.exists(bam):\n raise IOError(\"Fail to open {0!r} for reading\".format(bam))\n bai = \"{0}.bai\".format(bam)\n if chrs and not os.path.exists(bai):\n log.info(\"Indexing {0}...\".format(bam))\n sp.call('samtools index {0}'.format(bam), shell=True)\n\n log.info('Processing {0}...'.format(bam))\n command = \"samtools view -u\"\n sam_filter = 4\n if not all_reads:\n sam_filter += 256\n command += \" -F {0} {1}\".format(str(sam_filter), bam)\n if chrs:\n command += \" {0}\".format(\" \".join(chrs))\n command = \"{0} | bamToBed -i stdin -tag NH -bed12 | intersectBed -a stdin -b {1} -split -wao\".format(command, all_elements)\n log.debug(command)\n return sp.Popen(command, shell=True, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=1)\n\ndef update_counts(element, tot_counts, cont_counts, split_counts, is_split):\n elem='total'\n tot_counts[elem] = tot_counts.get(elem,0) + 1\n if is_split:\n split_counts['total'] = split_counts.get('total',0) + 1\n if len(element) > 1:\n if len(set(element)) == 1:\n elem = element[0]\n else:\n if 'intergenic' in element:\n elem = 'others'\n else:\n elem = 'exonic_intronic'\n else:\n elem = element[0]\n\n split_counts[elem] = split_counts.get(elem, 0) + 1\n\n else:\n cont_counts['total'] = cont_counts.get('total', 0) + 1\n if len(element) > 1:\n if 'intergenic' in element:\n elem = 'others'\n else:\n elem = 'exonic_intronic'\n else:\n elem = element[0]\n\n cont_counts[elem] = cont_counts.get(elem, 0) + 1\n\ndef count_features(bed, uniq=False):\n\n # Initialize\n n_skipped = {}\n newRead = False # keep track of different reads\n prev_rid = None # read id of the previous read\n is_split = False # check if current read is a split\n element = [] # list with all elements intersecting the read\n cont_counts = {} # Continuous read counts\n split_counts = {} # Split read counts\n tot_counts = {} # Total number of reads\n\n o = bed.stdout\n\n log.info(\"Compute genomic coverage...\")\n\n # Iterate\n while True:\n try:\n line = o.next()\n if not line:\n n_skipped['empty'] = n_skipped.get('gene', 0) + 1\n continue\n if 'gene' in line:\n n_skipped['gene'] = n_skipped.get('gene', 0) + 1\n continue\n rchr, rstart, rend, rid, rflag, rstrand, rtstart, rtend, rrgb, rbcount, rbsizes, rbstarts, achr, astart, aend, ael, covg = line.strip().split(\"\\t\")\n if uniq and int(rflag) != 1:\n 
n_skipped['non-uniq'] = n_skipped.get('non-uniq', 0) + 1\n continue\n newRead = (rid != prev_rid)\n if (newRead) and prev_rid!=None:\n update_counts(element, tot_counts, cont_counts, split_counts, is_split)\n # Re-Initialize the counters\n element = []\n\n element.append(ael)\n prev_rid = rid\n is_split = int(rbcount) > 1\n except StopIteration:\n update_counts(element, tot_counts, cont_counts, split_counts, is_split)\n break\n\n for k,v in n_skipped.iteritems():\n log.info(\"Skipped {1} {0} lines\".format(k, v))\n\n return (tot_counts, cont_counts, split_counts)\n\ndef write_output(stats, out, output_format='tsv', json_indent=4):\n if not args.ID:\n args.ID = basename(args.bam)\n\n if output_format == 'tsv':\n for k, v in stats.iteritems():\n for k1, v1 in v.iteritems():\n line_array = [args.ID, k, str(k1), str(v1)]\n out.write(\"\\t\".join(line_array)+\"\\n\")\n elif output_format == 'json':\n out.write('Total reads: {0}\\n'.format(json.dumps(stats['total'], indent=json_indent)))\n out.write('Continuous reads: {0}\\n'.format(json.dumps(stats['continuous'], indent=json_indent)))\n out.write('Split reads: {0}\\n'.format(json.dumps(stats['split'], indent=json_indent)))\n\ndef main(args):\n\n bn_bam = os.path.basename(args.bam).rsplit(\".\", 1)[0]\n bn_gtf = os.path.basename(args.annotation).rsplit(\".\", 1)[0]\n\n start = datetime.now()\n\n all_elements = gtf_processing(genome=args.genome, prefix=bn_bam + \".\" + bn_gtf)\n\n chrs = None if args.all_chrs else get_chromosomes(args.genome)\n if args.uniq:\n args.all_reads = False\n bed = process_bam(args.bam, all_elements, chrs=chrs, all_reads=args.all_reads)\n\n read_type = \"UNIQ\" if args.uniq else \"ALL\" if args.all_reads else \"PRIMARY\"\n chroms = \", \".join(chrs) if chrs else \"ALL\"\n log.info(\"Chromosomes: {0}\".format(str(chroms)))\n log.info(\"Mapped reads: {0}\".format(str(read_type)))\n tot, cont, split = count_features(bed, uniq=args.uniq)\n\n stats_summary = {\"total\" : tot, \"continuous\" : cont, \"split\" : split}\n\n write_output(stats_summary, args.output, output_format=args.output_format)\n\n end = datetime.now() - start\n log.info('DONE ({0})'.format(strfdelta(end, \"{hours}h{minutes}m{seconds}s\")))\n\n if not args.keep:\n os.remove(all_elements)\n\ndef parse_arguments(argv):\n \"\"\" Parsing arguments \"\"\"\n\n parser = ArgumentParser(argv, description = \"Count the number of reads in genomic regions. NOTE: SAMtools and BEDtools must be installed\")\n parser.add_argument(\"-a\", \"--annotation\", type=str, help=\"gtf with all elements (genes, transcripts and exons)\", required=True)\n parser.add_argument(\"-g\", \"--genome\", type=str, help=\"genome chromosome sizes\", required=True)\n parser.add_argument(\"-b\", \"--bam\", type=str, help=\"bam file\", required=True)\n parser.add_argument(\"-o\", \"--output\", type=FileType('w'), default=sys.stdout, help=\"output file name\")\n parser.add_argument(\"-I\", \"--ID\", type=str, help=\"the ID of the experiment, from which the bam comes from\")\n parser.add_argument(\"--keep\", dest='keep', help=\"Do not delete the temporary files generated during the run\", action='store_true', default=False)\n parser.add_argument(\"--uniq\", dest='uniq', action='store_true', help=\"Only use uniquely mapped reads\", default=False)\n parser.add_argument(\"--loglevel\", dest='loglevel', help=\"Set the loglevel\", default=\"info\")\n parser.add_argument(\"--all-reads\", dest='all_reads', action='store_true', help=\"Use all reads from the BAM file. 
Default: use primary alignments only ('samtools view -F 260')\", default=False)\n parser.add_argument(\"--output-format\", dest='output_format', help=\"Set the output format\", default=\"tsv\")\n parser.add_argument(\"--all-chromosomes\", dest='all_chrs', action='store_true', help=\"Use all chromosomes from the BAM file header. Default: use only chromosomes in the genome index file.\", default=False)\n\n return parser.parse_args()\n\ndef setup_logger():\n \"\"\" Logging setup \"\"\"\n log = logging.getLogger(\"gencov\")\n log.setLevel(logging.getLevelName(args.loglevel.upper()))\n ch = logging.StreamHandler()\n ch.setLevel = log.level\n fmt = logging.Formatter('%(asctime)s - %(message)s', '%Y-%m-%d %H:%M:%S')\n ch.setFormatter(fmt)\n log.addHandler(ch)\n return log\n\nif __name__ == \"__main__\":\n \"\"\"\n Given a bam file, compute the read coverage for different genomic regions:\n\n - exons\n - introns\n - exon-intron junctions\n - intergenic\n\n *** ONLY PRIMARY alignments are used ***\n \"\"\"\n try:\n args = parse_arguments(sys.argv)\n log = setup_logger()\n main(args)\n exit(0)\n except Exception,err:\n log.error(\"Error:\")\n errinfo = traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)\n log.error(\"\".join(errinfo))\n exit(1)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pandas as pd
import numpy as np
import datetime as dt
def sum_unique(x):
return np.unique(x).shape[0]
def analyze_count(data):
"""real time, vk, itemid, action"""
dsct_vk = pd.unique(data['vk'])
dsct_itemid = pd.unique(data['itemid'])
    print 'number of users:', dsct_vk.shape
print 'number of items:', dsct_itemid.shape
print 'the number of ratings:', data.shape
print 'unique actions:', pd.unique(data['action'])
print 'the number of action 0:', np.sum(data['action'] == 0)
print 'the number of action 1:', np.sum(data['action'] == 1)
print 'the number of action 2:', np.sum(data['action'] == 2)
print 'the number of action 3:', np.sum(data['action'] == 3)
print 'the number of action 4:', np.sum(data['action'] == 4)
time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)
print 'Max Range:', np.max(time_range_item)
print 'Mean Range:', np.mean(time_range_item)
print 'Median Range:', np.median(time_range_item)
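# Example usage (hypothetical input file; columns follow the docstring):
#   data = pd.read_csv('events.tsv', sep='\t',
#                      names=['real_time', 'vk', 'itemid', 'action'])
#   analyze_count(data)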
|
normal
|
{
"blob_id": "1db16ae1fc6546575150187432265ac1cf834ec2",
"index": 1809,
"step-1": "import pandas as pd\nimport numpy as np\nimport datetime as dt\n\ndef sum_unique(x):\n return np.unique(x).shape[0]\n\ndef analyze_count(data):\n \n \"\"\"real time, vk, itemid, action\"\"\"\n\n dsct_vk = pd.unique(data['vk'])\n dsct_itemid = pd.unique(data['itemid'])\n\n print 'number of user:', dsct_vk.shape\n print 'number of items:', dsct_itemid.shape\n print 'the number of ratings:', data.shape\n\n print 'unique actions:', pd.unique(data['action'])\n print 'the number of action 0:', np.sum(data['action'] == 0)\n print 'the number of action 1:', np.sum(data['action'] == 1)\n print 'the number of action 2:', np.sum(data['action'] == 2)\n print 'the number of action 3:', np.sum(data['action'] == 3)\n print 'the number of action 4:', np.sum(data['action'] == 4)\n \n time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)\n print 'Max Range:', np.max(time_range_item)\n print 'Mean Range:', np.mean(time_range_item)\n print 'Median Range:', np.median(time_range_item)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import math
z = 1j
cosinus_real = math.cos(z.real)
cosinus_imaginary = math.cos(z.imag)
sinus_real = math.sin(z.real)
sinus_imag = math.sin(z.imag)
print (cosinus_real)
print (cosinus_imaginary)
print (sinus_real)
print (sinus_imag)
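# Note (not in the original): math.cos/math.sin accept only real numbers,
# which is why the real and imaginary parts are taken separately above.
# The complex cosine of z itself would come from the cmath module:
#   import cmath
#   print (cmath.cos(z))  # (1.5430806348152437-0j) for z = 1j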
|
normal
|
{
"blob_id": "7ea608b73f592cffc7723b4319cf1a87b3e9b443",
"index": 4220,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(cosinus_real)\nprint(cosinus_imaginary)\nprint(sinus_real)\nprint(sinus_imag)\n",
"step-3": "<mask token>\nz = 1.0j\ncosinus_real = math.cos(z.real)\ncosinus_imaginary = math.cos(z.imag)\nsinus_real = math.sin(z.real)\nsinus_imag = math.sin(z.imag)\nprint(cosinus_real)\nprint(cosinus_imaginary)\nprint(sinus_real)\nprint(sinus_imag)\n",
"step-4": "import math\nz = 1.0j\ncosinus_real = math.cos(z.real)\ncosinus_imaginary = math.cos(z.imag)\nsinus_real = math.sin(z.real)\nsinus_imag = math.sin(z.imag)\nprint(cosinus_real)\nprint(cosinus_imaginary)\nprint(sinus_real)\nprint(sinus_imag)\n",
"step-5": "import math\n\nz = 1j\n\n\ncosinus_real = math.cos(z.real)\ncosinus_imaginary = math.cos(z.imag)\nsinus_real = math.sin(z.real)\nsinus_imag = math.sin(z.imag)\n\nprint (cosinus_real)\nprint (cosinus_imaginary)\nprint (sinus_real)\nprint (sinus_imag)\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import code2
print ("Main en code1: %s\n" % __name__)
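# Note (not in the original): when this file runs as a script its __name__
# is '__main__', while the imported code2 module sees __name__ == 'code2';
# the usual guard for this is `if __name__ == '__main__':`.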
|
normal
|
{
"blob_id": "ecbc1da3efb39300b60aeb47897fb01b6bd7af31",
"index": 6028,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Main en code1: %s\\n' % __name__)\n",
"step-3": "import code2\nprint('Main en code1: %s\\n' % __name__)\n",
"step-4": "\nimport code2\nprint (\"Main en code1: %s\\n\" % __name__)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import glob
import html
import os
import re
import sys
import textwrap
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import pycountry
import requests
from pyd2v import D2V
from pymediainfo import MediaInfo, Track
from pynfogen.formatter import CustomFormats
class NFO:
AUDIO_CHANNEL_LAYOUT_WEIGHT = {
"LFE": 0.1
}
IMDB_ID_T = re.compile(r"^tt\d{7,8}$")
TMDB_ID_T = re.compile(r"^(tv|movie)/\d+$")
TVDB_ID_T = re.compile(r"^\d+$")
def __init__(self) -> None:
self.media_info: MediaInfo
self.file: str
self.season: Optional[Union[int, str]]
self.episode: Optional[int]
self.episode_name: Optional[str]
self.videos: List[Track]
self.audio: List[Track]
self.subtitles: List[Track]
self.chapters: Dict[str, str]
self.chapters_numbered: bool
self.fanart_api_key: Optional[str]
self.source: Optional[str]
self.note: Optional[str]
self.preview: Optional[str]
self.imdb: str
self.tmdb: Optional[str]
self.tvdb: Optional[int]
self.title_name: str
self.title_year: str
self.episodes: int
self.release_name: str
        self.preview_images: List[Dict[str, str]]
self.banner_image: Optional[str]
self.session = self.get_session()
def __repr__(self) -> str:
return "<{c} {attrs}>".format(
c=self.__class__.__name__,
attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
)
def run(self, template: str, art: Optional[str] = None, **kwargs: Any) -> str:
"""
Evaluate and apply formatting on template, apply any art if provided.
Any additional parameters are passed as extra variables to the template.
The extra variables have priority when there's conflicting variable names.
"""
variables = self.__dict__
variables.update(kwargs)
template = CustomFormats().format(template, **variables)
if art:
art = art.format(nfo=template)
template = art
for m in re.finditer(r"<\?([01])\?([\D\d]*?)\?>", template):
# TODO: This if check is quite yucky, look into alternative options.
# Ideally a custom format spec would be great.
template = template.replace(
m.group(0),
m.group(2) if int(m.group(1)) else ""
)
template = "\n".join(map(str.rstrip, template.splitlines(keepends=False)))
return template
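    # Template conditionals: after variable substitution, a block written as
    # <?1?body?> is kept and <?0?body?> is dropped, so one template can
    # toggle optional sections such as a note or source line.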
def set_config(self, file: str, **config: Any) -> None:
self.file = file
self.media_info = MediaInfo.parse(self.file)
self.fanart_api_key = config.get("fanart_api_key")
self.source = config.get("source")
self.note = config.get("note")
self.preview = config.get("preview")
self.season = config.get("season")
self.episode, self.episode_name = config.get("episode") or (None, None)
self.episodes = self.get_tv_episodes()
self.release_name = self.get_release_name()
self.videos = self.media_info.video_tracks
self.audio = self.media_info.audio_tracks
self.subtitles = self.media_info.text_tracks
tracks_without_language = [
x for x in self.videos + self.audio + self.subtitles
if not x.language or x.language == "und"
]
if tracks_without_language:
print("The following tracks have no language tag! All tracks need a language tag!")
for track in tracks_without_language:
print(f"{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)")
            print(
                "Yes, even Video Tracks have a language, e.g. Credits, Signs, Letters, Different Intro Sequence, etc.\n"
                "Don't forget to verify and add language tags to the rest of the files too!"
            )
sys.exit(1)
chapters = next(iter(self.media_info.menu_tracks), None)
if chapters:
self.chapters = {
".".join([k.replace("_", ".")[:-3], k[-3:]]): v.strip(":")
for k, v in chapters.to_data().items()
if f"1{k.replace('_', '')}".isdigit()
}
self.chapters_numbered = all(
x.split(":", 1)[-1].lower() in [f"chapter {i + 1}", f"chapter {str(i + 1).zfill(2)}"]
for i, x in enumerate(self.chapters.values())
)
else:
self.chapters = {}
self.chapters_numbered = False
self.imdb = self.get_imdb_id(config.get("imdb"))
self.tmdb = self.get_tmdb_id(config.get("tmdb"))
self.tvdb = self.get_tvdb_id(config.get("tvdb"))
self.title_name, self.title_year = self.get_title_name_year()
self.banner_image = self.get_banner_image(self.tvdb) if self.tvdb and self.fanart_api_key else None
self.preview_images = self.get_preview_images(self.preview) if self.preview else []
def get_imdb_id(self, imdb_id: Any) -> str:
"""
Get an IMDB ID from either the media's global tags, or the config.
Since IMDB IDs are required for this project, it will bug the user for
one interactively if not found.
"""
if not imdb_id:
general_track = self.media_info.general_tracks[0].to_data()
imdb_id = general_track.get("imdb")
if not imdb_id:
print("No IMDB ID was provided but is required...")
while not imdb_id or not isinstance(imdb_id, str):
user_id = input("IMDB ID (e.g., 'tt0487831'): ")
if not self.IMDB_ID_T.match(user_id):
print(f"The provided IMDB ID {user_id!r} is not valid...")
print("Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').")
else:
imdb_id = user_id
return imdb_id
def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]:
"""
Get a TMDB ID from either the media's global tags, or the config.
It will raise a ValueError if the provided ID is invalid.
"""
if not tmdb_id:
general_track = self.media_info.general_tracks[0].to_data()
tmdb_id = general_track.get("tmdb")
if not tmdb_id:
print("Warning: No TMDB ID was provided...")
return None
if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):
print(f"The provided TMDB ID {tmdb_id!r} is not valid...")
print("Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').")
raise ValueError("Invalid TMDB ID")
return tmdb_id
def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]:
"""
Get a TVDB ID from either the media's global tags, or the config.
It will raise a ValueError if the provided ID is invalid.
"""
if not tvdb_id:
general_track = self.media_info.general_tracks[0].to_data()
tvdb_id = general_track.get("tvdb")
if not tvdb_id:
print("Warning: No TVDB ID was provided...")
return None
if isinstance(tvdb_id, int):
tvdb_id = str(tvdb_id)
if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):
print(f"The provided TVDB ID {tvdb_id!r} is not valid...")
print("Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').")
raise ValueError("Invalid TVDB ID")
return int(tvdb_id)
def get_title_name_year(self) -> Tuple[str, str]:
"""Scrape Title Name and Year (including e.g. 2019-) from IMDB"""
r = self.session.get(f"https://www.imdb.com/title/{self.imdb}")
if r.status_code != 200:
raise ValueError(f"An unexpected error occurred getting IMDB Title Page [{r.status_code}]")
imdb_page = html.unescape(r.text)
imdb_title = re.search(
# testing ground: https://regex101.com/r/bEoEDn/1
r"<title>(?P<name>.+) \(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)"
r"(?P<year>(\d{4})(|– |–\d{4})))\) - IMDb</title>",
imdb_page
)
if not imdb_title:
raise ValueError(f"Could not scrape Movie Title or Year for {self.imdb}...")
return imdb_title.group("name").strip(), imdb_title.group("year").strip()
def get_tv_episodes(self) -> int:
"""Calculate total episode count based on neighbouring same-extension files."""
return len(glob.glob(os.path.join(
os.path.dirname(self.file),
f"*{os.path.splitext(self.file)[-1]}"
)))
def get_release_name(self) -> str:
"""
Retrieve the release name based on the file used during MediaInfo.
If a season was specified, but an episode number was not, it presumes the release is a Pack.
        In that case it uses the parent folder's name as the release name.
"""
if self.season is not None and self.episode is None:
return os.path.basename(os.path.dirname(self.file))
return os.path.splitext(os.path.basename(self.file))[0]
def get_banner_image(self, tvdb_id: int) -> Optional[str]:
"""
Get a wide banner image from fanart.tv.
Currently restricts banners to English-only.
"""
if not tvdb_id:
return None
if not self.fanart_api_key:
raise ValueError("Need Fanart.tv api key for TV titles!")
r = self.session.get(f"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}")
if r.status_code == 404:
return None
res = r.json()
error = res.get("error message")
if error:
if error == "Not found":
return None
raise ValueError(f"An unexpected error occurred while calling Fanart.tv, {res}")
banner = next((
x["url"] for x in (res.get("tvbanner") or [])
            if x["lang"] == sorted(self.audio, key=lambda a: a.streamorder)[0].language
), None)
return banner
def get_preview_images(self, url: str) -> List[Dict[str, str]]:
if not url:
return []
images = []
for domain in ["imgbox.com", "beyondhd.co"]:
if domain not in url.lower():
continue
page = self.session.get(url).text
if domain == "imgbox.com":
for m in re.finditer('src="(https://thumbs2.imgbox.com.+/)(\\w+)_b.([^"]+)', page):
images.append({
"url": f"https://imgbox.com/{m.group(2)}",
"src": f"{m.group(1)}{m.group(2)}_t.{m.group(3)}"
})
elif domain == "beyondhd.co":
for m in re.finditer('/image/([^"]+)"\\D+src="(https://.*beyondhd.co/images.+/(\\w+).md.[^"]+)', page):
images.append({
"url": f"https://beyondhd.co/image/{m.group(1)}",
"src": m.group(2)
})
break
return images
def get_video_print(self, videos: List[Track]) -> List[List[str]]:
if not videos:
return [["--"]]
data = []
for video in videos:
codec = {
"MPEG Video": f"MPEG-{(video.format_version or '').replace('Version ', '')}"
}.get(video.format, video.format)
scan_overview = video.scan_type
vst = False
if codec in ["MPEG-1", "MPEG-2"]:
# parse d2v file with pyd2v, generates D2V if needed
d2v = D2V.load(Path(self.file))
self.file = d2v.path
# get every frames' flag data, this contains information on displaying frames
# add vob and cell number to each frames flag data as well
flags = [f for line in [
[dict(**y, vob=x["vob"], cell=x["cell"]) for y in x["flags"]] for x in d2v.data
] for f in line]
interlaced_percent = (sum(1 for f in flags if not f["progressive_frame"]) / len(flags)) * 100
if interlaced_percent == 100:
scan_overview = "Interlaced (CST)"
else:
scan_overview = f"{round(interlaced_percent, 2)}% Interlaced (VST)"
vst = True
for ext in ["log", "d2v", "mpg", "mpeg"]:
fp = os.path.splitext(self.file)[0] + "." + ext
if os.path.exists(fp):
os.unlink(fp)
line_1 = "- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}".format(
language=pycountry.languages.get(alpha_2=video.language).name,
codec=codec,
profile=video.format_profile,
width=video.width, height=video.height,
aspect=video.other_display_aspect_ratio[0],
bitrate=f"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}"
)
line_2 = " {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}".format(
fps=f"{video.framerate_num}/{video.framerate_den}" if video.framerate_num else video.frame_rate,
fps_mode="VFR" if vst else video.frame_rate_mode,
color_space=video.color_space,
subsampling=video.chroma_subsampling.replace(":", ""),
bit_depth=video.bit_depth,
scan=scan_overview
)
data.append([line_1, line_2])
return data
def get_audio_print(self, audio: List[Track]) -> List[str]:
if not audio:
return ["--"]
data = []
for t in audio:
if t.title and "Commentary" in t.title:
title = t.title
else:
title = pycountry.languages.get(alpha_2=t.language).name
if t.channel_layout:
channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x, 1) for x in t.channel_layout.split(" ")))
else:
channels = float(t.channel_s)
bit_rate_mode = f" ({t.bit_rate_mode})" if t.bit_rate_mode else ""
l1 = f"- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}"
data += [(" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(l1, 64))]
return data
@staticmethod
def get_subtitle_print(subs: List[Track]) -> List[str]:
"""
Return a list of a brief subtitle overview per-subtitle.
e.g.
- English, Forced, SubRip (SRT)
- English, SubRip (SRT)
- English, SDH, SubRip (SRT)
- Spanish, Latin American (SDH), SubRip (SRT)
The bit of text between the Language and the Subtitle format is the Track Title.
It can be of any format, but it is recommended to be used as shown above.
It will be returned as a list of strings with the `- ` already pre-pended to each entry.
"""
data = []
if not subs:
data.append("--")
for sub in subs:
line_items = []
# following sub.title tree checks and supports three different language and title scenarios
# The second scenario is the recommended option to choose if you are open to choosing any
# The third scenario should be used if you have nothing unique to state about the track
# | Language | Track Title | Output |
# | ------------ | ----------------------------- | --------------------------------------------- |
# | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) |
# | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) |
# | es / Spanish | None | - Spanish, SubRip (SRT) |
language = pycountry.languages.get(alpha_2=sub.language).name
if sub.title:
if language.lower() in sub.title.lower():
line_items.append(sub.title)
else:
line_items.append(f"{language}, {sub.title}")
else:
line_items.append(language)
line_items.append(sub.format.replace("UTF-8", "SubRip (SRT)"))
line = "- " + ", ".join(line_items)
data += [
(" " + x if i > 0 else x)
for i, x in enumerate(textwrap.wrap(line, 64))
]
return data
@staticmethod
def get_chapter_print(chapters: Dict[str, str]) -> List[str]:
if not chapters:
return ["--"]
return [
f"- {k}: {v}"
for k, v in chapters.items()
]
def get_chapter_print_short(self, chapters: Dict[str, str]) -> str:
if not chapters:
return "No"
if self.chapters_numbered:
return f"Yes (Numbered 01-{str(len(chapters)).zfill(2)})"
return "Yes (Named)"
@staticmethod
def get_session() -> requests.Session:
session = requests.Session()
session.headers.update({
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"DNT": "1",
"UPGRADE-INSECURE-REQUESTS": "1"
})
return session
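
# Example usage (a sketch; the file path, IMDB ID and template string are
# hypothetical, and set_config() needs a real media file for MediaInfo):
#   nfo = NFO()
#   nfo.set_config("Movie.2020.1080p.BluRay.x264.mkv", imdb="tt0487831")
#   print(nfo.run("{title_name} ({title_year}) :: {release_name}"))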
|
normal
|
{
"blob_id": "e434d5519e3ba4255ed928769070de391cb0955b",
"index": 3462,
"step-1": "<mask token>\n\n\nclass NFO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self) ->str:\n return '<{c} {attrs}>'.format(c=self.__class__.__name__, attrs=' '.\n join('{}={!r}'.format(k, v) for k, v in self.__dict__.items()))\n\n def run(self, template: str, art: Optional[str]=None, **kwargs: Any) ->str:\n \"\"\"\n Evaluate and apply formatting on template, apply any art if provided.\n Any additional parameters are passed as extra variables to the template.\n The extra variables have priority when there's conflicting variable names.\n \"\"\"\n variables = self.__dict__\n variables.update(kwargs)\n template = CustomFormats().format(template, **variables)\n if art:\n art = art.format(nfo=template)\n template = art\n for m in re.finditer('<\\\\?([01])\\\\?([\\\\D\\\\d]*?)\\\\?>', template):\n template = template.replace(m.group(0), m.group(2) if int(m.\n group(1)) else '')\n template = '\\n'.join(map(str.rstrip, template.splitlines(keepends=\n False)))\n return template\n\n def set_config(self, file: str, **config: Any) ->None:\n self.file = file\n self.media_info = MediaInfo.parse(self.file)\n self.fanart_api_key = config.get('fanart_api_key')\n self.source = config.get('source')\n self.note = config.get('note')\n self.preview = config.get('preview')\n self.season = config.get('season')\n self.episode, self.episode_name = config.get('episode') or (None, None)\n self.episodes = self.get_tv_episodes()\n self.release_name = self.get_release_name()\n self.videos = self.media_info.video_tracks\n self.audio = self.media_info.audio_tracks\n self.subtitles = self.media_info.text_tracks\n tracks_without_language = [x for x in self.videos + self.audio +\n self.subtitles if not x.language or x.language == 'und']\n if tracks_without_language:\n print(\n 'The following tracks have no language tag! 
All tracks need a language tag!'\n )\n for track in tracks_without_language:\n print(\n f'{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)'\n )\n print(\n \"\"\"Yes, even Video Track's have language e.g., Credits, Signs, Letters, Different Intro Sequence, etc.\nDon't forget to verify and add language tags to the rest of the files too!\"\"\"\n )\n sys.exit(1)\n chapters = next(iter(self.media_info.menu_tracks), None)\n if chapters:\n self.chapters = {'.'.join([k.replace('_', '.')[:-3], k[-3:]]):\n v.strip(':') for k, v in chapters.to_data().items() if\n f\"1{k.replace('_', '')}\".isdigit()}\n self.chapters_numbered = all(x.split(':', 1)[-1].lower() in [\n f'chapter {i + 1}', f'chapter {str(i + 1).zfill(2)}'] for i,\n x in enumerate(self.chapters.values()))\n else:\n self.chapters = {}\n self.chapters_numbered = False\n self.imdb = self.get_imdb_id(config.get('imdb'))\n self.tmdb = self.get_tmdb_id(config.get('tmdb'))\n self.tvdb = self.get_tvdb_id(config.get('tvdb'))\n self.title_name, self.title_year = self.get_title_name_year()\n self.banner_image = self.get_banner_image(self.tvdb\n ) if self.tvdb and self.fanart_api_key else None\n self.preview_images = self.get_preview_images(self.preview\n ) if self.preview else []\n\n def get_imdb_id(self, imdb_id: Any) ->str:\n \"\"\"\n Get an IMDB ID from either the media's global tags, or the config.\n Since IMDB IDs are required for this project, it will bug the user for\n one interactively if not found.\n \"\"\"\n if not imdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n imdb_id = general_track.get('imdb')\n if not imdb_id:\n print('No IMDB ID was provided but is required...')\n while not imdb_id or not isinstance(imdb_id, str):\n user_id = input(\"IMDB ID (e.g., 'tt0487831'): \")\n if not self.IMDB_ID_T.match(user_id):\n print(f'The provided IMDB ID {user_id!r} is not valid...')\n print(\n \"Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').\"\n )\n else:\n imdb_id = user_id\n return imdb_id\n\n def get_tmdb_id(self, tmdb_id: Any) ->Optional[str]:\n \"\"\"\n Get a TMDB ID from either the media's global tags, or the config.\n It will raise a ValueError if the provided ID is invalid.\n \"\"\"\n if not tmdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tmdb_id = general_track.get('tmdb')\n if not tmdb_id:\n print('Warning: No TMDB ID was provided...')\n return None\n if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):\n print(f'The provided TMDB ID {tmdb_id!r} is not valid...')\n print(\n \"Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').\"\n )\n raise ValueError('Invalid TMDB ID')\n return tmdb_id\n\n def get_tvdb_id(self, tvdb_id: Any) ->Optional[int]:\n \"\"\"\n Get a TVDB ID from either the media's global tags, or the config.\n It will raise a ValueError if the provided ID is invalid.\n \"\"\"\n if not tvdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tvdb_id = general_track.get('tvdb')\n if not tvdb_id:\n print('Warning: No TVDB ID was provided...')\n return None\n if isinstance(tvdb_id, int):\n tvdb_id = str(tvdb_id)\n if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):\n print(f'The provided TVDB ID {tvdb_id!r} is not valid...')\n print(\n \"Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').\"\n )\n raise ValueError('Invalid TVDB ID')\n return int(tvdb_id)\n <mask token>\n <mask token>\n\n def get_release_name(self) ->str:\n \"\"\"\n Retrieve 
the release name based on the file used during MediaInfo.\n If a season was specified, but an episode number was not, it presumes the release is a Pack.\n Hence when pack, it uses the parent folder's name as the release name.\n \"\"\"\n if self.season is not None and self.episode is None:\n return os.path.basename(os.path.dirname(self.file))\n return os.path.splitext(os.path.basename(self.file))[0]\n\n def get_banner_image(self, tvdb_id: int) ->Optional[str]:\n \"\"\"\n Get a wide banner image from fanart.tv.\n Currently restricts banners to English-only.\n \"\"\"\n if not tvdb_id:\n return None\n if not self.fanart_api_key:\n raise ValueError('Need Fanart.tv api key for TV titles!')\n r = self.session.get(\n f'http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}'\n )\n if r.status_code == 404:\n return None\n res = r.json()\n error = res.get('error message')\n if error:\n if error == 'Not found':\n return None\n raise ValueError(\n f'An unexpected error occurred while calling Fanart.tv, {res}')\n banner = next((x['url'] for x in res.get('tvbanner') or [] if x[\n 'lang'] == sorted(self.audio, key=lambda x: x.streamorder)[0].\n language), None)\n return banner\n\n def get_preview_images(self, url: str) ->List[Dict[str, str]]:\n if not url:\n return []\n images = []\n for domain in ['imgbox.com', 'beyondhd.co']:\n if domain not in url.lower():\n continue\n page = self.session.get(url).text\n if domain == 'imgbox.com':\n for m in re.finditer(\n 'src=\"(https://thumbs2.imgbox.com.+/)(\\\\w+)_b.([^\"]+)',\n page):\n images.append({'url':\n f'https://imgbox.com/{m.group(2)}', 'src':\n f'{m.group(1)}{m.group(2)}_t.{m.group(3)}'})\n elif domain == 'beyondhd.co':\n for m in re.finditer(\n '/image/([^\"]+)\"\\\\D+src=\"(https://.*beyondhd.co/images.+/(\\\\w+).md.[^\"]+)'\n , page):\n images.append({'url':\n f'https://beyondhd.co/image/{m.group(1)}', 'src': m\n .group(2)})\n break\n return images\n\n def get_video_print(self, videos: List[Track]) ->List[List[str]]:\n if not videos:\n return [['--']]\n data = []\n for video in videos:\n codec = {'MPEG Video':\n f\"MPEG-{(video.format_version or '').replace('Version ', '')}\"\n }.get(video.format, video.format)\n scan_overview = video.scan_type\n vst = False\n if codec in ['MPEG-1', 'MPEG-2']:\n d2v = D2V.load(Path(self.file))\n self.file = d2v.path\n flags = [f for line in [[dict(**y, vob=x['vob'], cell=x[\n 'cell']) for y in x['flags']] for x in d2v.data] for f in\n line]\n interlaced_percent = sum(1 for f in flags if not f[\n 'progressive_frame']) / len(flags) * 100\n if interlaced_percent == 100:\n scan_overview = 'Interlaced (CST)'\n else:\n scan_overview = (\n f'{round(interlaced_percent, 2)}% Interlaced (VST)')\n vst = True\n for ext in ['log', 'd2v', 'mpg', 'mpeg']:\n fp = os.path.splitext(self.file)[0] + '.' 
+ ext\n if os.path.exists(fp):\n os.unlink(fp)\n line_1 = (\n '- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}'\n .format(language=pycountry.languages.get(alpha_2=video.\n language).name, codec=codec, profile=video.format_profile,\n width=video.width, height=video.height, aspect=video.\n other_display_aspect_ratio[0], bitrate=\n f\"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}\"\n ))\n line_2 = (\n ' {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}'\n .format(fps=f'{video.framerate_num}/{video.framerate_den}' if\n video.framerate_num else video.frame_rate, fps_mode='VFR' if\n vst else video.frame_rate_mode, color_space=video.\n color_space, subsampling=video.chroma_subsampling.replace(\n ':', ''), bit_depth=video.bit_depth, scan=scan_overview))\n data.append([line_1, line_2])\n return data\n\n def get_audio_print(self, audio: List[Track]) ->List[str]:\n if not audio:\n return ['--']\n data = []\n for t in audio:\n if t.title and 'Commentary' in t.title:\n title = t.title\n else:\n title = pycountry.languages.get(alpha_2=t.language).name\n if t.channel_layout:\n channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x,\n 1) for x in t.channel_layout.split(' ')))\n else:\n channels = float(t.channel_s)\n bit_rate_mode = f' ({t.bit_rate_mode})' if t.bit_rate_mode else ''\n l1 = (\n f'- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}'\n )\n data += [(' ' + x if i > 0 else x) for i, x in enumerate(\n textwrap.wrap(l1, 64))]\n return data\n <mask token>\n <mask token>\n\n def get_chapter_print_short(self, chapters: Dict[str, str]) ->str:\n if not chapters:\n return 'No'\n if self.chapters_numbered:\n return f'Yes (Numbered 01-{str(len(chapters)).zfill(2)})'\n return 'Yes (Named)'\n\n @staticmethod\n def get_session() ->requests.Session:\n session = requests.Session()\n session.headers.update({'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n , 'Accept-Language': 'en-US,en;q=0.5', 'DNT': '1',\n 'UPGRADE-INSECURE-REQUESTS': '1'})\n return session\n",
"step-2": "<mask token>\n\n\nclass NFO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self) ->str:\n return '<{c} {attrs}>'.format(c=self.__class__.__name__, attrs=' '.\n join('{}={!r}'.format(k, v) for k, v in self.__dict__.items()))\n\n def run(self, template: str, art: Optional[str]=None, **kwargs: Any) ->str:\n \"\"\"\n Evaluate and apply formatting on template, apply any art if provided.\n Any additional parameters are passed as extra variables to the template.\n The extra variables have priority when there's conflicting variable names.\n \"\"\"\n variables = self.__dict__\n variables.update(kwargs)\n template = CustomFormats().format(template, **variables)\n if art:\n art = art.format(nfo=template)\n template = art\n for m in re.finditer('<\\\\?([01])\\\\?([\\\\D\\\\d]*?)\\\\?>', template):\n template = template.replace(m.group(0), m.group(2) if int(m.\n group(1)) else '')\n template = '\\n'.join(map(str.rstrip, template.splitlines(keepends=\n False)))\n return template\n\n def set_config(self, file: str, **config: Any) ->None:\n self.file = file\n self.media_info = MediaInfo.parse(self.file)\n self.fanart_api_key = config.get('fanart_api_key')\n self.source = config.get('source')\n self.note = config.get('note')\n self.preview = config.get('preview')\n self.season = config.get('season')\n self.episode, self.episode_name = config.get('episode') or (None, None)\n self.episodes = self.get_tv_episodes()\n self.release_name = self.get_release_name()\n self.videos = self.media_info.video_tracks\n self.audio = self.media_info.audio_tracks\n self.subtitles = self.media_info.text_tracks\n tracks_without_language = [x for x in self.videos + self.audio +\n self.subtitles if not x.language or x.language == 'und']\n if tracks_without_language:\n print(\n 'The following tracks have no language tag! 
All tracks need a language tag!'\n )\n for track in tracks_without_language:\n print(\n f'{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)'\n )\n print(\n \"\"\"Yes, even Video Track's have language e.g., Credits, Signs, Letters, Different Intro Sequence, etc.\nDon't forget to verify and add language tags to the rest of the files too!\"\"\"\n )\n sys.exit(1)\n chapters = next(iter(self.media_info.menu_tracks), None)\n if chapters:\n self.chapters = {'.'.join([k.replace('_', '.')[:-3], k[-3:]]):\n v.strip(':') for k, v in chapters.to_data().items() if\n f\"1{k.replace('_', '')}\".isdigit()}\n self.chapters_numbered = all(x.split(':', 1)[-1].lower() in [\n f'chapter {i + 1}', f'chapter {str(i + 1).zfill(2)}'] for i,\n x in enumerate(self.chapters.values()))\n else:\n self.chapters = {}\n self.chapters_numbered = False\n self.imdb = self.get_imdb_id(config.get('imdb'))\n self.tmdb = self.get_tmdb_id(config.get('tmdb'))\n self.tvdb = self.get_tvdb_id(config.get('tvdb'))\n self.title_name, self.title_year = self.get_title_name_year()\n self.banner_image = self.get_banner_image(self.tvdb\n ) if self.tvdb and self.fanart_api_key else None\n self.preview_images = self.get_preview_images(self.preview\n ) if self.preview else []\n\n def get_imdb_id(self, imdb_id: Any) ->str:\n \"\"\"\n Get an IMDB ID from either the media's global tags, or the config.\n Since IMDB IDs are required for this project, it will bug the user for\n one interactively if not found.\n \"\"\"\n if not imdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n imdb_id = general_track.get('imdb')\n if not imdb_id:\n print('No IMDB ID was provided but is required...')\n while not imdb_id or not isinstance(imdb_id, str):\n user_id = input(\"IMDB ID (e.g., 'tt0487831'): \")\n if not self.IMDB_ID_T.match(user_id):\n print(f'The provided IMDB ID {user_id!r} is not valid...')\n print(\n \"Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').\"\n )\n else:\n imdb_id = user_id\n return imdb_id\n\n def get_tmdb_id(self, tmdb_id: Any) ->Optional[str]:\n \"\"\"\n Get a TMDB ID from either the media's global tags, or the config.\n It will raise a ValueError if the provided ID is invalid.\n \"\"\"\n if not tmdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tmdb_id = general_track.get('tmdb')\n if not tmdb_id:\n print('Warning: No TMDB ID was provided...')\n return None\n if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):\n print(f'The provided TMDB ID {tmdb_id!r} is not valid...')\n print(\n \"Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').\"\n )\n raise ValueError('Invalid TMDB ID')\n return tmdb_id\n\n def get_tvdb_id(self, tvdb_id: Any) ->Optional[int]:\n \"\"\"\n Get a TVDB ID from either the media's global tags, or the config.\n It will raise a ValueError if the provided ID is invalid.\n \"\"\"\n if not tvdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tvdb_id = general_track.get('tvdb')\n if not tvdb_id:\n print('Warning: No TVDB ID was provided...')\n return None\n if isinstance(tvdb_id, int):\n tvdb_id = str(tvdb_id)\n if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):\n print(f'The provided TVDB ID {tvdb_id!r} is not valid...')\n print(\n \"Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').\"\n )\n raise ValueError('Invalid TVDB ID')\n return int(tvdb_id)\n\n def get_title_name_year(self) ->Tuple[str, str]:\n \"\"\"Scrape Title Name and Year (including e.g. 2019-) from IMDB\"\"\"\n r = self.session.get(f'https://www.imdb.com/title/{self.imdb}')\n if r.status_code != 200:\n raise ValueError(\n f'An unexpected error occurred getting IMDB Title Page [{r.status_code}]'\n )\n imdb_page = html.unescape(r.text)\n imdb_title = re.search(\n '<title>(?P<name>.+) \\\\(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)(?P<year>(\\\\d{4})(|– |–\\\\d{4})))\\\\) - IMDb</title>'\n , imdb_page)\n if not imdb_title:\n raise ValueError(\n f'Could not scrape Movie Title or Year for {self.imdb}...')\n return imdb_title.group('name').strip(), imdb_title.group('year'\n ).strip()\n <mask token>\n\n def get_release_name(self) ->str:\n \"\"\"\n Retrieve the release name based on the file used during MediaInfo.\n If a season was specified, but an episode number was not, it presumes the release is a Pack.\n Hence when pack, it uses the parent folder's name as the release name.\n \"\"\"\n if self.season is not None and self.episode is None:\n return os.path.basename(os.path.dirname(self.file))\n return os.path.splitext(os.path.basename(self.file))[0]\n\n def get_banner_image(self, tvdb_id: int) ->Optional[str]:\n \"\"\"\n Get a wide banner image from fanart.tv.\n Currently restricts banners to English-only.\n \"\"\"\n if not tvdb_id:\n return None\n if not self.fanart_api_key:\n raise ValueError('Need Fanart.tv api key for TV titles!')\n r = self.session.get(\n f'http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}'\n )\n if r.status_code == 404:\n return None\n res = r.json()\n error = res.get('error message')\n if error:\n if error == 'Not found':\n return None\n raise ValueError(\n f'An unexpected error occurred while calling Fanart.tv, {res}')\n banner = next((x['url'] for x in res.get('tvbanner') or [] if x[\n 'lang'] == sorted(self.audio, key=lambda x: x.streamorder)[0].\n language), None)\n return banner\n\n def get_preview_images(self, url: str) ->List[Dict[str, str]]:\n if not url:\n return []\n images = []\n for domain in ['imgbox.com', 'beyondhd.co']:\n if domain not in url.lower():\n continue\n page = self.session.get(url).text\n if domain == 'imgbox.com':\n for m in re.finditer(\n 'src=\"(https://thumbs2.imgbox.com.+/)(\\\\w+)_b.([^\"]+)',\n page):\n images.append({'url':\n f'https://imgbox.com/{m.group(2)}', 'src':\n f'{m.group(1)}{m.group(2)}_t.{m.group(3)}'})\n elif domain == 'beyondhd.co':\n for m in re.finditer(\n '/image/([^\"]+)\"\\\\D+src=\"(https://.*beyondhd.co/images.+/(\\\\w+).md.[^\"]+)'\n , page):\n images.append({'url':\n f'https://beyondhd.co/image/{m.group(1)}', 'src': m\n .group(2)})\n break\n return images\n\n def get_video_print(self, videos: List[Track]) ->List[List[str]]:\n if not videos:\n return [['--']]\n data = []\n for video in videos:\n codec = {'MPEG Video':\n f\"MPEG-{(video.format_version or '').replace('Version ', '')}\"\n }.get(video.format, video.format)\n scan_overview = video.scan_type\n vst = False\n if codec in ['MPEG-1', 'MPEG-2']:\n d2v = D2V.load(Path(self.file))\n self.file = d2v.path\n flags = [f for line in [[dict(**y, vob=x['vob'], cell=x[\n 'cell']) for y in x['flags']] for x in d2v.data] for f in\n line]\n interlaced_percent = sum(1 for f in flags if not f[\n 'progressive_frame']) / len(flags) * 100\n if interlaced_percent == 100:\n scan_overview = 'Interlaced (CST)'\n else:\n scan_overview = (\n f'{round(interlaced_percent, 2)}% Interlaced (VST)')\n vst = True\n for ext in ['log', 'd2v', 'mpg', 'mpeg']:\n fp = os.path.splitext(self.file)[0] + '.' + ext\n if os.path.exists(fp):\n os.unlink(fp)\n line_1 = (\n '- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}'\n .format(language=pycountry.languages.get(alpha_2=video.\n language).name, codec=codec, profile=video.format_profile,\n width=video.width, height=video.height, aspect=video.\n other_display_aspect_ratio[0], bitrate=\n f\"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}\"\n ))\n line_2 = (\n ' {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}'\n .format(fps=f'{video.framerate_num}/{video.framerate_den}' if\n video.framerate_num else video.frame_rate, fps_mode='VFR' if\n vst else video.frame_rate_mode, color_space=video.\n color_space, subsampling=video.chroma_subsampling.replace(\n ':', ''), bit_depth=video.bit_depth, scan=scan_overview))\n data.append([line_1, line_2])\n return data\n\n def get_audio_print(self, audio: List[Track]) ->List[str]:\n if not audio:\n return ['--']\n data = []\n for t in audio:\n if t.title and 'Commentary' in t.title:\n title = t.title\n else:\n title = pycountry.languages.get(alpha_2=t.language).name\n if t.channel_layout:\n channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x,\n 1) for x in t.channel_layout.split(' ')))\n else:\n channels = float(t.channel_s)\n bit_rate_mode = f' ({t.bit_rate_mode})' if t.bit_rate_mode else ''\n l1 = (\n f'- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}'\n )\n data += [(' ' + x if i > 0 else x) for i, x in enumerate(\n textwrap.wrap(l1, 64))]\n return data\n\n @staticmethod\n def get_subtitle_print(subs: List[Track]) ->List[str]:\n \"\"\"\n Return a list of a brief subtitle overview per-subtitle.\n\n e.g.\n - English, Forced, SubRip (SRT)\n - English, SubRip (SRT)\n - English, SDH, SubRip (SRT)\n - Spanish, Latin American (SDH), SubRip (SRT)\n\n The bit of text between the Language and the Subtitle format is the Track Title.\n It can be of any format, but it is recommended to be used as shown above.\n\n It will be returned as a list of strings with the `- ` already pre-pended to each entry.\n \"\"\"\n data = []\n if not subs:\n data.append('--')\n for sub in subs:\n line_items = []\n language = pycountry.languages.get(alpha_2=sub.language).name\n if sub.title:\n if language.lower() in sub.title.lower():\n line_items.append(sub.title)\n else:\n line_items.append(f'{language}, {sub.title}')\n else:\n line_items.append(language)\n line_items.append(sub.format.replace('UTF-8', 'SubRip (SRT)'))\n line = '- ' + ', '.join(line_items)\n data += [(' ' + x if i > 0 else x) for i, x in enumerate(\n textwrap.wrap(line, 64))]\n return data\n <mask token>\n\n def get_chapter_print_short(self, chapters: Dict[str, str]) ->str:\n if not chapters:\n return 'No'\n if self.chapters_numbered:\n return f'Yes (Numbered 01-{str(len(chapters)).zfill(2)})'\n return 'Yes (Named)'\n\n @staticmethod\n def get_session() ->requests.Session:\n session = requests.Session()\n session.headers.update({'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n , 'Accept-Language': 'en-US,en;q=0.5', 'DNT': '1',\n 'UPGRADE-INSECURE-REQUESTS': '1'})\n return session\n",
"step-3": "<mask token>\n\n\nclass NFO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self) ->str:\n return '<{c} {attrs}>'.format(c=self.__class__.__name__, attrs=' '.\n join('{}={!r}'.format(k, v) for k, v in self.__dict__.items()))\n\n def run(self, template: str, art: Optional[str]=None, **kwargs: Any) ->str:\n \"\"\"\n Evaluate and apply formatting on template, apply any art if provided.\n Any additional parameters are passed as extra variables to the template.\n The extra variables have priority when there's conflicting variable names.\n \"\"\"\n variables = self.__dict__\n variables.update(kwargs)\n template = CustomFormats().format(template, **variables)\n if art:\n art = art.format(nfo=template)\n template = art\n for m in re.finditer('<\\\\?([01])\\\\?([\\\\D\\\\d]*?)\\\\?>', template):\n template = template.replace(m.group(0), m.group(2) if int(m.\n group(1)) else '')\n template = '\\n'.join(map(str.rstrip, template.splitlines(keepends=\n False)))\n return template\n\n def set_config(self, file: str, **config: Any) ->None:\n self.file = file\n self.media_info = MediaInfo.parse(self.file)\n self.fanart_api_key = config.get('fanart_api_key')\n self.source = config.get('source')\n self.note = config.get('note')\n self.preview = config.get('preview')\n self.season = config.get('season')\n self.episode, self.episode_name = config.get('episode') or (None, None)\n self.episodes = self.get_tv_episodes()\n self.release_name = self.get_release_name()\n self.videos = self.media_info.video_tracks\n self.audio = self.media_info.audio_tracks\n self.subtitles = self.media_info.text_tracks\n tracks_without_language = [x for x in self.videos + self.audio +\n self.subtitles if not x.language or x.language == 'und']\n if tracks_without_language:\n print(\n 'The following tracks have no language tag! All tracks need a language tag!'\n )\n for track in tracks_without_language:\n print(\n f'{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)'\n )\n print(\n \"\"\"Yes, even Video Track's have language e.g., Credits, Signs, Letters, Different Intro Sequence, etc.\nDon't forget to verify and add language tags to the rest of the files too!\"\"\"\n )\n sys.exit(1)\n chapters = next(iter(self.media_info.menu_tracks), None)\n if chapters:\n self.chapters = {'.'.join([k.replace('_', '.')[:-3], k[-3:]]):\n v.strip(':') for k, v in chapters.to_data().items() if\n f\"1{k.replace('_', '')}\".isdigit()}\n self.chapters_numbered = all(x.split(':', 1)[-1].lower() in [\n f'chapter {i + 1}', f'chapter {str(i + 1).zfill(2)}'] for i,\n x in enumerate(self.chapters.values()))\n else:\n self.chapters = {}\n self.chapters_numbered = False\n self.imdb = self.get_imdb_id(config.get('imdb'))\n self.tmdb = self.get_tmdb_id(config.get('tmdb'))\n self.tvdb = self.get_tvdb_id(config.get('tvdb'))\n self.title_name, self.title_year = self.get_title_name_year()\n self.banner_image = self.get_banner_image(self.tvdb\n ) if self.tvdb and self.fanart_api_key else None\n self.preview_images = self.get_preview_images(self.preview\n ) if self.preview else []\n\n def get_imdb_id(self, imdb_id: Any) ->str:\n \"\"\"\n Get an IMDB ID from either the media's global tags, or the config.\n Since IMDB IDs are required for this project, it will bug the user for\n one interactively if not found.\n \"\"\"\n if not imdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n imdb_id = general_track.get('imdb')\n if not imdb_id:\n print('No IMDB ID was provided but is required...')\n while not imdb_id or not isinstance(imdb_id, str):\n user_id = input(\"IMDB ID (e.g., 'tt0487831'): \")\n if not self.IMDB_ID_T.match(user_id):\n print(f'The provided IMDB ID {user_id!r} is not valid...')\n print(\n \"Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').\"\n )\n else:\n imdb_id = user_id\n return imdb_id\n\n def get_tmdb_id(self, tmdb_id: Any) ->Optional[str]:\n \"\"\"\n Get a TMDB ID from either the media's global tags, or the config.\n It will raise a ValueError if the provided ID is invalid.\n \"\"\"\n if not tmdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tmdb_id = general_track.get('tmdb')\n if not tmdb_id:\n print('Warning: No TMDB ID was provided...')\n return None\n if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):\n print(f'The provided TMDB ID {tmdb_id!r} is not valid...')\n print(\n \"Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').\"\n )\n raise ValueError('Invalid TMDB ID')\n return tmdb_id\n\n def get_tvdb_id(self, tvdb_id: Any) ->Optional[int]:\n \"\"\"\n Get a TVDB ID from either the media's global tags, or the config.\n It will raise a ValueError if the provided ID is invalid.\n \"\"\"\n if not tvdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tvdb_id = general_track.get('tvdb')\n if not tvdb_id:\n print('Warning: No TVDB ID was provided...')\n return None\n if isinstance(tvdb_id, int):\n tvdb_id = str(tvdb_id)\n if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):\n print(f'The provided TVDB ID {tvdb_id!r} is not valid...')\n print(\n \"Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').\"\n )\n raise ValueError('Invalid TVDB ID')\n return int(tvdb_id)\n\n def get_title_name_year(self) ->Tuple[str, str]:\n \"\"\"Scrape Title Name and Year (including e.g. 2019-) from IMDB\"\"\"\n r = self.session.get(f'https://www.imdb.com/title/{self.imdb}')\n if r.status_code != 200:\n raise ValueError(\n f'An unexpected error occurred getting IMDB Title Page [{r.status_code}]'\n )\n imdb_page = html.unescape(r.text)\n imdb_title = re.search(\n '<title>(?P<name>.+) \\\\(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)(?P<year>(\\\\d{4})(|– |–\\\\d{4})))\\\\) - IMDb</title>'\n , imdb_page)\n if not imdb_title:\n raise ValueError(\n f'Could not scrape Movie Title or Year for {self.imdb}...')\n return imdb_title.group('name').strip(), imdb_title.group('year'\n ).strip()\n\n def get_tv_episodes(self) ->int:\n \"\"\"Calculate total episode count based on neighbouring same-extension files.\"\"\"\n return len(glob.glob(os.path.join(os.path.dirname(self.file),\n f'*{os.path.splitext(self.file)[-1]}')))\n\n def get_release_name(self) ->str:\n \"\"\"\n Retrieve the release name based on the file used during MediaInfo.\n If a season was specified, but an episode number was not, it presumes the release is a Pack.\n Hence when pack, it uses the parent folder's name as the release name.\n \"\"\"\n if self.season is not None and self.episode is None:\n return os.path.basename(os.path.dirname(self.file))\n return os.path.splitext(os.path.basename(self.file))[0]\n\n def get_banner_image(self, tvdb_id: int) ->Optional[str]:\n \"\"\"\n Get a wide banner image from fanart.tv.\n Currently restricts banners to English-only.\n \"\"\"\n if not tvdb_id:\n return None\n if not self.fanart_api_key:\n raise ValueError('Need Fanart.tv api key for TV titles!')\n r = self.session.get(\n f'http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}'\n )\n if r.status_code == 404:\n return None\n res = r.json()\n error = res.get('error message')\n if error:\n if error == 'Not found':\n return None\n raise ValueError(\n f'An unexpected error occurred while calling Fanart.tv, {res}')\n banner = next((x['url'] for x in res.get('tvbanner') or [] if x[\n 'lang'] == sorted(self.audio, key=lambda x: x.streamorder)[0].\n language), None)\n return banner\n\n def get_preview_images(self, url: str) ->List[Dict[str, str]]:\n if not url:\n return []\n images = []\n for domain in ['imgbox.com', 'beyondhd.co']:\n if domain not in url.lower():\n continue\n page = self.session.get(url).text\n if domain == 'imgbox.com':\n for m in re.finditer(\n 'src=\"(https://thumbs2.imgbox.com.+/)(\\\\w+)_b.([^\"]+)',\n page):\n images.append({'url':\n f'https://imgbox.com/{m.group(2)}', 'src':\n f'{m.group(1)}{m.group(2)}_t.{m.group(3)}'})\n elif domain == 'beyondhd.co':\n for m in re.finditer(\n '/image/([^\"]+)\"\\\\D+src=\"(https://.*beyondhd.co/images.+/(\\\\w+).md.[^\"]+)'\n , page):\n images.append({'url':\n f'https://beyondhd.co/image/{m.group(1)}', 'src': m\n .group(2)})\n break\n return images\n\n def get_video_print(self, videos: List[Track]) ->List[List[str]]:\n if not videos:\n return [['--']]\n data = []\n for video in videos:\n codec = {'MPEG Video':\n f\"MPEG-{(video.format_version or '').replace('Version ', '')}\"\n }.get(video.format, video.format)\n scan_overview = video.scan_type\n vst = False\n if codec in ['MPEG-1', 'MPEG-2']:\n d2v = D2V.load(Path(self.file))\n self.file = d2v.path\n flags = [f for line in [[dict(**y, vob=x['vob'], cell=x[\n 'cell']) for y in x['flags']] for x in d2v.data] for f in\n line]\n interlaced_percent = sum(1 for f in flags if not f[\n 'progressive_frame']) / len(flags) * 100\n if interlaced_percent == 100:\n scan_overview = 'Interlaced (CST)'\n else:\n scan_overview = (\n f'{round(interlaced_percent, 2)}% Interlaced (VST)')\n vst = True\n for ext in ['log', 'd2v', 'mpg', 'mpeg']:\n fp = os.path.splitext(self.file)[0] + '.' + ext\n if os.path.exists(fp):\n os.unlink(fp)\n line_1 = (\n '- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}'\n .format(language=pycountry.languages.get(alpha_2=video.\n language).name, codec=codec, profile=video.format_profile,\n width=video.width, height=video.height, aspect=video.\n other_display_aspect_ratio[0], bitrate=\n f\"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}\"\n ))\n line_2 = (\n ' {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}'\n .format(fps=f'{video.framerate_num}/{video.framerate_den}' if\n video.framerate_num else video.frame_rate, fps_mode='VFR' if\n vst else video.frame_rate_mode, color_space=video.\n color_space, subsampling=video.chroma_subsampling.replace(\n ':', ''), bit_depth=video.bit_depth, scan=scan_overview))\n data.append([line_1, line_2])\n return data\n\n def get_audio_print(self, audio: List[Track]) ->List[str]:\n if not audio:\n return ['--']\n data = []\n for t in audio:\n if t.title and 'Commentary' in t.title:\n title = t.title\n else:\n title = pycountry.languages.get(alpha_2=t.language).name\n if t.channel_layout:\n channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x,\n 1) for x in t.channel_layout.split(' ')))\n else:\n channels = float(t.channel_s)\n bit_rate_mode = f' ({t.bit_rate_mode})' if t.bit_rate_mode else ''\n l1 = (\n f'- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}'\n )\n data += [(' ' + x if i > 0 else x) for i, x in enumerate(\n textwrap.wrap(l1, 64))]\n return data\n\n @staticmethod\n def get_subtitle_print(subs: List[Track]) ->List[str]:\n \"\"\"\n Return a list of a brief subtitle overview per-subtitle.\n\n e.g.\n - English, Forced, SubRip (SRT)\n - English, SubRip (SRT)\n - English, SDH, SubRip (SRT)\n - Spanish, Latin American (SDH), SubRip (SRT)\n\n The bit of text between the Language and the Subtitle format is the Track Title.\n It can be of any format, but it is recommended to be used as shown above.\n\n It will be returned as a list of strings with the `- ` already pre-pended to each entry.\n \"\"\"\n data = []\n if not subs:\n data.append('--')\n for sub in subs:\n line_items = []\n language = pycountry.languages.get(alpha_2=sub.language).name\n if sub.title:\n if language.lower() in sub.title.lower():\n line_items.append(sub.title)\n else:\n line_items.append(f'{language}, {sub.title}')\n else:\n line_items.append(language)\n line_items.append(sub.format.replace('UTF-8', 'SubRip (SRT)'))\n line = '- ' + ', '.join(line_items)\n data += [(' ' + x if i > 0 else x) for i, x in enumerate(\n textwrap.wrap(line, 64))]\n return data\n\n @staticmethod\n def get_chapter_print(chapters: Dict[str, str]) ->List[str]:\n if not chapters:\n return ['--']\n return [f'- {k}: {v}' for k, v in chapters.items()]\n\n def get_chapter_print_short(self, chapters: Dict[str, str]) ->str:\n if not chapters:\n return 'No'\n if self.chapters_numbered:\n return f'Yes (Numbered 01-{str(len(chapters)).zfill(2)})'\n return 'Yes (Named)'\n\n @staticmethod\n def get_session() ->requests.Session:\n session = requests.Session()\n session.headers.update({'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n , 'Accept-Language': 'en-US,en;q=0.5', 'DNT': '1',\n 'UPGRADE-INSECURE-REQUESTS': '1'})\n return session\n",
"step-4": "<mask token>\n\n\nclass NFO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self) ->None:\n self.media_info: MediaInfo\n self.file: str\n self.season: Optional[Union[int, str]]\n self.episode: Optional[int]\n self.episode_name: Optional[str]\n self.videos: List[Track]\n self.audio: List[Track]\n self.subtitles: List[Track]\n self.chapters: Dict[str, str]\n self.chapters_numbered: bool\n self.fanart_api_key: Optional[str]\n self.source: Optional[str]\n self.note: Optional[str]\n self.preview: Optional[str]\n self.imdb: str\n self.tmdb: Optional[str]\n self.tvdb: Optional[int]\n self.title_name: str\n self.title_year: str\n self.episodes: int\n self.release_name: str\n self.preview_images: List[dict[str, str]]\n self.banner_image: Optional[str]\n self.session = self.get_session()\n\n def __repr__(self) ->str:\n return '<{c} {attrs}>'.format(c=self.__class__.__name__, attrs=' '.\n join('{}={!r}'.format(k, v) for k, v in self.__dict__.items()))\n\n def run(self, template: str, art: Optional[str]=None, **kwargs: Any) ->str:\n \"\"\"\n Evaluate and apply formatting on template, apply any art if provided.\n Any additional parameters are passed as extra variables to the template.\n The extra variables have priority when there's conflicting variable names.\n \"\"\"\n variables = self.__dict__\n variables.update(kwargs)\n template = CustomFormats().format(template, **variables)\n if art:\n art = art.format(nfo=template)\n template = art\n for m in re.finditer('<\\\\?([01])\\\\?([\\\\D\\\\d]*?)\\\\?>', template):\n template = template.replace(m.group(0), m.group(2) if int(m.\n group(1)) else '')\n template = '\\n'.join(map(str.rstrip, template.splitlines(keepends=\n False)))\n return template\n\n def set_config(self, file: str, **config: Any) ->None:\n self.file = file\n self.media_info = MediaInfo.parse(self.file)\n self.fanart_api_key = config.get('fanart_api_key')\n self.source = config.get('source')\n self.note = config.get('note')\n self.preview = config.get('preview')\n self.season = config.get('season')\n self.episode, self.episode_name = config.get('episode') or (None, None)\n self.episodes = self.get_tv_episodes()\n self.release_name = self.get_release_name()\n self.videos = self.media_info.video_tracks\n self.audio = self.media_info.audio_tracks\n self.subtitles = self.media_info.text_tracks\n tracks_without_language = [x for x in self.videos + self.audio +\n self.subtitles if not x.language or x.language == 'und']\n if tracks_without_language:\n print(\n 'The following tracks have no language tag! All tracks need a language tag!'\n )\n for track in tracks_without_language:\n print(\n f'{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)'\n )\n print(\n \"\"\"Yes, even Video Track's have language e.g., Credits, Signs, Letters, Different Intro Sequence, etc.\nDon't forget to verify and add language tags to the rest of the files too!\"\"\"\n )\n sys.exit(1)\n chapters = next(iter(self.media_info.menu_tracks), None)\n if chapters:\n self.chapters = {'.'.join([k.replace('_', '.')[:-3], k[-3:]]):\n v.strip(':') for k, v in chapters.to_data().items() if\n f\"1{k.replace('_', '')}\".isdigit()}\n self.chapters_numbered = all(x.split(':', 1)[-1].lower() in [\n f'chapter {i + 1}', f'chapter {str(i + 1).zfill(2)}'] for i,\n x in enumerate(self.chapters.values()))\n else:\n self.chapters = {}\n self.chapters_numbered = False\n self.imdb = self.get_imdb_id(config.get('imdb'))\n self.tmdb = self.get_tmdb_id(config.get('tmdb'))\n self.tvdb = self.get_tvdb_id(config.get('tvdb'))\n self.title_name, self.title_year = self.get_title_name_year()\n self.banner_image = self.get_banner_image(self.tvdb\n ) if self.tvdb and self.fanart_api_key else None\n self.preview_images = self.get_preview_images(self.preview\n ) if self.preview else []\n\n def get_imdb_id(self, imdb_id: Any) ->str:\n \"\"\"\n Get an IMDB ID from either the media's global tags, or the config.\n Since IMDB IDs are required for this project, it will bug the user for\n one interactively if not found.\n \"\"\"\n if not imdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n imdb_id = general_track.get('imdb')\n if not imdb_id:\n print('No IMDB ID was provided but is required...')\n while not imdb_id or not isinstance(imdb_id, str):\n user_id = input(\"IMDB ID (e.g., 'tt0487831'): \")\n if not self.IMDB_ID_T.match(user_id):\n print(f'The provided IMDB ID {user_id!r} is not valid...')\n print(\n \"Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').\"\n )\n else:\n imdb_id = user_id\n return imdb_id\n\n def get_tmdb_id(self, tmdb_id: Any) ->Optional[str]:\n \"\"\"\n Get a TMDB ID from either the media's global tags, or the config.\n It will raise a ValueError if the provided ID is invalid.\n \"\"\"\n if not tmdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tmdb_id = general_track.get('tmdb')\n if not tmdb_id:\n print('Warning: No TMDB ID was provided...')\n return None\n if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):\n print(f'The provided TMDB ID {tmdb_id!r} is not valid...')\n print(\n \"Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').\"\n )\n raise ValueError('Invalid TMDB ID')\n return tmdb_id\n\n def get_tvdb_id(self, tvdb_id: Any) ->Optional[int]:\n \"\"\"\n Get a TVDB ID from either the media's global tags, or the config.\n It will raise a ValueError if the provided ID is invalid.\n \"\"\"\n if not tvdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tvdb_id = general_track.get('tvdb')\n if not tvdb_id:\n print('Warning: No TVDB ID was provided...')\n return None\n if isinstance(tvdb_id, int):\n tvdb_id = str(tvdb_id)\n if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):\n print(f'The provided TVDB ID {tvdb_id!r} is not valid...')\n print(\n \"Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').\"\n )\n raise ValueError('Invalid TVDB ID')\n return int(tvdb_id)\n\n def get_title_name_year(self) ->Tuple[str, str]:\n \"\"\"Scrape Title Name and Year (including e.g. 2019-) from IMDB\"\"\"\n r = self.session.get(f'https://www.imdb.com/title/{self.imdb}')\n if r.status_code != 200:\n raise ValueError(\n f'An unexpected error occurred getting IMDB Title Page [{r.status_code}]'\n )\n imdb_page = html.unescape(r.text)\n imdb_title = re.search(\n '<title>(?P<name>.+) \\\\(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)(?P<year>(\\\\d{4})(|– |–\\\\d{4})))\\\\) - IMDb</title>'\n , imdb_page)\n if not imdb_title:\n raise ValueError(\n f'Could not scrape Movie Title or Year for {self.imdb}...')\n return imdb_title.group('name').strip(), imdb_title.group('year'\n ).strip()\n\n def get_tv_episodes(self) ->int:\n \"\"\"Calculate total episode count based on neighbouring same-extension files.\"\"\"\n return len(glob.glob(os.path.join(os.path.dirname(self.file),\n f'*{os.path.splitext(self.file)[-1]}')))\n\n def get_release_name(self) ->str:\n \"\"\"\n Retrieve the release name based on the file used during MediaInfo.\n If a season was specified, but an episode number was not, it presumes the release is a Pack.\n Hence when pack, it uses the parent folder's name as the release name.\n \"\"\"\n if self.season is not None and self.episode is None:\n return os.path.basename(os.path.dirname(self.file))\n return os.path.splitext(os.path.basename(self.file))[0]\n\n def get_banner_image(self, tvdb_id: int) ->Optional[str]:\n \"\"\"\n Get a wide banner image from fanart.tv.\n Currently restricts banners to English-only.\n \"\"\"\n if not tvdb_id:\n return None\n if not self.fanart_api_key:\n raise ValueError('Need Fanart.tv api key for TV titles!')\n r = self.session.get(\n f'http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}'\n )\n if r.status_code == 404:\n return None\n res = r.json()\n error = res.get('error message')\n if error:\n if error == 'Not found':\n return None\n raise ValueError(\n f'An unexpected error occurred while calling Fanart.tv, {res}')\n banner = next((x['url'] for x in res.get('tvbanner') or [] if x[\n 'lang'] == sorted(self.audio, key=lambda x: x.streamorder)[0].\n language), None)\n return banner\n\n def get_preview_images(self, url: str) ->List[Dict[str, str]]:\n if not url:\n return []\n images = []\n for domain in ['imgbox.com', 'beyondhd.co']:\n if domain not in url.lower():\n continue\n page = self.session.get(url).text\n if domain == 'imgbox.com':\n for m in re.finditer(\n 'src=\"(https://thumbs2.imgbox.com.+/)(\\\\w+)_b.([^\"]+)',\n page):\n images.append({'url':\n f'https://imgbox.com/{m.group(2)}', 'src':\n f'{m.group(1)}{m.group(2)}_t.{m.group(3)}'})\n elif domain == 'beyondhd.co':\n for m in re.finditer(\n '/image/([^\"]+)\"\\\\D+src=\"(https://.*beyondhd.co/images.+/(\\\\w+).md.[^\"]+)'\n , page):\n images.append({'url':\n f'https://beyondhd.co/image/{m.group(1)}', 'src': m\n .group(2)})\n break\n return images\n\n def get_video_print(self, videos: List[Track]) ->List[List[str]]:\n if not videos:\n return [['--']]\n data = []\n for video in videos:\n codec = {'MPEG Video':\n f\"MPEG-{(video.format_version or '').replace('Version ', '')}\"\n }.get(video.format, video.format)\n scan_overview = video.scan_type\n vst = False\n if codec in ['MPEG-1', 'MPEG-2']:\n d2v = D2V.load(Path(self.file))\n self.file = d2v.path\n flags = [f for line in [[dict(**y, vob=x['vob'], cell=x[\n 'cell']) for y in x['flags']] for x in d2v.data] for f in\n line]\n interlaced_percent = sum(1 for f in flags if not f[\n 'progressive_frame']) / len(flags) * 100\n if interlaced_percent == 100:\n scan_overview = 'Interlaced (CST)'\n else:\n scan_overview = (\n f'{round(interlaced_percent, 2)}% Interlaced (VST)')\n vst = True\n for ext in ['log', 'd2v', 'mpg', 'mpeg']:\n fp = os.path.splitext(self.file)[0] + '.' + ext\n if os.path.exists(fp):\n os.unlink(fp)\n line_1 = (\n '- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}'\n .format(language=pycountry.languages.get(alpha_2=video.\n language).name, codec=codec, profile=video.format_profile,\n width=video.width, height=video.height, aspect=video.\n other_display_aspect_ratio[0], bitrate=\n f\"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}\"\n ))\n line_2 = (\n ' {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}'\n .format(fps=f'{video.framerate_num}/{video.framerate_den}' if\n video.framerate_num else video.frame_rate, fps_mode='VFR' if\n vst else video.frame_rate_mode, color_space=video.\n color_space, subsampling=video.chroma_subsampling.replace(\n ':', ''), bit_depth=video.bit_depth, scan=scan_overview))\n data.append([line_1, line_2])\n return data\n\n def get_audio_print(self, audio: List[Track]) ->List[str]:\n if not audio:\n return ['--']\n data = []\n for t in audio:\n if t.title and 'Commentary' in t.title:\n title = t.title\n else:\n title = pycountry.languages.get(alpha_2=t.language).name\n if t.channel_layout:\n channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x,\n 1) for x in t.channel_layout.split(' ')))\n else:\n channels = float(t.channel_s)\n bit_rate_mode = f' ({t.bit_rate_mode})' if t.bit_rate_mode else ''\n l1 = (\n f'- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}'\n )\n data += [(' ' + x if i > 0 else x) for i, x in enumerate(\n textwrap.wrap(l1, 64))]\n return data\n\n @staticmethod\n def get_subtitle_print(subs: List[Track]) ->List[str]:\n \"\"\"\n Return a list of a brief subtitle overview per-subtitle.\n\n e.g.\n - English, Forced, SubRip (SRT)\n - English, SubRip (SRT)\n - English, SDH, SubRip (SRT)\n - Spanish, Latin American (SDH), SubRip (SRT)\n\n The bit of text between the Language and the Subtitle format is the Track Title.\n It can be of any format, but it is recommended to be used as shown above.\n\n It will be returned as a list of strings with the `- ` already pre-pended to each entry.\n \"\"\"\n data = []\n if not subs:\n data.append('--')\n for sub in subs:\n line_items = []\n language = pycountry.languages.get(alpha_2=sub.language).name\n if sub.title:\n if language.lower() in sub.title.lower():\n line_items.append(sub.title)\n else:\n line_items.append(f'{language}, {sub.title}')\n else:\n line_items.append(language)\n line_items.append(sub.format.replace('UTF-8', 'SubRip (SRT)'))\n line = '- ' + ', '.join(line_items)\n data += [(' ' + x if i > 0 else x) for i, x in enumerate(\n textwrap.wrap(line, 64))]\n return data\n\n @staticmethod\n def get_chapter_print(chapters: Dict[str, str]) ->List[str]:\n if not chapters:\n return ['--']\n return [f'- {k}: {v}' for k, v in chapters.items()]\n\n def get_chapter_print_short(self, chapters: Dict[str, str]) ->str:\n if not chapters:\n return 'No'\n if self.chapters_numbered:\n return f'Yes (Numbered 01-{str(len(chapters)).zfill(2)})'\n return 'Yes (Named)'\n\n @staticmethod\n def get_session() ->requests.Session:\n session = requests.Session()\n session.headers.update({'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n , 'Accept-Language': 'en-US,en;q=0.5', 'DNT': '1',\n 'UPGRADE-INSECURE-REQUESTS': '1'})\n return session\n",
"step-5": "import glob\nimport html\nimport os\nimport re\nimport sys\nimport textwrap\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport pycountry\nimport requests\nfrom pyd2v import D2V\nfrom pymediainfo import MediaInfo, Track\n\nfrom pynfogen.formatter import CustomFormats\n\n\nclass NFO:\n    AUDIO_CHANNEL_LAYOUT_WEIGHT = {\n        \"LFE\": 0.1\n    }\n    IMDB_ID_T = re.compile(r\"^tt\\d{7,8}$\")\n    TMDB_ID_T = re.compile(r\"^(tv|movie)/\\d+$\")\n    TVDB_ID_T = re.compile(r\"^\\d+$\")\n\n    def __init__(self) -> None:\n        self.media_info: MediaInfo\n\n        self.file: str\n        self.season: Optional[Union[int, str]]\n        self.episode: Optional[int]\n        self.episode_name: Optional[str]\n\n        self.videos: List[Track]\n        self.audio: List[Track]\n        self.subtitles: List[Track]\n        self.chapters: Dict[str, str]\n        self.chapters_numbered: bool\n\n        self.fanart_api_key: Optional[str]\n        self.source: Optional[str]\n        self.note: Optional[str]\n        self.preview: Optional[str]\n\n        self.imdb: str\n        self.tmdb: Optional[str]\n        self.tvdb: Optional[int]\n\n        self.title_name: str\n        self.title_year: str\n        self.episodes: int\n        self.release_name: str\n        self.preview_images: List[dict[str, str]]\n        self.banner_image: Optional[str]\n\n        self.session = self.get_session()\n\n    def __repr__(self) -> str:\n        return \"<{c} {attrs}>\".format(\n            c=self.__class__.__name__,\n            attrs=\" \".join(\"{}={!r}\".format(k, v) for k, v in self.__dict__.items()),\n        )\n\n    def run(self, template: str, art: Optional[str] = None, **kwargs: Any) -> str:\n        \"\"\"\n        Evaluate and apply formatting on template, apply any art if provided.\n        Any additional parameters are passed as extra variables to the template.\n        The extra variables have priority when there's conflicting variable names.\n        \"\"\"\n        variables = self.__dict__\n        variables.update(kwargs)\n\n        template = CustomFormats().format(template, **variables)\n        if art:\n            art = art.format(nfo=template)\n            template = art\n\n        for m in re.finditer(r\"<\\?([01])\\?([\\D\\d]*?)\\?>\", template):\n            # TODO: This if check is quite yucky, look into alternative options.\n            # Ideally a custom format spec would be great.\n            template = template.replace(\n                m.group(0),\n                m.group(2) if int(m.group(1)) else \"\"\n            )\n\n        template = \"\\n\".join(map(str.rstrip, template.splitlines(keepends=False)))\n\n        return template\n\n    def set_config(self, file: str, **config: Any) -> None:\n        self.file = file\n        self.media_info = MediaInfo.parse(self.file)\n\n        self.fanart_api_key = config.get(\"fanart_api_key\")\n        self.source = config.get(\"source\")\n        self.note = config.get(\"note\")\n        self.preview = config.get(\"preview\")\n\n        self.season = config.get(\"season\")\n        self.episode, self.episode_name = config.get(\"episode\") or (None, None)\n        self.episodes = self.get_tv_episodes()\n        self.release_name = self.get_release_name()\n\n        self.videos = self.media_info.video_tracks\n        self.audio = self.media_info.audio_tracks\n        self.subtitles = self.media_info.text_tracks\n\n        tracks_without_language = [\n            x for x in self.videos + self.audio + self.subtitles\n            if not x.language or x.language == \"und\"\n        ]\n        if tracks_without_language:\n            print(\"The following tracks have no language tag! All tracks need a language tag!\")\n            for track in tracks_without_language:\n                print(f\"{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)\")\n            print(\n                \"Yes, even Video Track's have language e.g., Credits, Signs, Letters, Different Intro Sequence, etc.\\n\"\n                \"Don't forget to verify and add language tags to the rest of the files too!\"\n            )\n            sys.exit(1)\n\n        chapters = next(iter(self.media_info.menu_tracks), None)\n        if chapters:\n            self.chapters = {\n                \".\".join([k.replace(\"_\", \".\")[:-3], k[-3:]]): v.strip(\":\")\n                for k, v in chapters.to_data().items()\n                if f\"1{k.replace('_', '')}\".isdigit()\n            }\n            self.chapters_numbered = all(\n                x.split(\":\", 1)[-1].lower() in [f\"chapter {i + 1}\", f\"chapter {str(i + 1).zfill(2)}\"]\n                for i, x in enumerate(self.chapters.values())\n            )\n        else:\n            self.chapters = {}\n            self.chapters_numbered = False\n\n        self.imdb = self.get_imdb_id(config.get(\"imdb\"))\n        self.tmdb = self.get_tmdb_id(config.get(\"tmdb\"))\n        self.tvdb = self.get_tvdb_id(config.get(\"tvdb\"))\n\n        self.title_name, self.title_year = self.get_title_name_year()\n        self.banner_image = self.get_banner_image(self.tvdb) if self.tvdb and self.fanart_api_key else None\n        self.preview_images = self.get_preview_images(self.preview) if self.preview else []\n\n    def get_imdb_id(self, imdb_id: Any) -> str:\n        \"\"\"\n        Get an IMDB ID from either the media's global tags, or the config.\n        Since IMDB IDs are required for this project, it will bug the user for\n        one interactively if not found.\n        \"\"\"\n        if not imdb_id:\n            general_track = self.media_info.general_tracks[0].to_data()\n            imdb_id = general_track.get(\"imdb\")\n        if not imdb_id:\n            print(\"No IMDB ID was provided but is required...\")\n        while not imdb_id or not isinstance(imdb_id, str):\n            user_id = input(\"IMDB ID (e.g., 'tt0487831'): \")\n            if not self.IMDB_ID_T.match(user_id):\n                print(f\"The provided IMDB ID {user_id!r} is not valid...\")\n                print(\"Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').\")\n            else:\n                imdb_id = user_id\n        return imdb_id\n\n    def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]:\n        \"\"\"\n        Get a TMDB ID from either the media's global tags, or the config.\n        It will raise a ValueError if the provided ID is invalid.\n        \"\"\"\n        if not tmdb_id:\n            general_track = self.media_info.general_tracks[0].to_data()\n            tmdb_id = general_track.get(\"tmdb\")\n        if not tmdb_id:\n            print(\"Warning: No TMDB ID was provided...\")\n            return None\n        if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):\n            print(f\"The provided TMDB ID {tmdb_id!r} is not valid...\")\n            print(\"Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').\")\n            raise ValueError(\"Invalid TMDB ID\")\n        return tmdb_id\n\n    def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]:\n        \"\"\"\n        Get a TVDB ID from either the media's global tags, or the config.\n        It will raise a ValueError if the provided ID is invalid.\n        \"\"\"\n        if not tvdb_id:\n            general_track = self.media_info.general_tracks[0].to_data()\n            tvdb_id = general_track.get(\"tvdb\")\n        if not tvdb_id:\n            print(\"Warning: No TVDB ID was provided...\")\n            return None\n        if isinstance(tvdb_id, int):\n            tvdb_id = str(tvdb_id)\n        if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):\n            print(f\"The provided TVDB ID {tvdb_id!r} is not valid...\")\n            print(\"Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').\")\n            raise ValueError(\"Invalid TVDB ID\")\n        return int(tvdb_id)\n\n    def get_title_name_year(self) -> Tuple[str, str]:\n        \"\"\"Scrape Title Name and Year (including e.g. 2019-) from IMDB\"\"\"\n        r = self.session.get(f\"https://www.imdb.com/title/{self.imdb}\")\n        if r.status_code != 200:\n            raise ValueError(f\"An unexpected error occurred getting IMDB Title Page [{r.status_code}]\")\n        imdb_page = html.unescape(r.text)\n        imdb_title = re.search(\n            # testing ground: https://regex101.com/r/bEoEDn/1\n            r\"<title>(?P<name>.+) \\(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)\"\n            r\"(?P<year>(\\d{4})(|– |–\\d{4})))\\) - IMDb</title>\",\n            imdb_page\n        )\n        if not imdb_title:\n            raise ValueError(f\"Could not scrape Movie Title or Year for {self.imdb}...\")\n        return imdb_title.group(\"name\").strip(), imdb_title.group(\"year\").strip()\n\n    def get_tv_episodes(self) -> int:\n        \"\"\"Calculate total episode count based on neighbouring same-extension files.\"\"\"\n        return len(glob.glob(os.path.join(\n            os.path.dirname(self.file),\n            f\"*{os.path.splitext(self.file)[-1]}\"\n        )))\n\n    def get_release_name(self) -> str:\n        \"\"\"\n        Retrieve the release name based on the file used during MediaInfo.\n        If a season was specified, but an episode number was not, it presumes the release is a Pack.\n        Hence when pack, it uses the parent folder's name as the release name.\n        \"\"\"\n        if self.season is not None and self.episode is None:\n            return os.path.basename(os.path.dirname(self.file))\n        return os.path.splitext(os.path.basename(self.file))[0]\n\n    def get_banner_image(self, tvdb_id: int) -> Optional[str]:\n        \"\"\"\n        Get a wide banner image from fanart.tv.\n        Currently restricts banners to English-only.\n        \"\"\"\n        if not tvdb_id:\n            return None\n        if not self.fanart_api_key:\n            raise ValueError(\"Need Fanart.tv api key for TV titles!\")\n\n        r = self.session.get(f\"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}\")\n        if r.status_code == 404:\n            return None\n        res = r.json()\n\n        error = res.get(\"error message\")\n        if error:\n            if error == \"Not found\":\n                return None\n            raise ValueError(f\"An unexpected error occurred while calling Fanart.tv, {res}\")\n\n        banner = next((\n            x[\"url\"] for x in (res.get(\"tvbanner\") or [])\n            if x[\"lang\"] == sorted(self.audio, key=lambda x: x.streamorder)[0].language\n        ), None)\n\n        return banner\n\n    def get_preview_images(self, url: str) -> List[Dict[str, str]]:\n        if not url:\n            return []\n        images = []\n        for domain in [\"imgbox.com\", \"beyondhd.co\"]:\n            if domain not in url.lower():\n                continue\n            page = self.session.get(url).text\n            if domain == \"imgbox.com\":\n                for m in re.finditer('src=\"(https://thumbs2.imgbox.com.+/)(\\\\w+)_b.([^\"]+)', page):\n                    images.append({\n                        \"url\": f\"https://imgbox.com/{m.group(2)}\",\n                        \"src\": f\"{m.group(1)}{m.group(2)}_t.{m.group(3)}\"\n                    })\n            elif domain == \"beyondhd.co\":\n                for m in re.finditer('/image/([^\"]+)\"\\\\D+src=\"(https://.*beyondhd.co/images.+/(\\\\w+).md.[^\"]+)', page):\n                    images.append({\n                        \"url\": f\"https://beyondhd.co/image/{m.group(1)}\",\n                        \"src\": m.group(2)\n                    })\n            break\n        return images\n\n    def get_video_print(self, videos: List[Track]) -> List[List[str]]:\n        if not videos:\n            return [[\"--\"]]\n        data = []\n        for video in videos:\n            codec = {\n                \"MPEG Video\": f\"MPEG-{(video.format_version or '').replace('Version ', '')}\"\n            }.get(video.format, video.format)\n            scan_overview = video.scan_type\n            vst = False\n            if codec in [\"MPEG-1\", \"MPEG-2\"]:\n                # parse d2v file with pyd2v, generates D2V if needed\n                d2v = D2V.load(Path(self.file))\n                self.file = d2v.path\n                # get every frames' flag data, this contains information on displaying frames\n                # add vob and cell number to each frames flag data as well\n                flags = [f for line in [\n                    [dict(**y, vob=x[\"vob\"], cell=x[\"cell\"]) for y in x[\"flags\"]] for x in d2v.data\n                ] for f in line]\n                interlaced_percent = (sum(1 for f in flags if not f[\"progressive_frame\"]) / len(flags)) * 100\n                if interlaced_percent == 100:\n                    scan_overview = \"Interlaced (CST)\"\n                else:\n                    scan_overview = f\"{round(interlaced_percent, 2)}% Interlaced (VST)\"\n                    vst = True\n                for ext in [\"log\", \"d2v\", \"mpg\", \"mpeg\"]:\n                    fp = os.path.splitext(self.file)[0] + \".\" + ext\n                    if os.path.exists(fp):\n                        os.unlink(fp)\n            line_1 = \"- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}\".format(\n                language=pycountry.languages.get(alpha_2=video.language).name,\n                codec=codec,\n                profile=video.format_profile,\n                width=video.width, height=video.height,\n                aspect=video.other_display_aspect_ratio[0],\n                bitrate=f\"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}\"\n            )\n            line_2 = \"  {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}\".format(\n                fps=f\"{video.framerate_num}/{video.framerate_den}\" if video.framerate_num else video.frame_rate,\n                fps_mode=\"VFR\" if vst else video.frame_rate_mode,\n                color_space=video.color_space,\n                subsampling=video.chroma_subsampling.replace(\":\", \"\"),\n                bit_depth=video.bit_depth,\n                scan=scan_overview\n            )\n            data.append([line_1, line_2])\n        return data\n\n    def get_audio_print(self, audio: List[Track]) -> List[str]:\n        if not audio:\n            return [\"--\"]\n        data = []\n        for t in audio:\n            if t.title and \"Commentary\" in t.title:\n                title = t.title\n            else:\n                title = pycountry.languages.get(alpha_2=t.language).name\n            if t.channel_layout:\n                channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x, 1) for x in t.channel_layout.split(\" \")))\n            else:\n                channels = float(t.channel_s)\n            bit_rate_mode = f\" ({t.bit_rate_mode})\" if t.bit_rate_mode else \"\"\n            l1 = f\"- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}\"\n            data += [(\"  \" + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(l1, 64))]\n        return data\n\n    @staticmethod\n    def get_subtitle_print(subs: List[Track]) -> List[str]:\n        \"\"\"\n        Return a list of a brief subtitle overview per-subtitle.\n\n        e.g.\n        - English, Forced, SubRip (SRT)\n        - English, SubRip (SRT)\n        - English, SDH, SubRip (SRT)\n        - Spanish, Latin American (SDH), SubRip (SRT)\n\n        The bit of text between the Language and the Subtitle format is the Track Title.\n        It can be of any format, but it is recommended to be used as shown above.\n\n        It will be returned as a list of strings with the `- ` already pre-pended to each entry.\n        \"\"\"\n        data = []\n        if not subs:\n            data.append(\"--\")\n        for sub in subs:\n            line_items = []\n\n            # following sub.title tree checks and supports three different language and title scenarios\n            # The second scenario is the recommended option to choose if you are open to choosing any\n            # The third scenario should be used if you have nothing unique to state about the track\n            # | Language     | Track Title                   | Output                                        |\n            # | ------------ | ----------------------------- | --------------------------------------------- |\n            # | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) |\n            # | es / Spanish | Latin American (SDH)          | - Spanish, Latin American (SDH), SubRip (SRT) |\n            # | es / Spanish | None                          | - Spanish, SubRip (SRT)                       |\n            language = pycountry.languages.get(alpha_2=sub.language).name\n            if sub.title:\n                if language.lower() in sub.title.lower():\n                    line_items.append(sub.title)\n                else:\n                    line_items.append(f\"{language}, {sub.title}\")\n            else:\n                line_items.append(language)\n\n            line_items.append(sub.format.replace(\"UTF-8\", \"SubRip (SRT)\"))\n\n            line = \"- \" + \", \".join(line_items)\n            data += [\n                (\"  \" + x if i > 0 else x)\n                for i, x in enumerate(textwrap.wrap(line, 64))\n            ]\n        return data\n\n    @staticmethod\n    def get_chapter_print(chapters: Dict[str, str]) -> List[str]:\n        if not chapters:\n            return [\"--\"]\n        return [\n            f\"- {k}: {v}\"\n            for k, v in chapters.items()\n        ]\n\n    def get_chapter_print_short(self, chapters: Dict[str, str]) -> str:\n        if not chapters:\n            return \"No\"\n        if self.chapters_numbered:\n            return f\"Yes (Numbered 01-{str(len(chapters)).zfill(2)})\"\n        return \"Yes (Named)\"\n\n    @staticmethod\n    def get_session() -> requests.Session:\n        session = requests.Session()\n        session.headers.update({\n            \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0\",\n            \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n            \"Accept-Language\": \"en-US,en;q=0.5\",\n            \"DNT\": \"1\",\n            \"UPGRADE-INSECURE-REQUESTS\": \"1\"\n        })\n        return session\n",
"step-ids": [
14,
16,
18,
19,
22
]
}
|
[
14,
16,
18,
19,
22
] |
import pandas as pd
# Load data
train_data = pd.read_csv('./dataset/train_park_daycare.csv')
cctv = pd.read_csv("./dataset/cctv_origin.csv", encoding="EUC-KR")
## Data preprocessing
# Extract data
cctv = cctv.iloc[1:, :2]
# Map districts (gu)
gu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5, '영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11, '강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15, '광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구': 22, '송파구': 23, '은평구': 24}
gu_list = []
for i in cctv['구분']:
gu_list.append(gu_dict_num[i])
cctv['gu'] = gu_list
cctv.drop(['구분'], axis=1, inplace=True)
# Rename columns
cctv = cctv.rename(columns={'총계': 'cctv_num'})
# Change data types
cctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: "".join(x.split(',')))
cctv['cctv_num'] = pd.to_numeric(cctv['cctv_num'])
# Join
new_data = pd.merge(train_data, cctv, on='gu', how='left')
print(new_data.info())
# Save
new_data.to_csv("./dataset/train_add_cctv.csv", header=True, index=False)
|
normal
|
{
"blob_id": "ea2e9399a8384600d8457a9de3f263db44dc883d",
"index": 752,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in cctv['구분']:\n gu_list.append(gu_dict_num[i])\n<mask token>\ncctv.drop(['구분'], axis=1, inplace=True)\n<mask token>\nprint(new_data.info())\nnew_data.to_csv('./dataset/train_add_cctv.csv', header=True, index=False)\n",
"step-3": "<mask token>\ntrain_data = pd.read_csv('./dataset/train_park_daycare.csv')\ncctv = pd.read_csv('./dataset/cctv_origin.csv', encoding='EUC-KR')\ncctv = cctv.iloc[1:, :2]\ngu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5,\n '영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11,\n '강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15,\n '광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구':\n 22, '송파구': 23, '은평구': 24}\ngu_list = []\nfor i in cctv['구분']:\n gu_list.append(gu_dict_num[i])\ncctv['gu'] = gu_list\ncctv.drop(['구분'], axis=1, inplace=True)\ncctv = cctv.rename(columns={'총계': 'cctv_num'})\ncctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: ''.join(x.split(',')))\ncctv['cctv_num'] = pd.to_numeric(cctv['cctv_num'])\nnew_data = pd.merge(train_data, cctv, on='gu', how='left')\nprint(new_data.info())\nnew_data.to_csv('./dataset/train_add_cctv.csv', header=True, index=False)\n",
"step-4": "import pandas as pd\ntrain_data = pd.read_csv('./dataset/train_park_daycare.csv')\ncctv = pd.read_csv('./dataset/cctv_origin.csv', encoding='EUC-KR')\ncctv = cctv.iloc[1:, :2]\ngu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5,\n '영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11,\n '강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15,\n '광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구':\n 22, '송파구': 23, '은평구': 24}\ngu_list = []\nfor i in cctv['구분']:\n gu_list.append(gu_dict_num[i])\ncctv['gu'] = gu_list\ncctv.drop(['구분'], axis=1, inplace=True)\ncctv = cctv.rename(columns={'총계': 'cctv_num'})\ncctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: ''.join(x.split(',')))\ncctv['cctv_num'] = pd.to_numeric(cctv['cctv_num'])\nnew_data = pd.merge(train_data, cctv, on='gu', how='left')\nprint(new_data.info())\nnew_data.to_csv('./dataset/train_add_cctv.csv', header=True, index=False)\n",
"step-5": "import pandas as pd\n\n# 데이터 로드\ntrain_data = pd.read_csv('./dataset/train_park_daycare.csv')\ncctv = pd.read_csv(\"./dataset/cctv_origin.csv\", encoding=\"EUC-KR\")\n\n## 데이터 전처리\n# 데이터 추출\ncctv = cctv.iloc[1:, :2]\n\n# 구 매핑\ngu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5, '영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11, '강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15, '광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구': 22, '송파구': 23, '은평구': 24}\ngu_list = []\nfor i in cctv['구분']:\n gu_list.append(gu_dict_num[i])\ncctv['gu'] = gu_list\ncctv.drop(['구분'], axis=1, inplace=True)\n\n# 컬럼 이름 변경\ncctv = cctv.rename(columns={'총계': 'cctv_num'})\n\n# 데이터 타입 변경\ncctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: \"\".join(x.split(',')))\ncctv['cctv_num'] = pd.to_numeric(cctv['cctv_num'])\n\n# 조인\nnew_data = pd.merge(train_data, cctv, on='gu', how='left')\n\nprint(new_data.info())\n# 저장\nnew_data.to_csv(\"./dataset/train_add_cctv.csv\", header=True, index=False)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/15 5:04 PM
# @Author : Zessay
from .ffm import *
from .fm import *
from .utils import *
from .base_model import *
from .base_trainer import *
from .logger import *
from .metric import *
from .input_fn import *
|
normal
|
{
"blob_id": "bbdb07a81d785bdf067707c4e56622a2ada76b7b",
"index": 1692,
"step-1": "<mask token>\n",
"step-2": "from .ffm import *\nfrom .fm import *\nfrom .utils import *\nfrom .base_model import *\nfrom .base_trainer import *\nfrom .logger import *\nfrom .metric import *\nfrom .input_fn import *\n",
"step-3": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/15 下午5:04\n# @Author : Zessay\n\nfrom .ffm import *\nfrom .fm import *\nfrom .utils import *\nfrom .base_model import *\nfrom .base_trainer import *\nfrom .logger import * \nfrom .metric import *\nfrom .input_fn import *",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from typing import Sequence
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
def plot3D(X, Y, Z, proporcao=1, espelharZ = False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel('X ')
ax.set_ylabel('Y ')
ax.set_zlabel('Z ')
np.floor
colortuple = (colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88'))
colorsArray = np.empty([len(X), len(Y)], dtype=tuple)
for y in range(len(Y)):
for x in range(len(X)):
colorsArray[x, y] = colortuple[int(
np.ceil(x/proporcao) + np.ceil(y/proporcao)) % len(colortuple)]
surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)
if(espelharZ):
surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)
#surf = ax.plot_wireframe(X, Y, Z, linewidth=1)
#plt.show()
def limitZ(Z, limit = 10):
for i in range(len(Z)):
for j in range(len(Z[i])):
if(Z[i][j]>limit):
Z[i][j] = np.inf
if(Z[i][j]<-limit):
Z[i][j] = -np.inf
def plotPontos3D(X,Y,Z):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(X, Y, Z, marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
def curvaNivel(X,Y,Z,levels):
fig = plt.figure()
ax = fig.add_subplot()
curva = ax.contourf(X,Y,Z,levels)
ax.set_xlabel('X')
ax.set_ylabel('Y')
#curva.cmap.set_under('white')
#curva.cmap.set_over('cyan')
fig.colorbar(curva)
plt.show()
|
normal
|
{
"blob_id": "ff20b65f35614415ad786602c0fc2cabd08124fb",
"index": 4065,
"step-1": "<mask token>\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ=False):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.\n ceil(y / proporcao)) % len(colortuple)]\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if espelharZ:\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ=False):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.\n ceil(y / proporcao)) % len(colortuple)]\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if espelharZ:\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\ndef curvaNivel(X, Y, Z, levels):\n fig = plt.figure()\n ax = fig.add_subplot()\n curva = ax.contourf(X, Y, Z, levels)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig.colorbar(curva)\n plt.show()\n",
"step-4": "from typing import Sequence\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ=False):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.\n ceil(y / proporcao)) % len(colortuple)]\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if espelharZ:\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\ndef curvaNivel(X, Y, Z, levels):\n fig = plt.figure()\n ax = fig.add_subplot()\n curva = ax.contourf(X, Y, Z, levels)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig.colorbar(curva)\n plt.show()\n",
"step-5": "from typing import Sequence\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ = False):\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = (colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88'))\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(\n np.ceil(x/proporcao) + np.ceil(y/proporcao)) % len(colortuple)]\n\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if(espelharZ):\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n #surf = ax.plot_wireframe(X, Y, Z, linewidth=1)\n\n #plt.show()\n\ndef limitZ(Z, limit = 10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if(Z[i][j]>limit):\n Z[i][j] = np.inf\n if(Z[i][j]<-limit):\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X,Y,Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n\n plt.show()\n\n\ndef curvaNivel(X,Y,Z,levels):\n fig = plt.figure()\n ax = fig.add_subplot()\n curva = ax.contourf(X,Y,Z,levels)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n #curva.cmap.set_under('white')\n #curva.cmap.set_over('cyan')\n fig.colorbar(curva)\n plt.show()\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from proxmin import nmf
from proxmin.utils import Traceback
from proxmin import operators as po
from scipy.optimize import linear_sum_assignment
import numpy as np
import matplotlib.pyplot as plt
import time
from functools import partial
# initialize and run NMF
import logging
logging.basicConfig()
logger = logging.getLogger('proxmin')
logger.setLevel(logging.INFO)
def generateComponent(m):
"""Creates oscillating components to be mixed"""
freq = 25*np.random.random()
phase = 2*np.pi*np.random.random()
x = np.arange(m)
return np.cos(x/freq-phase)**2
def generateAmplitudes(k):
"""Makes mixing coefficients"""
res = np.array([np.random.random() for i in range(k)])
return res/res.sum()
def add_noise(Y, sigma):
"""Adds noise to Y"""
return Y + np.random.normal(0, sigma, Y.shape)
def match(A, S, trueS):
"""Rearranges columns of S to best fit the components they likely represent (maximizes sum of correlations)"""
cov = np.cov(trueS, S)
k = S.shape[0]
corr = np.zeros([k,k])
for i in range(k):
for j in range(k):
corr[i][j] = cov[i + k][j]/np.sqrt(cov[i + k][i + k]*cov[j][j])
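    # linear_sum_assignment minimizes cost, so negating corr yields the
    # matching that maximizes total correlation (Hungarian algorithm).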
arrangement = linear_sum_assignment(-corr)
resS = np.zeros_like(S)
resAT = np.zeros_like(A.T)
for t in range(k):
resS[arrangement[1][t]] = S[arrangement[0][t]]
resAT[arrangement[1][t]] = A.T[arrangement[0][t]]
return resAT.T, resS
if __name__ == "__main__":
n = 50 # component resolution
k = 3 # number of components
b = 100 # number of observations
noise = 0.02 # stdev of added noise
np.random.seed(101)
# set up test data
trueA = np.array([generateAmplitudes(k) for i in range(b)])
trueS = np.array([generateComponent(n) for i in range(k)])
trueY = np.dot(trueA,trueS)
Y = add_noise(trueY, noise)
# if noise is variable, specify variance matrix of the same shape as Y
W = None
A = np.array([generateAmplitudes(k) for i in range(b)])
S = np.array([generateComponent(n) for i in range(k)])
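    # prox_unity_plus constrains each row of A to be nonnegative and to sum
    # to one, so the rows remain valid mixing weights.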
p1 = partial(po.prox_unity_plus, axis=1)
proxs_g=[[p1], None]
tr = Traceback(2)
nmf(Y, A, S, W=W, prox_A=p1, e_rel=1e-6, e_abs=1e-6/noise**2, traceback=tr)
# sort components to best match inputs
A, S = match(A, S, trueS)
# show data and model
fig = plt.figure(figsize=(6,7))
ax = fig.add_subplot(311)
ax.set_title("True Components S")
ax.plot(trueS.T)
ax2 = fig.add_subplot(312)
ax2.set_title("Data Y")
ax2.plot(Y.T)
ax3 = fig.add_subplot(313)
ax3.set_title("Found Components S")
ax3.set_xlabel("Pixel")
ax3.plot(S.T)
fig.subplots_adjust(bottom=0.07, top=0.95, hspace=0.35)
fig.show()
# convergence plot from traceback
convergences = []
As = tr['X',0]
Ss = tr['X',1]
for it in range(tr.it):
Y = np.dot(As[it], Ss[it])
convergences.append(((Y - trueY)**2).sum())
fig2 = plt.figure(figsize=(6,4))
ax4 = fig2.add_subplot(111)
ax4.set_title("Convergence")
ax4.semilogy(convergences)
ax4.set_ylabel("$||Y-AS||^2$")
ax4.set_xlabel("Iterations")
fig2.show()
"""
# noise plot
#noises = np.linspace(0,0.05,21)
#repeat = 10
noises = [noise]
repeat = 1000
A_chi_squared = np.empty((len(noises), repeat))
S_chi_squared = np.empty((len(noises), repeat))
for i in range(len(noises)):
e = noises[i]
for r in range(repeat):
Y = add_noise(trueY, e)
A, S = nmf.nmf(Y, A0, S0, e_rel=1e-4, e_abs=1e-4, )
A, S = match(A, S, trueS)
A_chi_squared[i,r] = np.sum((A - trueA)**2)
S_chi_squared[i,r] = np.sum((S - trueS)**2)
fig3 = plt.figure(figsize=(6,4))
ax5 = fig3.add_subplot(111)
dof_A = A.shape[0]*A.shape[1]
dof_S = S.shape[0]*S.shape[1]
ax5.errorbar(noises, S_chi_squared.mean(axis=1)/dof_S, yerr=S_chi_squared.std(axis=1)/dof_S, label="$\chi^2_S$ / DOF")
ax5.errorbar(noises, A_chi_squared.mean(axis=1)/dof_A, yerr=A_chi_squared.std(axis=1)/dof_A, label="$\chi^2_A$ / DOF")
ax5.legend()
ax5.set_ylabel("Chi-squared")
ax5.set_xlabel("Standard deviation of noise")
fig3.show()
"""
|
normal
|
{
"blob_id": "0edc0c2f86bda0122d4b231eed700d7a5b08ec1e",
"index": 8279,
"step-1": "<mask token>\n\n\ndef generateComponent(m):\n \"\"\"Creates oscillating components to be mixed\"\"\"\n freq = 25 * np.random.random()\n phase = 2 * np.pi * np.random.random()\n x = np.arange(m)\n return np.cos(x / freq - phase) ** 2\n\n\n<mask token>\n\n\ndef add_noise(Y, sigma):\n \"\"\"Adds noise to Y\"\"\"\n return Y + np.random.normal(0, sigma, Y.shape)\n\n\ndef match(A, S, trueS):\n \"\"\"Rearranges columns of S to best fit the components they likely represent (maximizes sum of correlations)\"\"\"\n cov = np.cov(trueS, S)\n k = S.shape[0]\n corr = np.zeros([k, k])\n for i in range(k):\n for j in range(k):\n corr[i][j] = cov[i + k][j] / np.sqrt(cov[i + k][i + k] * cov[j][j])\n arrangement = linear_sum_assignment(-corr)\n resS = np.zeros_like(S)\n resAT = np.zeros_like(A.T)\n for t in range(k):\n resS[arrangement[1][t]] = S[arrangement[0][t]]\n resAT[arrangement[1][t]] = A.T[arrangement[0][t]]\n return resAT.T, resS\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generateComponent(m):\n \"\"\"Creates oscillating components to be mixed\"\"\"\n freq = 25 * np.random.random()\n phase = 2 * np.pi * np.random.random()\n x = np.arange(m)\n return np.cos(x / freq - phase) ** 2\n\n\ndef generateAmplitudes(k):\n \"\"\"Makes mixing coefficients\"\"\"\n res = np.array([np.random.random() for i in range(k)])\n return res / res.sum()\n\n\ndef add_noise(Y, sigma):\n \"\"\"Adds noise to Y\"\"\"\n return Y + np.random.normal(0, sigma, Y.shape)\n\n\ndef match(A, S, trueS):\n \"\"\"Rearranges columns of S to best fit the components they likely represent (maximizes sum of correlations)\"\"\"\n cov = np.cov(trueS, S)\n k = S.shape[0]\n corr = np.zeros([k, k])\n for i in range(k):\n for j in range(k):\n corr[i][j] = cov[i + k][j] / np.sqrt(cov[i + k][i + k] * cov[j][j])\n arrangement = linear_sum_assignment(-corr)\n resS = np.zeros_like(S)\n resAT = np.zeros_like(A.T)\n for t in range(k):\n resS[arrangement[1][t]] = S[arrangement[0][t]]\n resAT[arrangement[1][t]] = A.T[arrangement[0][t]]\n return resAT.T, resS\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogging.basicConfig()\nlogger = logging.getLogger('proxmin')\nlogger.setLevel(logging.INFO)\n\n\ndef generateComponent(m):\n \"\"\"Creates oscillating components to be mixed\"\"\"\n freq = 25 * np.random.random()\n phase = 2 * np.pi * np.random.random()\n x = np.arange(m)\n return np.cos(x / freq - phase) ** 2\n\n\ndef generateAmplitudes(k):\n \"\"\"Makes mixing coefficients\"\"\"\n res = np.array([np.random.random() for i in range(k)])\n return res / res.sum()\n\n\ndef add_noise(Y, sigma):\n \"\"\"Adds noise to Y\"\"\"\n return Y + np.random.normal(0, sigma, Y.shape)\n\n\ndef match(A, S, trueS):\n \"\"\"Rearranges columns of S to best fit the components they likely represent (maximizes sum of correlations)\"\"\"\n cov = np.cov(trueS, S)\n k = S.shape[0]\n corr = np.zeros([k, k])\n for i in range(k):\n for j in range(k):\n corr[i][j] = cov[i + k][j] / np.sqrt(cov[i + k][i + k] * cov[j][j])\n arrangement = linear_sum_assignment(-corr)\n resS = np.zeros_like(S)\n resAT = np.zeros_like(A.T)\n for t in range(k):\n resS[arrangement[1][t]] = S[arrangement[0][t]]\n resAT[arrangement[1][t]] = A.T[arrangement[0][t]]\n return resAT.T, resS\n\n\nif __name__ == '__main__':\n n = 50\n k = 3\n b = 100\n noise = 0.02\n np.random.seed(101)\n trueA = np.array([generateAmplitudes(k) for i in range(b)])\n trueS = np.array([generateComponent(n) for i in range(k)])\n trueY = np.dot(trueA, trueS)\n Y = add_noise(trueY, noise)\n W = None\n A = np.array([generateAmplitudes(k) for i in range(b)])\n S = np.array([generateComponent(n) for i in range(k)])\n p1 = partial(po.prox_unity_plus, axis=1)\n proxs_g = [[p1], None]\n tr = Traceback(2)\n nmf(Y, A, S, W=W, prox_A=p1, e_rel=1e-06, e_abs=1e-06 / noise ** 2,\n traceback=tr)\n A, S = match(A, S, trueS)\n fig = plt.figure(figsize=(6, 7))\n ax = fig.add_subplot(311)\n ax.set_title('True Components S')\n ax.plot(trueS.T)\n ax2 = fig.add_subplot(312)\n ax2.set_title('Data Y')\n ax2.plot(Y.T)\n ax3 = fig.add_subplot(313)\n ax3.set_title('Found Components S')\n ax3.set_xlabel('Pixel')\n ax3.plot(S.T)\n fig.subplots_adjust(bottom=0.07, top=0.95, hspace=0.35)\n fig.show()\n convergences = []\n As = tr['X', 0]\n Ss = tr['X', 1]\n for it in range(tr.it):\n Y = np.dot(As[it], Ss[it])\n convergences.append(((Y - trueY) ** 2).sum())\n fig2 = plt.figure(figsize=(6, 4))\n ax4 = fig2.add_subplot(111)\n ax4.set_title('Convergence')\n ax4.semilogy(convergences)\n ax4.set_ylabel('$||Y-AS||^2$')\n ax4.set_xlabel('Iterations')\n fig2.show()\n \"\"\"\n # noise plot\n #noises = np.linspace(0,0.05,21)\n #repeat = 10\n noises = [noise]\n repeat = 1000\n A_chi_squared = np.empty((len(noises), repeat))\n S_chi_squared = np.empty((len(noises), repeat))\n for i in range(len(noises)):\n e = noises[i]\n for r in range(repeat):\n Y = add_noise(trueY, e)\n A, S = nmf.nmf(Y, A0, S0, e_rel=1e-4, e_abs=1e-4, )\n A, S = match(A, S, trueS)\n A_chi_squared[i,r] = np.sum((A - trueA)**2)\n S_chi_squared[i,r] = np.sum((S - trueS)**2)\n fig3 = plt.figure(figsize=(6,4))\n ax5 = fig3.add_subplot(111)\n dof_A = A.shape[0]*A.shape[1]\n dof_S = S.shape[0]*S.shape[1]\n ax5.errorbar(noises, S_chi_squared.mean(axis=1)/dof_S, yerr=S_chi_squared.std(axis=1)/dof_S, label=\"$\\\\chi^2_S$ / DOF\")\n ax5.errorbar(noises, A_chi_squared.mean(axis=1)/dof_A, yerr=A_chi_squared.std(axis=1)/dof_A, label=\"$\\\\chi^2_A$ / DOF\")\n ax5.legend()\n ax5.set_ylabel(\"Chi-squared\")\n ax5.set_xlabel(\"Standard deviation of noise\")\n fig3.show()\n \"\"\"\n",
"step-4": "from proxmin import nmf\nfrom proxmin.utils import Traceback\nfrom proxmin import operators as po\nfrom scipy.optimize import linear_sum_assignment\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom functools import partial\nimport logging\nlogging.basicConfig()\nlogger = logging.getLogger('proxmin')\nlogger.setLevel(logging.INFO)\n\n\ndef generateComponent(m):\n \"\"\"Creates oscillating components to be mixed\"\"\"\n freq = 25 * np.random.random()\n phase = 2 * np.pi * np.random.random()\n x = np.arange(m)\n return np.cos(x / freq - phase) ** 2\n\n\ndef generateAmplitudes(k):\n \"\"\"Makes mixing coefficients\"\"\"\n res = np.array([np.random.random() for i in range(k)])\n return res / res.sum()\n\n\ndef add_noise(Y, sigma):\n \"\"\"Adds noise to Y\"\"\"\n return Y + np.random.normal(0, sigma, Y.shape)\n\n\ndef match(A, S, trueS):\n \"\"\"Rearranges columns of S to best fit the components they likely represent (maximizes sum of correlations)\"\"\"\n cov = np.cov(trueS, S)\n k = S.shape[0]\n corr = np.zeros([k, k])\n for i in range(k):\n for j in range(k):\n corr[i][j] = cov[i + k][j] / np.sqrt(cov[i + k][i + k] * cov[j][j])\n arrangement = linear_sum_assignment(-corr)\n resS = np.zeros_like(S)\n resAT = np.zeros_like(A.T)\n for t in range(k):\n resS[arrangement[1][t]] = S[arrangement[0][t]]\n resAT[arrangement[1][t]] = A.T[arrangement[0][t]]\n return resAT.T, resS\n\n\nif __name__ == '__main__':\n n = 50\n k = 3\n b = 100\n noise = 0.02\n np.random.seed(101)\n trueA = np.array([generateAmplitudes(k) for i in range(b)])\n trueS = np.array([generateComponent(n) for i in range(k)])\n trueY = np.dot(trueA, trueS)\n Y = add_noise(trueY, noise)\n W = None\n A = np.array([generateAmplitudes(k) for i in range(b)])\n S = np.array([generateComponent(n) for i in range(k)])\n p1 = partial(po.prox_unity_plus, axis=1)\n proxs_g = [[p1], None]\n tr = Traceback(2)\n nmf(Y, A, S, W=W, prox_A=p1, e_rel=1e-06, e_abs=1e-06 / noise ** 2,\n traceback=tr)\n A, S = match(A, S, trueS)\n fig = plt.figure(figsize=(6, 7))\n ax = fig.add_subplot(311)\n ax.set_title('True Components S')\n ax.plot(trueS.T)\n ax2 = fig.add_subplot(312)\n ax2.set_title('Data Y')\n ax2.plot(Y.T)\n ax3 = fig.add_subplot(313)\n ax3.set_title('Found Components S')\n ax3.set_xlabel('Pixel')\n ax3.plot(S.T)\n fig.subplots_adjust(bottom=0.07, top=0.95, hspace=0.35)\n fig.show()\n convergences = []\n As = tr['X', 0]\n Ss = tr['X', 1]\n for it in range(tr.it):\n Y = np.dot(As[it], Ss[it])\n convergences.append(((Y - trueY) ** 2).sum())\n fig2 = plt.figure(figsize=(6, 4))\n ax4 = fig2.add_subplot(111)\n ax4.set_title('Convergence')\n ax4.semilogy(convergences)\n ax4.set_ylabel('$||Y-AS||^2$')\n ax4.set_xlabel('Iterations')\n fig2.show()\n \"\"\"\n # noise plot\n #noises = np.linspace(0,0.05,21)\n #repeat = 10\n noises = [noise]\n repeat = 1000\n A_chi_squared = np.empty((len(noises), repeat))\n S_chi_squared = np.empty((len(noises), repeat))\n for i in range(len(noises)):\n e = noises[i]\n for r in range(repeat):\n Y = add_noise(trueY, e)\n A, S = nmf.nmf(Y, A0, S0, e_rel=1e-4, e_abs=1e-4, )\n A, S = match(A, S, trueS)\n A_chi_squared[i,r] = np.sum((A - trueA)**2)\n S_chi_squared[i,r] = np.sum((S - trueS)**2)\n fig3 = plt.figure(figsize=(6,4))\n ax5 = fig3.add_subplot(111)\n dof_A = A.shape[0]*A.shape[1]\n dof_S = S.shape[0]*S.shape[1]\n ax5.errorbar(noises, S_chi_squared.mean(axis=1)/dof_S, yerr=S_chi_squared.std(axis=1)/dof_S, label=\"$\\\\chi^2_S$ / DOF\")\n ax5.errorbar(noises, A_chi_squared.mean(axis=1)/dof_A, 
yerr=A_chi_squared.std(axis=1)/dof_A, label=\"$\\\\chi^2_A$ / DOF\")\n ax5.legend()\n ax5.set_ylabel(\"Chi-squared\")\n ax5.set_xlabel(\"Standard deviation of noise\")\n fig3.show()\n \"\"\"\n",
"step-5": "from proxmin import nmf\r\nfrom proxmin.utils import Traceback\r\nfrom proxmin import operators as po\r\nfrom scipy.optimize import linear_sum_assignment\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nfrom functools import partial\r\n\r\n# initialize and run NMF\r\nimport logging\r\nlogging.basicConfig()\r\nlogger = logging.getLogger('proxmin')\r\nlogger.setLevel(logging.INFO)\r\n\r\ndef generateComponent(m):\r\n \"\"\"Creates oscillating components to be mixed\"\"\"\r\n freq = 25*np.random.random()\r\n phase = 2*np.pi*np.random.random()\r\n x = np.arange(m)\r\n return np.cos(x/freq-phase)**2\r\n\r\ndef generateAmplitudes(k):\r\n \"\"\"Makes mixing coefficients\"\"\"\r\n res = np.array([np.random.random() for i in range(k)])\r\n return res/res.sum()\r\n\r\ndef add_noise(Y, sigma):\r\n \"\"\"Adds noise to Y\"\"\"\r\n return Y + np.random.normal(0, sigma, Y.shape)\r\n\r\ndef match(A, S, trueS):\r\n \"\"\"Rearranges columns of S to best fit the components they likely represent (maximizes sum of correlations)\"\"\"\r\n cov = np.cov(trueS, S)\r\n k = S.shape[0]\r\n corr = np.zeros([k,k])\r\n for i in range(k):\r\n for j in range(k):\r\n corr[i][j] = cov[i + k][j]/np.sqrt(cov[i + k][i + k]*cov[j][j])\r\n arrangement = linear_sum_assignment(-corr)\r\n resS = np.zeros_like(S)\r\n resAT = np.zeros_like(A.T)\r\n for t in range(k):\r\n resS[arrangement[1][t]] = S[arrangement[0][t]]\r\n resAT[arrangement[1][t]] = A.T[arrangement[0][t]]\r\n return resAT.T, resS\r\n\r\nif __name__ == \"__main__\":\r\n n = 50 \t\t\t# component resolution\r\n k = 3 \t\t\t# number of components\r\n b = 100\t\t\t# number of observations\r\n noise = 0.02 # stdev of added noise\r\n np.random.seed(101)\r\n\r\n # set up test data\r\n trueA = np.array([generateAmplitudes(k) for i in range(b)])\r\n trueS = np.array([generateComponent(n) for i in range(k)])\r\n trueY = np.dot(trueA,trueS)\r\n Y = add_noise(trueY, noise)\r\n # if noise is variable, specify variance matrix of the same shape as Y\r\n W = None\r\n\r\n A = np.array([generateAmplitudes(k) for i in range(b)])\r\n S = np.array([generateComponent(n) for i in range(k)])\r\n p1 = partial(po.prox_unity_plus, axis=1)\r\n proxs_g=[[p1], None]\r\n tr = Traceback(2)\r\n nmf(Y, A, S, W=W, prox_A=p1, e_rel=1e-6, e_abs=1e-6/noise**2, traceback=tr)\r\n # sort components to best match inputs\r\n A, S = match(A, S, trueS)\r\n\r\n # show data and model\r\n fig = plt.figure(figsize=(6,7))\r\n ax = fig.add_subplot(311)\r\n ax.set_title(\"True Components S\")\r\n ax.plot(trueS.T)\r\n ax2 = fig.add_subplot(312)\r\n ax2.set_title(\"Data Y\")\r\n ax2.plot(Y.T)\r\n ax3 = fig.add_subplot(313)\r\n ax3.set_title(\"Found Components S\")\r\n ax3.set_xlabel(\"Pixel\")\r\n ax3.plot(S.T)\r\n fig.subplots_adjust(bottom=0.07, top=0.95, hspace=0.35)\r\n fig.show()\r\n\r\n # convergence plot from traceback\r\n convergences = []\r\n As = tr['X',0]\r\n Ss = tr['X',1]\r\n for it in range(tr.it):\r\n Y = np.dot(As[it], Ss[it])\r\n convergences.append(((Y - trueY)**2).sum())\r\n fig2 = plt.figure(figsize=(6,4))\r\n ax4 = fig2.add_subplot(111)\r\n ax4.set_title(\"Convergence\")\r\n ax4.semilogy(convergences)\r\n ax4.set_ylabel(\"$||Y-AS||^2$\")\r\n ax4.set_xlabel(\"Iterations\")\r\n fig2.show()\r\n\r\n \"\"\"\r\n # noise plot\r\n #noises = np.linspace(0,0.05,21)\r\n #repeat = 10\r\n noises = [noise]\r\n repeat = 1000\r\n A_chi_squared = np.empty((len(noises), repeat))\r\n S_chi_squared = np.empty((len(noises), repeat))\r\n for i in range(len(noises)):\r\n e = noises[i]\r\n 
for r in range(repeat):\r\n Y = add_noise(trueY, e)\r\n A, S = nmf.nmf(Y, A0, S0, e_rel=1e-4, e_abs=1e-4, )\r\n A, S = match(A, S, trueS)\r\n A_chi_squared[i,r] = np.sum((A - trueA)**2)\r\n S_chi_squared[i,r] = np.sum((S - trueS)**2)\r\n fig3 = plt.figure(figsize=(6,4))\r\n ax5 = fig3.add_subplot(111)\r\n dof_A = A.shape[0]*A.shape[1]\r\n dof_S = S.shape[0]*S.shape[1]\r\n ax5.errorbar(noises, S_chi_squared.mean(axis=1)/dof_S, yerr=S_chi_squared.std(axis=1)/dof_S, label=\"$\\chi^2_S$ / DOF\")\r\n ax5.errorbar(noises, A_chi_squared.mean(axis=1)/dof_A, yerr=A_chi_squared.std(axis=1)/dof_A, label=\"$\\chi^2_A$ / DOF\")\r\n ax5.legend()\r\n ax5.set_ylabel(\"Chi-squared\")\r\n ax5.set_xlabel(\"Standard deviation of noise\")\r\n fig3.show()\r\n \"\"\"\r\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
class Solution:
# @param num, a list of integer
# @return an integer
def rob(self, num):
n = len(num)
if n == 0:
return 0
if(n == 1):
return num[0]
f = [0] * n
f[0] = num[0]
f[1] = max(num[0],num[1])
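        # DP recurrence: either skip house i (keep f[i-1]) or rob it (f[i-2] + num[i]).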
        for i in range(2,n):
f[i] = max(f[i-1],f[i-2] + num[i])
return f[n-1]
|
normal
|
{
"blob_id": "bca0baaffefed6917939614defadf9960ffa4727",
"index": 8062,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def rob(self, num):\n n = len(num)\n if n == 0:\n return 0\n if n == 1:\n return num[0]\n f = [0] * n\n f[0] = num[0]\n f[1] = max(num[0], num[1])\n for i in xrange(2, n):\n f[i] = max(f[i - 1], f[i - 2] + num[i])\n return f[n - 1]\n",
"step-4": "class Solution:\n # @param num, a list of integer\n # @return an integer\n def rob(self, num):\n n = len(num)\n if n == 0:\n return 0\n if(n == 1):\n return num[0]\n f = [0] * n\n f[0] = num[0]\n f[1] = max(num[0],num[1])\n for i in xrange(2,n):\n f[i] = max(f[i-1],f[i-2] + num[i])\n return f[n-1]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sys import exit
def hard():
print("Nice! Let's try something harder")
print("Could you calculate this for me?")
print("4 * 35 + 18 / 2 = ")
aws = input(">")
while True:
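        # With standard operator precedence, 4 * 35 + 18 / 2 = 140 + 9 = 149.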
        if aws == "149":
print("Nice, you correctly answer all the questions")
exit(0)
else:
print("Ummm not quite right, let's try something easier")
easy()
def easy():
print("Ok, seems like you are not good at math.")
print("What about this.")
print("Say you have 10 apples, your Mom gave you another 2.")
print("How many apples you have now?")
choice = input("> ")
if choice == "12":
print("You did a good job!")
exit(0)
else:
print("Oh well, it's not end of the world if you did badly in math")
exit(0)
def start():
print("Let's do some math")
print("How old are you?")
choice = input("> ")
age = int(choice) + 20
print(f"So after 20 years, you'll be {age}, right? (y/n)")
choice = input("> ")
while True:
if "y" in choice:
hard()
elif "n" in choice:
easy()
else:
print("I don't know what that mean")
start()
|
normal
|
{
"blob_id": "5d05351cd6cd6c0d216e8bc09308532605bfd26e",
"index": 3007,
"step-1": "<mask token>\n\n\ndef easy():\n print('Ok, seems like you are not good at math.')\n print('What about this.')\n print('Say you have 10 apples, your Mom gave you another 2.')\n print('How many apples you have now?')\n choice = input('> ')\n if choice == '12':\n print('You did a good job!')\n exit(0)\n else:\n print(\"Oh well, it's not end of the world if you did badly in math\")\n exit(0)\n\n\ndef start():\n print(\"Let's do some math\")\n print('How old are you?')\n choice = input('> ')\n age = int(choice) + 20\n print(f\"So after 20 years, you'll be {age}, right? (y/n)\")\n choice = input('> ')\n while True:\n if 'y' in choice:\n hard()\n elif 'n' in choice:\n easy()\n else:\n print(\"I don't know what that mean\")\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef hard():\n print(\"Nice! Let's try something harder\")\n print('Could you calculate this for me?')\n print('4 * 35 + 18 / 2 = ')\n aws = input('>')\n while True:\n if aws == '176':\n print('Nice, you correctly answer all the questions')\n exit(0)\n else:\n print(\"Ummm not quite right, let's try something easier\")\n easy()\n\n\ndef easy():\n print('Ok, seems like you are not good at math.')\n print('What about this.')\n print('Say you have 10 apples, your Mom gave you another 2.')\n print('How many apples you have now?')\n choice = input('> ')\n if choice == '12':\n print('You did a good job!')\n exit(0)\n else:\n print(\"Oh well, it's not end of the world if you did badly in math\")\n exit(0)\n\n\ndef start():\n print(\"Let's do some math\")\n print('How old are you?')\n choice = input('> ')\n age = int(choice) + 20\n print(f\"So after 20 years, you'll be {age}, right? (y/n)\")\n choice = input('> ')\n while True:\n if 'y' in choice:\n hard()\n elif 'n' in choice:\n easy()\n else:\n print(\"I don't know what that mean\")\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef hard():\n print(\"Nice! Let's try something harder\")\n print('Could you calculate this for me?')\n print('4 * 35 + 18 / 2 = ')\n aws = input('>')\n while True:\n if aws == '176':\n print('Nice, you correctly answer all the questions')\n exit(0)\n else:\n print(\"Ummm not quite right, let's try something easier\")\n easy()\n\n\ndef easy():\n print('Ok, seems like you are not good at math.')\n print('What about this.')\n print('Say you have 10 apples, your Mom gave you another 2.')\n print('How many apples you have now?')\n choice = input('> ')\n if choice == '12':\n print('You did a good job!')\n exit(0)\n else:\n print(\"Oh well, it's not end of the world if you did badly in math\")\n exit(0)\n\n\ndef start():\n print(\"Let's do some math\")\n print('How old are you?')\n choice = input('> ')\n age = int(choice) + 20\n print(f\"So after 20 years, you'll be {age}, right? (y/n)\")\n choice = input('> ')\n while True:\n if 'y' in choice:\n hard()\n elif 'n' in choice:\n easy()\n else:\n print(\"I don't know what that mean\")\n\n\nstart()\n",
"step-4": "from sys import exit\n\n\ndef hard():\n print(\"Nice! Let's try something harder\")\n print('Could you calculate this for me?')\n print('4 * 35 + 18 / 2 = ')\n aws = input('>')\n while True:\n if aws == '176':\n print('Nice, you correctly answer all the questions')\n exit(0)\n else:\n print(\"Ummm not quite right, let's try something easier\")\n easy()\n\n\ndef easy():\n print('Ok, seems like you are not good at math.')\n print('What about this.')\n print('Say you have 10 apples, your Mom gave you another 2.')\n print('How many apples you have now?')\n choice = input('> ')\n if choice == '12':\n print('You did a good job!')\n exit(0)\n else:\n print(\"Oh well, it's not end of the world if you did badly in math\")\n exit(0)\n\n\ndef start():\n print(\"Let's do some math\")\n print('How old are you?')\n choice = input('> ')\n age = int(choice) + 20\n print(f\"So after 20 years, you'll be {age}, right? (y/n)\")\n choice = input('> ')\n while True:\n if 'y' in choice:\n hard()\n elif 'n' in choice:\n easy()\n else:\n print(\"I don't know what that mean\")\n\n\nstart()\n",
"step-5": "from sys import exit\n\n\ndef hard():\n print(\"Nice! Let's try something harder\")\n print(\"Could you calculate this for me?\")\n print(\"4 * 35 + 18 / 2 = \")\n\n aws = input(\">\")\n\n while True:\n if aws == \"176\":\n print(\"Nice, you correctly answer all the questions\")\n exit(0)\n else:\n print(\"Ummm not quite right, let's try something easier\")\n easy()\n\n\ndef easy():\n print(\"Ok, seems like you are not good at math.\")\n print(\"What about this.\")\n print(\"Say you have 10 apples, your Mom gave you another 2.\")\n print(\"How many apples you have now?\")\n\n choice = input(\"> \")\n\n if choice == \"12\":\n print(\"You did a good job!\")\n exit(0)\n else:\n print(\"Oh well, it's not end of the world if you did badly in math\")\n exit(0)\n\n\ndef start():\n print(\"Let's do some math\")\n print(\"How old are you?\")\n\n choice = input(\"> \")\n age = int(choice) + 20\n\n print(f\"So after 20 years, you'll be {age}, right? (y/n)\")\n\n choice = input(\"> \")\n\n while True:\n if \"y\" in choice:\n hard()\n elif \"n\" in choice:\n easy()\n else:\n print(\"I don't know what that mean\")\n\n\nstart()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import numpy as np
class Constants():
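    """Default hyperparameter values for building and training the DNN."""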
DNN_DEFAULT_ACTIVATION = 'relu'
DNN_DEFAULT_KERNEL_REGULARIZATION = [0, 5e-5]
DNN_DEFAULT_BIAS_REGULARIZATION = [0, 5e-5]
DNN_DEFAULT_LOSS = 'mean_squared_error'
DNN_DEFAULT_VALIDATION_SPLIT = 0.2
DNN_DEFAULT_EPOCHS = 100
DNN_DEFAULT_CHECKPOINT_PERIOD = 100
DNN_DEFAULT_VALIDATION_PERIOD = 1
DNN_DEFAULT_PATIENCE = 1000
DNN_DEFAULT_BATCH_SIZE = 16
DNN_DEFAULT_OPTIMIZER = 'adam'
DNN_DEFAULT_DROPOUT_RATE = 0.02
DNN_DEFAULT_DECAY = 0
DNN_DEFAULT_BIAS = 0.1
DNN_DEFAULT_OUTPUT_BIAS = 0.5
|
normal
|
{
"blob_id": "b2bb7393bf7955f5de30c59364b495b8f888e178",
"index": 4073,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Constants:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Constants:\n DNN_DEFAULT_ACTIVATION = 'relu'\n DNN_DEFAULT_KERNEL_REGULARIZATION = [0, 5e-05]\n DNN_DEFAULT_BIAS_REGULARIZATION = [0, 5e-05]\n DNN_DEFAULT_LOSS = 'mean_squared_error'\n DNN_DEFAULT_VALIDATION_SPLIT = 0.2\n DNN_DEFAULT_EPOCHS = 100\n DNN_DEFAULT_CHECKPOINT_PERIOD = 100\n DNN_DEFAULT_VALIDATION_PERIOD = 1\n DNN_DEFAULT_PATIENCE = 1000\n DNN_DEFAULT_BATCH_SIZE = 16\n DNN_DEFAULT_OPTIMIZER = 'adam'\n DNN_DEFAULT_DROPOUT_RATE = 0.02\n DNN_DEFAULT_DECAY = 0\n DNN_DEFAULT_BIAS = 0.1\n DNN_DEFAULT_OUTPUT_BIAS = 0.5\n",
"step-4": "import numpy as np\n\n\nclass Constants:\n DNN_DEFAULT_ACTIVATION = 'relu'\n DNN_DEFAULT_KERNEL_REGULARIZATION = [0, 5e-05]\n DNN_DEFAULT_BIAS_REGULARIZATION = [0, 5e-05]\n DNN_DEFAULT_LOSS = 'mean_squared_error'\n DNN_DEFAULT_VALIDATION_SPLIT = 0.2\n DNN_DEFAULT_EPOCHS = 100\n DNN_DEFAULT_CHECKPOINT_PERIOD = 100\n DNN_DEFAULT_VALIDATION_PERIOD = 1\n DNN_DEFAULT_PATIENCE = 1000\n DNN_DEFAULT_BATCH_SIZE = 16\n DNN_DEFAULT_OPTIMIZER = 'adam'\n DNN_DEFAULT_DROPOUT_RATE = 0.02\n DNN_DEFAULT_DECAY = 0\n DNN_DEFAULT_BIAS = 0.1\n DNN_DEFAULT_OUTPUT_BIAS = 0.5\n",
"step-5": "import numpy as np\n\nclass Constants():\n DNN_DEFAULT_ACTIVATION = 'relu'\n DNN_DEFAULT_KERNEL_REGULARIZATION = [0, 5e-5]\n DNN_DEFAULT_BIAS_REGULARIZATION = [0, 5e-5]\n DNN_DEFAULT_LOSS = 'mean_squared_error'\n DNN_DEFAULT_VALIDATION_SPLIT = 0.2\n DNN_DEFAULT_EPOCHS = 100\n DNN_DEFAULT_CHECKPOINT_PERIOD = 100\n DNN_DEFAULT_VALIDATION_PERIOD = 1\n DNN_DEFAULT_PATIENCE = 1000\n DNN_DEFAULT_BATCH_SIZE = 16\n DNN_DEFAULT_OPTIMIZER = 'adam'\n DNN_DEFAULT_DROPOUT_RATE = 0.02\n DNN_DEFAULT_DECAY = 0\n DNN_DEFAULT_BIAS = 0.1\n DNN_DEFAULT_OUTPUT_BIAS = 0.5",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
""" AuthService class module.
"""
from urllib.parse import urlencode
from http.client import HTTPConnection, HTTPResponse, HTTPException
from dms2021sensor.data.rest.exc import NotFoundError
class AuthService():
""" REST client to connect to the authentication service.
"""
def __init__(self, host: str, port: int):
""" Constructor method.
Initializes the client.
---
Parameters:
- host: The authentication service host string.
- port: The authentication service port number.
"""
self.__host: str = host
self.__port: int = port
def __get_connection(self) -> HTTPConnection:
""" Creates a new connection to the authentication server.
---
Returns:
The connection object.
"""
return HTTPConnection(self.__host, self.__port)
def has_right(self, username: str, right: str) -> bool:
""" Determines whether a given user from the authentication server
has a certain right or not.
---
Parameters:
- username: The user name string.
- right: The right name.
Returns:
True if the user has the given right
Throws:
- NotFoundError: if the user does not have the right, the user does not
exist, or the right does not exist.
- HTTPException: On an unhandled 500 error.
"""
form: str = urlencode({'username': username, 'right': right})
headers: dict = {
'Content-type': 'application/x-www-form-urlencoded'
}
connection: HTTPConnection = self.__get_connection()
connection.request('GET', '/users/'+str(username)+'/rights/'+str(right), form, headers)
response: HTTPResponse = connection.getresponse()
if response.status == 200:
return True
if response.status == 404:
raise NotFoundError()
if response.status == 500:
raise HTTPException('Server error')
return False
|
normal
|
{
"blob_id": "1438a268780217e647999ba031aa4a50a6912d2f",
"index": 3069,
"step-1": "<mask token>\n\n\nclass AuthService:\n <mask token>\n <mask token>\n\n def __get_connection(self) ->HTTPConnection:\n \"\"\" Creates a new connection to the authentication server.\n ---\n Returns:\n The connection object.\n \"\"\"\n return HTTPConnection(self.__host, self.__port)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AuthService:\n <mask token>\n\n def __init__(self, host: str, port: int):\n \"\"\" Constructor method.\n\n Initializes the client.\n ---\n Parameters:\n - host: The authentication service host string.\n - port: The authentication service port number.\n \"\"\"\n self.__host: str = host\n self.__port: int = port\n\n def __get_connection(self) ->HTTPConnection:\n \"\"\" Creates a new connection to the authentication server.\n ---\n Returns:\n The connection object.\n \"\"\"\n return HTTPConnection(self.__host, self.__port)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AuthService:\n \"\"\" REST client to connect to the authentication service.\n \"\"\"\n\n def __init__(self, host: str, port: int):\n \"\"\" Constructor method.\n\n Initializes the client.\n ---\n Parameters:\n - host: The authentication service host string.\n - port: The authentication service port number.\n \"\"\"\n self.__host: str = host\n self.__port: int = port\n\n def __get_connection(self) ->HTTPConnection:\n \"\"\" Creates a new connection to the authentication server.\n ---\n Returns:\n The connection object.\n \"\"\"\n return HTTPConnection(self.__host, self.__port)\n\n def has_right(self, username: str, right: str) ->bool:\n \"\"\" Determines whether a given user from the authentication server\n has a certain right or not.\n ---\n Parameters:\n - username: The user name string.\n - right: The right name.\n Returns:\n True if the user has the given right\n Throws:\n - NotFoundError: if the user does not have the right, the user does not\n exist, or the right does not exist.\n - HTTPException: On an unhandled 500 error.\n \"\"\"\n form: str = urlencode({'username': username, 'right': right})\n headers: dict = {'Content-type': 'application/x-www-form-urlencoded'}\n connection: HTTPConnection = self.__get_connection()\n connection.request('GET', '/users/' + str(username) + '/rights/' +\n str(right), form, headers)\n response: HTTPResponse = connection.getresponse()\n if response.status == 200:\n return True\n if response.status == 404:\n raise NotFoundError()\n if response.status == 500:\n raise HTTPException('Server error')\n return False\n",
"step-4": "<mask token>\nfrom urllib.parse import urlencode\nfrom http.client import HTTPConnection, HTTPResponse, HTTPException\nfrom dms2021sensor.data.rest.exc import NotFoundError\n\n\nclass AuthService:\n \"\"\" REST client to connect to the authentication service.\n \"\"\"\n\n def __init__(self, host: str, port: int):\n \"\"\" Constructor method.\n\n Initializes the client.\n ---\n Parameters:\n - host: The authentication service host string.\n - port: The authentication service port number.\n \"\"\"\n self.__host: str = host\n self.__port: int = port\n\n def __get_connection(self) ->HTTPConnection:\n \"\"\" Creates a new connection to the authentication server.\n ---\n Returns:\n The connection object.\n \"\"\"\n return HTTPConnection(self.__host, self.__port)\n\n def has_right(self, username: str, right: str) ->bool:\n \"\"\" Determines whether a given user from the authentication server\n has a certain right or not.\n ---\n Parameters:\n - username: The user name string.\n - right: The right name.\n Returns:\n True if the user has the given right\n Throws:\n - NotFoundError: if the user does not have the right, the user does not\n exist, or the right does not exist.\n - HTTPException: On an unhandled 500 error.\n \"\"\"\n form: str = urlencode({'username': username, 'right': right})\n headers: dict = {'Content-type': 'application/x-www-form-urlencoded'}\n connection: HTTPConnection = self.__get_connection()\n connection.request('GET', '/users/' + str(username) + '/rights/' +\n str(right), form, headers)\n response: HTTPResponse = connection.getresponse()\n if response.status == 200:\n return True\n if response.status == 404:\n raise NotFoundError()\n if response.status == 500:\n raise HTTPException('Server error')\n return False\n",
"step-5": "\"\"\" AuthService class module.\n\"\"\"\n\nfrom urllib.parse import urlencode\nfrom http.client import HTTPConnection, HTTPResponse, HTTPException\nfrom dms2021sensor.data.rest.exc import NotFoundError\n\n\nclass AuthService():\n \"\"\" REST client to connect to the authentication service.\n \"\"\"\n\n def __init__(self, host: str, port: int):\n \"\"\" Constructor method.\n\n Initializes the client.\n ---\n Parameters:\n - host: The authentication service host string.\n - port: The authentication service port number.\n \"\"\"\n self.__host: str = host\n self.__port: int = port\n\n def __get_connection(self) -> HTTPConnection:\n \"\"\" Creates a new connection to the authentication server.\n ---\n Returns:\n The connection object.\n \"\"\"\n return HTTPConnection(self.__host, self.__port)\n\n def has_right(self, username: str, right: str) -> bool:\n \"\"\" Determines whether a given user from the authentication server\n has a certain right or not.\n ---\n Parameters:\n - username: The user name string.\n - right: The right name.\n Returns:\n True if the user has the given right\n Throws:\n - NotFoundError: if the user does not have the right, the user does not\n exist, or the right does not exist.\n - HTTPException: On an unhandled 500 error.\n \"\"\"\n form: str = urlencode({'username': username, 'right': right})\n headers: dict = {\n 'Content-type': 'application/x-www-form-urlencoded'\n }\n connection: HTTPConnection = self.__get_connection()\n connection.request('GET', '/users/'+str(username)+'/rights/'+str(right), form, headers)\n response: HTTPResponse = connection.getresponse()\n if response.status == 200:\n return True\n if response.status == 404:\n raise NotFoundError()\n if response.status == 500:\n raise HTTPException('Server error')\n return False\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
from youtube_transcript_api import YouTubeTranscriptApi
transcript_list = YouTubeTranscriptApi.list_transcripts('i8pOulVUz0A')
transcript = transcript_list.find_transcript(['en'])
transcript = transcript.fetch()
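# fetch() returns a list of dicts, each with 'text', 'start' and 'duration' keys.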
with open("transcript.txt", 'w') as f:
for line in transcript:
f.write(line['text']+ '\n')
|
normal
|
{
"blob_id": "c2d6e4286e1b9d6dc852bde994da60d353e03e5c",
"index": 8031,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('transcript.txt', 'w') as f:\n for line in transcript:\n f.write(line['text'] + '\\n')\n",
"step-3": "<mask token>\ntranscript_list = YouTubeTranscriptApi.list_transcripts('i8pOulVUz0A')\ntranscript = transcript_list.find_transcript(['en'])\ntranscript = transcript.fetch()\nwith open('transcript.txt', 'w') as f:\n for line in transcript:\n f.write(line['text'] + '\\n')\n",
"step-4": "from youtube_transcript_api import YouTubeTranscriptApi\ntranscript_list = YouTubeTranscriptApi.list_transcripts('i8pOulVUz0A')\ntranscript = transcript_list.find_transcript(['en'])\ntranscript = transcript.fetch()\nwith open('transcript.txt', 'w') as f:\n for line in transcript:\n f.write(line['text'] + '\\n')\n",
"step-5": "from youtube_transcript_api import YouTubeTranscriptApi\n\ntranscript_list = YouTubeTranscriptApi.list_transcripts('i8pOulVUz0A')\ntranscript = transcript_list.find_transcript(['en'])\ntranscript = transcript.fetch()\n\nwith open(\"transcript.txt\", 'w') as f:\n for line in transcript:\n f.write(line['text']+ '\\n')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Solution still in progress.. (retry later)
import collections
class Solution(object):
def removeStones(self, stones):
"""
:type stones: List[List[int]]
:rtype: int
"""
        # Count how many connections each stone has in total.
        # Process the stones with the fewest connections first.
        # # Stones with zero connections are excluded.
        #
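        # Note: the standard solution treats stones sharing a row or column as
        # connected and returns len(stones) - number_of_connected_components
        # (e.g. via union-find); counting only fully isolated stones undercounts.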
# data init
stones_share_list = []
for i in range(len(stones)):
stones_share_list.append(0)
# set data(connecting count of stones)
for i in range(len(stones)):
check_stone = stones[i]
connect_count = 0
for j in range(len(stones)):
                if i == j:
                    continue
                if check_stone[0] == stones[j][0] or check_stone[1] == stones[j][1]:
connect_count += 1
stones_share_list[i] = connect_count
connect_sum = 0
for share in stones_share_list:
connect_sum += share
        if connect_sum == 0:
return 0
island = 0
print(stones_share_list)
for connect in stones_share_list:
            if connect == 0:
island += 1
print(island)
return len(stones) - (island + 1)
s = Solution()
# temp_value = [[0,0],[0,1],[1,0],[1,2],[2,1],[2,2],[2,3]]
# temp_value = [[0,0],[0,1],[1,0],[1,2],[2,1],[2,2]]
# temp_value = [[0,0],[0,2],[1,1],[2,0],[2,2]]
temp_value = [[3,2],[3,1],[4,4],[1,1],[0,2],[4,0]]
print(s.removeStones(temp_value))
|
normal
|
{
"blob_id": "896329a8b14d79f849e4a8c31c697f3981395790",
"index": 3327,
"step-1": "<mask token>\n\n\nclass Solution(object):\n\n def removeStones(self, stones):\n \"\"\"\n :type stones: List[List[int]]\n :rtype: int\n \"\"\"\n stones_share_list = []\n for i in range(len(stones)):\n stones_share_list.append(0)\n for i in range(len(stones)):\n check_stone = stones[i]\n connect_count = 0\n for j in range(len(stones)):\n if i is j:\n continue\n if check_stone[0] is stones[j][0] or check_stone[1] is stones[j\n ][1]:\n connect_count += 1\n stones_share_list[i] = connect_count\n connect_sum = 0\n for share in stones_share_list:\n connect_sum += share\n if connect_sum is 0:\n return 0\n island = 0\n print(stones_share_list)\n for connect in stones_share_list:\n if connect is 0:\n island += 1\n print(island)\n return len(stones) - (island + 1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n\n def removeStones(self, stones):\n \"\"\"\n :type stones: List[List[int]]\n :rtype: int\n \"\"\"\n stones_share_list = []\n for i in range(len(stones)):\n stones_share_list.append(0)\n for i in range(len(stones)):\n check_stone = stones[i]\n connect_count = 0\n for j in range(len(stones)):\n if i is j:\n continue\n if check_stone[0] is stones[j][0] or check_stone[1] is stones[j\n ][1]:\n connect_count += 1\n stones_share_list[i] = connect_count\n connect_sum = 0\n for share in stones_share_list:\n connect_sum += share\n if connect_sum is 0:\n return 0\n island = 0\n print(stones_share_list)\n for connect in stones_share_list:\n if connect is 0:\n island += 1\n print(island)\n return len(stones) - (island + 1)\n\n\n<mask token>\nprint(s.removeStones(temp_value))\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def removeStones(self, stones):\n \"\"\"\n :type stones: List[List[int]]\n :rtype: int\n \"\"\"\n stones_share_list = []\n for i in range(len(stones)):\n stones_share_list.append(0)\n for i in range(len(stones)):\n check_stone = stones[i]\n connect_count = 0\n for j in range(len(stones)):\n if i is j:\n continue\n if check_stone[0] is stones[j][0] or check_stone[1] is stones[j\n ][1]:\n connect_count += 1\n stones_share_list[i] = connect_count\n connect_sum = 0\n for share in stones_share_list:\n connect_sum += share\n if connect_sum is 0:\n return 0\n island = 0\n print(stones_share_list)\n for connect in stones_share_list:\n if connect is 0:\n island += 1\n print(island)\n return len(stones) - (island + 1)\n\n\ns = Solution()\ntemp_value = [[3, 2], [3, 1], [4, 4], [1, 1], [0, 2], [4, 0]]\nprint(s.removeStones(temp_value))\n",
"step-4": "import collections\n\n\nclass Solution(object):\n\n def removeStones(self, stones):\n \"\"\"\n :type stones: List[List[int]]\n :rtype: int\n \"\"\"\n stones_share_list = []\n for i in range(len(stones)):\n stones_share_list.append(0)\n for i in range(len(stones)):\n check_stone = stones[i]\n connect_count = 0\n for j in range(len(stones)):\n if i is j:\n continue\n if check_stone[0] is stones[j][0] or check_stone[1] is stones[j\n ][1]:\n connect_count += 1\n stones_share_list[i] = connect_count\n connect_sum = 0\n for share in stones_share_list:\n connect_sum += share\n if connect_sum is 0:\n return 0\n island = 0\n print(stones_share_list)\n for connect in stones_share_list:\n if connect is 0:\n island += 1\n print(island)\n return len(stones) - (island + 1)\n\n\ns = Solution()\ntemp_value = [[3, 2], [3, 1], [4, 4], [1, 1], [0, 2], [4, 0]]\nprint(s.removeStones(temp_value))\n",
"step-5": "# 문제 풀이 진행중..(나중에 재도전)\nimport collections\nclass Solution(object):\n def removeStones(self, stones):\n \"\"\"\n :type stones: List[List[int]]\n :rtype: int\n \"\"\"\n # 전체 연결점 개수 확인한다.\n # 개수가 적은 것 부터 처리한다\n # # 연결된 게 0개인 애들은 제외\n #\n\n # data init\n stones_share_list = []\n for i in range(len(stones)):\n stones_share_list.append(0)\n\n # set data(connecting count of stones)\n for i in range(len(stones)):\n check_stone = stones[i]\n connect_count = 0\n for j in range(len(stones)):\n if i is j:\n continue\n if check_stone[0] is stones[j][0] or check_stone[1] is stones[j][1]:\n connect_count += 1\n\n stones_share_list[i] = connect_count\n\n connect_sum = 0\n for share in stones_share_list:\n connect_sum += share\n\n if connect_sum is 0:\n return 0\n\n island = 0\n print(stones_share_list)\n for connect in stones_share_list:\n if connect is 0:\n island += 1\n print(island)\n return len(stones) - (island + 1)\n\n\ns = Solution()\n\n# temp_value = [[0,0],[0,1],[1,0],[1,2],[2,1],[2,2],[2,3]]\n# temp_value = [[0,0],[0,1],[1,0],[1,2],[2,1],[2,2]]\n# temp_value = [[0,0],[0,2],[1,1],[2,0],[2,2]]\ntemp_value = [[3,2],[3,1],[4,4],[1,1],[0,2],[4,0]]\nprint(s.removeStones(temp_value))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
tej="votary"
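# print each character of the string on its own line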
for i in range(len(tej)):
print(tej[i])
|
normal
|
{
"blob_id": "1f385fda1bdc0008ff91b935998c95c8ffcbd297",
"index": 2797,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(5):\n print(tej[i])\n",
"step-3": "tej = 'votary'\nfor i in range(5):\n print(tej[i])\n",
"step-4": "tej=\"votary\"\nfor i in range(5):\n\tprint(tej[i])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
__author__ = 'Joe'
import sys
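# make the modules under ../src importable when running the tests from here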
sys.path.insert(0,'../src/')
import grocery_functions
import unittest
class TestGroceryFuncs(unittest.TestCase):
def test_getRecipeNames(self):
recipe_names = grocery_functions.get_recipe_names("test-recipes")
self.assertTrue(recipe_names[0] == "Cajun Chicken & Rice")
self.assertTrue(recipe_names[1] == "Chicken Curry in a Hurry")
self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')
self.assertTrue(recipe_names[3] == 'Healthy Roasted Chicken and Veggies (one pan)')
self.assertTrue(recipe_names[4] == 'Kielbasa, Pepper, Onion and Potato Hash')
def test_getIngredientsFromFile(self):
        list=grocery_functions.get_ingredients_from_recipe_file("test-recipes\\Kielbasa, Pepper, Onion and Potato Hash.txt")
self.assertTrue(list[0].name == 'turkey kielbasa')
self.assertTrue(list[0].unit == 'ounce')
self.assertTrue(list[0].number == '14')
self.assertTrue(list[2].name == 'non-green bell pepper')
self.assertTrue(list[2].unit == '')
self.assertTrue(list[2].number == '1')
self.assertTrue(list[6].name == 'salt')
self.assertTrue(list[6].unit == '')
self.assertTrue(list[6].number == '1')
def test_getTagsFromFile(self):
        list=grocery_functions.get_tags_from_recipe_file("test-recipes\\Chicken Curry in a Hurry.txt")
self.assertTrue(list[0] == 'chicken')
self.assertTrue(list[1] == 'easy')
self.assertTrue(list[2] == 'stove')
def test_getRecipeFromFile(self):
        list=grocery_functions.get_recipe_from_recipe_file("test-recipes\\Healthy Roasted Chicken and Veggies (one pan).txt")
self.assertTrue(list[2]=="1 cup bell pepper, chopped (any colors you like)")
self.assertTrue(list[10]=="1 teaspoon italian seasoning")
self.assertTrue(list[15]=="Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, ")
def test_condenseList(self):
recipe_names = grocery_functions.get_recipe_names("test-recipes")
grocery_list=[]
for recipe in recipe_names:
grocery_list += grocery_functions.get_ingredients_from_recipe_file("test-recipes\\"+recipe+".txt")
grocery_list=grocery_functions.condense_grocery_list(grocery_list)
# grocery_functions.print_grocery_list(grocery_list)
# grocery_functions.sort_and_print_grocery_List(grocery_list, "Smiths-Eu-JT-ItemDepartments.txt")
def test_makeAllIngredientsFile(self):
grocery_functions.make_all_ingredients_file()
def test_getItemDeptDicts(self):
grocery_functions.get_item_dept_dicts("Smiths-Eu-JT-ItemDepartments.txt")
def test_checkRecipeFormat(self):
errors=grocery_functions.check_recipe_format("test-recipes", False)
self.assertTrue(errors == [])
errors=grocery_functions.check_recipe_format("broken-test-recipes", False)
self.assertTrue('invalid format, "1 lb, chicken breasts" in: broken-test-recipes//broken_recipe.txt' in errors)
self.assertTrue('invalid heading, "wrong_header" in file: broken-test-recipes//broken_recipe.txt' in errors)
self.assertTrue('Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors)
def test_update_default_ing_dept_file(self):
grocery_functions.update_default_ing_dept_file(grocery_functions.get_all_ingredients("test-recipes"))
def suite(self):
return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
unittest.TextTestRunner(verbosity=2).run(suite)
|
normal
|
{
"blob_id": "c4fbf206482a04f3e2d2aa98a0dbf525a176c4e7",
"index": 1087,
"step-1": "<mask token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n <mask token>\n <mask token>\n <mask token>\n\n def test_condenseList(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n grocery_list = []\n for recipe in recipe_names:\n grocery_list += grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\' + recipe + '.txt')\n grocery_list = grocery_functions.condense_grocery_list(grocery_list)\n <mask token>\n <mask token>\n <mask token>\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list = grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\Kielbasa, Pepper, Onion and Potato Hash.txt')\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n\n def test_getTagsFromFile(self):\n list = grocery_functions.get_tags_from_recipe_file(\n 'test-recipes\\\\Chicken Curry in a Hurry.txt')\n self.assertTrue(list[0] == 'chicken')\n self.assertTrue(list[1] == 'easy')\n self.assertTrue(list[2] == 'stove')\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, '\n )\n\n def test_condenseList(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n grocery_list = []\n for recipe in recipe_names:\n grocery_list += grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\' + recipe + '.txt')\n grocery_list = grocery_functions.condense_grocery_list(grocery_list)\n <mask token>\n <mask token>\n <mask token>\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.insert(0, '../src/')\n<mask token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list = grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\Kielbasa, Pepper, Onion and Potato Hash.txt')\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n\n def test_getTagsFromFile(self):\n list = grocery_functions.get_tags_from_recipe_file(\n 'test-recipes\\\\Chicken Curry in a Hurry.txt')\n self.assertTrue(list[0] == 'chicken')\n self.assertTrue(list[1] == 'easy')\n self.assertTrue(list[2] == 'stove')\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, '\n )\n\n def test_condenseList(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n grocery_list = []\n for recipe in recipe_names:\n grocery_list += grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\' + recipe + '.txt')\n grocery_list = grocery_functions.condense_grocery_list(grocery_list)\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\n 'Smiths-Eu-JT-ItemDepartments.txt')\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n unittest.TextTestRunner(verbosity=2).run(suite)\n",
"step-4": "__author__ = 'Joe'\nimport sys\nsys.path.insert(0, '../src/')\nimport grocery_functions\nimport unittest\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list = grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\Kielbasa, Pepper, Onion and Potato Hash.txt')\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n\n def test_getTagsFromFile(self):\n list = grocery_functions.get_tags_from_recipe_file(\n 'test-recipes\\\\Chicken Curry in a Hurry.txt')\n self.assertTrue(list[0] == 'chicken')\n self.assertTrue(list[1] == 'easy')\n self.assertTrue(list[2] == 'stove')\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, '\n )\n\n def test_condenseList(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n grocery_list = []\n for recipe in recipe_names:\n grocery_list += grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\' + recipe + '.txt')\n grocery_list = grocery_functions.condense_grocery_list(grocery_list)\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\n 'Smiths-Eu-JT-ItemDepartments.txt')\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n unittest.TextTestRunner(verbosity=2).run(suite)\n",
"step-5": "__author__ = 'Joe'\nimport sys\nsys.path.insert(0,'../src/')\n\nimport grocery_functions\nimport unittest\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names(\"test-recipes\")\n self.assertTrue(recipe_names[0] == \"Cajun Chicken & Rice\")\n self.assertTrue(recipe_names[1] == \"Chicken Curry in a Hurry\")\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] == 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] == 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list=grocery_functions.get_ingredients_from_recipe_file(\"test-recipes\\Kielbasa, Pepper, Onion and Potato Hash.txt\")\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n\n def test_getTagsFromFile(self):\n list=grocery_functions.get_tags_from_recipe_file(\"test-recipes\\Chicken Curry in a Hurry.txt\")\n self.assertTrue(list[0] == 'chicken')\n self.assertTrue(list[1] == 'easy')\n self.assertTrue(list[2] == 'stove')\n\n def test_getRecipeFromFile(self):\n list=grocery_functions.get_recipe_from_recipe_file(\"test-recipes\\Healthy Roasted Chicken and Veggies (one pan).txt\")\n self.assertTrue(list[2]==\"1 cup bell pepper, chopped (any colors you like)\")\n self.assertTrue(list[10]==\"1 teaspoon italian seasoning\")\n self.assertTrue(list[15]==\"Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, \")\n\n def test_condenseList(self):\n recipe_names = grocery_functions.get_recipe_names(\"test-recipes\")\n grocery_list=[]\n for recipe in recipe_names:\n grocery_list += grocery_functions.get_ingredients_from_recipe_file(\"test-recipes\\\\\"+recipe+\".txt\")\n grocery_list=grocery_functions.condense_grocery_list(grocery_list)\n # grocery_functions.print_grocery_list(grocery_list)\n # grocery_functions.sort_and_print_grocery_List(grocery_list, \"Smiths-Eu-JT-ItemDepartments.txt\")\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\"Smiths-Eu-JT-ItemDepartments.txt\")\n\n def test_checkRecipeFormat(self):\n errors=grocery_functions.check_recipe_format(\"test-recipes\", False)\n self.assertTrue(errors == [])\n errors=grocery_functions.check_recipe_format(\"broken-test-recipes\", False)\n self.assertTrue('invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt' in errors)\n self.assertTrue('invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt' in errors)\n self.assertTrue('Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors)\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.get_all_ingredients(\"test-recipes\"))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n unittest.TextTestRunner(verbosity=2).run(suite)",
"step-ids": [
4,
8,
12,
14,
15
]
}
|
[
4,
8,
12,
14,
15
] |
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.urls import include, path
from rest_framework import routers
from BugBytes import views
from django.conf.urls.static import static
router = routers.DefaultRouter()
router.register(r'species', views.SpeciesViewSet)
router.register(r'com_names', views.Com_NamesViewSet)
router.register(r'photos', views.PhotosViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('bugbytes/<int:tensorflow_id>/view_species',
views.view_species, name='view_species'),
path('', views.landing, name='landing'),
path('model_json/', views.model_json, name='model_json'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
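# Note (editor's addition): DRF's DefaultRouter generates list/detail routes
# for each registered viewset, so the registrations above expose endpoints
# such as /api/species/, /api/species/<pk>/, /api/com_names/, /api/photos/,
# plus a browsable API root at /api/.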
|
normal
|
{
"blob_id": "786bc5d44115b46bd246e85e85c8f8c1f20737b9",
"index": 7921,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrouter.register('species', views.SpeciesViewSet)\nrouter.register('com_names', views.Com_NamesViewSet)\nrouter.register('photos', views.PhotosViewSet)\n<mask token>\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-3": "<mask token>\nrouter = routers.DefaultRouter()\nrouter.register('species', views.SpeciesViewSet)\nrouter.register('com_names', views.Com_NamesViewSet)\nrouter.register('photos', views.PhotosViewSet)\nurlpatterns = [path('admin/', admin.site.urls), path('api/', include(router\n .urls)), path('api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework')), path('bugbytes/<int:tensorflow_id>/view_species',\n views.view_species, name='view_species'), path('', views.landing, name=\n 'landing'), path('model_json/', views.model_json, name='model_json')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-4": "<mask token>\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom rest_framework import routers\nfrom BugBytes import views\nfrom django.conf.urls.static import static\nrouter = routers.DefaultRouter()\nrouter.register('species', views.SpeciesViewSet)\nrouter.register('com_names', views.Com_NamesViewSet)\nrouter.register('photos', views.PhotosViewSet)\nurlpatterns = [path('admin/', admin.site.urls), path('api/', include(router\n .urls)), path('api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework')), path('bugbytes/<int:tensorflow_id>/view_species',\n views.view_species, name='view_species'), path('', views.landing, name=\n 'landing'), path('model_json/', views.model_json, name='model_json')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-5": "\"\"\"config URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom rest_framework import routers\nfrom BugBytes import views\nfrom django.conf.urls.static import static\n\nrouter = routers.DefaultRouter()\nrouter.register(r'species', views.SpeciesViewSet)\nrouter.register(r'com_names', views.Com_NamesViewSet)\nrouter.register(r'photos', views.PhotosViewSet)\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include(router.urls)),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path('bugbytes/<int:tensorflow_id>/view_species',\n views.view_species, name='view_species'),\n path('', views.landing, name='landing'),\n path('model_json/', views.model_json, name='model_json'),\n]\n\nif settings.DEBUG: # new\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pytest
from pandas import (
Index,
NaT,
)
import pandas._testing as tm
def test_astype_str_from_bytes():
# https://github.com/pandas-dev/pandas/issues/38607
idx = Index(["あ", b"a"], dtype="object")
result = idx.astype(str)
expected = Index(["あ", "a"], dtype="object")
tm.assert_index_equal(result, expected)
def test_astype_invalid_nas_to_tdt64_raises():
# GH#45722 don't cast np.datetime64 NaTs to timedelta64 NaT
idx = Index([NaT.asm8] * 2, dtype=object)
msg = r"Cannot cast Index to dtype timedelta64\[ns\]"
with pytest.raises(TypeError, match=msg):
idx.astype("m8[ns]")
|
normal
|
{
"blob_id": "13b2fea09f5a4300563dd8870fe1841b47756b36",
"index": 9972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n idx = Index([NaT.asm8] * 2, dtype=object)\n msg = 'Cannot cast Index to dtype timedelta64\\\\[ns\\\\]'\n with pytest.raises(TypeError, match=msg):\n idx.astype('m8[ns]')\n",
"step-3": "<mask token>\n\n\ndef test_astype_str_from_bytes():\n idx = Index(['あ', b'a'], dtype='object')\n result = idx.astype(str)\n expected = Index(['あ', 'a'], dtype='object')\n tm.assert_index_equal(result, expected)\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n idx = Index([NaT.asm8] * 2, dtype=object)\n msg = 'Cannot cast Index to dtype timedelta64\\\\[ns\\\\]'\n with pytest.raises(TypeError, match=msg):\n idx.astype('m8[ns]')\n",
"step-4": "import pytest\nfrom pandas import Index, NaT\nimport pandas._testing as tm\n\n\ndef test_astype_str_from_bytes():\n idx = Index(['あ', b'a'], dtype='object')\n result = idx.astype(str)\n expected = Index(['あ', 'a'], dtype='object')\n tm.assert_index_equal(result, expected)\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n idx = Index([NaT.asm8] * 2, dtype=object)\n msg = 'Cannot cast Index to dtype timedelta64\\\\[ns\\\\]'\n with pytest.raises(TypeError, match=msg):\n idx.astype('m8[ns]')\n",
"step-5": "import pytest\n\nfrom pandas import (\n Index,\n NaT,\n)\nimport pandas._testing as tm\n\n\ndef test_astype_str_from_bytes():\n # https://github.com/pandas-dev/pandas/issues/38607\n idx = Index([\"あ\", b\"a\"], dtype=\"object\")\n result = idx.astype(str)\n expected = Index([\"あ\", \"a\"], dtype=\"object\")\n tm.assert_index_equal(result, expected)\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n # GH#45722 don't cast np.datetime64 NaTs to timedelta64 NaT\n idx = Index([NaT.asm8] * 2, dtype=object)\n\n msg = r\"Cannot cast Index to dtype timedelta64\\[ns\\]\"\n with pytest.raises(TypeError, match=msg):\n idx.astype(\"m8[ns]\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from os import environ
import boto3
from flask import Flask, redirect
from flask_sqlalchemy import SQLAlchemy
from json import load
from pathlib import Path
path = Path(__file__).parent
db = SQLAlchemy()
with open(path / "../schemas.json", "r") as fp:
schemas = load(fp)
with open(path / "../config.json", "r") as fp:
config = load(fp)
app = Flask(__name__, template_folder="templates")
app.config["SECRET_KEY"] = "3205fc85cd004116bfe218f14192e49a"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///app.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SWAGGER_UI_OAUTH_CLIENT_ID"] = "documentation"
domain = app.config.get("SERVER_NAME")
port = environ.get("PORT", config["default_port"])
redirect_uri = environ.get("REDIRECT_URI", config["redirect_uri"])
client_uri = environ.get("CLIENT_URI", config["client_uri"])
client_s3 = boto3.resource("s3")
@app.route("/")
def redirect_to_swagger():
return redirect("/swagger", 302)
|
normal
|
{
"blob_id": "631904ae96584bd19756f9335175a419397ac252",
"index": 8562,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef redirect_to_swagger():\n return redirect('/swagger', 302)\n",
"step-2": "<mask token>\nwith open(path / '../schemas.json', 'r') as fp:\n schemas = load(fp)\nwith open(path / '../config.json', 'r') as fp:\n config = load(fp)\n<mask token>\n\n\[email protected]('/')\ndef redirect_to_swagger():\n return redirect('/swagger', 302)\n",
"step-3": "<mask token>\npath = Path(__file__).parent\ndb = SQLAlchemy()\nwith open(path / '../schemas.json', 'r') as fp:\n schemas = load(fp)\nwith open(path / '../config.json', 'r') as fp:\n config = load(fp)\napp = Flask(__name__, template_folder='templates')\napp.config['SECRET_KEY'] = '3205fc85cd004116bfe218f14192e49a'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SWAGGER_UI_OAUTH_CLIENT_ID'] = 'documentation'\ndomain = app.config.get('SERVER_NAME')\nport = environ.get('PORT', config['default_port'])\nredirect_uri = environ.get('REDIRECT_URI', config['redirect_uri'])\nclient_uri = environ.get('CLIENT_URI', config['client_uri'])\nclient_s3 = boto3.resource('s3')\n\n\[email protected]('/')\ndef redirect_to_swagger():\n return redirect('/swagger', 302)\n",
"step-4": "from os import environ\nimport boto3\nfrom flask import Flask, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom json import load\nfrom pathlib import Path\npath = Path(__file__).parent\ndb = SQLAlchemy()\nwith open(path / '../schemas.json', 'r') as fp:\n schemas = load(fp)\nwith open(path / '../config.json', 'r') as fp:\n config = load(fp)\napp = Flask(__name__, template_folder='templates')\napp.config['SECRET_KEY'] = '3205fc85cd004116bfe218f14192e49a'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SWAGGER_UI_OAUTH_CLIENT_ID'] = 'documentation'\ndomain = app.config.get('SERVER_NAME')\nport = environ.get('PORT', config['default_port'])\nredirect_uri = environ.get('REDIRECT_URI', config['redirect_uri'])\nclient_uri = environ.get('CLIENT_URI', config['client_uri'])\nclient_s3 = boto3.resource('s3')\n\n\[email protected]('/')\ndef redirect_to_swagger():\n return redirect('/swagger', 302)\n",
"step-5": "from os import environ\n\nimport boto3\nfrom flask import Flask, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom json import load\nfrom pathlib import Path\n\n\npath = Path(__file__).parent\n\n\ndb = SQLAlchemy()\n\nwith open(path / \"../schemas.json\", \"r\") as fp:\n schemas = load(fp)\n\nwith open(path / \"../config.json\", \"r\") as fp:\n config = load(fp)\n\napp = Flask(__name__, template_folder=\"templates\")\napp.config[\"SECRET_KEY\"] = \"3205fc85cd004116bfe218f14192e49a\"\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///app.db\"\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\napp.config[\"SWAGGER_UI_OAUTH_CLIENT_ID\"] = \"documentation\"\ndomain = app.config.get(\"SERVER_NAME\")\n\n\nport = environ.get(\"PORT\", config[\"default_port\"])\nredirect_uri = environ.get(\"REDIRECT_URI\", config[\"redirect_uri\"])\nclient_uri = environ.get(\"CLIENT_URI\", config[\"client_uri\"])\n\nclient_s3 = boto3.resource(\"s3\")\n\n\[email protected](\"/\")\ndef redirect_to_swagger():\n return redirect(\"/swagger\", 302)\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# A perfect number is a number for which the sum of its proper divisors is exactly equal to the number.
# For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28,
# which means that 28 is a perfect number.
#
# A number whose proper divisors are less than the number is called deficient and
# a number whose proper divisors exceed the number is called abundant.
#
# As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16,
# the smallest number that can be written as the sum of two abundant numbers is 24.
# By mathematical analysis, it can be shown that all integers greater than 28123
# can be written as the sum of two abundant numbers.
# However, this upper limit cannot be reduced any further by analysis even though
# it is known that the greatest number that cannot be expressed as the sum of two abundant numbers
# is less than this limit.
#
# Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers.
UPPER_LIMIT = 28124
import math
import cProfile
from bisect import bisect
def sum_divisors(N):
total = 1
    for i in xrange(2, int(math.sqrt(N)) + 1):  # int() keeps the bound integral (float bounds to xrange are deprecated)
if (N % i == 0):
total += i
if ((i * i) != N):
total += (N / i)
return total
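
# quick sanity checks (editor's addition, not part of the original solution):
# 28 is perfect, so its proper divisors sum back to 28; 12 is abundant.
assert sum_divisors(28) == 28
assert sum_divisors(12) == 16   # 1 + 2 + 3 + 4 + 6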
abundant = []
for i in xrange(11, UPPER_LIMIT):
if (sum_divisors(i) > i):
abundant.append(i)
print "found: ", len(abundant), " abundant numbers less than ", UPPER_LIMIT
print "highest abundant number: ", abundant[-1]
# Smart: compute all the sums of the abundant numbers we have. Store everything in an array.
def AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers():
# Create an array that is zero everywhere, then punch out the number
# that are expressible as the sum of two abundant numbers
integers = [0] * UPPER_LIMIT
for i in xrange(0, len(abundant)):
for j in xrange(i, len(abundant)):
addend = abundant[i] + abundant[j]
if (addend < UPPER_LIMIT):
integers[addend] = 1
else:
                break  # don't bother going this high
# We've filled in the array. Now do the sum
return sum(i for i in xrange(0, UPPER_LIMIT) if integers[i] == 0)
#cProfile.run('AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers()')
print AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers()
# Somebody else (norvig) did this, which is really slick!
def norvig():
abundants = set(i for i in range(1,28124) if sum_divisors(i) > i)
def abundantsum(i):
return any(i-a in abundants for a in abundants)
return sum(i for i in range(1,28124) if not abundantsum(i))
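
# Editor's note on the design choice above: keeping the abundant numbers in a
# set makes each `i - a in abundants` membership test O(1) on average, so
# abundantsum(i) is linear in the number of abundant numbers instead of
# requiring a precomputed sieve of pairwise sums. Both versions should agree
# on the published answer to Project Euler problem 23 (4179871).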
|
normal
|
{
"blob_id": "8ca77ed608108a9aa693acb686156e661794d7ab",
"index": 394,
"step-1": "# A perfect number is a number for which the sum of its proper divisors is exactly equal to the number. \r\n# For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, \r\n# which means that 28 is a perfect number.\r\n#\r\n# A number whose proper divisors are less than the number is called deficient and \r\n# a number whose proper divisors exceed the number is called abundant.\r\n#\r\n# As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, \r\n# the smallest number that can be written as the sum of two abundant numbers is 24. \r\n# By mathematical analysis, it can be shown that all integers greater than 28123 \r\n# can be written as the sum of two abundant numbers. \r\n# However, this upper limit cannot be reduced any further by analysis even though\r\n# it is known that the greatest number that cannot be expressed as the sum of two abundant numbers \r\n# is less than this limit.\r\n#\r\n# Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers.\r\n\r\nUPPER_LIMIT = 28124\r\n\r\nimport math\r\nimport cProfile\r\nfrom bisect import bisect\r\ndef sum_divisors(N):\r\n total = 1\r\n for i in xrange(2, math.sqrt(N)+1):\r\n if (N % i == 0):\r\n total += i\r\n if ((i * i) != N):\r\n total += (N / i)\r\n return total\r\n\r\nabundant = []\r\nfor i in xrange(11, UPPER_LIMIT):\r\n if (sum_divisors(i) > i):\r\n abundant.append(i)\r\n\r\n\r\nprint \"found: \", len(abundant), \" abundant numbers less than \", UPPER_LIMIT\r\nprint \"highest abundant number: \", abundant[-1]\r\n\r\n# Smart: compute all the sums of the abundant numbers we have. Store everything in an array.\r\ndef AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers():\r\n # Create an array that is zero everywhere, then punch out the number\r\n # that are expressible as the sum of two abundant numbers\r\n integers = [0] * UPPER_LIMIT\r\n for i in xrange(0, len(abundant)):\r\n for j in xrange(i, len(abundant)):\r\n addend = abundant[i] + abundant[j]\r\n if (addend < UPPER_LIMIT):\r\n integers[addend] = 1\r\n else:\r\n break; #don't bother going this high\r\n\r\n # We've filled in the array. Now do the sum\r\n return sum(i for i in xrange(0, UPPER_LIMIT) if integers[i] == 0)\r\n\r\n#cProfile.run('AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers()')\r\nprint AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers()\r\n\r\n\r\n# Somebody else (norvig) did this, which is really slick!\r\ndef norvig():\r\n abundants = set(i for i in range(1,28124) if sum_divisors(i) > i)\r\n def abundantsum(i):\r\n return any(i-a in abundants for a in abundants)\r\n return sum(i for i in range(1,28124) if not abundantsum(i))\r\n\r\n \r\n \r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import dlib
import cv2
import imageio
import torch
from PIL import Image
from model import AgeGenderModel
from mix_model import MixModel
from torchvision.transforms import transforms
from tqdm import tqdm
from retinaface.pre_trained_models import get_model
transform = transforms.Compose([
transforms.Resize((112, 112)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
# Load model age gender
model = MixModel()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ckpt = torch.load("outputs_w_free/model_epoch_50.pth")
model.load_state_dict(ckpt['model_state_dict'])
model.eval()
model.to(device)
model_face = get_model("resnet50_2020-07-20", max_size=512, device='cuda:1')
model_face.eval()
# dlib detector (unused here; detection below is done with retinaface instead)
detector = dlib.get_frontal_face_detector()
FPS = 30
# read the video
out_video = imageio.get_writer("/home/cybercore/haimd/w_freeze_osaka.mp4", format='mp4', mode='I', fps=FPS)
video = imageio.get_reader("/home/cybercore/haimd/osaka.mp4")
for img in tqdm(video):
if img is not None:
annotation = model_face.predict_jsons(img)
max_thresh = annotation[0]['score']
bbox = annotation[0]['bbox']
if max_thresh > 0.3:
max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
x1 = bbox[0]
y1 = bbox[1]
x2 = bbox[2]
y2 = bbox[3]
x1_face = bbox[0]-20
y1_face = bbox[1]-20
x2_face = bbox[2]+20
y2_face = bbox[3]+20
if x1_face > 0 and y1_face > 0:
img_face = img[y1_face:y2_face, x1_face:x2_face]
imageio.imwrite('face.jpg', img_face)
img_face = Image.fromarray(img_face)
img_face = transform(img_face)
img_face = torch.unsqueeze(img_face, 0)
img_face = img_face.to(device)
gen_pred, age_cls_pred, age_reg_pred = model(img_face)
_, gen_preds = torch.max(gen_pred, 1)
_, age_cls_pred = torch.max(age_cls_pred, 1)
if gen_preds.item() == 1:
text = f'M:{int(age_reg_pred.item()*100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(255,0,0), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_AA)
elif gen_preds.item() == 0:
text = f'F:{int(age_reg_pred.item()*100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(0,0,255), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)
out_video.append_data(img)
out_video.close()
print('Done')
|
normal
|
{
"blob_id": "1cc14836808d70c1e53a9ca948a52776ebc89f4a",
"index": 4624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\n<mask token>\nmodel_face.eval()\n<mask token>\nfor img in tqdm(video):\n if img is not None:\n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n x1_face = bbox[0] - 20\n y1_face = bbox[1] - 20\n x2_face = bbox[2] + 20\n y2_face = bbox[3] + 20\n if x1_face > 0 and y1_face > 0:\n img_face = img[y1_face:y2_face, x1_face:x2_face]\n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device)\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n _, age_cls_pred = torch.max(age_cls_pred, 1)\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(255, 0, 0), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0\n ), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(0, 0, 255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255\n ), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n",
"step-3": "<mask token>\ntransform = transforms.Compose([transforms.Resize((112, 112)), transforms.\n ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, \n 0.1994, 0.201))])\nmodel = MixModel()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nckpt = torch.load('outputs_w_free/model_epoch_50.pth')\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\nmodel_face = get_model('resnet50_2020-07-20', max_size=512, device='cuda:1')\nmodel_face.eval()\ndetector = dlib.get_frontal_face_detector()\nFPS = 30\nout_video = imageio.get_writer('/home/cybercore/haimd/w_freeze_osaka.mp4',\n format='mp4', mode='I', fps=FPS)\nvideo = imageio.get_reader('/home/cybercore/haimd/osaka.mp4')\nfor img in tqdm(video):\n if img is not None:\n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n x1_face = bbox[0] - 20\n y1_face = bbox[1] - 20\n x2_face = bbox[2] + 20\n y2_face = bbox[3] + 20\n if x1_face > 0 and y1_face > 0:\n img_face = img[y1_face:y2_face, x1_face:x2_face]\n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device)\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n _, age_cls_pred = torch.max(age_cls_pred, 1)\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(255, 0, 0), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0\n ), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(0, 0, 255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255\n ), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n",
"step-4": "import dlib\nimport cv2\nimport imageio\nimport torch\nfrom PIL import Image\nfrom model import AgeGenderModel\nfrom mix_model import MixModel\nfrom torchvision.transforms import transforms\nfrom tqdm import tqdm\nfrom retinaface.pre_trained_models import get_model\ntransform = transforms.Compose([transforms.Resize((112, 112)), transforms.\n ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, \n 0.1994, 0.201))])\nmodel = MixModel()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nckpt = torch.load('outputs_w_free/model_epoch_50.pth')\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\nmodel_face = get_model('resnet50_2020-07-20', max_size=512, device='cuda:1')\nmodel_face.eval()\ndetector = dlib.get_frontal_face_detector()\nFPS = 30\nout_video = imageio.get_writer('/home/cybercore/haimd/w_freeze_osaka.mp4',\n format='mp4', mode='I', fps=FPS)\nvideo = imageio.get_reader('/home/cybercore/haimd/osaka.mp4')\nfor img in tqdm(video):\n if img is not None:\n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n x1_face = bbox[0] - 20\n y1_face = bbox[1] - 20\n x2_face = bbox[2] + 20\n y2_face = bbox[3] + 20\n if x1_face > 0 and y1_face > 0:\n img_face = img[y1_face:y2_face, x1_face:x2_face]\n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device)\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n _, age_cls_pred = torch.max(age_cls_pred, 1)\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(255, 0, 0), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0\n ), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(0, 0, 255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255\n ), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n",
"step-5": "import dlib\nimport cv2\nimport imageio\nimport torch\nfrom PIL import Image \nfrom model import AgeGenderModel\nfrom mix_model import MixModel\nfrom torchvision.transforms import transforms\nfrom tqdm import tqdm\nfrom retinaface.pre_trained_models import get_model\n\n\ntransform = transforms.Compose([\n transforms.Resize((112, 112)),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n])\n\n# Load model age gender\nmodel = MixModel()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nckpt = torch.load(\"outputs_w_free/model_epoch_50.pth\")\n\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\n\nmodel_face = get_model(\"resnet50_2020-07-20\", max_size=512, device='cuda:1')\nmodel_face.eval()\n\n# load the detector\ndetector = dlib.get_frontal_face_detector()\nFPS = 30\n# read the video\nout_video = imageio.get_writer(\"/home/cybercore/haimd/w_freeze_osaka.mp4\", format='mp4', mode='I', fps=FPS)\nvideo = imageio.get_reader(\"/home/cybercore/haimd/osaka.mp4\")\nfor img in tqdm(video):\n if img is not None:\n # gray = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY)\n \n # faces = detector(gray)\n \n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n \n \n # for face in faces:\n # print(face)\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n \n x1_face = bbox[0]-20\n y1_face = bbox[1]-20\n x2_face = bbox[2]+20\n y2_face = bbox[3]+20\n if x1_face > 0 and y1_face > 0:\n \n img_face = img[y1_face:y2_face, x1_face:x2_face]\n \n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device) \n\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n _, age_cls_pred = torch.max(age_cls_pred, 1)\n\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item()*100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(255,0,0), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item()*100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(0,0,255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n \n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from contextlib import suppress
import asyncio
import shutil
from aiohttp import web
from bot import app
from var import var
from logger import update_logging_files
loop = asyncio.get_event_loop()
def import_handlers():
    # imported for their side effects: each module is expected to register
    # its handlers with the bot when imported
from deezer import handlers, callback_handlers
from spotify import handlers, integration, callback_handlers
from vk import handlers, callback_handlers
from soundcloud import handlers, callback_handlers
import handlers
import inline_handlers
import callback_handlers
import error_handlers
if __name__ == '__main__':
with suppress(FileNotFoundError):
shutil.rmtree('downloads')
logging = asyncio.ensure_future(update_logging_files())
import_handlers()
web.run_app(app, port=8081)
loop.close()
|
normal
|
{
"blob_id": "d957fd5fbcdcf2e549323677185eabb8a50536c6",
"index": 5716,
"step-1": "<mask token>\n\n\ndef import_handlers():\n from deezer import handlers, callback_handlers\n from spotify import handlers, integration, callback_handlers\n from vk import handlers, callback_handlers\n from soundcloud import handlers, callback_handlers\n import handlers\n import inline_handlers\n import callback_handlers\n import error_handlers\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef import_handlers():\n from deezer import handlers, callback_handlers\n from spotify import handlers, integration, callback_handlers\n from vk import handlers, callback_handlers\n from soundcloud import handlers, callback_handlers\n import handlers\n import inline_handlers\n import callback_handlers\n import error_handlers\n\n\nif __name__ == '__main__':\n with suppress(FileNotFoundError):\n shutil.rmtree('downloads')\n logging = asyncio.ensure_future(update_logging_files())\n import_handlers()\n web.run_app(app, port=8081)\n loop.close()\n",
"step-3": "<mask token>\nloop = asyncio.get_event_loop()\n\n\ndef import_handlers():\n from deezer import handlers, callback_handlers\n from spotify import handlers, integration, callback_handlers\n from vk import handlers, callback_handlers\n from soundcloud import handlers, callback_handlers\n import handlers\n import inline_handlers\n import callback_handlers\n import error_handlers\n\n\nif __name__ == '__main__':\n with suppress(FileNotFoundError):\n shutil.rmtree('downloads')\n logging = asyncio.ensure_future(update_logging_files())\n import_handlers()\n web.run_app(app, port=8081)\n loop.close()\n",
"step-4": "from contextlib import suppress\nimport asyncio\nimport shutil\nfrom aiohttp import web\nfrom bot import app\nfrom var import var\nfrom logger import update_logging_files\nloop = asyncio.get_event_loop()\n\n\ndef import_handlers():\n from deezer import handlers, callback_handlers\n from spotify import handlers, integration, callback_handlers\n from vk import handlers, callback_handlers\n from soundcloud import handlers, callback_handlers\n import handlers\n import inline_handlers\n import callback_handlers\n import error_handlers\n\n\nif __name__ == '__main__':\n with suppress(FileNotFoundError):\n shutil.rmtree('downloads')\n logging = asyncio.ensure_future(update_logging_files())\n import_handlers()\n web.run_app(app, port=8081)\n loop.close()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom contextlib import suppress\nimport asyncio\nimport shutil\n\nfrom aiohttp import web\n\nfrom bot import app\nfrom var import var\nfrom logger import update_logging_files\n\nloop = asyncio.get_event_loop()\n\n\ndef import_handlers():\n from deezer import handlers, callback_handlers\n from spotify import handlers, integration, callback_handlers\n from vk import handlers, callback_handlers\n from soundcloud import handlers, callback_handlers\n import handlers\n import inline_handlers\n import callback_handlers\n import error_handlers\n\n\nif __name__ == '__main__':\n with suppress(FileNotFoundError):\n shutil.rmtree('downloads')\n logging = asyncio.ensure_future(update_logging_files())\n import_handlers()\n web.run_app(app, port=8081)\n loop.close()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python2.7
import os, sys
COMPILER = "gcc"
SRC_DIR = "../src"
INCLUDE_DIR = "../src"
BIN_DIR = "../bin"
BIN_NAME = False
CFLAGS = ["-O3", "-Wall", "-Wextra", "--std=c89", "-pedantic"]
DLIBS = ["ws2_32"] if os.name == "nt" else []
DEFINES = []
def strformat(fmt, var):
for k in var:
fmt = fmt.replace("{%s}" % str(k), var[k])
return fmt
def listdir(path):
return [os.path.join(dp, f) for dp, dn, fn in os.walk(path) for f in fn]
def main():
os.chdir(sys.path[0])
if len(sys.argv) < 2:
print "usage: build.py c_file"
sys.exit()
global BIN_NAME
if not BIN_NAME:
BIN_NAME = sys.argv[1].replace(".c", ".exe" if os.name == "nt" else "")
if not os.path.exists(BIN_DIR):
os.makedirs(BIN_DIR)
cfiles = filter(lambda x:x.endswith((".c", ".C")), listdir(SRC_DIR))
cfiles.append(sys.argv[1])
cmd = strformat(
"{compiler} {flags} {include} {def} -o {outfile} {srcfiles} {libs} {argv}",
{
"compiler" : COMPILER,
"flags" : " ".join(CFLAGS),
"include" : "-I" + INCLUDE_DIR,
"def" : " ".join(map(lambda x: "-D " + x, DEFINES)),
"outfile" : BIN_DIR + "/" + BIN_NAME,
"srcfiles" : " ".join(cfiles),
"libs" : " ".join(map(lambda x: "-l" + x, DLIBS)),
"argv" : " ".join(sys.argv[2:])
})
print "compiling..."
res = os.system(cmd)
if not res:
print(BIN_DIR + "/" + BIN_NAME)
print("done" + (" with errors" if res else ""))
if __name__ == "__main__":
main()
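
# usage example (editor's addition): ./build.py main.c -g
# compiles every .c file under ../src plus main.c into ../bin/main,
# forwarding the extra argument (-g) straight to gcc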
|
normal
|
{
"blob_id": "1b4c86fe3aae25aeec6cd75fa8177983ce9d14a2",
"index": 1819,
"step-1": "#!/usr/bin/python2.7\nimport os, sys\n\nCOMPILER = \"gcc\"\nSRC_DIR = \"../src\"\nINCLUDE_DIR = \"../src\"\nBIN_DIR = \"../bin\"\nBIN_NAME = False\nCFLAGS = [\"-O3\", \"-Wall\", \"-Wextra\", \"--std=c89\", \"-pedantic\"]\nDLIBS = [\"ws2_32\"] if os.name == \"nt\" else []\nDEFINES = []\n\n\ndef strformat(fmt, var):\n for k in var:\n fmt = fmt.replace(\"{%s}\" % str(k), var[k])\n return fmt\n\n\ndef listdir(path):\n return [os.path.join(dp, f) for dp, dn, fn in os.walk(path) for f in fn]\n\n\ndef main():\n os.chdir(sys.path[0])\n\n if len(sys.argv) < 2:\n print \"usage: build.py c_file\"\n sys.exit()\n\n global BIN_NAME\n if not BIN_NAME:\n BIN_NAME = sys.argv[1].replace(\".c\", \".exe\" if os.name == \"nt\" else \"\")\n\n if not os.path.exists(BIN_DIR):\n os.makedirs(BIN_DIR)\n\n cfiles = filter(lambda x:x.endswith((\".c\", \".C\")), listdir(SRC_DIR))\n cfiles.append(sys.argv[1])\n\n cmd = strformat(\n \"{compiler} {flags} {include} {def} -o {outfile} {srcfiles} {libs} {argv}\",\n {\n \"compiler\" : COMPILER,\n \"flags\" : \" \".join(CFLAGS),\n \"include\" : \"-I\" + INCLUDE_DIR,\n \"def\" : \" \".join(map(lambda x: \"-D \" + x, DEFINES)),\n \"outfile\" : BIN_DIR + \"/\" + BIN_NAME,\n \"srcfiles\" : \" \".join(cfiles),\n \"libs\" : \" \".join(map(lambda x: \"-l\" + x, DLIBS)),\n \"argv\" : \" \".join(sys.argv[2:])\n })\n\n print \"compiling...\"\n res = os.system(cmd)\n\n if not res:\n print(BIN_DIR + \"/\" + BIN_NAME)\n\n print(\"done\" + (\" with errors\" if res else \"\"))\n\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import smart_imports
smart_imports.all()
class LogicTests(utils_testcase.TestCase):
def setUp(self):
super(LogicTests, self).setUp()
game_logic.create_test_map()
self.account_1 = self.accounts_factory.create_account()
self.account_1_items = prototypes.AccountItemsPrototype.get_by_account_id(self.account_1.id)
self.collection_1 = prototypes.CollectionPrototype.create(caption='collection_1', description='description_1')
self.collection_2 = prototypes.CollectionPrototype.create(caption='collection_2', description='description_2', approved=True)
self.kit_1 = prototypes.KitPrototype.create(collection=self.collection_1, caption='kit_1', description='description_1')
self.kit_2 = prototypes.KitPrototype.create(collection=self.collection_2, caption='kit_2', description='description_2', approved=True)
self.kit_3 = prototypes.KitPrototype.create(collection=self.collection_2, caption='kit_3', description='description_3', approved=True)
self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1, caption='item_1_1', text='text_1_1', approved=False)
self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1, caption='item_1_2', text='text_1_2', approved=True)
self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2, caption='item_2_1', text='text_2_1', approved=True)
self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2, caption='item_2_2', text='text_2_2', approved=False)
self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3, caption='item_3_1', text='text_3_1', approved=True)
def test_get_items_count(self):
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype._db_all()),
(collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}), {self.collection_2.id: 2}))
def test_get_items_count__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype._db_filter(id__in=self.account_1_items.items_ids())),
(collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1}))
def test_get_collections_statistics__no_account(self):
self.assertEqual(logic.get_collections_statistics(None),
{'total_items_in_collections': {self.collection_2.id: 2},
'total_items_in_kits': collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}),
'account_items_in_collections': {},
'account_items_in_kits': {},
'total_items': 2,
'account_items': 0})
def test_get_collections_statistics__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_collections_statistics(self.account_1_items),
{'total_items_in_collections': {self.collection_2.id: 2},
'total_items_in_kits': collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}),
'account_items_in_collections': {self.collection_2.id: 1},
'account_items_in_kits': collections.Counter({self.kit_3.id: 1}),
'total_items': 2,
'account_items': 1})
|
normal
|
{
"blob_id": "89e5e82c073f7f87c00fc844c861c6c5cbe6a695",
"index": 8893,
"step-1": "<mask token>\n\n\nclass LogicTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(LogicTests, self).setUp()\n game_logic.create_test_map()\n self.account_1 = self.accounts_factory.create_account()\n self.account_1_items = (prototypes.AccountItemsPrototype.\n get_by_account_id(self.account_1.id))\n self.collection_1 = prototypes.CollectionPrototype.create(caption=\n 'collection_1', description='description_1')\n self.collection_2 = prototypes.CollectionPrototype.create(caption=\n 'collection_2', description='description_2', approved=True)\n self.kit_1 = prototypes.KitPrototype.create(collection=self.\n collection_1, caption='kit_1', description='description_1')\n self.kit_2 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_2', description='description_2',\n approved=True)\n self.kit_3 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_3', description='description_3',\n approved=True)\n self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_1', text='text_1_1', approved=False)\n self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_2', text='text_1_2', approved=True)\n self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_1', text='text_2_1', approved=True)\n self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_2', text='text_2_2', approved=False)\n self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,\n caption='item_3_1', text='text_3_1', approved=True)\n <mask token>\n\n def test_get_items_count__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_filter(id__in=self.account_1_items.items_ids())), (\n collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})\n )\n\n def test_get_collections_statistics__no_account(self):\n self.assertEqual(logic.get_collections_statistics(None), {\n 'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1,\n self.kit_3.id: 1}), 'account_items_in_collections': {},\n 'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})\n\n def test_get_collections_statistics__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_collections_statistics(self.\n account_1_items), {'total_items_in_collections': {self.\n collection_2.id: 2}, 'total_items_in_kits': collections.Counter\n ({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {self.collection_2.id: 1},\n 'account_items_in_kits': collections.Counter({self.kit_3.id: 1}\n ), 'total_items': 2, 'account_items': 1})\n",
"step-2": "<mask token>\n\n\nclass LogicTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(LogicTests, self).setUp()\n game_logic.create_test_map()\n self.account_1 = self.accounts_factory.create_account()\n self.account_1_items = (prototypes.AccountItemsPrototype.\n get_by_account_id(self.account_1.id))\n self.collection_1 = prototypes.CollectionPrototype.create(caption=\n 'collection_1', description='description_1')\n self.collection_2 = prototypes.CollectionPrototype.create(caption=\n 'collection_2', description='description_2', approved=True)\n self.kit_1 = prototypes.KitPrototype.create(collection=self.\n collection_1, caption='kit_1', description='description_1')\n self.kit_2 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_2', description='description_2',\n approved=True)\n self.kit_3 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_3', description='description_3',\n approved=True)\n self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_1', text='text_1_1', approved=False)\n self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_2', text='text_1_2', approved=True)\n self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_1', text='text_2_1', approved=True)\n self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_2', text='text_2_2', approved=False)\n self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,\n caption='item_3_1', text='text_3_1', approved=True)\n\n def test_get_items_count(self):\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_all()), (collections.Counter({self.kit_2.id: 1, self.kit_3.\n id: 1}), {self.collection_2.id: 2}))\n\n def test_get_items_count__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_filter(id__in=self.account_1_items.items_ids())), (\n collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})\n )\n\n def test_get_collections_statistics__no_account(self):\n self.assertEqual(logic.get_collections_statistics(None), {\n 'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1,\n self.kit_3.id: 1}), 'account_items_in_collections': {},\n 'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})\n\n def test_get_collections_statistics__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_collections_statistics(self.\n account_1_items), {'total_items_in_collections': {self.\n collection_2.id: 2}, 'total_items_in_kits': collections.Counter\n ({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {self.collection_2.id: 1},\n 'account_items_in_kits': collections.Counter({self.kit_3.id: 1}\n ), 'total_items': 2, 'account_items': 1})\n",
"step-3": "<mask token>\nsmart_imports.all()\n\n\nclass LogicTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(LogicTests, self).setUp()\n game_logic.create_test_map()\n self.account_1 = self.accounts_factory.create_account()\n self.account_1_items = (prototypes.AccountItemsPrototype.\n get_by_account_id(self.account_1.id))\n self.collection_1 = prototypes.CollectionPrototype.create(caption=\n 'collection_1', description='description_1')\n self.collection_2 = prototypes.CollectionPrototype.create(caption=\n 'collection_2', description='description_2', approved=True)\n self.kit_1 = prototypes.KitPrototype.create(collection=self.\n collection_1, caption='kit_1', description='description_1')\n self.kit_2 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_2', description='description_2',\n approved=True)\n self.kit_3 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_3', description='description_3',\n approved=True)\n self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_1', text='text_1_1', approved=False)\n self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_2', text='text_1_2', approved=True)\n self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_1', text='text_2_1', approved=True)\n self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_2', text='text_2_2', approved=False)\n self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,\n caption='item_3_1', text='text_3_1', approved=True)\n\n def test_get_items_count(self):\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_all()), (collections.Counter({self.kit_2.id: 1, self.kit_3.\n id: 1}), {self.collection_2.id: 2}))\n\n def test_get_items_count__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_filter(id__in=self.account_1_items.items_ids())), (\n collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})\n )\n\n def test_get_collections_statistics__no_account(self):\n self.assertEqual(logic.get_collections_statistics(None), {\n 'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1,\n self.kit_3.id: 1}), 'account_items_in_collections': {},\n 'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})\n\n def test_get_collections_statistics__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_collections_statistics(self.\n account_1_items), {'total_items_in_collections': {self.\n collection_2.id: 2}, 'total_items_in_kits': collections.Counter\n ({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {self.collection_2.id: 1},\n 'account_items_in_kits': collections.Counter({self.kit_3.id: 1}\n ), 'total_items': 2, 'account_items': 1})\n",
"step-4": "import smart_imports\nsmart_imports.all()\n\n\nclass LogicTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(LogicTests, self).setUp()\n game_logic.create_test_map()\n self.account_1 = self.accounts_factory.create_account()\n self.account_1_items = (prototypes.AccountItemsPrototype.\n get_by_account_id(self.account_1.id))\n self.collection_1 = prototypes.CollectionPrototype.create(caption=\n 'collection_1', description='description_1')\n self.collection_2 = prototypes.CollectionPrototype.create(caption=\n 'collection_2', description='description_2', approved=True)\n self.kit_1 = prototypes.KitPrototype.create(collection=self.\n collection_1, caption='kit_1', description='description_1')\n self.kit_2 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_2', description='description_2',\n approved=True)\n self.kit_3 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_3', description='description_3',\n approved=True)\n self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_1', text='text_1_1', approved=False)\n self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_2', text='text_1_2', approved=True)\n self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_1', text='text_2_1', approved=True)\n self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_2', text='text_2_2', approved=False)\n self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,\n caption='item_3_1', text='text_3_1', approved=True)\n\n def test_get_items_count(self):\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_all()), (collections.Counter({self.kit_2.id: 1, self.kit_3.\n id: 1}), {self.collection_2.id: 2}))\n\n def test_get_items_count__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_filter(id__in=self.account_1_items.items_ids())), (\n collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})\n )\n\n def test_get_collections_statistics__no_account(self):\n self.assertEqual(logic.get_collections_statistics(None), {\n 'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1,\n self.kit_3.id: 1}), 'account_items_in_collections': {},\n 'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})\n\n def test_get_collections_statistics__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_collections_statistics(self.\n account_1_items), {'total_items_in_collections': {self.\n collection_2.id: 2}, 'total_items_in_kits': collections.Counter\n ({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {self.collection_2.id: 1},\n 'account_items_in_kits': collections.Counter({self.kit_3.id: 1}\n ), 'total_items': 2, 'account_items': 1})\n",
"step-5": "\nimport smart_imports\n\nsmart_imports.all()\n\n\nclass LogicTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(LogicTests, self).setUp()\n\n game_logic.create_test_map()\n\n self.account_1 = self.accounts_factory.create_account()\n\n self.account_1_items = prototypes.AccountItemsPrototype.get_by_account_id(self.account_1.id)\n\n self.collection_1 = prototypes.CollectionPrototype.create(caption='collection_1', description='description_1')\n self.collection_2 = prototypes.CollectionPrototype.create(caption='collection_2', description='description_2', approved=True)\n\n self.kit_1 = prototypes.KitPrototype.create(collection=self.collection_1, caption='kit_1', description='description_1')\n self.kit_2 = prototypes.KitPrototype.create(collection=self.collection_2, caption='kit_2', description='description_2', approved=True)\n self.kit_3 = prototypes.KitPrototype.create(collection=self.collection_2, caption='kit_3', description='description_3', approved=True)\n\n self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1, caption='item_1_1', text='text_1_1', approved=False)\n self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1, caption='item_1_2', text='text_1_2', approved=True)\n self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2, caption='item_2_1', text='text_2_1', approved=True)\n self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2, caption='item_2_2', text='text_2_2', approved=False)\n self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3, caption='item_3_1', text='text_3_1', approved=True)\n\n def test_get_items_count(self):\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype._db_all()),\n (collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}), {self.collection_2.id: 2}))\n\n def test_get_items_count__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype._db_filter(id__in=self.account_1_items.items_ids())),\n (collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1}))\n\n def test_get_collections_statistics__no_account(self):\n self.assertEqual(logic.get_collections_statistics(None),\n {'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {},\n 'account_items_in_kits': {},\n 'total_items': 2,\n 'account_items': 0})\n\n def test_get_collections_statistics__with_account(self):\n\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n\n self.assertEqual(logic.get_collections_statistics(self.account_1_items),\n {'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {self.collection_2.id: 1},\n 'account_items_in_kits': collections.Counter({self.kit_3.id: 1}),\n 'total_items': 2,\n 'account_items': 1})\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# Duck typing / polymorphism: a method with the same name can be defined in
# any class, and objects of different types all respond to the same call.
class pycharm:
    def execute(self):
        print("Code check")
        print("compile")

class MyEditor:
    def execute(self):
        print("Spell check")
        print("Auto compile")
        print("Code check")
        print("compile")

class laptop:
    def code(self, ide):
        # ide can be any object that provides an execute() method
        ide.execute()

ide = pycharm()
ide2 = MyEditor()
a1 = laptop()
a1.code(ide)
print()
a1.code(ide2)
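# Illustrative extension (not part of the original example): laptop.code()
# never checks the type of its argument, so any object with an execute()
# method works -- even one defined later. MyNotepad is a hypothetical name.
class MyNotepad:
    def execute(self):
        print("Plain text editing")

print()
a1.code(MyNotepad())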
|
normal
|
{
"blob_id": "3ec162070f79ae38d6ae3ceb858c15b6e39f7027",
"index": 9870,
"step-1": "<mask token>\n\n\nclass MyEditor:\n\n def execute(self):\n print('Spell Cheack')\n print('Auto COmpile')\n print('COde check')\n print('compile')\n\n\nclass laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<mask token>\n",
"step-2": "class pycharm:\n\n def execute(self):\n print('COde check')\n print('compile')\n\n\nclass MyEditor:\n\n def execute(self):\n print('Spell Cheack')\n print('Auto COmpile')\n print('COde check')\n print('compile')\n\n\nclass laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<mask token>\n",
"step-3": "class pycharm:\n\n def execute(self):\n print('COde check')\n print('compile')\n\n\nclass MyEditor:\n\n def execute(self):\n print('Spell Cheack')\n print('Auto COmpile')\n print('COde check')\n print('compile')\n\n\nclass laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<mask token>\na1.code(ide)\nprint()\na1.code(ide2)\n",
"step-4": "class pycharm:\n\n def execute(self):\n print('COde check')\n print('compile')\n\n\nclass MyEditor:\n\n def execute(self):\n print('Spell Cheack')\n print('Auto COmpile')\n print('COde check')\n print('compile')\n\n\nclass laptop:\n\n def code(self, ide):\n ide.execute()\n\n\nide = pycharm()\nide2 = MyEditor()\na1 = laptop()\na1.code(ide)\nprint()\na1.code(ide2)\n",
"step-5": "#the method of same name present in any class, it is call by anywhere\n#object of different type is responds to same methods\nclass pycharm:\n def execute(self):\n print(\"COde check\")\n print(\"compile\")\nclass MyEditor:\n def execute(self):\n print(\"Spell Cheack\")\n print(\"Auto COmpile\")\n print(\"COde check\")\n print(\"compile\")\n\n\nclass laptop:\n def code(self,ide):\n ide.execute()\n\nide=pycharm()\nide2=MyEditor()\na1=laptop()\na1.code(ide)\nprint()\na1.code(ide2)\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import pandas as pd
import numpy as np
import json
from pprint import pprint
from shapely.geometry import shape, Point
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut
from geopy.exc import GeocoderServiceError
import collections
from matplotlib import pyplot as plt
import time
import csv
geolocator = Nominatim(user_agent='Neel')
def get_neighborhoods():
with open('AnalysisNeighborhoods.geojson') as f:
neighborhoods_obj = json.load(f)
return neighborhoods_obj
def get_point_from_loc(location_str):
location_str = location_str.replace('(', '')
location_str = location_str.replace(')', '')
location_str = location_str.replace(',', '')
lat_lon = location_str.split(' ')
return Point(float(lat_lon[1]), float(lat_lon[0]))
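# Note (illustrative): shapely Points take (x, y) = (longitude, latitude), so
# a location string like "(37.7749, -122.4194)" becomes Point(-122.4194, 37.7749).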
def get_address_from_block(block_addr):
    block_addr = block_addr.replace('Block Of', '')
    block_addr = block_addr.split(' ')
    # make it an address instead of a block start
    block_addr[0] = str(int(block_addr[0]) + 1)
    return ' '.join(block_addr) + ' San Francisco CA'
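# Worked example (hypothetical input, for illustration only):
#   get_address_from_block('100 Block Of Valencia St')
#   -> '101  Valencia St San Francisco CA'
# (the doubled space left by replace() is passed through unchanged)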
# Using latitude longitude location, find the neighborhood the eviction belongs to
def get_neighborhoods_from_locations(evictions, neighborhoods):
num_found = 0
num_total = 0
locations_dict = collections.defaultdict(int)
locations_with_years_dict = collections.defaultdict(lambda: collections.defaultdict(int))
for index, eviction in evictions.iterrows():
point = get_point_from_loc(eviction['Location'])
found_location = False
for feature in neighborhoods['features']:
polygon = shape(feature['geometry'])
if polygon.contains(point):
#print('Found containing polygon:', feature['properties']['nhood']())
num_found += 1
found_location = True
neighborhood = feature['properties']['nhood']
                year = int(eviction['File Date'].split('/')[2])
                # the source data uses two-digit years; guard so a four-digit
                # year, if one ever appears, is left untouched
                if year < 100:
                    year = year + 1900 if year > 90 else year + 2000
locations_dict[neighborhood] += 1
locations_with_years_dict[neighborhood][str(year)] += 1
break
if not found_location:
            print('Location ' + str(eviction['Eviction ID']) + ' not found; given neighborhood: ' + str(eviction['Neighborhoods - Analysis Boundaries']))
num_total += 1
years = [str(i) for i in range(1997, 2019)]
#years = ['97', '98', '99', '00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18']
with open('Evictions_By_Location.csv', mode='w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"')
csv_writer.writerow(['Location', 'Number of Evictions'])
for k, v in locations_dict.items():
csv_writer.writerow([k, v])
with open('Evictions_By_Year_Location.csv', mode='w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"')
header = ['Location']
for year in years:
header.append(year)
csv_writer.writerow(header)
for k, v in locations_with_years_dict.items():
row = [k]
for year in years:
row.append(v[year])
csv_writer.writerow(row)
    for k, v in locations_with_years_dict.items():
        print(k)
        # use a new name so the `evictions` parameter is not clobbered
        yearly_counts = [int(v[year]) for year in years]
        # plt.figure()
        # plt.plot(years, yearly_counts)
        # plt.title(k)
        for year in years:
            print(year + ': ' + str(v[year]))
        print('')
        # plt.show()
return locations_dict, locations_with_years_dict
def get_geocode_address(addr):
try:
return geolocator.geocode(addr)
except (GeocoderTimedOut, GeocoderServiceError) as e:
time.sleep(5)
return get_geocode_address(addr)
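# Note (illustrative, not used below): the recursive retry above has no upper
# bound, so a persistent outage would recurse until Python's recursion limit.
# A bounded variant could look like this sketch:
def get_geocode_address_bounded(addr, retries=5):
    for _ in range(retries):
        try:
            return geolocator.geocode(addr)
        except (GeocoderTimedOut, GeocoderServiceError):
            time.sleep(5)
    return None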
#For rows missing latitude longitude location,
# use the block address to add missing lat long to dataframe
# If the block address is incorrect, print it so we can correct it manually
def set_missing_locations(evictions):
missing_location_rows = evictions[evictions['Location'].isnull()]
print('Num missing ' + str(len(missing_location_rows)))
num_not_found = 0
num_found = 0
for index, row in missing_location_rows.iterrows():
#print row['Eviction ID']
addr = get_address_from_block(row['Address'])
location = get_geocode_address(addr)
        if location is None:
num_not_found += 1
print('NOT FOUND ' + str(row['Eviction ID']) + ': ' + addr)
else:
evictions.at[index, 'Location'] = '(' + str(location.latitude) + ', ' + str(location.longitude) + ')'
num_found += 1
if (num_found + num_not_found) % 50 == 0:
print('Processed ' + str(num_found + num_not_found) + ' evictions')
    print('Total not found ' + str(num_not_found))
    print('Total found ' + str(num_found))
evictions.to_csv('Eviction_Notices_With_Locations.csv')
evictions = pd.read_csv('Eviction_Notices_With_Locations.csv')
neighborhoods = get_neighborhoods()
#set_missing_locations(evictions)
locations_dict, locations_with_years_dict = get_neighborhoods_from_locations(evictions, neighborhoods)
with open('AnalysisNeighborhoods.geojson') as f:
data = json.loads(f.read())
years = [i for i in range(1997, 2019)]
for neighborhood_obj in data['features']:
neighborhood_name = neighborhood_obj['properties']['nhood']
neighborhood_obj['properties']['evictions'] = {}
neighborhood_obj['properties']['evictions']['total'] = locations_dict[neighborhood_name]
    for year in years:
        # per-year counts were stored under str(year); an int key would
        # silently read 0 back from the defaultdict
        neighborhood_obj['properties']['evictions'][str(year)] = locations_with_years_dict[neighborhood_name][str(year)]
with open('AnalysisNeighborhoods.geojson', 'w') as f:
json.dump(data, f)
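# Illustrative sanity check (not part of the original script): re-read the
# file and print a few of the per-neighborhood totals that were just written.
with open('AnalysisNeighborhoods.geojson') as f:
    written = json.loads(f.read())
for feature in written['features'][:3]:
    print(feature['properties']['nhood'], feature['properties']['evictions']['total'])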
|
normal
|
{
"blob_id": "c1bb2052b3f623c6787ba080dff2dc81f4d6f55e",
"index": 1818,
"step-1": "import pandas as pd\nimport numpy as np\nimport json\nfrom pprint import pprint\nfrom shapely.geometry import shape, Point\nfrom geopy.geocoders import Nominatim\nfrom geopy.exc import GeocoderTimedOut\nfrom geopy.exc import GeocoderServiceError\nimport collections\nfrom matplotlib import pyplot as plt\nimport time\nimport csv\n\n\ngeolocator = Nominatim(user_agent='Neel')\n\ndef get_neighborhoods():\n with open('AnalysisNeighborhoods.geojson') as f:\n neighborhoods_obj = json.load(f)\n return neighborhoods_obj\n\ndef get_point_from_loc(location_str):\n location_str = location_str.replace('(', '')\n location_str = location_str.replace(')', '')\n location_str = location_str.replace(',', '')\n lat_lon = location_str.split(' ')\n return Point(float(lat_lon[1]), float(lat_lon[0]))\n\ndef get_address_from_block(block_addr):\n block_addr = block_addr.replace('Block Of', '')\n block_addr_split = block_addr.split(' ')\n\n block_addr = block_addr_split\n # make it an address instead of block start\n #print block_addr\n block_addr[0] = str(int(block_addr[0]) + 1)\n block_addr = ' '.join(block_addr) + ' San Francisco CA'\n return block_addr\n\n# Using latitude longitude location, find the neighborhood the eviction belongs to\ndef get_neighborhoods_from_locations(evictions, neighborhoods):\n num_found = 0\n num_total = 0\n locations_dict = collections.defaultdict(int)\n locations_with_years_dict = collections.defaultdict(lambda: collections.defaultdict(int))\n for index, eviction in evictions.iterrows():\n point = get_point_from_loc(eviction['Location'])\n found_location = False\n for feature in neighborhoods['features']:\n polygon = shape(feature['geometry'])\n if polygon.contains(point):\n #print('Found containing polygon:', feature['properties']['nhood']())\n num_found += 1\n found_location = True\n neighborhood = feature['properties']['nhood']\n year = int(eviction['File Date'].split('/')[2])\n if year > 90: year = year + 1900\n else: year = year + 2000\n\n locations_dict[neighborhood] += 1\n locations_with_years_dict[neighborhood][str(year)] += 1\n break\n if not found_location:\n print('Location ' + str(eviction['Eviction ID']) + ' not found, Given [location: ' + str(eviction['Neighborhoods - Analysis Boundaries']))\n num_total += 1\n\n years = [str(i) for i in range(1997, 2019)]\n #years = ['97', '98', '99', '00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18']\n with open('Evictions_By_Location.csv', mode='w') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"')\n csv_writer.writerow(['Location', 'Number of Evictions'])\n for k, v in locations_dict.items():\n csv_writer.writerow([k, v])\n\n with open('Evictions_By_Year_Location.csv', mode='w') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"')\n header = ['Location']\n for year in years:\n header.append(year)\n csv_writer.writerow(header)\n for k, v in locations_with_years_dict.items():\n row = [k]\n for year in years:\n row.append(v[year])\n csv_writer.writerow(row)\n\n\n for k, v in locations_with_years_dict.items():\n print k\n evictions = [int(v[year]) for year in years]\n # plt.figure()\n # plt.plot(years, evictions)\n plt.title(k)\n for year in years:\n print year + ': ' + str(v[year])\n print ''\n # plt.show()\n return locations_dict, locations_with_years_dict\n\n\ndef get_geocode_address(addr):\n try:\n return geolocator.geocode(addr)\n except (GeocoderTimedOut, GeocoderServiceError) as e:\n time.sleep(5)\n 
return get_geocode_address(addr)\n\n#For rows missing latitude longitude location,\n# use the block address to add missing lat long to dataframe\n# If the block address is incorrect, print it so we can correct it manually\ndef set_missing_locations(evictions):\n\n missing_location_rows = evictions[evictions['Location'].isnull()]\n print('Num missing ' + str(len(missing_location_rows)))\n num_not_found = 0\n num_found = 0\n for index, row in missing_location_rows.iterrows():\n #print row['Eviction ID']\n addr = get_address_from_block(row['Address'])\n location = get_geocode_address(addr)\n if location == None:\n num_not_found += 1\n print('NOT FOUND ' + str(row['Eviction ID']) + ': ' + addr)\n else:\n evictions.at[index, 'Location'] = '(' + str(location.latitude) + ', ' + str(location.longitude) + ')'\n num_found += 1\n if (num_found + num_not_found) % 50 == 0:\n print('Processed ' + str(num_found + num_not_found) + ' evictions')\n\n print 'Total not found ' + str(num_not_found)\n print 'Total found ' + str(num_found)\n evictions.to_csv('Eviction_Notices_With_Locations.csv')\n\n\nevictions = pd.read_csv('Eviction_Notices_With_Locations.csv')\nneighborhoods = get_neighborhoods()\n#set_missing_locations(evictions)\n\nlocations_dict, locations_with_years_dict = get_neighborhoods_from_locations(evictions, neighborhoods)\n\nwith open('AnalysisNeighborhoods.geojson') as f:\n data = json.loads(f.read())\n\nyears = [i for i in range(1997, 2019)]\n\nfor neighborhood_obj in data['features']:\n neighborhood_name = neighborhood_obj['properties']['nhood']\n neighborhood_obj['properties']['evictions'] = {}\n neighborhood_obj['properties']['evictions']['total'] = locations_dict[neighborhood_name]\n for year in years:\n neighborhood_obj['properties']['evictions'][str(year)] = locations_with_years_dict[neighborhood_name][year]\n\nwith open('AnalysisNeighborhoods.geojson', 'w') as f:\n json.dump(data, f)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
""" Generate test pads for padder. """
# usage: python gen.py > pads.txt
import random
pad = ""
count = 0
# The pad chars MUST match the character set used by padder.
# See the 'characters' variable in 'main.hpp' for more
# information.
chars = "abcdefghijklmnopqrstuvwxyz0123456789-"
print "#", "Pad"
while count < 12:
for x in xrange(0, 98):
pad += random.choice(chars)
count = count+1
print count, pad
pad = ""
|
normal
|
{
"blob_id": "2cdcd6976a1ec99b927adcedc48c36bbda1b4e18",
"index": 1005,
"step-1": "\"\"\" Generate test pads for padder. \"\"\"\n\n# usage: python gen.py > pads.txt\n\nimport random\n\npad = \"\"\ncount = 0\n\n# The pad chars MUST match the character set used by padder.\n# See the 'characters' variable in 'main.hpp' for more\n# information.\nchars = \"abcdefghijklmnopqrstuvwxyz0123456789-\"\n\nprint \"#\", \"Pad\"\nwhile count < 12:\n for x in xrange(0, 98):\n pad += random.choice(chars)\n\n count = count+1\n print count, pad\n pad = \"\"\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""Unit tests for misc. ticket functions."""
from pdm_utils.classes import bundle
from pdm_utils.classes import genome
from pdm_utils.classes import ticket
from pdm_utils.classes import eval
from pdm_utils.functions import tickets
from pdm_utils.constants import constants
import unittest
class TestTicketFunctions1(unittest.TestCase):
def setUp(self):
self.required_keys = constants.IMPORT_TABLE_STRUCTURE["required"]
self.optional_keys = constants.IMPORT_TABLE_STRUCTURE["optional"]
self.keywords = constants.IMPORT_TABLE_STRUCTURE["keywords"]
self.ticket_dict1 = {}
self.ticket_dict1["type"] = "add"
self.ticket_dict1["phage_id"] = "Trixie"
self.ticket_dict1["description_field"] = "product"
self.ticket_dict1["eval_mode"] = "final"
self.ticket_dict1["host_genus"] = "retrieve"
self.ticket_dict1["cluster"] = "retain"
self.ticket_dict1["subcluster"] = "A2"
self.ticket_dict1["accession"] = "parse"
self.ticket_dict2 = {}
self.ticket_dict3 = {}
self.ticket_dict3["type"] = "ADD"
self.ticket_dict3["phage_id"] = "Trixie"
self.ticket_dict3["description_field"] = "PRODUCT"
self.ticket_dict3["eval_mode"] = "FINAL"
self.ticket_dict3["host_genus"] = "RETRIEVE"
self.ticket_dict3["subcluster"] = None
self.ticket_dict3["accession"] = "PARSE"
self.ticket_dict3["retrieve_record"] = "RETAIN"
self.ticket_dict4 = {}
self.ticket_dict4["type"] = "ADD"
self.ticket_dict4["phage_id"] = "Trixie"
def test_modify_import_data_1(self):
"""Verify returns False if there are missing required keys."""
result = tickets.modify_import_data(self.ticket_dict2,
self.required_keys, self.optional_keys, self.keywords)
self.assertFalse(result)
def test_modify_import_data_2(self):
"""Verify returns False if there are extra keys."""
self.ticket_dict3["extra"] = "extra"
result = tickets.modify_import_data(self.ticket_dict3,
self.required_keys, self.optional_keys, self.keywords)
self.assertFalse(result)
def test_modify_import_data_3(self):
"""Verify returns True with keywords identified and values lowercased."""
result = tickets.modify_import_data(self.ticket_dict3,
self.required_keys, self.optional_keys, self.keywords)
with self.subTest():
self.assertTrue(result)
with self.subTest():
self.assertEqual(self.ticket_dict3["host_genus"], "retrieve")
with self.subTest():
self.assertEqual(self.ticket_dict3["retrieve_record"], "retain")
with self.subTest():
self.assertEqual(self.ticket_dict3["subcluster"], "retrieve")
with self.subTest():
self.assertEqual(self.ticket_dict3["accession"], "parse")
with self.subTest():
self.assertEqual(self.ticket_dict3["type"], "add")
with self.subTest():
self.assertEqual(self.ticket_dict3["description_field"], "product")
with self.subTest():
self.assertEqual(self.ticket_dict3["eval_mode"], "final")
def test_modify_import_data_4(self):
"""Verify returns True with completed dictionary from a
minimal add ticket."""
self.ticket_dict4["description_field"] = "product"
self.ticket_dict4["eval_mode"] = "final"
result = tickets.modify_import_data(self.ticket_dict4,
self.required_keys, self.optional_keys, self.keywords)
with self.subTest():
self.assertTrue(result)
with self.subTest():
self.assertEqual(self.ticket_dict4["host_genus"], "retrieve")
with self.subTest():
self.assertEqual(self.ticket_dict4["cluster"], "retrieve")
with self.subTest():
self.assertEqual(self.ticket_dict4["subcluster"], "retrieve")
with self.subTest():
self.assertEqual(self.ticket_dict4["annotation_author"], "1")
with self.subTest():
self.assertEqual(self.ticket_dict4["retrieve_record"], "1")
with self.subTest():
self.assertEqual(self.ticket_dict4["annotation_status"], "draft")
with self.subTest():
self.assertEqual(self.ticket_dict4["accession"], "")
def test_modify_import_data_5(self):
"""Verify returns True with completed dictionary from a
minimal replace ticket."""
self.ticket_dict4["type"] = "replace"
self.ticket_dict4["description_field"] = "product"
self.ticket_dict4["eval_mode"] = "final"
result = tickets.modify_import_data(self.ticket_dict4,
self.required_keys, self.optional_keys, self.keywords)
with self.subTest():
self.assertTrue(result)
with self.subTest():
self.assertEqual(self.ticket_dict4["host_genus"], "retain")
with self.subTest():
self.assertEqual(self.ticket_dict4["cluster"], "retain")
with self.subTest():
self.assertEqual(self.ticket_dict4["subcluster"], "retain")
with self.subTest():
self.assertEqual(self.ticket_dict4["annotation_author"], "retain")
with self.subTest():
self.assertEqual(self.ticket_dict4["retrieve_record"], "retain")
with self.subTest():
self.assertEqual(self.ticket_dict4["annotation_status"], "final")
with self.subTest():
self.assertEqual(self.ticket_dict4["accession"], "retain")
def test_parse_import_ticket_data_1(self):
"""Verify ticket is generated from correct data dictionary."""
tkt = tickets.parse_import_ticket_data(self.ticket_dict1)
with self.subTest():
self.assertEqual(tkt.type, "add")
with self.subTest():
self.assertEqual(tkt.phage_id, "Trixie")
with self.subTest():
self.assertEqual(tkt.description_field, "product")
with self.subTest():
self.assertEqual(tkt.eval_mode, "final")
with self.subTest():
self.assertEqual(len(tkt.data_dict.keys()), 8)
with self.subTest():
self.assertEqual(tkt.data_retrieve, set(["host_genus"]))
with self.subTest():
self.assertEqual(tkt.data_retain, set(["cluster"]))
with self.subTest():
self.assertEqual(tkt.data_parse, set(["accession"]))
with self.subTest():
self.assertEqual(tkt.data_add, set(["subcluster"]))
def test_parse_import_ticket_data_2(self):
"""Verify ticket is generated from correct data dictionary with
no data in 'retain', 'retrieve', or 'parse' sets."""
self.ticket_dict1["host_genus"] = "Mycobacterium"
self.ticket_dict1["cluster"] = "A"
self.ticket_dict1["subcluster"] = "A2"
self.ticket_dict1["accession"] = "ABC123"
tkt = tickets.parse_import_ticket_data(self.ticket_dict1)
with self.subTest():
self.assertEqual(tkt.type, "add")
with self.subTest():
self.assertEqual(tkt.phage_id, "Trixie")
with self.subTest():
self.assertEqual(tkt.description_field, "product")
with self.subTest():
self.assertEqual(tkt.eval_mode, "final")
with self.subTest():
self.assertEqual(len(tkt.data_dict.keys()), 8)
with self.subTest():
self.assertEqual(tkt.data_retrieve, set())
with self.subTest():
self.assertEqual(tkt.data_retain, set())
with self.subTest():
self.assertEqual(tkt.data_parse, set())
with self.subTest():
self.assertEqual(tkt.data_add, set(["subcluster", "host_genus",
"cluster", "accession"]))
def test_parse_import_ticket_data_3(self):
"""Verify ticket is generated from correct data dictionary with
no data in 'add' sets."""
self.ticket_dict1["host_genus"] = "retrieve"
self.ticket_dict1["cluster"] = "retrieve"
self.ticket_dict1["subcluster"] = "retrieve"
self.ticket_dict1["accession"] = "retrieve"
tkt = tickets.parse_import_ticket_data(self.ticket_dict1)
with self.subTest():
self.assertEqual(tkt.type, "add")
with self.subTest():
self.assertEqual(tkt.phage_id, "Trixie")
with self.subTest():
self.assertEqual(tkt.description_field, "product")
with self.subTest():
self.assertEqual(tkt.eval_mode, "final")
with self.subTest():
self.assertEqual(len(tkt.data_dict.keys()), 8)
with self.subTest():
self.assertEqual(tkt.data_retrieve, set(["subcluster", "host_genus",
"cluster", "accession"]))
with self.subTest():
self.assertEqual(tkt.data_retain, set())
with self.subTest():
self.assertEqual(tkt.data_parse, set())
with self.subTest():
self.assertEqual(tkt.data_add, set())
def test_set_empty_1(self):
"""Verify one None value is set to ''."""
data_dict = {"type":"add","cluster":None}
tickets.set_empty(data_dict)
with self.subTest():
self.assertEqual(data_dict["type"], "add")
with self.subTest():
self.assertEqual(data_dict["cluster"], "")
def test_set_keywords_1(self):
"""Verify one value is lowercased."""
data_dict = {"type":"ADD",
"cluster":"RETRIEVE",
"subcluster": "NONE",
"host_genus": "PARSE",
"retrieve_record": "RETAIN"}
        tickets.set_keywords(data_dict, self.keywords)
with self.subTest():
self.assertEqual(data_dict["type"], "ADD")
with self.subTest():
self.assertEqual(data_dict["cluster"], "retrieve")
with self.subTest():
self.assertEqual(data_dict["subcluster"], "none")
with self.subTest():
self.assertEqual(data_dict["host_genus"], "parse")
with self.subTest():
self.assertEqual(data_dict["retrieve_record"], "retain")
def test_set_missing_keys_1(self):
"""Verify one missing key is added."""
data_dict = {"type":"add", "cluster":""}
key_set = set(["type", "host_genus"])
tickets.set_missing_keys(data_dict, key_set)
with self.subTest():
self.assertEqual(len(data_dict.keys()), 3)
with self.subTest():
self.assertEqual(data_dict["host_genus"], "")
def test_set_missing_keys_2(self):
"""Verify no missing key is added."""
data_dict = {"type":"add", "cluster":""}
key_set = set(["type", "cluster"])
tickets.set_missing_keys(data_dict, key_set)
self.assertEqual(len(data_dict.keys()), 2)
def test_set_dict_value_1(self):
"""Verify empty value is replaced with first value."""
data_dict = {"type":"add", "cluster":""}
tickets.set_dict_value(data_dict, "cluster", "A", "B")
self.assertEqual(data_dict["cluster"], "A")
def test_set_dict_value_2(self):
"""Verify empty value is replaced with second value."""
data_dict = {"type":"replace", "cluster":""}
tickets.set_dict_value(data_dict, "cluster", "A", "B")
self.assertEqual(data_dict["cluster"], "B")
def test_set_dict_value_3(self):
"""Verify non-empty value is not replaced."""
data_dict = {"type":"replace", "cluster":"C"}
tickets.set_dict_value(data_dict, "cluster", "A", "B")
self.assertEqual(data_dict["cluster"], "C")
def test_construct_tickets_1(self):
"""Verify two tickets are constructed correctly.
The first ticket contains all required and optional fields.
The second ticket contains all required fields."""
dict_list = [self.ticket_dict1, self.ticket_dict4]
eval_data_dict = {"eval_mode": "custom_eval_mode",
"eval_flag_dict": {"check_locus_tag": False}}
list_of_tickets = tickets.construct_tickets(dict_list,
eval_data_dict, "function", self.required_keys,
self.optional_keys, self.keywords)
with self.subTest():
self.assertEqual(len(list_of_tickets), 2)
with self.subTest():
self.assertEqual(list_of_tickets[0].id, 1)
with self.subTest():
self.assertEqual(list_of_tickets[0].eval_mode, "final")
with self.subTest():
self.assertEqual(list_of_tickets[0].description_field, "product")
with self.subTest():
self.assertTrue(list_of_tickets[0].eval_flags["check_locus_tag"])
with self.subTest():
self.assertEqual(list_of_tickets[1].id, 2)
with self.subTest():
self.assertEqual(list_of_tickets[1].eval_mode, "custom_eval_mode")
with self.subTest():
self.assertEqual(list_of_tickets[1].description_field, "function")
with self.subTest():
self.assertFalse(list_of_tickets[1].eval_flags["check_locus_tag"])
def test_construct_tickets_2(self):
"""Verify one ticket is constructed correctly. The second data
dictionary is not structured correctly."""
dict_list = [self.ticket_dict1, self.ticket_dict2]
eval_data_dict = {"eval_mode": "custom_eval_mode",
"eval_flag_dict": {}}
list_of_tickets = tickets.construct_tickets(dict_list,
eval_data_dict, "function", self.required_keys,
self.optional_keys, self.keywords)
with self.subTest():
self.assertEqual(len(list_of_tickets), 1)
def test_construct_tickets_3(self):
"""Verify four tickets constructed correctly. The first two tickets
contain all required and optional fields. The second two tickets
contain all required fields. Verify that each eval_flag dictionary
is a separate object that can be modified without impacting the other
eval_flag dictionaries."""
tkt_dict1 = {}
tkt_dict1["type"] = "add"
tkt_dict1["phage_id"] = "Trixie"
tkt_dict1["description_field"] = "product"
tkt_dict1["eval_mode"] = "final"
tkt_dict2 = {}
tkt_dict2["type"] = "add"
tkt_dict2["phage_id"] = "L5"
tkt_dict2["description_field"] = "product"
tkt_dict2["eval_mode"] = "final"
tkt_dict3 = {}
tkt_dict3["type"] = "add"
tkt_dict3["phage_id"] = "RedRock"
tkt_dict4 = {}
tkt_dict4["type"] = "add"
tkt_dict4["phage_id"] = "Bxb1"
dict_list = [tkt_dict1, tkt_dict2, tkt_dict3, tkt_dict4]
eval_data_dict = {"eval_mode": "custom_eval_mode",
"eval_flag_dict": {"check_locus_tag": False}}
tkt_list = tickets.construct_tickets(dict_list,
eval_data_dict, "function", self.required_keys,
self.optional_keys, self.keywords)
tkt_list[0].eval_flags["check_locus_tag"] = 0
tkt_list[1].eval_flags["check_locus_tag"] = 1
tkt_list[2].eval_flags["check_locus_tag"] = 2
tkt_list[3].eval_flags["check_locus_tag"] = 3
with self.subTest():
self.assertEqual(tkt_list[0].eval_flags["check_locus_tag"], 0)
with self.subTest():
self.assertEqual(tkt_list[1].eval_flags["check_locus_tag"], 1)
with self.subTest():
self.assertEqual(tkt_list[2].eval_flags["check_locus_tag"], 2)
with self.subTest():
self.assertEqual(tkt_list[3].eval_flags["check_locus_tag"], 3)
def test_identify_duplicates_1(self):
"""Verify no duplicates are produced."""
ticket1 = ticket.ImportTicket()
ticket1.id = 1
ticket1.type = "replace"
ticket1.phage_id = "Trixie"
ticket2 = ticket.ImportTicket()
ticket2.id = 2
ticket2.type = "replace"
ticket2.phage_id = "L5"
null_set = set(["none"])
list_of_tickets = [ticket1, ticket2]
id_dupes, phage_id_dupes = \
tickets.identify_duplicates(list_of_tickets, null_set=null_set)
with self.subTest():
self.assertEqual(len(id_dupes), 0)
with self.subTest():
self.assertEqual(len(phage_id_dupes), 0)
def test_identify_duplicates_2(self):
"""Verify two tickets with 'none' duplicates
do not generate an error."""
ticket1 = ticket.ImportTicket()
ticket1.id = "none"
ticket1.type = "replace"
ticket1.phage_id = "none"
ticket2 = ticket.ImportTicket()
ticket2.id = "none"
ticket2.type = "replace"
ticket2.phage_id = "none"
null_set = set(["none"])
list_of_tickets = [ticket1, ticket2]
id_dupes, phage_id_dupes = \
tickets.identify_duplicates(list_of_tickets, null_set=null_set)
with self.subTest():
self.assertEqual(len(id_dupes), 0)
with self.subTest():
self.assertEqual(len(phage_id_dupes), 0)
def test_identify_duplicates_3(self):
"""Verify two tickets with id duplicates
do generate an error."""
ticket1 = ticket.ImportTicket()
ticket1.id = 1
ticket1.type = "replace"
ticket1.phage_id = "L5"
ticket2 = ticket.ImportTicket()
ticket2.id = 1
ticket2.type = "replace"
ticket2.phage_id = "Trixie"
null_set = set(["none"])
list_of_tickets = [ticket1, ticket2]
id_dupes, phage_id_dupes = \
tickets.identify_duplicates(list_of_tickets, null_set=null_set)
with self.subTest():
self.assertEqual(len(id_dupes), 1)
with self.subTest():
self.assertEqual(len(phage_id_dupes), 0)
def test_identify_duplicates_4(self):
"""Verify two tickets with Primary Phage ID duplicates
do generate an error."""
ticket1 = ticket.ImportTicket()
ticket1.id = 1
ticket1.type = "replace"
ticket1.phage_id = "Trixie"
ticket2 = ticket.ImportTicket()
ticket2.id = 2
ticket2.type = "replace"
ticket2.phage_id = "Trixie"
null_set = set(["none"])
list_of_tickets = [ticket1, ticket2]
id_dupes, phage_id_dupes = \
tickets.identify_duplicates(list_of_tickets, null_set=null_set)
with self.subTest():
self.assertEqual(len(id_dupes), 0)
with self.subTest():
self.assertEqual(len(phage_id_dupes), 1)
def test_identify_duplicates_6(self):
"""Verify two tickets with multiple duplicates
do generate multiple errors."""
ticket1 = ticket.ImportTicket()
ticket1.id = 1
ticket1.type = "replace"
ticket1.phage_id = "Trixie"
ticket2 = ticket.ImportTicket()
ticket2.id = 1
ticket2.type = "replace"
ticket2.phage_id = "Trixie"
null_set = set(["none"])
list_of_tickets = [ticket1, ticket2]
id_dupes, phage_id_dupes = \
tickets.identify_duplicates(list_of_tickets, null_set=null_set)
with self.subTest():
self.assertEqual(len(id_dupes), 1)
with self.subTest():
self.assertEqual(len(phage_id_dupes), 1)
class TestTicketFunctions2(unittest.TestCase):
def setUp(self):
self.ticket1 = ticket.ImportTicket()
self.ticket2 = ticket.ImportTicket()
self.ticket1.phage_id = "Trixie"
self.ticket2.phage_id = "L5"
self.bundle1 = bundle.Bundle()
self.bundle2 = bundle.Bundle()
self.bundle1.ticket = self.ticket1
self.bundle2.ticket = self.ticket2
class TestTicketFunctions3(unittest.TestCase):
def setUp(self):
self.data_dict = {}
self.data_dict["host_genus"] = "Mycobacterium smegmatis"
self.data_dict["accession"] = "ABC123.1"
self.data_dict["annotation_status"] = "final"
self.data_dict["cluster"] = "A"
self.data_dict["subcluster"] = "A2"
self.data_dict["annotation_author"] = 1
self.data_dict["retrieve_record"] = 1
self.tkt1 = ticket.ImportTicket()
self.tkt1.phage_id = "Trixie_Draft"
self.tkt1.data_dict = self.data_dict
def test_get_genome_1(self):
"""Verify no data from ticket is added to genome."""
self.tkt1.data_add = set([""])
gnm = tickets.get_genome(self.tkt1, gnm_type="add")
with self.subTest():
self.assertEqual(gnm.id, "Trixie")
with self.subTest():
self.assertEqual(gnm.name, "Trixie_Draft")
with self.subTest():
self.assertEqual(gnm.type, "add")
with self.subTest():
self.assertEqual(gnm.host_genus, "")
with self.subTest():
self.assertEqual(gnm.cluster, "")
with self.subTest():
self.assertEqual(gnm.subcluster, "")
with self.subTest():
self.assertEqual(gnm.annotation_status, "")
with self.subTest():
self.assertEqual(gnm.annotation_author, -1)
with self.subTest():
self.assertEqual(gnm.retrieve_record, -1)
with self.subTest():
self.assertEqual(gnm.accession, "")
def test_get_genome_2(self):
"""Verify host_genus data from ticket is added to genome."""
self.tkt1.data_add = set(["host_genus"])
gnm = tickets.get_genome(self.tkt1, gnm_type="add")
with self.subTest():
self.assertEqual(gnm.host_genus, "Mycobacterium")
with self.subTest():
self.assertEqual(gnm.cluster, "")
def test_get_genome_3(self):
"""Verify cluster data from ticket is added to genome."""
self.tkt1.data_add = set(["cluster"])
gnm = tickets.get_genome(self.tkt1, gnm_type="add")
with self.subTest():
self.assertEqual(gnm.host_genus, "")
with self.subTest():
self.assertEqual(gnm.cluster, "A")
def test_get_genome_4(self):
"""Verify subcluster data from ticket is added to genome."""
self.tkt1.data_add = set(["subcluster"])
gnm = tickets.get_genome(self.tkt1, gnm_type="add")
with self.subTest():
self.assertEqual(gnm.host_genus, "")
with self.subTest():
self.assertEqual(gnm.subcluster, "A2")
def test_get_genome_5(self):
"""Verify annotation_status data from ticket is added to genome."""
self.tkt1.data_add = set(["annotation_status"])
gnm = tickets.get_genome(self.tkt1, gnm_type="add")
with self.subTest():
self.assertEqual(gnm.host_genus, "")
with self.subTest():
self.assertEqual(gnm.annotation_status, "final")
def test_get_genome_6(self):
"""Verify annotation_author data from ticket is added to genome."""
self.tkt1.data_add = set(["annotation_author"])
gnm = tickets.get_genome(self.tkt1, gnm_type="add")
with self.subTest():
self.assertEqual(gnm.host_genus, "")
with self.subTest():
self.assertEqual(gnm.annotation_author, 1)
def test_get_genome_7(self):
"""Verify retrieve_record data from ticket is added to genome."""
self.tkt1.data_add = set(["retrieve_record"])
gnm = tickets.get_genome(self.tkt1, gnm_type="add")
with self.subTest():
self.assertEqual(gnm.host_genus, "")
with self.subTest():
self.assertEqual(gnm.retrieve_record, 1)
def test_get_genome_8(self):
"""Verify accession data from ticket is added to genome."""
self.tkt1.data_add = set(["accession"])
gnm = tickets.get_genome(self.tkt1, gnm_type="add")
with self.subTest():
self.assertEqual(gnm.host_genus, "")
with self.subTest():
self.assertEqual(gnm.accession, "ABC123")
if __name__ == '__main__':
unittest.main()
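# Illustrative usage (not part of the original module): assuming this file is
# saved as e.g. test_tickets.py with pdm_utils installed, run the suite with
# `python -m unittest test_tickets -v`.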
|
normal
|
{
"blob_id": "d8ba2557e20920eaadd2fd35f0ebdf1b4a5b33da",
"index": 9010,
"step-1": "<mask token>\n\n\nclass TestTicketFunctions1(unittest.TestCase):\n\n def setUp(self):\n self.required_keys = constants.IMPORT_TABLE_STRUCTURE['required']\n self.optional_keys = constants.IMPORT_TABLE_STRUCTURE['optional']\n self.keywords = constants.IMPORT_TABLE_STRUCTURE['keywords']\n self.ticket_dict1 = {}\n self.ticket_dict1['type'] = 'add'\n self.ticket_dict1['phage_id'] = 'Trixie'\n self.ticket_dict1['description_field'] = 'product'\n self.ticket_dict1['eval_mode'] = 'final'\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retain'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'parse'\n self.ticket_dict2 = {}\n self.ticket_dict3 = {}\n self.ticket_dict3['type'] = 'ADD'\n self.ticket_dict3['phage_id'] = 'Trixie'\n self.ticket_dict3['description_field'] = 'PRODUCT'\n self.ticket_dict3['eval_mode'] = 'FINAL'\n self.ticket_dict3['host_genus'] = 'RETRIEVE'\n self.ticket_dict3['subcluster'] = None\n self.ticket_dict3['accession'] = 'PARSE'\n self.ticket_dict3['retrieve_record'] = 'RETAIN'\n self.ticket_dict4 = {}\n self.ticket_dict4['type'] = 'ADD'\n self.ticket_dict4['phage_id'] = 'Trixie'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_modify_import_data_5(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal replace ticket.\"\"\"\n self.ticket_dict4['type'] = 'replace'\n self.ticket_dict4['description_field'] = 'product'\n self.ticket_dict4['eval_mode'] = 'final'\n result = tickets.modify_import_data(self.ticket_dict4, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4['host_genus'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['cluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['subcluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_author'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['retrieve_record'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_status'], 'final')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['accession'], 'retain')\n <mask token>\n\n def test_parse_import_ticket_data_2(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'retain', 'retrieve', or 'parse' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'Mycobacterium'\n self.ticket_dict1['cluster'] = 'A'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'ABC123'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster', 'host_genus',\n 'cluster', 'accession']))\n\n def test_parse_import_ticket_data_3(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'add' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 
'retrieve'\n self.ticket_dict1['cluster'] = 'retrieve'\n self.ticket_dict1['subcluster'] = 'retrieve'\n self.ticket_dict1['accession'] = 'retrieve'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['subcluster',\n 'host_genus', 'cluster', 'accession']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set())\n <mask token>\n\n def test_set_keywords_1(self):\n \"\"\"Verify one value is lowercased.\"\"\"\n data_dict = {'type': 'ADD', 'cluster': 'RETRIEVE', 'subcluster':\n 'NONE', 'host_genus': 'PARSE', 'retrieve_record': 'RETAIN'}\n keywords = set(['retrieve', 'retain'])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict['type'], 'ADD')\n with self.subTest():\n self.assertEqual(data_dict['cluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(data_dict['subcluster'], 'none')\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], 'parse')\n with self.subTest():\n self.assertEqual(data_dict['retrieve_record'], 'retain')\n\n def test_set_missing_keys_1(self):\n \"\"\"Verify one missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'host_genus'])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], '')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_construct_tickets_1(self):\n \"\"\"Verify two tickets are constructed correctly.\n The first ticket contains all required and optional fields.\n The second ticket contains all required fields.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict4]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict':\n {'check_locus_tag': False}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, 'function', self.required_keys, self.\n optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].id, 1)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].eval_mode, 'final')\n with self.subTest():\n self.assertEqual(list_of_tickets[0].description_field, 'product')\n with self.subTest():\n self.assertTrue(list_of_tickets[0].eval_flags['check_locus_tag'])\n with self.subTest():\n self.assertEqual(list_of_tickets[1].id, 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[1].eval_mode, 'custom_eval_mode')\n with self.subTest():\n self.assertEqual(list_of_tickets[1].description_field, 'function')\n with self.subTest():\n self.assertFalse(list_of_tickets[1].eval_flags['check_locus_tag'])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_identify_duplicates_4(self):\n \"\"\"Verify two tickets with Primary Phage ID duplicates\n do generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = 
ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n def test_identify_duplicates_6(self):\n \"\"\"Verify two tickets with multiple duplicates\n do generate multiple errors.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\nclass TestTicketFunctions2(unittest.TestCase):\n\n def setUp(self):\n self.ticket1 = ticket.ImportTicket()\n self.ticket2 = ticket.ImportTicket()\n self.ticket1.phage_id = 'Trixie'\n self.ticket2.phage_id = 'L5'\n self.bundle1 = bundle.Bundle()\n self.bundle2 = bundle.Bundle()\n self.bundle1.ticket = self.ticket1\n self.bundle2.ticket = self.ticket2\n\n\nclass TestTicketFunctions3(unittest.TestCase):\n\n def setUp(self):\n self.data_dict = {}\n self.data_dict['host_genus'] = 'Mycobacterium smegmatis'\n self.data_dict['accession'] = 'ABC123.1'\n self.data_dict['annotation_status'] = 'final'\n self.data_dict['cluster'] = 'A'\n self.data_dict['subcluster'] = 'A2'\n self.data_dict['annotation_author'] = 1\n self.data_dict['retrieve_record'] = 1\n self.tkt1 = ticket.ImportTicket()\n self.tkt1.phage_id = 'Trixie_Draft'\n self.tkt1.data_dict = self.data_dict\n\n def test_get_genome_1(self):\n \"\"\"Verify no data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([''])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.id, 'Trixie')\n with self.subTest():\n self.assertEqual(gnm.name, 'Trixie_Draft')\n with self.subTest():\n self.assertEqual(gnm.type, 'add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, '')\n\n def test_get_genome_2(self):\n \"\"\"Verify host_genus data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['host_genus'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, 'Mycobacterium')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n\n def test_get_genome_3(self):\n \"\"\"Verify cluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['cluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, 'A')\n\n def test_get_genome_4(self):\n \"\"\"Verify subcluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['subcluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n 
with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, 'A2')\n\n def test_get_genome_5(self):\n \"\"\"Verify annotation_status data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_status'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, 'final')\n\n def test_get_genome_6(self):\n \"\"\"Verify annotation_author data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_author'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, 1)\n\n def test_get_genome_7(self):\n \"\"\"Verify retrieve_record data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['retrieve_record'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)\n\n def test_get_genome_8(self):\n \"\"\"Verify accession data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['accession'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.accession, 'ABC123')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTicketFunctions1(unittest.TestCase):\n\n def setUp(self):\n self.required_keys = constants.IMPORT_TABLE_STRUCTURE['required']\n self.optional_keys = constants.IMPORT_TABLE_STRUCTURE['optional']\n self.keywords = constants.IMPORT_TABLE_STRUCTURE['keywords']\n self.ticket_dict1 = {}\n self.ticket_dict1['type'] = 'add'\n self.ticket_dict1['phage_id'] = 'Trixie'\n self.ticket_dict1['description_field'] = 'product'\n self.ticket_dict1['eval_mode'] = 'final'\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retain'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'parse'\n self.ticket_dict2 = {}\n self.ticket_dict3 = {}\n self.ticket_dict3['type'] = 'ADD'\n self.ticket_dict3['phage_id'] = 'Trixie'\n self.ticket_dict3['description_field'] = 'PRODUCT'\n self.ticket_dict3['eval_mode'] = 'FINAL'\n self.ticket_dict3['host_genus'] = 'RETRIEVE'\n self.ticket_dict3['subcluster'] = None\n self.ticket_dict3['accession'] = 'PARSE'\n self.ticket_dict3['retrieve_record'] = 'RETAIN'\n self.ticket_dict4 = {}\n self.ticket_dict4['type'] = 'ADD'\n self.ticket_dict4['phage_id'] = 'Trixie'\n <mask token>\n\n def test_modify_import_data_2(self):\n \"\"\"Verify returns False if there are extra keys.\"\"\"\n self.ticket_dict3['extra'] = 'extra'\n result = tickets.modify_import_data(self.ticket_dict3, self.\n required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n <mask token>\n <mask token>\n\n def test_modify_import_data_5(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal replace ticket.\"\"\"\n self.ticket_dict4['type'] = 'replace'\n self.ticket_dict4['description_field'] = 'product'\n self.ticket_dict4['eval_mode'] = 'final'\n result = tickets.modify_import_data(self.ticket_dict4, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4['host_genus'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['cluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['subcluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_author'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['retrieve_record'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_status'], 'final')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['accession'], 'retain')\n <mask token>\n\n def test_parse_import_ticket_data_2(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'retain', 'retrieve', or 'parse' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'Mycobacterium'\n self.ticket_dict1['cluster'] = 'A'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'ABC123'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n 
self.assertEqual(tkt.data_add, set(['subcluster', 'host_genus',\n 'cluster', 'accession']))\n\n def test_parse_import_ticket_data_3(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'add' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retrieve'\n self.ticket_dict1['subcluster'] = 'retrieve'\n self.ticket_dict1['accession'] = 'retrieve'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['subcluster',\n 'host_genus', 'cluster', 'accession']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set())\n <mask token>\n\n def test_set_keywords_1(self):\n \"\"\"Verify one value is lowercased.\"\"\"\n data_dict = {'type': 'ADD', 'cluster': 'RETRIEVE', 'subcluster':\n 'NONE', 'host_genus': 'PARSE', 'retrieve_record': 'RETAIN'}\n keywords = set(['retrieve', 'retain'])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict['type'], 'ADD')\n with self.subTest():\n self.assertEqual(data_dict['cluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(data_dict['subcluster'], 'none')\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], 'parse')\n with self.subTest():\n self.assertEqual(data_dict['retrieve_record'], 'retain')\n\n def test_set_missing_keys_1(self):\n \"\"\"Verify one missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'host_genus'])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], '')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_construct_tickets_1(self):\n \"\"\"Verify two tickets are constructed correctly.\n The first ticket contains all required and optional fields.\n The second ticket contains all required fields.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict4]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict':\n {'check_locus_tag': False}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, 'function', self.required_keys, self.\n optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].id, 1)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].eval_mode, 'final')\n with self.subTest():\n self.assertEqual(list_of_tickets[0].description_field, 'product')\n with self.subTest():\n self.assertTrue(list_of_tickets[0].eval_flags['check_locus_tag'])\n with self.subTest():\n self.assertEqual(list_of_tickets[1].id, 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[1].eval_mode, 'custom_eval_mode')\n with self.subTest():\n self.assertEqual(list_of_tickets[1].description_field, 'function')\n with self.subTest():\n self.assertFalse(list_of_tickets[1].eval_flags['check_locus_tag'])\n <mask token>\n <mask token>\n <mask token>\n\n def 
test_identify_duplicates_2(self):\n \"\"\"Verify two tickets with 'none' duplicates\n do not generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 'none'\n ticket1.type = 'replace'\n ticket1.phage_id = 'none'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 'none'\n ticket2.type = 'replace'\n ticket2.phage_id = 'none'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n <mask token>\n\n def test_identify_duplicates_4(self):\n \"\"\"Verify two tickets with Primary Phage ID duplicates\n do generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n def test_identify_duplicates_6(self):\n \"\"\"Verify two tickets with multiple duplicates\n do generate multiple errors.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\nclass TestTicketFunctions2(unittest.TestCase):\n\n def setUp(self):\n self.ticket1 = ticket.ImportTicket()\n self.ticket2 = ticket.ImportTicket()\n self.ticket1.phage_id = 'Trixie'\n self.ticket2.phage_id = 'L5'\n self.bundle1 = bundle.Bundle()\n self.bundle2 = bundle.Bundle()\n self.bundle1.ticket = self.ticket1\n self.bundle2.ticket = self.ticket2\n\n\nclass TestTicketFunctions3(unittest.TestCase):\n\n def setUp(self):\n self.data_dict = {}\n self.data_dict['host_genus'] = 'Mycobacterium smegmatis'\n self.data_dict['accession'] = 'ABC123.1'\n self.data_dict['annotation_status'] = 'final'\n self.data_dict['cluster'] = 'A'\n self.data_dict['subcluster'] = 'A2'\n self.data_dict['annotation_author'] = 1\n self.data_dict['retrieve_record'] = 1\n self.tkt1 = ticket.ImportTicket()\n self.tkt1.phage_id = 'Trixie_Draft'\n self.tkt1.data_dict = self.data_dict\n\n def test_get_genome_1(self):\n \"\"\"Verify no data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([''])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.id, 'Trixie')\n with self.subTest():\n self.assertEqual(gnm.name, 'Trixie_Draft')\n with self.subTest():\n self.assertEqual(gnm.type, 'add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with 
self.subTest():\n self.assertEqual(gnm.accession, '')\n\n def test_get_genome_2(self):\n \"\"\"Verify host_genus data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['host_genus'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, 'Mycobacterium')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n\n def test_get_genome_3(self):\n \"\"\"Verify cluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['cluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, 'A')\n\n def test_get_genome_4(self):\n \"\"\"Verify subcluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['subcluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, 'A2')\n\n def test_get_genome_5(self):\n \"\"\"Verify annotation_status data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_status'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, 'final')\n\n def test_get_genome_6(self):\n \"\"\"Verify annotation_author data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_author'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, 1)\n\n def test_get_genome_7(self):\n \"\"\"Verify retrieve_record data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['retrieve_record'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)\n\n def test_get_genome_8(self):\n \"\"\"Verify accession data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['accession'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.accession, 'ABC123')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTicketFunctions1(unittest.TestCase):\n\n def setUp(self):\n self.required_keys = constants.IMPORT_TABLE_STRUCTURE['required']\n self.optional_keys = constants.IMPORT_TABLE_STRUCTURE['optional']\n self.keywords = constants.IMPORT_TABLE_STRUCTURE['keywords']\n self.ticket_dict1 = {}\n self.ticket_dict1['type'] = 'add'\n self.ticket_dict1['phage_id'] = 'Trixie'\n self.ticket_dict1['description_field'] = 'product'\n self.ticket_dict1['eval_mode'] = 'final'\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retain'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'parse'\n self.ticket_dict2 = {}\n self.ticket_dict3 = {}\n self.ticket_dict3['type'] = 'ADD'\n self.ticket_dict3['phage_id'] = 'Trixie'\n self.ticket_dict3['description_field'] = 'PRODUCT'\n self.ticket_dict3['eval_mode'] = 'FINAL'\n self.ticket_dict3['host_genus'] = 'RETRIEVE'\n self.ticket_dict3['subcluster'] = None\n self.ticket_dict3['accession'] = 'PARSE'\n self.ticket_dict3['retrieve_record'] = 'RETAIN'\n self.ticket_dict4 = {}\n self.ticket_dict4['type'] = 'ADD'\n self.ticket_dict4['phage_id'] = 'Trixie'\n <mask token>\n\n def test_modify_import_data_2(self):\n \"\"\"Verify returns False if there are extra keys.\"\"\"\n self.ticket_dict3['extra'] = 'extra'\n result = tickets.modify_import_data(self.ticket_dict3, self.\n required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n <mask token>\n <mask token>\n\n def test_modify_import_data_5(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal replace ticket.\"\"\"\n self.ticket_dict4['type'] = 'replace'\n self.ticket_dict4['description_field'] = 'product'\n self.ticket_dict4['eval_mode'] = 'final'\n result = tickets.modify_import_data(self.ticket_dict4, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4['host_genus'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['cluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['subcluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_author'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['retrieve_record'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_status'], 'final')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['accession'], 'retain')\n\n def test_parse_import_ticket_data_1(self):\n \"\"\"Verify ticket is generated from correct data dictionary.\"\"\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['host_genus']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set(['cluster']))\n with self.subTest():\n self.assertEqual(tkt.data_parse, set(['accession']))\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster']))\n\n def test_parse_import_ticket_data_2(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'retain', 'retrieve', or 'parse' 
sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'Mycobacterium'\n self.ticket_dict1['cluster'] = 'A'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'ABC123'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster', 'host_genus',\n 'cluster', 'accession']))\n\n def test_parse_import_ticket_data_3(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'add' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retrieve'\n self.ticket_dict1['subcluster'] = 'retrieve'\n self.ticket_dict1['accession'] = 'retrieve'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['subcluster',\n 'host_genus', 'cluster', 'accession']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set())\n <mask token>\n\n def test_set_keywords_1(self):\n \"\"\"Verify one value is lowercased.\"\"\"\n data_dict = {'type': 'ADD', 'cluster': 'RETRIEVE', 'subcluster':\n 'NONE', 'host_genus': 'PARSE', 'retrieve_record': 'RETAIN'}\n keywords = set(['retrieve', 'retain'])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict['type'], 'ADD')\n with self.subTest():\n self.assertEqual(data_dict['cluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(data_dict['subcluster'], 'none')\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], 'parse')\n with self.subTest():\n self.assertEqual(data_dict['retrieve_record'], 'retain')\n\n def test_set_missing_keys_1(self):\n \"\"\"Verify one missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'host_genus'])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], '')\n\n def test_set_missing_keys_2(self):\n \"\"\"Verify no missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'cluster'])\n tickets.set_missing_keys(data_dict, key_set)\n self.assertEqual(len(data_dict.keys()), 2)\n <mask token>\n <mask token>\n <mask token>\n\n def test_construct_tickets_1(self):\n \"\"\"Verify two tickets are constructed correctly.\n The first ticket contains all required and optional fields.\n The second ticket contains all required fields.\"\"\"\n dict_list = [self.ticket_dict1, 
self.ticket_dict4]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict':\n {'check_locus_tag': False}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, 'function', self.required_keys, self.\n optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].id, 1)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].eval_mode, 'final')\n with self.subTest():\n self.assertEqual(list_of_tickets[0].description_field, 'product')\n with self.subTest():\n self.assertTrue(list_of_tickets[0].eval_flags['check_locus_tag'])\n with self.subTest():\n self.assertEqual(list_of_tickets[1].id, 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[1].eval_mode, 'custom_eval_mode')\n with self.subTest():\n self.assertEqual(list_of_tickets[1].description_field, 'function')\n with self.subTest():\n self.assertFalse(list_of_tickets[1].eval_flags['check_locus_tag'])\n <mask token>\n <mask token>\n\n def test_identify_duplicates_1(self):\n \"\"\"Verify no duplicates are produced.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'L5'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n def test_identify_duplicates_2(self):\n \"\"\"Verify two tickets with 'none' duplicates\n do not generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 'none'\n ticket1.type = 'replace'\n ticket1.phage_id = 'none'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 'none'\n ticket2.type = 'replace'\n ticket2.phage_id = 'none'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n <mask token>\n\n def test_identify_duplicates_4(self):\n \"\"\"Verify two tickets with Primary Phage ID duplicates\n do generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n def test_identify_duplicates_6(self):\n \"\"\"Verify two tickets with multiple duplicates\n do generate multiple errors.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 
1)\n\n\nclass TestTicketFunctions2(unittest.TestCase):\n\n def setUp(self):\n self.ticket1 = ticket.ImportTicket()\n self.ticket2 = ticket.ImportTicket()\n self.ticket1.phage_id = 'Trixie'\n self.ticket2.phage_id = 'L5'\n self.bundle1 = bundle.Bundle()\n self.bundle2 = bundle.Bundle()\n self.bundle1.ticket = self.ticket1\n self.bundle2.ticket = self.ticket2\n\n\nclass TestTicketFunctions3(unittest.TestCase):\n\n def setUp(self):\n self.data_dict = {}\n self.data_dict['host_genus'] = 'Mycobacterium smegmatis'\n self.data_dict['accession'] = 'ABC123.1'\n self.data_dict['annotation_status'] = 'final'\n self.data_dict['cluster'] = 'A'\n self.data_dict['subcluster'] = 'A2'\n self.data_dict['annotation_author'] = 1\n self.data_dict['retrieve_record'] = 1\n self.tkt1 = ticket.ImportTicket()\n self.tkt1.phage_id = 'Trixie_Draft'\n self.tkt1.data_dict = self.data_dict\n\n def test_get_genome_1(self):\n \"\"\"Verify no data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([''])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.id, 'Trixie')\n with self.subTest():\n self.assertEqual(gnm.name, 'Trixie_Draft')\n with self.subTest():\n self.assertEqual(gnm.type, 'add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, '')\n\n def test_get_genome_2(self):\n \"\"\"Verify host_genus data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['host_genus'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, 'Mycobacterium')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n\n def test_get_genome_3(self):\n \"\"\"Verify cluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['cluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, 'A')\n\n def test_get_genome_4(self):\n \"\"\"Verify subcluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['subcluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, 'A2')\n\n def test_get_genome_5(self):\n \"\"\"Verify annotation_status data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_status'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, 'final')\n\n def test_get_genome_6(self):\n \"\"\"Verify annotation_author data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_author'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, 1)\n\n def test_get_genome_7(self):\n \"\"\"Verify retrieve_record data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['retrieve_record'])\n gnm = tickets.get_genome(self.tkt1, 
gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)\n\n def test_get_genome_8(self):\n \"\"\"Verify accession data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['accession'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.accession, 'ABC123')\n\n\n<mask token>\n",
"step-4": "<mask token>\nfrom pdm_utils.classes import bundle\nfrom pdm_utils.classes import genome\nfrom pdm_utils.classes import ticket\nfrom pdm_utils.classes import eval\nfrom pdm_utils.functions import tickets\nfrom pdm_utils.constants import constants\nimport unittest\n\n\nclass TestTicketFunctions1(unittest.TestCase):\n\n def setUp(self):\n self.required_keys = constants.IMPORT_TABLE_STRUCTURE['required']\n self.optional_keys = constants.IMPORT_TABLE_STRUCTURE['optional']\n self.keywords = constants.IMPORT_TABLE_STRUCTURE['keywords']\n self.ticket_dict1 = {}\n self.ticket_dict1['type'] = 'add'\n self.ticket_dict1['phage_id'] = 'Trixie'\n self.ticket_dict1['description_field'] = 'product'\n self.ticket_dict1['eval_mode'] = 'final'\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retain'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'parse'\n self.ticket_dict2 = {}\n self.ticket_dict3 = {}\n self.ticket_dict3['type'] = 'ADD'\n self.ticket_dict3['phage_id'] = 'Trixie'\n self.ticket_dict3['description_field'] = 'PRODUCT'\n self.ticket_dict3['eval_mode'] = 'FINAL'\n self.ticket_dict3['host_genus'] = 'RETRIEVE'\n self.ticket_dict3['subcluster'] = None\n self.ticket_dict3['accession'] = 'PARSE'\n self.ticket_dict3['retrieve_record'] = 'RETAIN'\n self.ticket_dict4 = {}\n self.ticket_dict4['type'] = 'ADD'\n self.ticket_dict4['phage_id'] = 'Trixie'\n\n def test_modify_import_data_1(self):\n \"\"\"Verify returns False if there are missing required keys.\"\"\"\n result = tickets.modify_import_data(self.ticket_dict2, self.\n required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n\n def test_modify_import_data_2(self):\n \"\"\"Verify returns False if there are extra keys.\"\"\"\n self.ticket_dict3['extra'] = 'extra'\n result = tickets.modify_import_data(self.ticket_dict3, self.\n required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n\n def test_modify_import_data_3(self):\n \"\"\"Verify returns True with keywords identified and values lowercased.\"\"\"\n result = tickets.modify_import_data(self.ticket_dict3, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict3['host_genus'], 'retrieve')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['retrieve_record'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['subcluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['accession'], 'parse')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['type'], 'add')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['description_field'], 'product')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['eval_mode'], 'final')\n\n def test_modify_import_data_4(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal add ticket.\"\"\"\n self.ticket_dict4['description_field'] = 'product'\n self.ticket_dict4['eval_mode'] = 'final'\n result = tickets.modify_import_data(self.ticket_dict4, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4['host_genus'], 'retrieve')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['cluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['subcluster'], 'retrieve')\n with self.subTest():\n 
self.assertEqual(self.ticket_dict4['annotation_author'], '1')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['retrieve_record'], '1')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_status'], 'draft')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['accession'], '')\n\n def test_modify_import_data_5(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal replace ticket.\"\"\"\n self.ticket_dict4['type'] = 'replace'\n self.ticket_dict4['description_field'] = 'product'\n self.ticket_dict4['eval_mode'] = 'final'\n result = tickets.modify_import_data(self.ticket_dict4, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4['host_genus'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['cluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['subcluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_author'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['retrieve_record'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_status'], 'final')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['accession'], 'retain')\n\n def test_parse_import_ticket_data_1(self):\n \"\"\"Verify ticket is generated from correct data dictionary.\"\"\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['host_genus']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set(['cluster']))\n with self.subTest():\n self.assertEqual(tkt.data_parse, set(['accession']))\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster']))\n\n def test_parse_import_ticket_data_2(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'retain', 'retrieve', or 'parse' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'Mycobacterium'\n self.ticket_dict1['cluster'] = 'A'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'ABC123'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster', 'host_genus',\n 'cluster', 'accession']))\n\n def test_parse_import_ticket_data_3(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'add' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retrieve'\n self.ticket_dict1['subcluster'] = 'retrieve'\n 
self.ticket_dict1['accession'] = 'retrieve'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['subcluster',\n 'host_genus', 'cluster', 'accession']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set())\n\n def test_set_empty_1(self):\n \"\"\"Verify one None value is set to ''.\"\"\"\n data_dict = {'type': 'add', 'cluster': None}\n tickets.set_empty(data_dict)\n with self.subTest():\n self.assertEqual(data_dict['type'], 'add')\n with self.subTest():\n self.assertEqual(data_dict['cluster'], '')\n\n def test_set_keywords_1(self):\n \"\"\"Verify one value is lowercased.\"\"\"\n data_dict = {'type': 'ADD', 'cluster': 'RETRIEVE', 'subcluster':\n 'NONE', 'host_genus': 'PARSE', 'retrieve_record': 'RETAIN'}\n keywords = set(['retrieve', 'retain'])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict['type'], 'ADD')\n with self.subTest():\n self.assertEqual(data_dict['cluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(data_dict['subcluster'], 'none')\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], 'parse')\n with self.subTest():\n self.assertEqual(data_dict['retrieve_record'], 'retain')\n\n def test_set_missing_keys_1(self):\n \"\"\"Verify one missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'host_genus'])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], '')\n\n def test_set_missing_keys_2(self):\n \"\"\"Verify no missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'cluster'])\n tickets.set_missing_keys(data_dict, key_set)\n self.assertEqual(len(data_dict.keys()), 2)\n\n def test_set_dict_value_1(self):\n \"\"\"Verify empty value is replaced with first value.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n tickets.set_dict_value(data_dict, 'cluster', 'A', 'B')\n self.assertEqual(data_dict['cluster'], 'A')\n\n def test_set_dict_value_2(self):\n \"\"\"Verify empty value is replaced with second value.\"\"\"\n data_dict = {'type': 'replace', 'cluster': ''}\n tickets.set_dict_value(data_dict, 'cluster', 'A', 'B')\n self.assertEqual(data_dict['cluster'], 'B')\n\n def test_set_dict_value_3(self):\n \"\"\"Verify non-empty value is not replaced.\"\"\"\n data_dict = {'type': 'replace', 'cluster': 'C'}\n tickets.set_dict_value(data_dict, 'cluster', 'A', 'B')\n self.assertEqual(data_dict['cluster'], 'C')\n\n def test_construct_tickets_1(self):\n \"\"\"Verify two tickets are constructed correctly.\n The first ticket contains all required and optional fields.\n The second ticket contains all required fields.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict4]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict':\n {'check_locus_tag': False}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, 'function', 
self.required_keys, self.\n optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].id, 1)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].eval_mode, 'final')\n with self.subTest():\n self.assertEqual(list_of_tickets[0].description_field, 'product')\n with self.subTest():\n self.assertTrue(list_of_tickets[0].eval_flags['check_locus_tag'])\n with self.subTest():\n self.assertEqual(list_of_tickets[1].id, 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[1].eval_mode, 'custom_eval_mode')\n with self.subTest():\n self.assertEqual(list_of_tickets[1].description_field, 'function')\n with self.subTest():\n self.assertFalse(list_of_tickets[1].eval_flags['check_locus_tag'])\n\n def test_construct_tickets_2(self):\n \"\"\"Verify one ticket is constructed correctly. The second data\n dictionary is not structured correctly.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict2]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict': {}\n }\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, 'function', self.required_keys, self.\n optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 1)\n\n def test_construct_tickets_3(self):\n \"\"\"Verify four tickets constructed correctly. The first two tickets\n contain all required and optional fields. The second two tickets\n contain all required fields. Verify that each eval_flag dictionary\n is a separate object that can be modified without impacting the other\n eval_flag dictionaries.\"\"\"\n tkt_dict1 = {}\n tkt_dict1['type'] = 'add'\n tkt_dict1['phage_id'] = 'Trixie'\n tkt_dict1['description_field'] = 'product'\n tkt_dict1['eval_mode'] = 'final'\n tkt_dict2 = {}\n tkt_dict2['type'] = 'add'\n tkt_dict2['phage_id'] = 'L5'\n tkt_dict2['description_field'] = 'product'\n tkt_dict2['eval_mode'] = 'final'\n tkt_dict3 = {}\n tkt_dict3['type'] = 'add'\n tkt_dict3['phage_id'] = 'RedRock'\n tkt_dict4 = {}\n tkt_dict4['type'] = 'add'\n tkt_dict4['phage_id'] = 'Bxb1'\n dict_list = [tkt_dict1, tkt_dict2, tkt_dict3, tkt_dict4]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict':\n {'check_locus_tag': False}}\n tkt_list = tickets.construct_tickets(dict_list, eval_data_dict,\n 'function', self.required_keys, self.optional_keys, self.keywords)\n tkt_list[0].eval_flags['check_locus_tag'] = 0\n tkt_list[1].eval_flags['check_locus_tag'] = 1\n tkt_list[2].eval_flags['check_locus_tag'] = 2\n tkt_list[3].eval_flags['check_locus_tag'] = 3\n with self.subTest():\n self.assertEqual(tkt_list[0].eval_flags['check_locus_tag'], 0)\n with self.subTest():\n self.assertEqual(tkt_list[1].eval_flags['check_locus_tag'], 1)\n with self.subTest():\n self.assertEqual(tkt_list[2].eval_flags['check_locus_tag'], 2)\n with self.subTest():\n self.assertEqual(tkt_list[3].eval_flags['check_locus_tag'], 3)\n\n def test_identify_duplicates_1(self):\n \"\"\"Verify no duplicates are produced.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'L5'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 
0)\n\n def test_identify_duplicates_2(self):\n \"\"\"Verify two tickets with 'none' duplicates\n do not generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 'none'\n ticket1.type = 'replace'\n ticket1.phage_id = 'none'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 'none'\n ticket2.type = 'replace'\n ticket2.phage_id = 'none'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n def test_identify_duplicates_3(self):\n \"\"\"Verify two tickets with id duplicates\n do generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'L5'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n def test_identify_duplicates_4(self):\n \"\"\"Verify two tickets with Primary Phage ID duplicates\n do generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n def test_identify_duplicates_6(self):\n \"\"\"Verify two tickets with multiple duplicates\n do generate multiple errors.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\nclass TestTicketFunctions2(unittest.TestCase):\n\n def setUp(self):\n self.ticket1 = ticket.ImportTicket()\n self.ticket2 = ticket.ImportTicket()\n self.ticket1.phage_id = 'Trixie'\n self.ticket2.phage_id = 'L5'\n self.bundle1 = bundle.Bundle()\n self.bundle2 = bundle.Bundle()\n self.bundle1.ticket = self.ticket1\n self.bundle2.ticket = self.ticket2\n\n\nclass TestTicketFunctions3(unittest.TestCase):\n\n def setUp(self):\n self.data_dict = {}\n self.data_dict['host_genus'] = 'Mycobacterium smegmatis'\n self.data_dict['accession'] = 'ABC123.1'\n self.data_dict['annotation_status'] = 'final'\n self.data_dict['cluster'] = 'A'\n self.data_dict['subcluster'] = 'A2'\n self.data_dict['annotation_author'] = 1\n self.data_dict['retrieve_record'] = 1\n self.tkt1 = ticket.ImportTicket()\n self.tkt1.phage_id = 'Trixie_Draft'\n self.tkt1.data_dict = self.data_dict\n\n def test_get_genome_1(self):\n \"\"\"Verify no data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([''])\n gnm = 
tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.id, 'Trixie')\n with self.subTest():\n self.assertEqual(gnm.name, 'Trixie_Draft')\n with self.subTest():\n self.assertEqual(gnm.type, 'add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, '')\n\n def test_get_genome_2(self):\n \"\"\"Verify host_genus data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['host_genus'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, 'Mycobacterium')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n\n def test_get_genome_3(self):\n \"\"\"Verify cluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['cluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, 'A')\n\n def test_get_genome_4(self):\n \"\"\"Verify subcluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['subcluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, 'A2')\n\n def test_get_genome_5(self):\n \"\"\"Verify annotation_status data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_status'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, 'final')\n\n def test_get_genome_6(self):\n \"\"\"Verify annotation_author data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_author'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, 1)\n\n def test_get_genome_7(self):\n \"\"\"Verify retrieve_record data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['retrieve_record'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)\n\n def test_get_genome_8(self):\n \"\"\"Verify accession data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['accession'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.accession, 'ABC123')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "\"\"\"Unit tests for misc. ticket functions.\"\"\"\n\nfrom pdm_utils.classes import bundle\nfrom pdm_utils.classes import genome\nfrom pdm_utils.classes import ticket\nfrom pdm_utils.classes import eval\nfrom pdm_utils.functions import tickets\nfrom pdm_utils.constants import constants\nimport unittest\n\n\n\n\n\nclass TestTicketFunctions1(unittest.TestCase):\n\n\n def setUp(self):\n self.required_keys = constants.IMPORT_TABLE_STRUCTURE[\"required\"]\n self.optional_keys = constants.IMPORT_TABLE_STRUCTURE[\"optional\"]\n self.keywords = constants.IMPORT_TABLE_STRUCTURE[\"keywords\"]\n\n self.ticket_dict1 = {}\n self.ticket_dict1[\"type\"] = \"add\"\n self.ticket_dict1[\"phage_id\"] = \"Trixie\"\n self.ticket_dict1[\"description_field\"] = \"product\"\n self.ticket_dict1[\"eval_mode\"] = \"final\"\n self.ticket_dict1[\"host_genus\"] = \"retrieve\"\n self.ticket_dict1[\"cluster\"] = \"retain\"\n self.ticket_dict1[\"subcluster\"] = \"A2\"\n self.ticket_dict1[\"accession\"] = \"parse\"\n\n\n self.ticket_dict2 = {}\n\n self.ticket_dict3 = {}\n self.ticket_dict3[\"type\"] = \"ADD\"\n self.ticket_dict3[\"phage_id\"] = \"Trixie\"\n self.ticket_dict3[\"description_field\"] = \"PRODUCT\"\n self.ticket_dict3[\"eval_mode\"] = \"FINAL\"\n self.ticket_dict3[\"host_genus\"] = \"RETRIEVE\"\n self.ticket_dict3[\"subcluster\"] = None\n self.ticket_dict3[\"accession\"] = \"PARSE\"\n self.ticket_dict3[\"retrieve_record\"] = \"RETAIN\"\n\n\n self.ticket_dict4 = {}\n self.ticket_dict4[\"type\"] = \"ADD\"\n self.ticket_dict4[\"phage_id\"] = \"Trixie\"\n\n\n def test_modify_import_data_1(self):\n \"\"\"Verify returns False if there are missing required keys.\"\"\"\n result = tickets.modify_import_data(self.ticket_dict2,\n self.required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n\n\n def test_modify_import_data_2(self):\n \"\"\"Verify returns False if there are extra keys.\"\"\"\n self.ticket_dict3[\"extra\"] = \"extra\"\n result = tickets.modify_import_data(self.ticket_dict3,\n self.required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n\n\n def test_modify_import_data_3(self):\n \"\"\"Verify returns True with keywords identified and values lowercased.\"\"\"\n result = tickets.modify_import_data(self.ticket_dict3,\n self.required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"host_genus\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"retrieve_record\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"subcluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"accession\"], \"parse\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"type\"], \"add\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"description_field\"], \"product\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"eval_mode\"], \"final\")\n\n\n def test_modify_import_data_4(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal add ticket.\"\"\"\n self.ticket_dict4[\"description_field\"] = \"product\"\n self.ticket_dict4[\"eval_mode\"] = \"final\"\n result = tickets.modify_import_data(self.ticket_dict4,\n self.required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"host_genus\"], \"retrieve\")\n with self.subTest():\n 
self.assertEqual(self.ticket_dict4[\"cluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"subcluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_author\"], \"1\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"retrieve_record\"], \"1\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_status\"], \"draft\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"accession\"], \"\")\n\n\n def test_modify_import_data_5(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal replace ticket.\"\"\"\n self.ticket_dict4[\"type\"] = \"replace\"\n self.ticket_dict4[\"description_field\"] = \"product\"\n self.ticket_dict4[\"eval_mode\"] = \"final\"\n result = tickets.modify_import_data(self.ticket_dict4,\n self.required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"host_genus\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"cluster\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"subcluster\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_author\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"retrieve_record\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_status\"], \"final\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"accession\"], \"retain\")\n\n\n\n\n def test_parse_import_ticket_data_1(self):\n \"\"\"Verify ticket is generated from correct data dictionary.\"\"\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, \"add\")\n with self.subTest():\n self.assertEqual(tkt.phage_id, \"Trixie\")\n with self.subTest():\n self.assertEqual(tkt.description_field, \"product\")\n with self.subTest():\n self.assertEqual(tkt.eval_mode, \"final\")\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set([\"host_genus\"]))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set([\"cluster\"]))\n with self.subTest():\n self.assertEqual(tkt.data_parse, set([\"accession\"]))\n with self.subTest():\n self.assertEqual(tkt.data_add, set([\"subcluster\"]))\n\n def test_parse_import_ticket_data_2(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'retain', 'retrieve', or 'parse' sets.\"\"\"\n self.ticket_dict1[\"host_genus\"] = \"Mycobacterium\"\n self.ticket_dict1[\"cluster\"] = \"A\"\n self.ticket_dict1[\"subcluster\"] = \"A2\"\n self.ticket_dict1[\"accession\"] = \"ABC123\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, \"add\")\n with self.subTest():\n self.assertEqual(tkt.phage_id, \"Trixie\")\n with self.subTest():\n self.assertEqual(tkt.description_field, \"product\")\n with self.subTest():\n self.assertEqual(tkt.eval_mode, \"final\")\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set([\"subcluster\", \"host_genus\",\n \"cluster\", \"accession\"]))\n\n def 
test_parse_import_ticket_data_3(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'add' sets.\"\"\"\n self.ticket_dict1[\"host_genus\"] = \"retrieve\"\n self.ticket_dict1[\"cluster\"] = \"retrieve\"\n self.ticket_dict1[\"subcluster\"] = \"retrieve\"\n self.ticket_dict1[\"accession\"] = \"retrieve\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, \"add\")\n with self.subTest():\n self.assertEqual(tkt.phage_id, \"Trixie\")\n with self.subTest():\n self.assertEqual(tkt.description_field, \"product\")\n with self.subTest():\n self.assertEqual(tkt.eval_mode, \"final\")\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set([\"subcluster\", \"host_genus\",\n \"cluster\", \"accession\"]))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set())\n\n\n\n\n def test_set_empty_1(self):\n \"\"\"Verify one None value is set to ''.\"\"\"\n data_dict = {\"type\":\"add\",\"cluster\":None}\n tickets.set_empty(data_dict)\n with self.subTest():\n self.assertEqual(data_dict[\"type\"], \"add\")\n with self.subTest():\n self.assertEqual(data_dict[\"cluster\"], \"\")\n\n\n\n\n def test_set_keywords_1(self):\n \"\"\"Verify one value is lowercased.\"\"\"\n data_dict = {\"type\":\"ADD\",\n \"cluster\":\"RETRIEVE\",\n \"subcluster\": \"NONE\",\n \"host_genus\": \"PARSE\",\n \"retrieve_record\": \"RETAIN\"}\n keywords = set([\"retrieve\", \"retain\"])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict[\"type\"], \"ADD\")\n with self.subTest():\n self.assertEqual(data_dict[\"cluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(data_dict[\"subcluster\"], \"none\")\n with self.subTest():\n self.assertEqual(data_dict[\"host_genus\"], \"parse\")\n with self.subTest():\n self.assertEqual(data_dict[\"retrieve_record\"], \"retain\")\n\n\n\n\n def test_set_missing_keys_1(self):\n \"\"\"Verify one missing key is added.\"\"\"\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n key_set = set([\"type\", \"host_genus\"])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict[\"host_genus\"], \"\")\n\n def test_set_missing_keys_2(self):\n \"\"\"Verify no missing key is added.\"\"\"\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n key_set = set([\"type\", \"cluster\"])\n tickets.set_missing_keys(data_dict, key_set)\n self.assertEqual(len(data_dict.keys()), 2)\n\n\n\n\n def test_set_dict_value_1(self):\n \"\"\"Verify empty value is replaced with first value.\"\"\"\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"A\")\n\n def test_set_dict_value_2(self):\n \"\"\"Verify empty value is replaced with second value.\"\"\"\n data_dict = {\"type\":\"replace\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"B\")\n\n def test_set_dict_value_3(self):\n \"\"\"Verify non-empty value is not replaced.\"\"\"\n data_dict = {\"type\":\"replace\", \"cluster\":\"C\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], 
\"C\")\n\n\n\n\n def test_construct_tickets_1(self):\n \"\"\"Verify two tickets are constructed correctly.\n The first ticket contains all required and optional fields.\n The second ticket contains all required fields.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict4]\n eval_data_dict = {\"eval_mode\": \"custom_eval_mode\",\n \"eval_flag_dict\": {\"check_locus_tag\": False}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, \"function\", self.required_keys,\n self.optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].id, 1)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].eval_mode, \"final\")\n with self.subTest():\n self.assertEqual(list_of_tickets[0].description_field, \"product\")\n with self.subTest():\n self.assertTrue(list_of_tickets[0].eval_flags[\"check_locus_tag\"])\n with self.subTest():\n self.assertEqual(list_of_tickets[1].id, 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[1].eval_mode, \"custom_eval_mode\")\n with self.subTest():\n self.assertEqual(list_of_tickets[1].description_field, \"function\")\n with self.subTest():\n self.assertFalse(list_of_tickets[1].eval_flags[\"check_locus_tag\"])\n\n def test_construct_tickets_2(self):\n \"\"\"Verify one ticket is constructed correctly. The second data\n dictionary is not structured correctly.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict2]\n eval_data_dict = {\"eval_mode\": \"custom_eval_mode\",\n \"eval_flag_dict\": {}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, \"function\", self.required_keys,\n self.optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 1)\n\n def test_construct_tickets_3(self):\n \"\"\"Verify four tickets constructed correctly. The first two tickets\n contain all required and optional fields. The second two tickets\n contain all required fields. 
Verify that each eval_flag dictionary\n is a separate object that can be modified without impacting the other\n eval_flag dictionaries.\"\"\"\n\n tkt_dict1 = {}\n tkt_dict1[\"type\"] = \"add\"\n tkt_dict1[\"phage_id\"] = \"Trixie\"\n tkt_dict1[\"description_field\"] = \"product\"\n tkt_dict1[\"eval_mode\"] = \"final\"\n\n tkt_dict2 = {}\n tkt_dict2[\"type\"] = \"add\"\n tkt_dict2[\"phage_id\"] = \"L5\"\n tkt_dict2[\"description_field\"] = \"product\"\n tkt_dict2[\"eval_mode\"] = \"final\"\n\n tkt_dict3 = {}\n tkt_dict3[\"type\"] = \"add\"\n tkt_dict3[\"phage_id\"] = \"RedRock\"\n\n tkt_dict4 = {}\n tkt_dict4[\"type\"] = \"add\"\n tkt_dict4[\"phage_id\"] = \"Bxb1\"\n\n dict_list = [tkt_dict1, tkt_dict2, tkt_dict3, tkt_dict4]\n eval_data_dict = {\"eval_mode\": \"custom_eval_mode\",\n \"eval_flag_dict\": {\"check_locus_tag\": False}}\n tkt_list = tickets.construct_tickets(dict_list,\n eval_data_dict, \"function\", self.required_keys,\n self.optional_keys, self.keywords)\n\n tkt_list[0].eval_flags[\"check_locus_tag\"] = 0\n tkt_list[1].eval_flags[\"check_locus_tag\"] = 1\n tkt_list[2].eval_flags[\"check_locus_tag\"] = 2\n tkt_list[3].eval_flags[\"check_locus_tag\"] = 3\n\n with self.subTest():\n self.assertEqual(tkt_list[0].eval_flags[\"check_locus_tag\"], 0)\n with self.subTest():\n self.assertEqual(tkt_list[1].eval_flags[\"check_locus_tag\"], 1)\n with self.subTest():\n self.assertEqual(tkt_list[2].eval_flags[\"check_locus_tag\"], 2)\n with self.subTest():\n self.assertEqual(tkt_list[3].eval_flags[\"check_locus_tag\"], 3)\n\n\n\n def test_identify_duplicates_1(self):\n \"\"\"Verify no duplicates are produced.\"\"\"\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"L5\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n\n def test_identify_duplicates_2(self):\n \"\"\"Verify two tickets with 'none' duplicates\n do not generate an error.\"\"\"\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = \"none\"\n ticket1.type = \"replace\"\n ticket1.phage_id = \"none\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = \"none\"\n ticket2.type = \"replace\"\n ticket2.phage_id = \"none\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n\n def test_identify_duplicates_3(self):\n \"\"\"Verify two tickets with id duplicates\n do generate an error.\"\"\"\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"L5\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n\n\n def test_identify_duplicates_4(self):\n \"\"\"Verify two tickets with Primary Phage ID 
duplicates\n do generate an error.\"\"\"\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\n def test_identify_duplicates_6(self):\n \"\"\"Verify two tickets with multiple duplicates\n do generate multiple errors.\"\"\"\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\n\nclass TestTicketFunctions2(unittest.TestCase):\n\n def setUp(self):\n\n self.ticket1 = ticket.ImportTicket()\n self.ticket2 = ticket.ImportTicket()\n\n self.ticket1.phage_id = \"Trixie\"\n self.ticket2.phage_id = \"L5\"\n\n self.bundle1 = bundle.Bundle()\n self.bundle2 = bundle.Bundle()\n\n self.bundle1.ticket = self.ticket1\n self.bundle2.ticket = self.ticket2\n\n\n\n\nclass TestTicketFunctions3(unittest.TestCase):\n\n def setUp(self):\n self.data_dict = {}\n self.data_dict[\"host_genus\"] = \"Mycobacterium smegmatis\"\n self.data_dict[\"accession\"] = \"ABC123.1\"\n self.data_dict[\"annotation_status\"] = \"final\"\n self.data_dict[\"cluster\"] = \"A\"\n self.data_dict[\"subcluster\"] = \"A2\"\n self.data_dict[\"annotation_author\"] = 1\n self.data_dict[\"retrieve_record\"] = 1\n self.tkt1 = ticket.ImportTicket()\n self.tkt1.phage_id = \"Trixie_Draft\"\n self.tkt1.data_dict = self.data_dict\n\n def test_get_genome_1(self):\n \"\"\"Verify no data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.id, \"Trixie\")\n with self.subTest():\n self.assertEqual(gnm.name, \"Trixie_Draft\")\n with self.subTest():\n self.assertEqual(gnm.type, \"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"\")\n with self.subTest():\n self.assertEqual(gnm.subcluster, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_status, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, \"\")\n\n def test_get_genome_2(self):\n \"\"\"Verify host_genus data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"host_genus\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"Mycobacterium\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"\")\n\n def test_get_genome_3(self):\n \"\"\"Verify cluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"cluster\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n 
self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"A\")\n\n def test_get_genome_4(self):\n \"\"\"Verify subcluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"subcluster\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.subcluster, \"A2\")\n\n def test_get_genome_5(self):\n \"\"\"Verify annotation_status data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"annotation_status\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_status, \"final\")\n\n def test_get_genome_6(self):\n \"\"\"Verify annotation_author data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"annotation_author\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_author, 1)\n\n def test_get_genome_7(self):\n \"\"\"Verify retrieve_record data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"retrieve_record\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)\n\n def test_get_genome_8(self):\n \"\"\"Verify accession data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"accession\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.accession, \"ABC123\")\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
22,
24,
27,
39,
40
]
}
|
[
22,
24,
27,
39,
40
] |
# -- coding: utf-8 --
from django.conf.urls import url
from myapp.view import views
from myapp.view import story
from myapp.view import img  # added
from myapp.view import login
from myapp.view import tuling
from myapp.view import utilView
from myapp.view.wechat import wechat_modules
from myapp.view import router
urlpatterns = [
url(r'get_img_api$', router.get_img_api),
    url(r'add_book$', views.add_book),
    url(r'show_books$', views.show_books),
url(r'add_story$', story.add_story),
url(r'show_storys$', story.show_storys),
url(r'add_comment$', story.add_comment),
url(r'show_comments$', story.show_comments),
url(r'uploadImg$', img.uploadImg),
url(r'showImg$', img.showImg),
url(r'uploadImgForUs$', img.uploadImgForUs),
url(r'showImgForUs', img.showImgForUs),
url(r'add_user', login.add_user),
url(r'login', login.login),
url(r'get_username', login.get_username),
url(r'send_register_email', login.send_register_email),
url(r'check_username', login.check_username),
url(r'chat_with_tuling', tuling.chat_with_tuling),
url(r'utilView_getLive2d', utilView.get_live2d),
url(r'utilView_getRandJson', utilView.get_rand_json),
url(r'get_wechat', wechat_modules.on_get),
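    # catch-all: keep this pattern last so it only fires when no earlier pattern matched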
url(r'', login.other_request),
]
|
normal
|
{
"blob_id": "373c102018fdcc5211263304c368c2e8beef3257",
"index": 720,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('get_img_api$', router.get_img_api), url('add_book$',\n views.add_book), url('show_books$', views.show_books), url('add_story$',\n story.add_story), url('show_storys$', story.show_storys), url(\n 'add_comment$', story.add_comment), url('show_comments$', story.\n show_comments), url('uploadImg$', img.uploadImg), url('showImg$', img.\n showImg), url('uploadImgForUs$', img.uploadImgForUs), url(\n 'showImgForUs', img.showImgForUs), url('add_user', login.add_user), url\n ('login', login.login), url('get_username', login.get_username), url(\n 'send_register_email', login.send_register_email), url('check_username',\n login.check_username), url('chat_with_tuling', tuling.chat_with_tuling),\n url('utilView_getLive2d', utilView.get_live2d), url(\n 'utilView_getRandJson', utilView.get_rand_json), url('get_wechat',\n wechat_modules.on_get), url('', login.other_request)]\n",
"step-3": "from django.conf.urls import url\nfrom myapp.view import views\nfrom myapp.view import story\nfrom myapp.view import img\nfrom myapp.view import login\nfrom myapp.view import tuling\nfrom myapp.view import utilView\nfrom myapp.view.wechat import wechat_modules\nfrom myapp.view import router\nurlpatterns = [url('get_img_api$', router.get_img_api), url('add_book$',\n views.add_book), url('show_books$', views.show_books), url('add_story$',\n story.add_story), url('show_storys$', story.show_storys), url(\n 'add_comment$', story.add_comment), url('show_comments$', story.\n show_comments), url('uploadImg$', img.uploadImg), url('showImg$', img.\n showImg), url('uploadImgForUs$', img.uploadImgForUs), url(\n 'showImgForUs', img.showImgForUs), url('add_user', login.add_user), url\n ('login', login.login), url('get_username', login.get_username), url(\n 'send_register_email', login.send_register_email), url('check_username',\n login.check_username), url('chat_with_tuling', tuling.chat_with_tuling),\n url('utilView_getLive2d', utilView.get_live2d), url(\n 'utilView_getRandJson', utilView.get_rand_json), url('get_wechat',\n wechat_modules.on_get), url('', login.other_request)]\n",
"step-4": "# -- coding: utf-8 --\nfrom django.conf.urls import url\nfrom myapp.view import views\nfrom myapp.view import story\nfrom myapp.view import img # 添加\nfrom myapp.view import login\nfrom myapp.view import tuling\nfrom myapp.view import utilView\nfrom myapp.view.wechat import wechat_modules\nfrom myapp.view import router\n\nurlpatterns = [\n url(r'get_img_api$', router.get_img_api),\n url(r'add_book$', views.add_book, ),\n url(r'show_books$', views.show_books, ),\n\n url(r'add_story$', story.add_story),\n url(r'show_storys$', story.show_storys),\n\n url(r'add_comment$', story.add_comment),\n url(r'show_comments$', story.show_comments),\n\n url(r'uploadImg$', img.uploadImg),\n url(r'showImg$', img.showImg),\n url(r'uploadImgForUs$', img.uploadImgForUs),\n url(r'showImgForUs', img.showImgForUs),\n\n url(r'add_user', login.add_user),\n url(r'login', login.login),\n url(r'get_username', login.get_username),\n url(r'send_register_email', login.send_register_email),\n url(r'check_username', login.check_username),\n\n url(r'chat_with_tuling', tuling.chat_with_tuling),\n url(r'utilView_getLive2d', utilView.get_live2d),\n url(r'utilView_getRandJson', utilView.get_rand_json),\n\n url(r'get_wechat', wechat_modules.on_get),\n\n url(r'', login.other_request),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import random
from z3 import *
def combine(iter):
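    # Fold an iterable of z3 expressions (or numbers) into their sum, term by term.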
tmp_list = [i for i in iter]
res = tmp_list[0]
for i in tmp_list[1:]:
res += i
return res
def co_prime(num1, num2):
for num in range(2, min(num1, num2) + 1):
if num1 % num == 0 and num2 % num == 0:
return False
return True
def gcd(*nums):
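    # Brute-force greatest common divisor of the nonzero |nums|; returns 1 if none is found.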
min_num = 1 << 32
for num in nums:
if num != 0:
min_num = min(min_num, abs(num))
for i in range(min_num, 1, -1):
flag = True
for num in nums:
if num % i != 0:
flag = False
break
if flag:
return i
return 1
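# FormulaTemplate uses z3 to search for a DNF formula over the integer variables vi:
# a disjunction of k clauses, each combining up to h linear (in)equality atoms
# (sum_j ae_ij * v_j vs. b_i) and up to m modular congruences
# (sum_j am_ij * v_j mod e_i == c_i), consistent with the labeled examples added.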
class FormulaTemplate:
    def __init__(self, vi, w, k, h, m, timeout=3000000):  # parameter w added
        self.k = k  # number of clauses in the disjunction
        self.h = h  # upper bound on the number of linear (in)equality atoms
        self.m = m  # upper bound on the number of modular congruence atoms
self.w = w
self.vi = vi
n = len(vi)
self.n = n
self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in range(h)]
self.bi = [Int('b' + str(i)) for i in range(h)]
self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in range(m)]
        self.ei = [Int('e' + str(i)) for i in range(m)]  # TODO: make these fixed values; try candidates one by one starting from 2 (not implemented yet)
self.ci = [Int('c' + str(i)) for i in range(m)]
self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for j in range(k)]
self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for j in range(k)]
self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for j in range(k)]
self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in range(k)]
self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in range(k)]
self.s = Solver()
for i in range(h):
            # the inequality coefficients ae_ij must not all be 0
self.s.add(Or(*[a > 0 for a in self.aeij[i]]))
for j in range(i + 1, h):
self.s.add(Or(*[self.aeij[i][w] != self.aeij[j][w] for w in range(n)]))
for i in range(m):
            # the modular-equation coefficients am_ij must not all be <= 0
self.s.add(Or(*[am > 0 for am in self.amij[i]]))
            # each coefficient am_ij must not exceed the modulus e
self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])
# for j in range(i + 1, m):
# self.s.add(Or(self.ei[i] != self.ei[j],
# *[self.amij[i][w] != self.amij[j][w] for w in range(n)]))
        # each remainder c_i must be smaller than its modulus e
self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in range(m)])
        # each modulus must be at least 2 and within a bounded range
self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])
for i in range(k):
            # either a clause's guards make it always False, or any two clauses must differ structurally, so the logic cannot collapse to False
for j in range(i + 1, k):
all_true = [And(self.heij[i][w], self.hgeij[i][w], self.hleij[i][w]) for w in range(h)]
all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in range(m)])
struct_const = [Or(self.heij[i][w] != self.heij[j][w],
self.hgeij[i][w] != self.hgeij[j][w],
self.hleij[i][w] != self.hleij[j][w]) for w in range(h)]
struct_const.extend([Or(self.tij[i][w] != self.tij[j][w],
self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])
self.s.add(Or(*struct_const, *all_true))
self.s.set("timeout", timeout)
def add(self, example, label):
self.s.add(self.encoding(example, label))
def check(self):
check = self.s.check()
if check == sat:
self.solve_model()
return check
    @staticmethod
    def W_size(m):
        return m + 2
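    # Encode one labeled example: the candidate formula is Or over the k clause
    # templates; each guard boolean (heij/hgeij/hleij/tij/ntij) switches its atom
    # on via Implies, and the disjunction must equal the example's label.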
def encoding(self, example, label):
Equ = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) != self.bi[i] for i in range(self.h)]
Ge = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) >= self.bi[i] for i in range(self.h)]
Le = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) <= self.bi[i] for i in range(self.h)]
Me = [combine(example[j] * self.amij[i][j] for j in range(self.n)) % self.ei[i] == self.ci[i] for i in
range(self.m)]
Tk = []
for k in range(self.k):
clause = []
clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(self.h)])
clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(self.h)])
clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(self.h)])
clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(self.m)])
clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in range(self.m)])
Tk.append(And(*clause))
# print("Or(*Tk) , label=\n",Or(*Tk),label)
return Or(*Tk) == label
    def solve_model(self):  # extract the solved values (w added)
print("w", self.w)
#W_size = [2,3,4,5,6,7,8,9]
model = self.s.model()
self.M = [[model[self.amij[i][j]].as_long() if model[self.amij[i][j]] is not None else 0
for j in range(self.n)]
for i in range(self.m)]
        # solving e with z3 (this part is to be changed):
        # self.E = [model[self.ei[i]].as_long() if model[self.ei[i]] is not None else 1 for i in range(self.m)]
        # print("E= \n",self.E)
        # change: fix each modulus to W_size(w) instead of solving for it
        for i in range(self.m):
            self.ei[i] = FormulaTemplate.W_size(self.w)
        self.E = [self.ei[i] for i in range(self.m)]
        print("E = \n", self.E)
self.C = [model[self.ci[i]].as_long() if model[self.ci[i]] is not None else 0 for i in range(self.m)]
self.A = [[model[self.aeij[i][j]].as_long() if model[self.aeij[i][j]] is not None else 0
for j in range(self.n)]
for i in range(self.h)]
self.B = [model[self.bi[i]].as_long() if model[self.bi[i]] is not None else 0 for i in range(self.h)]
self.He = [
[bool(model[self.heij[i][j]]) if model[self.heij[i][j]] is not None else False
for j in range(self.h)]
for i in range(self.k)
]
self.Hge = [
[bool(model[self.hgeij[i][j]]) if model[self.hgeij[i][j]] is not None else False
for j in range(self.h)]
for i in range(self.k)
]
self.Hle = [
[bool(model[self.hleij[i][j]]) if model[self.hleij[i][j]] is not None else False
for j in range(self.h)]
for i in range(self.k)
]
self.T = [
[bool(model[self.tij[i][j]]) if model[self.tij[i][j]] is not None else False
for j in range(self.m)]
for i in range(self.k)
]
self.Nt = [
[bool(model[self.ntij[i][j]]) if model[self.ntij[i][j]] is not None else False
for j in range(self.m)]
for i in range(self.k)
]
for i in range(self.m):
            flag = True  # whether all nonzero coefficients are equal
pix = -1
for am in self.M[i]:
if pix == -1:
if am != 0:
pix = am
elif am != 0 and am != pix:
flag = False
break
            if flag:  # all coefficients identical
if self.C[i] == 0:
# if co_prime(pix, self.E[i]):
# for j in range(self.n):
# if self.M[i][j] != 0:
# self.M[i][j] = 1
# else:
# div = gcd(pix, self.E[i])
# self.E[i] /= div
# for j in range(self.n):
# self.M[i][j] /= div
if not co_prime(pix, self.E[i]):
self.E[i] /= gcd(pix, self.E[i])
for j in range(self.n):
self.M[i][j] = 1
else:
div = gcd(pix, self.E[i], self.C[i])
self.E[i] /= div
self.C[i] /= div
pix /= div
for j in range(self.n):
self.M[i][j] /= div
div = gcd(int(pix), int(self.C[i]))
for j in range(self.n):
self.M[i][j] /= div
self.C[i] /= div
for i in range(self.h):
divisior = gcd(*self.A[i], self.B[i])
self.B[i] /= divisior
for j in range(self.n):
self.A[i][j] /= divisior
for i in range(len(self.E)):
self.E[i] = int(self.E[i])
    def formula_model(self, *val):  # build a formula model: pass variables to obtain a symbolic formula, pass numeric values to evaluate it
if len(val) == 0:
val = self.vi
formu = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * val[j] for j in range(self.n))
status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])
                if status == (False, False, True):  # pick the comparison operator (>, <, ==, ...)
clause.append(Coe <= self.B[h])
elif status == (False, True, False):
clause.append(Coe >= self.B[h])
elif status == (True, False, False):
clause.append(Coe != self.B[h])
elif status == (False, True, True):
clause.append(Coe == self.B[h])
elif status == (True, False, True):
clause.append(Coe < self.B[h])
elif status == (True, True, False):
clause.append(Coe > self.B[h])
elif status == (True, True, True):
clause.append(False)
for m in range(self.m):
status = (self.T[k][m], self.Nt[k][m])
                if status == (True, False):  # pick the modular atom
clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] == self.C[m])
elif status == (False, True):
clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] != self.C[m])
elif status == (True, True):
clause.append(False)
formu.append(And(*clause))
# print("simplify(Or(*formu))=\n",simplify(Or(*formu)))
return simplify(Or(*formu))
def refine_modu(self, coe, e, b, res, tmp, last=0):
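        # Recursively enumerate per-coefficient residue assignments (mod e) whose
        # weighted sum is congruent to b; complete assignments are appended to res.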
if len(coe) == 1:
if coe[0] == 0:
if last % e == b:
tmp.append(0)
else:
return
for i in range(e):
if (i + last) % e == b:
tmp.append(i)
break
res.append(list(tmp))
tmp.pop()
elif coe[0] == 0:
tmp.append(0)
self.refine_modu(coe[1:], e, b, res, tmp, last)
tmp.pop()
else:
for i in range(e):
tmp.append(i)
self.refine_modu(coe[1:], e, b, res, tmp, last + i)
tmp.pop()
def build_formula(self, coe, V, e, C):
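        # Conjunction asserting coe[i] * v == C[i] (mod e) for every variable v in V.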
expr = And(*[(coe[i] * v) % e == C[i] for i, v in enumerate(V)])
return simplify(expr)
def refine_model(self):
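        # Expand each atom of the solved formula into the exclusive alternatives it
        # covers (e.g. <= splits into < and ==; a negated congruence splits into
        # every other residue), returning one list of alternatives per atom.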
formu_arr = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))
status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])
if status == (False, False, True):
clause.append([Coe < self.B[h], Coe == self.B[h]])
elif status == (False, True, False):
clause.append([Coe > self.B[h], Coe == self.B[h]])
elif status == (True, False, False):
clause.append([Coe < self.B[h], Coe > self.B[h]])
elif status == (False, True, True):
clause.append([Coe == self.B[h]])
elif status == (True, False, True):
clause.append([Coe < self.B[h]])
elif status == (True, True, False):
clause.append([Coe > self.B[h]])
elif status == (True, True, True):
clause.append([False])
for m in range(self.m):
status = (self.T[k][m], self.Nt[k][m])
# Com = combine(self.M[m][j] * self.vi[j] for j in range(self.n))
if status == (True, False):
# clause.append([Com % self.E[m] == self.C[m]])
mod_res = []
self.refine_modu(self.M[m], self.E[m], self.C[m], mod_res, [])
for C in mod_res:
clause.append([self.build_formula(self.M[m], self.vi, self.E[m], C)])
elif status == (False, True):
mod_clause = []
for i in range(self.E[m]):
if i != self.C[m]:
# mod_clause.append(Com % self.E[m] == i)
mod_res = []
self.refine_modu(self.M[m], self.E[m], i, mod_res, [])
for C in mod_res:
mod_clause.append(self.build_formula(self.M[m], self.vi, self.E[m], C))
clause.append(mod_clause)
elif status == (True, True):
clause.append([False])
formu_arr.append(clause)
return formu_arr
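# EquTemplate fits an affine expression sum_i V[i]*v_i + B to sample vectors of the
# form (inputs..., target) by solving the resulting linear constraints with z3.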
class EquTemplate:
def __init__(self, n):
self.vi = [Int('v' + str(i)) for i in range(n)]
self.b = Int('b')
self.s = Solver()
def add(self, vector):
vi, target = vector[:-1], vector[-1]
expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))) + self.b == target
self.s.add(expr)
def check(self):
return self.s.check()
def solve_model(self):
model = self.s.model()
V = [model[v].as_long() if model[v] is not None else 0 for v in self.vi]
B = model[self.b].as_long() if model[self.b] is not None else 0
expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B
return simplify(expr)
if __name__ == '__main__':
# smt = FormulaTemplate([Int('v1'), Int('v2')], 4, 3, 2)
# smt.add([1, 2], True)
# smt.add([2, 3], False)
# print(smt.s)
# print(smt.check())
#
# arr = smt.refine_model()
# for a in arr:
# print(a)
#
# formu = smt.formula_model()
# print(formu)
# print('-' * 50)
# print(simplify(formu))
# print('-' * 50)
smt = EquTemplate(2)
smt.add([0, 1, 1])
smt.add([1, 2, 1])
smt.add([3, 6, 3])
if smt.check() == sat:
print(smt.solve_model()) # 1*v0 + 2*v1 + 1
else:
print(unsat)
|
normal
|
{
"blob_id": "81fce5314a7611de11648e412151112e29271871",
"index": 4626,
"step-1": "<mask token>\n\n\nclass FormulaTemplate:\n\n def __init__(self, vi, w, k, h, m, timeout=3000000):\n self.k = k\n self.h = h\n self.m = m\n self.w = w\n self.vi = vi\n n = len(vi)\n self.n = n\n self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in\n range(h)]\n self.bi = [Int('b' + str(i)) for i in range(h)]\n self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in\n range(m)]\n self.ei = [Int('e' + str(i)) for i in range(m)]\n self.ci = [Int('c' + str(i)) for i in range(m)]\n self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.s = Solver()\n for i in range(h):\n self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))\n for j in range(i + 1, h):\n self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in\n range(n)]))\n for i in range(m):\n self.s.add(Or(*[(am > 0) for am in self.amij[i]]))\n self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])\n self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in\n range(m)])\n self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])\n for i in range(k):\n for j in range(i + 1, k):\n all_true = [And(self.heij[i][w], self.hgeij[i][w], self.\n hleij[i][w]) for w in range(h)]\n all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in\n range(m)])\n struct_const = [Or(self.heij[i][w] != self.heij[j][w], self\n .hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=\n self.hleij[j][w]) for w in range(h)]\n struct_const.extend([Or(self.tij[i][w] != self.tij[j][w], \n self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])\n self.s.add(Or(*struct_const, *all_true))\n self.s.set('timeout', timeout)\n <mask token>\n <mask token>\n\n def W_size(m):\n return m + 2\n <mask token>\n <mask token>\n\n def formula_model(self, *val):\n if len(val) == 0:\n val = self.vi\n formu = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * val[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append(Coe <= self.B[h])\n elif status == (False, True, False):\n clause.append(Coe >= self.B[h])\n elif status == (True, False, False):\n clause.append(Coe != self.B[h])\n elif status == (False, True, True):\n clause.append(Coe == self.B[h])\n elif status == (True, False, True):\n clause.append(Coe < self.B[h])\n elif status == (True, True, False):\n clause.append(Coe > self.B[h])\n elif status == (True, True, True):\n clause.append(False)\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] == self.C[m])\n elif status == (False, True):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] != self.C[m])\n elif status == (True, True):\n clause.append(False)\n formu.append(And(*clause))\n return simplify(Or(*formu))\n\n def refine_modu(self, coe, e, b, res, tmp, last=0):\n if len(coe) == 1:\n if coe[0] == 0:\n if last % e == b:\n tmp.append(0)\n else:\n return\n for i in range(e):\n if (i + last) % e == b:\n tmp.append(i)\n break\n res.append(list(tmp))\n tmp.pop()\n 
elif coe[0] == 0:\n tmp.append(0)\n self.refine_modu(coe[1:], e, b, res, tmp, last)\n tmp.pop()\n else:\n for i in range(e):\n tmp.append(i)\n self.refine_modu(coe[1:], e, b, res, tmp, last + i)\n tmp.pop()\n\n def build_formula(self, coe, V, e, C):\n expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])\n return simplify(expr)\n <mask token>\n\n\nclass EquTemplate:\n\n def __init__(self, n):\n self.vi = [Int('v' + str(i)) for i in range(n)]\n self.b = Int('b')\n self.s = Solver()\n\n def add(self, vector):\n vi, target = vector[:-1], vector[-1]\n expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))\n ) + self.b == target\n self.s.add(expr)\n\n def check(self):\n return self.s.check()\n\n def solve_model(self):\n model = self.s.model()\n V = [(model[v].as_long() if model[v] is not None else 0) for v in\n self.vi]\n B = model[self.b].as_long() if model[self.b] is not None else 0\n expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B\n return simplify(expr)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FormulaTemplate:\n\n def __init__(self, vi, w, k, h, m, timeout=3000000):\n self.k = k\n self.h = h\n self.m = m\n self.w = w\n self.vi = vi\n n = len(vi)\n self.n = n\n self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in\n range(h)]\n self.bi = [Int('b' + str(i)) for i in range(h)]\n self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in\n range(m)]\n self.ei = [Int('e' + str(i)) for i in range(m)]\n self.ci = [Int('c' + str(i)) for i in range(m)]\n self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.s = Solver()\n for i in range(h):\n self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))\n for j in range(i + 1, h):\n self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in\n range(n)]))\n for i in range(m):\n self.s.add(Or(*[(am > 0) for am in self.amij[i]]))\n self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])\n self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in\n range(m)])\n self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])\n for i in range(k):\n for j in range(i + 1, k):\n all_true = [And(self.heij[i][w], self.hgeij[i][w], self.\n hleij[i][w]) for w in range(h)]\n all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in\n range(m)])\n struct_const = [Or(self.heij[i][w] != self.heij[j][w], self\n .hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=\n self.hleij[j][w]) for w in range(h)]\n struct_const.extend([Or(self.tij[i][w] != self.tij[j][w], \n self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])\n self.s.add(Or(*struct_const, *all_true))\n self.s.set('timeout', timeout)\n\n def add(self, example, label):\n self.s.add(self.encoding(example, label))\n\n def check(self):\n check = self.s.check()\n if check == sat:\n self.solve_model()\n return check\n\n def W_size(m):\n return m + 2\n <mask token>\n <mask token>\n\n def formula_model(self, *val):\n if len(val) == 0:\n val = self.vi\n formu = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * val[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append(Coe <= self.B[h])\n elif status == (False, True, False):\n clause.append(Coe >= self.B[h])\n elif status == (True, False, False):\n clause.append(Coe != self.B[h])\n elif status == (False, True, True):\n clause.append(Coe == self.B[h])\n elif status == (True, False, True):\n clause.append(Coe < self.B[h])\n elif status == (True, True, False):\n clause.append(Coe > self.B[h])\n elif status == (True, True, True):\n clause.append(False)\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] == self.C[m])\n elif status == (False, True):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] != self.C[m])\n elif status == (True, True):\n clause.append(False)\n formu.append(And(*clause))\n return simplify(Or(*formu))\n\n def refine_modu(self, coe, e, b, res, tmp, last=0):\n if len(coe) == 1:\n if coe[0] == 0:\n if last % e 
== b:\n tmp.append(0)\n else:\n return\n for i in range(e):\n if (i + last) % e == b:\n tmp.append(i)\n break\n res.append(list(tmp))\n tmp.pop()\n elif coe[0] == 0:\n tmp.append(0)\n self.refine_modu(coe[1:], e, b, res, tmp, last)\n tmp.pop()\n else:\n for i in range(e):\n tmp.append(i)\n self.refine_modu(coe[1:], e, b, res, tmp, last + i)\n tmp.pop()\n\n def build_formula(self, coe, V, e, C):\n expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])\n return simplify(expr)\n\n def refine_model(self):\n formu_arr = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append([Coe < self.B[h], Coe == self.B[h]])\n elif status == (False, True, False):\n clause.append([Coe > self.B[h], Coe == self.B[h]])\n elif status == (True, False, False):\n clause.append([Coe < self.B[h], Coe > self.B[h]])\n elif status == (False, True, True):\n clause.append([Coe == self.B[h]])\n elif status == (True, False, True):\n clause.append([Coe < self.B[h]])\n elif status == (True, True, False):\n clause.append([Coe > self.B[h]])\n elif status == (True, True, True):\n clause.append([False])\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], self.C[m],\n mod_res, [])\n for C in mod_res:\n clause.append([self.build_formula(self.M[m], self.\n vi, self.E[m], C)])\n elif status == (False, True):\n mod_clause = []\n for i in range(self.E[m]):\n if i != self.C[m]:\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], i,\n mod_res, [])\n for C in mod_res:\n mod_clause.append(self.build_formula(self.M\n [m], self.vi, self.E[m], C))\n clause.append(mod_clause)\n elif status == (True, True):\n clause.append([False])\n formu_arr.append(clause)\n return formu_arr\n\n\nclass EquTemplate:\n\n def __init__(self, n):\n self.vi = [Int('v' + str(i)) for i in range(n)]\n self.b = Int('b')\n self.s = Solver()\n\n def add(self, vector):\n vi, target = vector[:-1], vector[-1]\n expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))\n ) + self.b == target\n self.s.add(expr)\n\n def check(self):\n return self.s.check()\n\n def solve_model(self):\n model = self.s.model()\n V = [(model[v].as_long() if model[v] is not None else 0) for v in\n self.vi]\n B = model[self.b].as_long() if model[self.b] is not None else 0\n expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B\n return simplify(expr)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FormulaTemplate:\n\n def __init__(self, vi, w, k, h, m, timeout=3000000):\n self.k = k\n self.h = h\n self.m = m\n self.w = w\n self.vi = vi\n n = len(vi)\n self.n = n\n self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in\n range(h)]\n self.bi = [Int('b' + str(i)) for i in range(h)]\n self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in\n range(m)]\n self.ei = [Int('e' + str(i)) for i in range(m)]\n self.ci = [Int('c' + str(i)) for i in range(m)]\n self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.s = Solver()\n for i in range(h):\n self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))\n for j in range(i + 1, h):\n self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in\n range(n)]))\n for i in range(m):\n self.s.add(Or(*[(am > 0) for am in self.amij[i]]))\n self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])\n self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in\n range(m)])\n self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])\n for i in range(k):\n for j in range(i + 1, k):\n all_true = [And(self.heij[i][w], self.hgeij[i][w], self.\n hleij[i][w]) for w in range(h)]\n all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in\n range(m)])\n struct_const = [Or(self.heij[i][w] != self.heij[j][w], self\n .hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=\n self.hleij[j][w]) for w in range(h)]\n struct_const.extend([Or(self.tij[i][w] != self.tij[j][w], \n self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])\n self.s.add(Or(*struct_const, *all_true))\n self.s.set('timeout', timeout)\n\n def add(self, example, label):\n self.s.add(self.encoding(example, label))\n\n def check(self):\n check = self.s.check()\n if check == sat:\n self.solve_model()\n return check\n\n def W_size(m):\n return m + 2\n\n def encoding(self, example, label):\n Equ = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)\n ) != self.bi[i]) for i in range(self.h)]\n Ge = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) >=\n self.bi[i]) for i in range(self.h)]\n Le = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) <=\n self.bi[i]) for i in range(self.h)]\n Me = [(combine(example[j] * self.amij[i][j] for j in range(self.n)) %\n self.ei[i] == self.ci[i]) for i in range(self.m)]\n Tk = []\n for k in range(self.k):\n clause = []\n clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(\n self.m)])\n clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in\n range(self.m)])\n Tk.append(And(*clause))\n return Or(*Tk) == label\n <mask token>\n\n def formula_model(self, *val):\n if len(val) == 0:\n val = self.vi\n formu = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * val[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append(Coe 
<= self.B[h])\n elif status == (False, True, False):\n clause.append(Coe >= self.B[h])\n elif status == (True, False, False):\n clause.append(Coe != self.B[h])\n elif status == (False, True, True):\n clause.append(Coe == self.B[h])\n elif status == (True, False, True):\n clause.append(Coe < self.B[h])\n elif status == (True, True, False):\n clause.append(Coe > self.B[h])\n elif status == (True, True, True):\n clause.append(False)\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] == self.C[m])\n elif status == (False, True):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] != self.C[m])\n elif status == (True, True):\n clause.append(False)\n formu.append(And(*clause))\n return simplify(Or(*formu))\n\n def refine_modu(self, coe, e, b, res, tmp, last=0):\n if len(coe) == 1:\n if coe[0] == 0:\n if last % e == b:\n tmp.append(0)\n else:\n return\n for i in range(e):\n if (i + last) % e == b:\n tmp.append(i)\n break\n res.append(list(tmp))\n tmp.pop()\n elif coe[0] == 0:\n tmp.append(0)\n self.refine_modu(coe[1:], e, b, res, tmp, last)\n tmp.pop()\n else:\n for i in range(e):\n tmp.append(i)\n self.refine_modu(coe[1:], e, b, res, tmp, last + i)\n tmp.pop()\n\n def build_formula(self, coe, V, e, C):\n expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])\n return simplify(expr)\n\n def refine_model(self):\n formu_arr = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append([Coe < self.B[h], Coe == self.B[h]])\n elif status == (False, True, False):\n clause.append([Coe > self.B[h], Coe == self.B[h]])\n elif status == (True, False, False):\n clause.append([Coe < self.B[h], Coe > self.B[h]])\n elif status == (False, True, True):\n clause.append([Coe == self.B[h]])\n elif status == (True, False, True):\n clause.append([Coe < self.B[h]])\n elif status == (True, True, False):\n clause.append([Coe > self.B[h]])\n elif status == (True, True, True):\n clause.append([False])\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], self.C[m],\n mod_res, [])\n for C in mod_res:\n clause.append([self.build_formula(self.M[m], self.\n vi, self.E[m], C)])\n elif status == (False, True):\n mod_clause = []\n for i in range(self.E[m]):\n if i != self.C[m]:\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], i,\n mod_res, [])\n for C in mod_res:\n mod_clause.append(self.build_formula(self.M\n [m], self.vi, self.E[m], C))\n clause.append(mod_clause)\n elif status == (True, True):\n clause.append([False])\n formu_arr.append(clause)\n return formu_arr\n\n\nclass EquTemplate:\n\n def __init__(self, n):\n self.vi = [Int('v' + str(i)) for i in range(n)]\n self.b = Int('b')\n self.s = Solver()\n\n def add(self, vector):\n vi, target = vector[:-1], vector[-1]\n expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))\n ) + self.b == target\n self.s.add(expr)\n\n def check(self):\n return self.s.check()\n\n def solve_model(self):\n model = self.s.model()\n V = [(model[v].as_long() if model[v] is not None else 0) for v in\n self.vi]\n B = model[self.b].as_long() if model[self.b] is not None else 0\n expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + 
B\n return simplify(expr)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass FormulaTemplate:\n\n def __init__(self, vi, w, k, h, m, timeout=3000000):\n self.k = k\n self.h = h\n self.m = m\n self.w = w\n self.vi = vi\n n = len(vi)\n self.n = n\n self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in\n range(h)]\n self.bi = [Int('b' + str(i)) for i in range(h)]\n self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in\n range(m)]\n self.ei = [Int('e' + str(i)) for i in range(m)]\n self.ci = [Int('c' + str(i)) for i in range(m)]\n self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.s = Solver()\n for i in range(h):\n self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))\n for j in range(i + 1, h):\n self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in\n range(n)]))\n for i in range(m):\n self.s.add(Or(*[(am > 0) for am in self.amij[i]]))\n self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])\n self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in\n range(m)])\n self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])\n for i in range(k):\n for j in range(i + 1, k):\n all_true = [And(self.heij[i][w], self.hgeij[i][w], self.\n hleij[i][w]) for w in range(h)]\n all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in\n range(m)])\n struct_const = [Or(self.heij[i][w] != self.heij[j][w], self\n .hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=\n self.hleij[j][w]) for w in range(h)]\n struct_const.extend([Or(self.tij[i][w] != self.tij[j][w], \n self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])\n self.s.add(Or(*struct_const, *all_true))\n self.s.set('timeout', timeout)\n\n def add(self, example, label):\n self.s.add(self.encoding(example, label))\n\n def check(self):\n check = self.s.check()\n if check == sat:\n self.solve_model()\n return check\n\n def W_size(m):\n return m + 2\n\n def encoding(self, example, label):\n Equ = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)\n ) != self.bi[i]) for i in range(self.h)]\n Ge = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) >=\n self.bi[i]) for i in range(self.h)]\n Le = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) <=\n self.bi[i]) for i in range(self.h)]\n Me = [(combine(example[j] * self.amij[i][j] for j in range(self.n)) %\n self.ei[i] == self.ci[i]) for i in range(self.m)]\n Tk = []\n for k in range(self.k):\n clause = []\n clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(\n self.m)])\n clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in\n range(self.m)])\n Tk.append(And(*clause))\n return Or(*Tk) == label\n\n def solve_model(self):\n print('w', self.w)\n model = self.s.model()\n self.M = [[(model[self.amij[i][j]].as_long() if model[self.amij[i][\n j]] is not None else 0) for j in range(self.n)] for i in range(\n self.m)]\n for i in range(self.m):\n self.ei[i] = FormulaTemplate.W_size(self.w)\n self.E = [self.ei[i] for i in range(self.m)]\n 
print('E = \\n', self.E)\n self.C = [(model[self.ci[i]].as_long() if model[self.ci[i]] is not\n None else 0) for i in range(self.m)]\n self.A = [[(model[self.aeij[i][j]].as_long() if model[self.aeij[i][\n j]] is not None else 0) for j in range(self.n)] for i in range(\n self.h)]\n self.B = [(model[self.bi[i]].as_long() if model[self.bi[i]] is not\n None else 0) for i in range(self.h)]\n self.He = [[(bool(model[self.heij[i][j]]) if model[self.heij[i][j]]\n is not None else False) for j in range(self.h)] for i in range\n (self.k)]\n self.Hge = [[(bool(model[self.hgeij[i][j]]) if model[self.hgeij[i][\n j]] is not None else False) for j in range(self.h)] for i in\n range(self.k)]\n self.Hle = [[(bool(model[self.hleij[i][j]]) if model[self.hleij[i][\n j]] is not None else False) for j in range(self.h)] for i in\n range(self.k)]\n self.T = [[(bool(model[self.tij[i][j]]) if model[self.tij[i][j]] is not\n None else False) for j in range(self.m)] for i in range(self.k)]\n self.Nt = [[(bool(model[self.ntij[i][j]]) if model[self.ntij[i][j]]\n is not None else False) for j in range(self.m)] for i in range\n (self.k)]\n for i in range(self.m):\n flag = True\n pix = -1\n for am in self.M[i]:\n if pix == -1:\n if am != 0:\n pix = am\n elif am != 0 and am != pix:\n flag = False\n break\n if flag:\n if self.C[i] == 0:\n if not co_prime(pix, self.E[i]):\n self.E[i] /= gcd(pix, self.E[i])\n for j in range(self.n):\n self.M[i][j] = 1\n else:\n div = gcd(pix, self.E[i], self.C[i])\n self.E[i] /= div\n self.C[i] /= div\n pix /= div\n for j in range(self.n):\n self.M[i][j] /= div\n div = gcd(int(pix), int(self.C[i]))\n for j in range(self.n):\n self.M[i][j] /= div\n self.C[i] /= div\n for i in range(self.h):\n divisior = gcd(*self.A[i], self.B[i])\n self.B[i] /= divisior\n for j in range(self.n):\n self.A[i][j] /= divisior\n for i in range(len(self.E)):\n self.E[i] = int(self.E[i])\n\n def formula_model(self, *val):\n if len(val) == 0:\n val = self.vi\n formu = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * val[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append(Coe <= self.B[h])\n elif status == (False, True, False):\n clause.append(Coe >= self.B[h])\n elif status == (True, False, False):\n clause.append(Coe != self.B[h])\n elif status == (False, True, True):\n clause.append(Coe == self.B[h])\n elif status == (True, False, True):\n clause.append(Coe < self.B[h])\n elif status == (True, True, False):\n clause.append(Coe > self.B[h])\n elif status == (True, True, True):\n clause.append(False)\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] == self.C[m])\n elif status == (False, True):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] != self.C[m])\n elif status == (True, True):\n clause.append(False)\n formu.append(And(*clause))\n return simplify(Or(*formu))\n\n def refine_modu(self, coe, e, b, res, tmp, last=0):\n if len(coe) == 1:\n if coe[0] == 0:\n if last % e == b:\n tmp.append(0)\n else:\n return\n for i in range(e):\n if (i + last) % e == b:\n tmp.append(i)\n break\n res.append(list(tmp))\n tmp.pop()\n elif coe[0] == 0:\n tmp.append(0)\n self.refine_modu(coe[1:], e, b, res, tmp, last)\n tmp.pop()\n else:\n for i in range(e):\n tmp.append(i)\n self.refine_modu(coe[1:], e, b, res, tmp, last + i)\n tmp.pop()\n\n 
def build_formula(self, coe, V, e, C):\n expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])\n return simplify(expr)\n\n def refine_model(self):\n formu_arr = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append([Coe < self.B[h], Coe == self.B[h]])\n elif status == (False, True, False):\n clause.append([Coe > self.B[h], Coe == self.B[h]])\n elif status == (True, False, False):\n clause.append([Coe < self.B[h], Coe > self.B[h]])\n elif status == (False, True, True):\n clause.append([Coe == self.B[h]])\n elif status == (True, False, True):\n clause.append([Coe < self.B[h]])\n elif status == (True, True, False):\n clause.append([Coe > self.B[h]])\n elif status == (True, True, True):\n clause.append([False])\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], self.C[m],\n mod_res, [])\n for C in mod_res:\n clause.append([self.build_formula(self.M[m], self.\n vi, self.E[m], C)])\n elif status == (False, True):\n mod_clause = []\n for i in range(self.E[m]):\n if i != self.C[m]:\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], i,\n mod_res, [])\n for C in mod_res:\n mod_clause.append(self.build_formula(self.M\n [m], self.vi, self.E[m], C))\n clause.append(mod_clause)\n elif status == (True, True):\n clause.append([False])\n formu_arr.append(clause)\n return formu_arr\n\n\nclass EquTemplate:\n\n def __init__(self, n):\n self.vi = [Int('v' + str(i)) for i in range(n)]\n self.b = Int('b')\n self.s = Solver()\n\n def add(self, vector):\n vi, target = vector[:-1], vector[-1]\n expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))\n ) + self.b == target\n self.s.add(expr)\n\n def check(self):\n return self.s.check()\n\n def solve_model(self):\n model = self.s.model()\n V = [(model[v].as_long() if model[v] is not None else 0) for v in\n self.vi]\n B = model[self.b].as_long() if model[self.b] is not None else 0\n expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B\n return simplify(expr)\n\n\n<mask token>\n",
"step-5": "import random\n\nfrom z3 import *\n\n\ndef combine(iter):\n tmp_list = [i for i in iter]\n res = tmp_list[0]\n for i in tmp_list[1:]:\n res += i\n return res\n\n\ndef co_prime(num1, num2):\n for num in range(2, min(num1, num2) + 1):\n if num1 % num == 0 and num2 % num == 0:\n return False\n return True\n\n\ndef gcd(*nums):\n min_num = 1 << 32\n for num in nums:\n if num != 0:\n min_num = min(min_num, abs(num))\n for i in range(min_num, 1, -1):\n flag = True\n for num in nums:\n if num % i != 0:\n flag = False\n break\n if flag:\n return i\n return 1\n\n\nclass FormulaTemplate:\n def __init__(self, vi ,w ,k, h, m ,timeout=3000000): ####加了w\n self.k = k # amount of clause 多少个子句\n self.h = h # number of inequality 第一类不等式数量上限\n self.m = m # number of mode number 第二类不等式数量上限\n\n self.w = w\n\n self.vi = vi\n n = len(vi)\n self.n = n\n self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in range(h)]\n self.bi = [Int('b' + str(i)) for i in range(h)]\n self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in range(m)]\n self.ei = [Int('e' + str(i)) for i in range(m)] ##改成定值 , 写一个函数,从2开始一个个试????(还没实现)\n self.ci = [Int('c' + str(i)) for i in range(m)]\n self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for j in range(k)]\n self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for j in range(k)]\n self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for j in range(k)]\n self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in range(k)]\n self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in range(k)]\n self.s = Solver()\n\n\n\n\n for i in range(h):\n # 不等式系数ae_ij不能全部为0\n self.s.add(Or(*[a > 0 for a in self.aeij[i]]))\n for j in range(i + 1, h):\n self.s.add(Or(*[self.aeij[i][w] != self.aeij[j][w] for w in range(n)]))\n for i in range(m):\n # 模等式的系数am_ij不能全部小于等于0\n self.s.add(Or(*[am > 0 for am in self.amij[i]]))\n # 模等式的系数am_ij不能大于模e\n self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])\n # for j in range(i + 1, m):\n # self.s.add(Or(self.ei[i] != self.ei[j],\n # *[self.amij[i][w] != self.amij[j][w] for w in range(n)]))\n # 余数c_i必须小于模e\n self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in range(m)])\n # 模必须大于等于2,并且小于一定范围\n self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])\n for i in range(k):\n # 判断条件一定有一个是False,避免逻辑出现False\n for j in range(i + 1, k):\n all_true = [And(self.heij[i][w], self.hgeij[i][w], self.hleij[i][w]) for w in range(h)]\n all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in range(m)])\n struct_const = [Or(self.heij[i][w] != self.heij[j][w],\n self.hgeij[i][w] != self.hgeij[j][w],\n self.hleij[i][w] != self.hleij[j][w]) for w in range(h)]\n struct_const.extend([Or(self.tij[i][w] != self.tij[j][w],\n self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])\n\n self.s.add(Or(*struct_const, *all_true))\n\n self.s.set(\"timeout\", timeout)\n\n def add(self, example, label):\n self.s.add(self.encoding(example, label))\n\n def check(self):\n check = self.s.check()\n if check == sat:\n self.solve_model()\n return check\n\n def W_size(m):\n return m+2\n\n\n\n def encoding(self, example, label):\n Equ = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) != self.bi[i] for i in range(self.h)]\n Ge = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) >= self.bi[i] for i in range(self.h)]\n Le = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) <= self.bi[i] for i in range(self.h)]\n Me = [combine(example[j] * 
self.amij[i][j] for j in range(self.n)) % self.ei[i] == self.ci[i] for i in\n range(self.m)]\n Tk = []\n for k in range(self.k):\n clause = []\n clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(self.h)])\n clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(self.h)])\n clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(self.h)])\n clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(self.m)])\n clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in range(self.m)])\n Tk.append(And(*clause))\n # print(\"Or(*Tk) , label=\\n\",Or(*Tk),label)\n return Or(*Tk) == label\n\n def solve_model(self): #求出取值 ####加了w\n print(\"w\", self.w)\n #W_size = [2,3,4,5,6,7,8,9]\n model = self.s.model()\n self.M = [[model[self.amij[i][j]].as_long() if model[self.amij[i][j]] is not None else 0\n for j in range(self.n)]\n for i in range(self.m)]\n ##用z3求解e(此处要改)\n # self.E = [model[self.ei[i]].as_long() if model[self.ei[i]] is not None else 1 for i in range(self.m)]\n # print(\"E= \\n\",self.E)\n ####改动\n for i in range(self.m):\n self.ei[i] = FormulaTemplate.W_size(self.w)\n self.E = [self.ei[i] for i in range(self.m)]\n print(\"E = \\n\",self.E)\n ####\n self.C = [model[self.ci[i]].as_long() if model[self.ci[i]] is not None else 0 for i in range(self.m)]\n self.A = [[model[self.aeij[i][j]].as_long() if model[self.aeij[i][j]] is not None else 0\n for j in range(self.n)]\n for i in range(self.h)]\n self.B = [model[self.bi[i]].as_long() if model[self.bi[i]] is not None else 0 for i in range(self.h)]\n self.He = [\n [bool(model[self.heij[i][j]]) if model[self.heij[i][j]] is not None else False\n for j in range(self.h)]\n for i in range(self.k)\n ]\n self.Hge = [\n [bool(model[self.hgeij[i][j]]) if model[self.hgeij[i][j]] is not None else False\n for j in range(self.h)]\n for i in range(self.k)\n ]\n self.Hle = [\n [bool(model[self.hleij[i][j]]) if model[self.hleij[i][j]] is not None else False\n for j in range(self.h)]\n for i in range(self.k)\n ]\n self.T = [\n [bool(model[self.tij[i][j]]) if model[self.tij[i][j]] is not None else False\n for j in range(self.m)]\n for i in range(self.k)\n ]\n self.Nt = [\n [bool(model[self.ntij[i][j]]) if model[self.ntij[i][j]] is not None else False\n for j in range(self.m)]\n for i in range(self.k)\n ]\n for i in range(self.m):\n flag = True # 判断是否全部系数都相等\n pix = -1\n for am in self.M[i]:\n if pix == -1:\n if am != 0:\n pix = am\n elif am != 0 and am != pix:\n flag = False\n break\n if flag: # 系数全部相同\n if self.C[i] == 0:\n # if co_prime(pix, self.E[i]):\n # for j in range(self.n):\n # if self.M[i][j] != 0:\n # self.M[i][j] = 1\n # else:\n # div = gcd(pix, self.E[i])\n # self.E[i] /= div\n # for j in range(self.n):\n # self.M[i][j] /= div\n if not co_prime(pix, self.E[i]):\n self.E[i] /= gcd(pix, self.E[i])\n for j in range(self.n):\n self.M[i][j] = 1\n else:\n div = gcd(pix, self.E[i], self.C[i])\n self.E[i] /= div\n self.C[i] /= div\n pix /= div\n for j in range(self.n):\n self.M[i][j] /= div\n div = gcd(int(pix), int(self.C[i]))\n for j in range(self.n):\n self.M[i][j] /= div\n self.C[i] /= div\n for i in range(self.h):\n divisior = gcd(*self.A[i], self.B[i])\n self.B[i] /= divisior\n for j in range(self.n):\n self.A[i][j] /= divisior\n for i in range(len(self.E)):\n self.E[i] = int(self.E[i])\n\n def formula_model(self, *val): # 得到一个公式模型 kd:代入变量求得变量,代入数值就是求得一个值\n if len(val) == 0:\n val = self.vi\n formu = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * val[j] for j in 
range(self.n))\n status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])\n if status == (False, False, True): #选择大于小于等于\n clause.append(Coe <= self.B[h])\n elif status == (False, True, False):\n clause.append(Coe >= self.B[h])\n elif status == (True, False, False):\n clause.append(Coe != self.B[h])\n elif status == (False, True, True):\n clause.append(Coe == self.B[h])\n elif status == (True, False, True):\n clause.append(Coe < self.B[h])\n elif status == (True, True, False):\n clause.append(Coe > self.B[h])\n elif status == (True, True, True):\n clause.append(False)\n for m in range(self.m):\n status = (self.T[k][m], self.Nt[k][m])\n if status == (True, False): #选择取模\n clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] == self.C[m])\n elif status == (False, True):\n clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] != self.C[m])\n elif status == (True, True):\n clause.append(False)\n formu.append(And(*clause))\n # print(\"simplify(Or(*formu))=\\n\",simplify(Or(*formu)))\n return simplify(Or(*formu))\n\n def refine_modu(self, coe, e, b, res, tmp, last=0):\n if len(coe) == 1:\n if coe[0] == 0:\n if last % e == b:\n tmp.append(0)\n else:\n return\n for i in range(e):\n if (i + last) % e == b:\n tmp.append(i)\n break\n res.append(list(tmp))\n tmp.pop()\n elif coe[0] == 0:\n tmp.append(0)\n self.refine_modu(coe[1:], e, b, res, tmp, last)\n tmp.pop()\n else:\n for i in range(e):\n tmp.append(i)\n self.refine_modu(coe[1:], e, b, res, tmp, last + i)\n tmp.pop()\n\n def build_formula(self, coe, V, e, C):\n expr = And(*[(coe[i] * v) % e == C[i] for i, v in enumerate(V)])\n return simplify(expr)\n\n def refine_model(self):\n formu_arr = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))\n status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])\n if status == (False, False, True):\n clause.append([Coe < self.B[h], Coe == self.B[h]])\n elif status == (False, True, False):\n clause.append([Coe > self.B[h], Coe == self.B[h]])\n elif status == (True, False, False):\n clause.append([Coe < self.B[h], Coe > self.B[h]])\n elif status == (False, True, True):\n clause.append([Coe == self.B[h]])\n elif status == (True, False, True):\n clause.append([Coe < self.B[h]])\n elif status == (True, True, False):\n clause.append([Coe > self.B[h]])\n elif status == (True, True, True):\n clause.append([False])\n for m in range(self.m):\n status = (self.T[k][m], self.Nt[k][m])\n # Com = combine(self.M[m][j] * self.vi[j] for j in range(self.n))\n if status == (True, False):\n # clause.append([Com % self.E[m] == self.C[m]])\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], self.C[m], mod_res, [])\n for C in mod_res:\n clause.append([self.build_formula(self.M[m], self.vi, self.E[m], C)])\n elif status == (False, True):\n mod_clause = []\n for i in range(self.E[m]):\n if i != self.C[m]:\n # mod_clause.append(Com % self.E[m] == i)\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], i, mod_res, [])\n for C in mod_res:\n mod_clause.append(self.build_formula(self.M[m], self.vi, self.E[m], C))\n clause.append(mod_clause)\n elif status == (True, True):\n clause.append([False])\n formu_arr.append(clause)\n return formu_arr\n\n\nclass EquTemplate:\n def __init__(self, n):\n self.vi = [Int('v' + str(i)) for i in range(n)]\n self.b = Int('b')\n self.s = Solver()\n\n def add(self, vector):\n vi, target = vector[:-1], vector[-1]\n expr = combine(vi[i] * self.vi[i] for i in 
range(len(self.vi))) + self.b == target\n self.s.add(expr)\n\n def check(self):\n return self.s.check()\n\n def solve_model(self):\n model = self.s.model()\n V = [model[v].as_long() if model[v] is not None else 0 for v in self.vi]\n B = model[self.b].as_long() if model[self.b] is not None else 0\n expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B\n return simplify(expr)\n\n\nif __name__ == '__main__':\n # smt = FormulaTemplate([Int('v1'), Int('v2')], 4, 3, 2)\n # smt.add([1, 2], True)\n # smt.add([2, 3], False)\n # print(smt.s)\n # print(smt.check())\n #\n # arr = smt.refine_model()\n # for a in arr:\n # print(a)\n #\n # formu = smt.formula_model()\n # print(formu)\n # print('-' * 50)\n # print(simplify(formu))\n # print('-' * 50)\n\n smt = EquTemplate(2)\n smt.add([0, 1, 1])\n smt.add([1, 2, 1])\n smt.add([3, 6, 3])\n if smt.check() == sat:\n print(smt.solve_model()) # 1*v0 + 2*v1 + 1\n else:\n print(unsat)\n\n\n",
"step-ids": [
11,
14,
15,
16,
22
]
}
|
[
11,
14,
15,
16,
22
] |
from django import forms
from django.core import validators
class NameSearch(forms.Form):
name = forms.CharField(label='Search By Name')
|
normal
|
{
"blob_id": "7620ff333422d0354cc41c2a66444c3e8a0c011f",
"index": 1606,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass NameSearch(forms.Form):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass NameSearch(forms.Form):\n name = forms.CharField(label='Search By Name')\n",
"step-4": "from django import forms\nfrom django.core import validators\n\n\nclass NameSearch(forms.Form):\n name = forms.CharField(label='Search By Name')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from config_pos import config
from backbone.resnet50 import ResNet50
from backbone.fpn import FPN
from module.rpn import RPN
from layers.pooler import roi_pooler
from det_oprs.bbox_opr import bbox_transform_inv_opr
from det_oprs.bbox_opr import bbox_transform_inv_opr_v2
from det_oprs.fpn_roi_target import fpn_roi_target
from det_oprs.loss_opr import softmax_loss, smooth_l1_loss
from det_oprs.utils import get_padded_tensor
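# Two-stage Faster R-CNN style detector: ResNet-50 + FPN backbone, RPN proposals,
# and an RCNN head that classifies and regresses per-class boxes.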
class Network(nn.Module):
def __init__(self):
super().__init__()
self.resnet50 = ResNet50(config.backbone_freeze_at, False)
self.FPN = FPN(self.resnet50, 2, 6)
self.RPN = RPN(config.rpn_channel)
self.RCNN = RCNN()
def forward(self, image, im_info, gt_boxes=None):
image = (image - torch.tensor(config.image_mean[None, :, None, None]).type_as(image)) / (
torch.tensor(config.image_std[None, :, None, None]).type_as(image))
image = get_padded_tensor(image, 64)
if self.training:
return self._forward_train(image, im_info, gt_boxes)
else:
return self._forward_test(image, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
fpn_fms = self.FPN(image)
# fpn_fms stride: 64,32,16,8,4, p6->p2
rpn_rois, loss_dict_rpn = self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=1)
loss_dict_rcnn = self.RCNN(fpn_fms, rcnn_rois,
rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.FPN(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox, num_classes = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox.cpu().detach(), num_classes
class RCNN(nn.Module):
def __init__(self):
super().__init__()
# roi head
self.fc1 = nn.Linear(256*7*7, 1024)
self.fc2 = nn.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
nn.init.kaiming_uniform_(l.weight, a=1)
nn.init.constant_(l.bias, 0)
# box predictor
self.pred_cls = nn.Linear(1024, config.num_classes)
self.pred_delta = nn.Linear(1024, config.num_classes * 4)
for l in [self.pred_cls]:
nn.init.normal_(l.weight, std=0.01)
nn.init.constant_(l.bias, 0)
for l in [self.pred_delta]:
nn.init.normal_(l.weight, std=0.001)
nn.init.constant_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# input p2-p5
fpn_fms = fpn_fms[1:][::-1]
stride = [4, 8, 16, 32]
pool_features = roi_pooler(fpn_fms, rcnn_rois, stride, (7, 7), "ROIAlignV2")
flatten_feature = torch.flatten(pool_features, start_dim=1)
flatten_feature = F.relu_(self.fc1(flatten_feature))
flatten_feature = F.relu_(self.fc2(flatten_feature))
pred_cls = self.pred_cls(flatten_feature)
pred_delta = self.pred_delta(flatten_feature)
if self.training:
# loss for regression
labels = labels.long().flatten()
fg_masks = labels > 0
valid_masks = labels >= 0
# multi class
pred_delta = pred_delta.reshape(-1, config.num_classes, 4)
fg_gt_classes = labels[fg_masks]
pred_delta = pred_delta[fg_masks, fg_gt_classes, :]
localization_loss = smooth_l1_loss(
# pred_regression,
pred_delta,
bbox_targets[fg_masks],
config.rcnn_smooth_l1_beta)
# loss for classification
objectness_loss = softmax_loss(pred_cls, labels, num_classes=config.num_classes)
objectness_loss = objectness_loss * valid_masks
normalizer = 1.0 / valid_masks.sum().item()
loss_rcnn_loc = localization_loss.sum() * normalizer
loss_rcnn_cls = objectness_loss.sum() * normalizer
loss_dict = {}
loss_dict['loss_rcnn_loc'] = loss_rcnn_loc
loss_dict['loss_rcnn_cls'] = loss_rcnn_cls
return loss_dict
else:
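            # Inference path: drop the background column, flatten per-class
            # scores and deltas, then decode the deltas back into boxes.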
class_num = pred_cls.shape[-1] - 1
tag = torch.arange(class_num).type_as(pred_cls)+1
tag = tag.repeat(pred_cls.shape[0], 1).reshape(-1,1)
pred_scores = F.softmax(pred_cls, dim=-1)[:, 1:].reshape(-1, 1)
pred_delta = pred_delta[:, 4:].reshape(-1, 4)
base_rois = rcnn_rois[:, 1:5].repeat(1, class_num).reshape(-1, 4)
pred_bbox = restore_bbox(base_rois, pred_delta, True)
pred_bbox = torch.cat([pred_bbox, pred_scores, tag], axis=1)
return pred_bbox, class_num
def restore_bbox(rois, deltas, unnormalize=True):
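    # Undo the training-time target normalization (targets were encoded as
    # (t - mean) / std) before applying the inverse box transform to the RoIs.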
if unnormalize:
std_opr = torch.tensor(config.bbox_normalize_stds[None, :]).type_as(deltas)
mean_opr = torch.tensor(config.bbox_normalize_means[None, :]).type_as(deltas)
deltas = deltas * std_opr
deltas = deltas + mean_opr
pred_bbox = bbox_transform_inv_opr(rois, deltas)
return pred_bbox
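
# Sketch of the smooth-L1 semantics assumed by the loss call above; the real
# det_oprs.loss_opr.smooth_l1_loss may differ (e.g. in reduction or masking).
def _smooth_l1_reference(pred, target, beta):
    abs_diff = (pred - target).abs()
    # quadratic below beta, linear above it; elementwise, summed by the caller
    return torch.where(abs_diff < beta,
                       0.5 * abs_diff ** 2 / beta,
                       abs_diff - 0.5 * beta)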
|
normal
|
{
"blob_id": "6ac13665c2348bf251482f250c0fcc1fc1a8af75",
"index": 4721,
"step-1": "<mask token>\n\n\nclass Network(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.resnet50 = ResNet50(config.backbone_freeze_at, False)\n self.FPN = FPN(self.resnet50, 2, 6)\n self.RPN = RPN(config.rpn_channel)\n self.RCNN = RCNN()\n <mask token>\n <mask token>\n <mask token>\n\n\nclass RCNN(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(256 * 7 * 7, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n for l in [self.fc1, self.fc2]:\n nn.init.kaiming_uniform_(l.weight, a=1)\n nn.init.constant_(l.bias, 0)\n self.pred_cls = nn.Linear(1024, config.num_classes)\n self.pred_delta = nn.Linear(1024, config.num_classes * 4)\n for l in [self.pred_cls]:\n nn.init.normal_(l.weight, std=0.01)\n nn.init.constant_(l.bias, 0)\n for l in [self.pred_delta]:\n nn.init.normal_(l.weight, std=0.001)\n nn.init.constant_(l.bias, 0)\n\n def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):\n fpn_fms = fpn_fms[1:][::-1]\n stride = [4, 8, 16, 32]\n pool_features = roi_pooler(fpn_fms, rcnn_rois, stride, (7, 7),\n 'ROIAlignV2')\n flatten_feature = torch.flatten(pool_features, start_dim=1)\n flatten_feature = F.relu_(self.fc1(flatten_feature))\n flatten_feature = F.relu_(self.fc2(flatten_feature))\n pred_cls = self.pred_cls(flatten_feature)\n pred_delta = self.pred_delta(flatten_feature)\n if self.training:\n labels = labels.long().flatten()\n fg_masks = labels > 0\n valid_masks = labels >= 0\n pred_delta = pred_delta.reshape(-1, config.num_classes, 4)\n fg_gt_classes = labels[fg_masks]\n pred_delta = pred_delta[fg_masks, fg_gt_classes, :]\n localization_loss = smooth_l1_loss(pred_delta, bbox_targets[\n fg_masks], config.rcnn_smooth_l1_beta)\n objectness_loss = softmax_loss(pred_cls, labels, num_classes=\n config.num_classes)\n objectness_loss = objectness_loss * valid_masks\n normalizer = 1.0 / valid_masks.sum().item()\n loss_rcnn_loc = localization_loss.sum() * normalizer\n loss_rcnn_cls = objectness_loss.sum() * normalizer\n loss_dict = {}\n loss_dict['loss_rcnn_loc'] = loss_rcnn_loc\n loss_dict['loss_rcnn_cls'] = loss_rcnn_cls\n return loss_dict\n else:\n class_num = pred_cls.shape[-1] - 1\n tag = torch.arange(class_num).type_as(pred_cls) + 1\n tag = tag.repeat(pred_cls.shape[0], 1).reshape(-1, 1)\n pred_scores = F.softmax(pred_cls, dim=-1)[:, 1:].reshape(-1, 1)\n pred_delta = pred_delta[:, 4:].reshape(-1, 4)\n base_rois = rcnn_rois[:, 1:5].repeat(1, class_num).reshape(-1, 4)\n pred_bbox = restore_bbox(base_rois, pred_delta, True)\n pred_bbox = torch.cat([pred_bbox, pred_scores, tag], axis=1)\n return pred_bbox, class_num\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Network(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.resnet50 = ResNet50(config.backbone_freeze_at, False)\n self.FPN = FPN(self.resnet50, 2, 6)\n self.RPN = RPN(config.rpn_channel)\n self.RCNN = RCNN()\n\n def forward(self, image, im_info, gt_boxes=None):\n image = (image - torch.tensor(config.image_mean[None, :, None, None\n ]).type_as(image)) / torch.tensor(config.image_std[None, :,\n None, None]).type_as(image)\n image = get_padded_tensor(image, 64)\n if self.training:\n return self._forward_train(image, im_info, gt_boxes)\n else:\n return self._forward_test(image, im_info)\n <mask token>\n <mask token>\n\n\nclass RCNN(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(256 * 7 * 7, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n for l in [self.fc1, self.fc2]:\n nn.init.kaiming_uniform_(l.weight, a=1)\n nn.init.constant_(l.bias, 0)\n self.pred_cls = nn.Linear(1024, config.num_classes)\n self.pred_delta = nn.Linear(1024, config.num_classes * 4)\n for l in [self.pred_cls]:\n nn.init.normal_(l.weight, std=0.01)\n nn.init.constant_(l.bias, 0)\n for l in [self.pred_delta]:\n nn.init.normal_(l.weight, std=0.001)\n nn.init.constant_(l.bias, 0)\n\n def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):\n fpn_fms = fpn_fms[1:][::-1]\n stride = [4, 8, 16, 32]\n pool_features = roi_pooler(fpn_fms, rcnn_rois, stride, (7, 7),\n 'ROIAlignV2')\n flatten_feature = torch.flatten(pool_features, start_dim=1)\n flatten_feature = F.relu_(self.fc1(flatten_feature))\n flatten_feature = F.relu_(self.fc2(flatten_feature))\n pred_cls = self.pred_cls(flatten_feature)\n pred_delta = self.pred_delta(flatten_feature)\n if self.training:\n labels = labels.long().flatten()\n fg_masks = labels > 0\n valid_masks = labels >= 0\n pred_delta = pred_delta.reshape(-1, config.num_classes, 4)\n fg_gt_classes = labels[fg_masks]\n pred_delta = pred_delta[fg_masks, fg_gt_classes, :]\n localization_loss = smooth_l1_loss(pred_delta, bbox_targets[\n fg_masks], config.rcnn_smooth_l1_beta)\n objectness_loss = softmax_loss(pred_cls, labels, num_classes=\n config.num_classes)\n objectness_loss = objectness_loss * valid_masks\n normalizer = 1.0 / valid_masks.sum().item()\n loss_rcnn_loc = localization_loss.sum() * normalizer\n loss_rcnn_cls = objectness_loss.sum() * normalizer\n loss_dict = {}\n loss_dict['loss_rcnn_loc'] = loss_rcnn_loc\n loss_dict['loss_rcnn_cls'] = loss_rcnn_cls\n return loss_dict\n else:\n class_num = pred_cls.shape[-1] - 1\n tag = torch.arange(class_num).type_as(pred_cls) + 1\n tag = tag.repeat(pred_cls.shape[0], 1).reshape(-1, 1)\n pred_scores = F.softmax(pred_cls, dim=-1)[:, 1:].reshape(-1, 1)\n pred_delta = pred_delta[:, 4:].reshape(-1, 4)\n base_rois = rcnn_rois[:, 1:5].repeat(1, class_num).reshape(-1, 4)\n pred_bbox = restore_bbox(base_rois, pred_delta, True)\n pred_bbox = torch.cat([pred_bbox, pred_scores, tag], axis=1)\n return pred_bbox, class_num\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Network(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.resnet50 = ResNet50(config.backbone_freeze_at, False)\n self.FPN = FPN(self.resnet50, 2, 6)\n self.RPN = RPN(config.rpn_channel)\n self.RCNN = RCNN()\n\n def forward(self, image, im_info, gt_boxes=None):\n image = (image - torch.tensor(config.image_mean[None, :, None, None\n ]).type_as(image)) / torch.tensor(config.image_std[None, :,\n None, None]).type_as(image)\n image = get_padded_tensor(image, 64)\n if self.training:\n return self._forward_train(image, im_info, gt_boxes)\n else:\n return self._forward_test(image, im_info)\n\n def _forward_train(self, image, im_info, gt_boxes):\n loss_dict = {}\n fpn_fms = self.FPN(image)\n rpn_rois, loss_dict_rpn = self.RPN(fpn_fms, im_info, gt_boxes)\n rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(rpn_rois,\n im_info, gt_boxes, top_k=1)\n loss_dict_rcnn = self.RCNN(fpn_fms, rcnn_rois, rcnn_labels,\n rcnn_bbox_targets)\n loss_dict.update(loss_dict_rpn)\n loss_dict.update(loss_dict_rcnn)\n return loss_dict\n <mask token>\n\n\nclass RCNN(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(256 * 7 * 7, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n for l in [self.fc1, self.fc2]:\n nn.init.kaiming_uniform_(l.weight, a=1)\n nn.init.constant_(l.bias, 0)\n self.pred_cls = nn.Linear(1024, config.num_classes)\n self.pred_delta = nn.Linear(1024, config.num_classes * 4)\n for l in [self.pred_cls]:\n nn.init.normal_(l.weight, std=0.01)\n nn.init.constant_(l.bias, 0)\n for l in [self.pred_delta]:\n nn.init.normal_(l.weight, std=0.001)\n nn.init.constant_(l.bias, 0)\n\n def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):\n fpn_fms = fpn_fms[1:][::-1]\n stride = [4, 8, 16, 32]\n pool_features = roi_pooler(fpn_fms, rcnn_rois, stride, (7, 7),\n 'ROIAlignV2')\n flatten_feature = torch.flatten(pool_features, start_dim=1)\n flatten_feature = F.relu_(self.fc1(flatten_feature))\n flatten_feature = F.relu_(self.fc2(flatten_feature))\n pred_cls = self.pred_cls(flatten_feature)\n pred_delta = self.pred_delta(flatten_feature)\n if self.training:\n labels = labels.long().flatten()\n fg_masks = labels > 0\n valid_masks = labels >= 0\n pred_delta = pred_delta.reshape(-1, config.num_classes, 4)\n fg_gt_classes = labels[fg_masks]\n pred_delta = pred_delta[fg_masks, fg_gt_classes, :]\n localization_loss = smooth_l1_loss(pred_delta, bbox_targets[\n fg_masks], config.rcnn_smooth_l1_beta)\n objectness_loss = softmax_loss(pred_cls, labels, num_classes=\n config.num_classes)\n objectness_loss = objectness_loss * valid_masks\n normalizer = 1.0 / valid_masks.sum().item()\n loss_rcnn_loc = localization_loss.sum() * normalizer\n loss_rcnn_cls = objectness_loss.sum() * normalizer\n loss_dict = {}\n loss_dict['loss_rcnn_loc'] = loss_rcnn_loc\n loss_dict['loss_rcnn_cls'] = loss_rcnn_cls\n return loss_dict\n else:\n class_num = pred_cls.shape[-1] - 1\n tag = torch.arange(class_num).type_as(pred_cls) + 1\n tag = tag.repeat(pred_cls.shape[0], 1).reshape(-1, 1)\n pred_scores = F.softmax(pred_cls, dim=-1)[:, 1:].reshape(-1, 1)\n pred_delta = pred_delta[:, 4:].reshape(-1, 4)\n base_rois = rcnn_rois[:, 1:5].repeat(1, class_num).reshape(-1, 4)\n pred_bbox = restore_bbox(base_rois, pred_delta, True)\n pred_bbox = torch.cat([pred_bbox, pred_scores, tag], axis=1)\n return pred_bbox, class_num\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Network(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.resnet50 = ResNet50(config.backbone_freeze_at, False)\n self.FPN = FPN(self.resnet50, 2, 6)\n self.RPN = RPN(config.rpn_channel)\n self.RCNN = RCNN()\n\n def forward(self, image, im_info, gt_boxes=None):\n image = (image - torch.tensor(config.image_mean[None, :, None, None\n ]).type_as(image)) / torch.tensor(config.image_std[None, :,\n None, None]).type_as(image)\n image = get_padded_tensor(image, 64)\n if self.training:\n return self._forward_train(image, im_info, gt_boxes)\n else:\n return self._forward_test(image, im_info)\n\n def _forward_train(self, image, im_info, gt_boxes):\n loss_dict = {}\n fpn_fms = self.FPN(image)\n rpn_rois, loss_dict_rpn = self.RPN(fpn_fms, im_info, gt_boxes)\n rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(rpn_rois,\n im_info, gt_boxes, top_k=1)\n loss_dict_rcnn = self.RCNN(fpn_fms, rcnn_rois, rcnn_labels,\n rcnn_bbox_targets)\n loss_dict.update(loss_dict_rpn)\n loss_dict.update(loss_dict_rcnn)\n return loss_dict\n\n def _forward_test(self, image, im_info):\n fpn_fms = self.FPN(image)\n rpn_rois = self.RPN(fpn_fms, im_info)\n pred_bbox, num_classes = self.RCNN(fpn_fms, rpn_rois)\n return pred_bbox.cpu().detach(), num_classes\n\n\nclass RCNN(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(256 * 7 * 7, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n for l in [self.fc1, self.fc2]:\n nn.init.kaiming_uniform_(l.weight, a=1)\n nn.init.constant_(l.bias, 0)\n self.pred_cls = nn.Linear(1024, config.num_classes)\n self.pred_delta = nn.Linear(1024, config.num_classes * 4)\n for l in [self.pred_cls]:\n nn.init.normal_(l.weight, std=0.01)\n nn.init.constant_(l.bias, 0)\n for l in [self.pred_delta]:\n nn.init.normal_(l.weight, std=0.001)\n nn.init.constant_(l.bias, 0)\n\n def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):\n fpn_fms = fpn_fms[1:][::-1]\n stride = [4, 8, 16, 32]\n pool_features = roi_pooler(fpn_fms, rcnn_rois, stride, (7, 7),\n 'ROIAlignV2')\n flatten_feature = torch.flatten(pool_features, start_dim=1)\n flatten_feature = F.relu_(self.fc1(flatten_feature))\n flatten_feature = F.relu_(self.fc2(flatten_feature))\n pred_cls = self.pred_cls(flatten_feature)\n pred_delta = self.pred_delta(flatten_feature)\n if self.training:\n labels = labels.long().flatten()\n fg_masks = labels > 0\n valid_masks = labels >= 0\n pred_delta = pred_delta.reshape(-1, config.num_classes, 4)\n fg_gt_classes = labels[fg_masks]\n pred_delta = pred_delta[fg_masks, fg_gt_classes, :]\n localization_loss = smooth_l1_loss(pred_delta, bbox_targets[\n fg_masks], config.rcnn_smooth_l1_beta)\n objectness_loss = softmax_loss(pred_cls, labels, num_classes=\n config.num_classes)\n objectness_loss = objectness_loss * valid_masks\n normalizer = 1.0 / valid_masks.sum().item()\n loss_rcnn_loc = localization_loss.sum() * normalizer\n loss_rcnn_cls = objectness_loss.sum() * normalizer\n loss_dict = {}\n loss_dict['loss_rcnn_loc'] = loss_rcnn_loc\n loss_dict['loss_rcnn_cls'] = loss_rcnn_cls\n return loss_dict\n else:\n class_num = pred_cls.shape[-1] - 1\n tag = torch.arange(class_num).type_as(pred_cls) + 1\n tag = tag.repeat(pred_cls.shape[0], 1).reshape(-1, 1)\n pred_scores = F.softmax(pred_cls, dim=-1)[:, 1:].reshape(-1, 1)\n pred_delta = pred_delta[:, 4:].reshape(-1, 4)\n base_rois = rcnn_rois[:, 1:5].repeat(1, class_num).reshape(-1, 4)\n pred_bbox = restore_bbox(base_rois, pred_delta, True)\n pred_bbox = torch.cat([pred_bbox, pred_scores, 
tag], axis=1)\n return pred_bbox, class_num\n\n\n<mask token>\n",
"step-5": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom config_pos import config\nfrom backbone.resnet50 import ResNet50\nfrom backbone.fpn import FPN\nfrom module.rpn import RPN\nfrom layers.pooler import roi_pooler\nfrom det_oprs.bbox_opr import bbox_transform_inv_opr\nfrom det_oprs.bbox_opr import bbox_transform_inv_opr_v2\nfrom det_oprs.fpn_roi_target import fpn_roi_target\nfrom det_oprs.loss_opr import softmax_loss, smooth_l1_loss\nfrom det_oprs.utils import get_padded_tensor\n\nclass Network(nn.Module):\n def __init__(self):\n super().__init__()\n self.resnet50 = ResNet50(config.backbone_freeze_at, False)\n self.FPN = FPN(self.resnet50, 2, 6)\n self.RPN = RPN(config.rpn_channel)\n self.RCNN = RCNN()\n\n def forward(self, image, im_info, gt_boxes=None):\n image = (image - torch.tensor(config.image_mean[None, :, None, None]).type_as(image)) / (\n torch.tensor(config.image_std[None, :, None, None]).type_as(image))\n image = get_padded_tensor(image, 64)\n if self.training:\n return self._forward_train(image, im_info, gt_boxes)\n else:\n return self._forward_test(image, im_info)\n\n def _forward_train(self, image, im_info, gt_boxes):\n loss_dict = {}\n fpn_fms = self.FPN(image)\n # fpn_fms stride: 64,32,16,8,4, p6->p2\n rpn_rois, loss_dict_rpn = self.RPN(fpn_fms, im_info, gt_boxes)\n rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(\n rpn_rois, im_info, gt_boxes, top_k=1)\n loss_dict_rcnn = self.RCNN(fpn_fms, rcnn_rois,\n rcnn_labels, rcnn_bbox_targets)\n loss_dict.update(loss_dict_rpn)\n loss_dict.update(loss_dict_rcnn)\n return loss_dict\n\n def _forward_test(self, image, im_info):\n fpn_fms = self.FPN(image)\n rpn_rois = self.RPN(fpn_fms, im_info)\n pred_bbox, num_classes = self.RCNN(fpn_fms, rpn_rois)\n return pred_bbox.cpu().detach(), num_classes\n\nclass RCNN(nn.Module):\n def __init__(self):\n super().__init__()\n # roi head\n self.fc1 = nn.Linear(256*7*7, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n\n for l in [self.fc1, self.fc2]:\n nn.init.kaiming_uniform_(l.weight, a=1)\n nn.init.constant_(l.bias, 0)\n # box predictor\n self.pred_cls = nn.Linear(1024, config.num_classes)\n self.pred_delta = nn.Linear(1024, config.num_classes * 4)\n for l in [self.pred_cls]:\n nn.init.normal_(l.weight, std=0.01)\n nn.init.constant_(l.bias, 0)\n for l in [self.pred_delta]:\n nn.init.normal_(l.weight, std=0.001)\n nn.init.constant_(l.bias, 0)\n\n def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):\n # input p2-p5\n fpn_fms = fpn_fms[1:][::-1]\n stride = [4, 8, 16, 32]\n pool_features = roi_pooler(fpn_fms, rcnn_rois, stride, (7, 7), \"ROIAlignV2\")\n flatten_feature = torch.flatten(pool_features, start_dim=1)\n flatten_feature = F.relu_(self.fc1(flatten_feature))\n flatten_feature = F.relu_(self.fc2(flatten_feature))\n pred_cls = self.pred_cls(flatten_feature)\n pred_delta = self.pred_delta(flatten_feature)\n if self.training:\n # loss for regression\n labels = labels.long().flatten()\n fg_masks = labels > 0\n valid_masks = labels >= 0\n # multi class\n pred_delta = pred_delta.reshape(-1, config.num_classes, 4)\n fg_gt_classes = labels[fg_masks]\n pred_delta = pred_delta[fg_masks, fg_gt_classes, :]\n localization_loss = smooth_l1_loss(\n # pred_regression,\n pred_delta,\n bbox_targets[fg_masks],\n config.rcnn_smooth_l1_beta)\n # loss for classification\n objectness_loss = softmax_loss(pred_cls, labels, num_classes=config.num_classes)\n objectness_loss = objectness_loss * valid_masks\n normalizer = 1.0 / 
valid_masks.sum().item()\n loss_rcnn_loc = localization_loss.sum() * normalizer\n loss_rcnn_cls = objectness_loss.sum() * normalizer\n loss_dict = {}\n loss_dict['loss_rcnn_loc'] = loss_rcnn_loc\n loss_dict['loss_rcnn_cls'] = loss_rcnn_cls\n return loss_dict\n else:\n class_num = pred_cls.shape[-1] - 1\n tag = torch.arange(class_num).type_as(pred_cls)+1\n tag = tag.repeat(pred_cls.shape[0], 1).reshape(-1,1)\n pred_scores = F.softmax(pred_cls, dim=-1)[:, 1:].reshape(-1, 1)\n pred_delta = pred_delta[:, 4:].reshape(-1, 4)\n base_rois = rcnn_rois[:, 1:5].repeat(1, class_num).reshape(-1, 4)\n pred_bbox = restore_bbox(base_rois, pred_delta, True)\n pred_bbox = torch.cat([pred_bbox, pred_scores, tag], axis=1)\n return pred_bbox, class_num\n\ndef restore_bbox(rois, deltas, unnormalize=True):\n if unnormalize:\n std_opr = torch.tensor(config.bbox_normalize_stds[None, :]).type_as(deltas)\n mean_opr = torch.tensor(config.bbox_normalize_means[None, :]).type_as(deltas)\n deltas = deltas * std_opr\n deltas = deltas + mean_opr\n pred_bbox = bbox_transform_inv_opr(rois, deltas)\n return pred_bbox\n",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
from time import strftime
from Stats.SQL.Compteur import compteurSQL
from Stats.SQL.Rapports import rapportsSQL
from Stats.SQL.Daily import dailySQL
from Stats.SQL.CompteurP4 import compteurJeuxSQL
from Stats.SQL.Historique import histoSQL, histoSQLJeux
from Stats.SQL.ConnectSQL import connectSQL
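# Maps a zero-padded month number to its French name; these names prefix the
# per-month SQL table names built below (e.g. "janvier" + "21").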
tableauMois={"01":"janvier","02":"février","03":"mars","04":"avril","05":"mai","06":"juin","07":"juillet","08":"aout","09":"septembre","10":"octobre","11":"novembre","12":"décembre","TO":"TOTAL"}
def exeClassic(count,id,nom,curseurGuild,guild):
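    # Updates the per-month, per-year and global counters for one user and one
    # stat, tracks a per-day ranking for Messages/Voice, drops per-user detail
    # tables of users ranked past 400, then refreshes the daily totals and,
    # except for mention stats, the report tables.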
dateID=int(strftime("%y")+strftime("%m")+strftime("%d"))
connexionGL,curseurGL=connectSQL(guild.id,nom,"Stats","GL","")
connexion,curseur=connectSQL(guild.id,nom,"Stats",strftime("%m"),strftime("%y"))
compteurSQL(curseur,tableauMois[strftime("%m")]+strftime("%y"),id,(0,id,strftime("%m"),strftime("%y"),count,0),count,(strftime("%d"),strftime("%m"),strftime("%y")),(strftime("%m"),strftime("%y")),"persoM",False,True,1,curseurGL)
connexion.commit()
connexion,curseur=connectSQL(guild.id,nom,"Stats","TO",strftime("%y"))
compteurSQL(curseur,"to"+strftime("%y"),id,(0,id,"TO",strftime("%y"),count,0),count,(strftime("%d"),strftime("%m"),strftime("%y")),("TO",strftime("%y")),"persoA",False,True,1,curseurGL)
connexion.commit()
liste=compteurSQL(curseurGL,"glob",id,(0,id,"TO","GL",count,0),count,(strftime("%d"),strftime("%m"),strftime("%y")),("TO","GL"),"persoA",False,True,1,curseurGL)
if nom in ("Messages","Voice"):
compteurSQL(curseurGL,"dayRank",int(strftime("%y")+strftime("%m")+strftime("%d")),(0,int(strftime("%y")+strftime("%m")+strftime("%d")),strftime("%d"),strftime("%m"),strftime("%y"),count),count,None,None,None,None,False,3,curseurGL)
if nom in ("Emotes","Reactions"):
countGL=curseurGL.execute("SELECT Count FROM glob WHERE ID={0}".format(id)).fetchone()["Count"]
for i in liste:
if i["Rank"]>400:
curseurGL.execute("DROP TABLE IF EXISTS persoM{0}".format(i["ID"]))
curseurGL.execute("DROP TABLE IF EXISTS persoA{0}".format(i["ID"]))
connexionGL.commit()
dailySQL(dateID,(strftime("%d"),strftime("%m"),strftime("%y")),nom,curseurGuild,guild.id,"Stats")
if nom not in ("Mentions","Mentionne"):
rapportsSQL(guild,"ranks",id,None,count,(0,id,strftime("%d"),strftime("%m"),strftime("%y"),dateID,count,nom),strftime("%d"),strftime("%m"),strftime("%y"),nom)
def exeObj(count,idObj,id,obj,guild,nom):
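    # Like exeClassic but keyed on an extra object id (channel, emote, ...):
    # updates month/year/global counters per (user, object) and drops
    # emote/reaction per-object tables whose totals stay under 10 (month),
    # 25 (year) or 50 (global).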
dateID=int(strftime("%y")+strftime("%m")+strftime("%d"))
connexionGL,curseurGL=connectSQL(guild.id,nom,"Stats","GL","")
connexion,curseur=connectSQL(guild.id,nom,"Stats",strftime("%m"),strftime("%y"))
compteurSQL(curseur,tableauMois[strftime("%m")]+strftime("%y")+str(idObj),id,(0,id,idObj,strftime("%m"),strftime("%y"),count),count,(strftime("%d"),strftime("%m"),strftime("%y")),(strftime("%m"),strftime("%y")),"persoM",obj,False,2,curseurGL)
if nom in ("Emotes","Reactions") and curseur.execute("SELECT Count FROM {0}{1} WHERE ID={2}".format(tableauMois[strftime("%m")],strftime("%y"),idObj)).fetchone()["Count"]<10:
curseur.execute("DROP TABLE {0}{1}{2}".format(tableauMois[strftime("%m")],strftime("%y"),idObj))
connexion.commit()
connexion,curseur=connectSQL(guild.id,nom,"Stats","TO",strftime("%y"))
compteurSQL(curseur,"to"+strftime("%y")+str(idObj),id,(0,id,idObj,"TO",strftime("%y"),count),count,(strftime("%d"),strftime("%m"),strftime("%y")),("TO",strftime("%y")),"persoA",obj,False,2,curseurGL)
if nom in ("Emotes","Reactions") and curseur.execute("SELECT Count FROM to{0} WHERE ID={1}".format(strftime("%y"),idObj)).fetchone()["Count"]<25:
curseur.execute("DROP TABLE to{0}{1}".format(strftime("%y"),idObj))
connexion.commit()
liste=compteurSQL(curseurGL,"glob"+str(idObj),id,(0,id,idObj,"TO","GL",count),count,(strftime("%d"),strftime("%m"),strftime("%y")),("TO","GL"),"persoA",obj,False,2,curseurGL)
if nom in ("Emotes","Reactions"):
if curseurGL.execute("SELECT Count FROM glob WHERE ID={0}".format(idObj)).fetchone()["Count"]<50:
curseurGL.execute("DROP TABLE glob{0}".format(idObj))
if curseurGL.execute("SELECT Rank FROM glob WHERE ID={0}".format(idObj)).fetchone()["Rank"]>400:
for i in liste:
curseurGL.execute("DROP TABLE IF EXISTS persoM{0}{1}".format(i["ID"],idObj))
curseurGL.execute("DROP TABLE IF EXISTS persoA{0}{1}".format(i["ID"],idObj))
connexionGL.commit()
if nom not in ("Mentions","Mentionne"):
rapportsSQL(guild,"objs",idObj,id,count,(0,id,idObj,strftime("%d"),strftime("%m"),strftime("%y"),dateID,count,nom),strftime("%d"),strftime("%m"),strftime("%y"),nom)
def exeJeuxSQL(id,idObj,state,guild,curseurGuild,count,option,tours):
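    # Records one game result per player: wins ("W") and losses ("L") feed the
    # month/year/global rankings, the per-idObj tables (presumably the
    # opponent or game variant) and the per-user game history.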
dictCount={"W":2,"L":-1}
dictW={"W":1,"L":0}
dictL={"W":0,"L":1}
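    # Scoring: a win is worth +2 and a loss -1; dictW/dictL pick which of the
    # win/loss columns to increment.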
connexionGL,curseurGL=connectSQL(guild,option,"Jeux","GL","")
connexion,curseur=connectSQL(guild,option,"Jeux",strftime("%m"),strftime("%y"))
compteurJeuxSQL(curseur,tableauMois[strftime("%m")]+strftime("%y"),id,(0,id,strftime("%m"),strftime("%y"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime("%d"),strftime("%m"),strftime("%y")),(strftime("%m"),strftime("%y")),"persoM",False,state,4,curseurGL)
if idObj!=None:
compteurJeuxSQL(curseur,tableauMois[strftime("%m")]+strftime("%y")+str(idObj),id,(0,id,idObj,strftime("%m"),strftime("%y"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime("%d"),strftime("%m"),strftime("%y")),(strftime("%m"),strftime("%y")),"persoM",True,state,5,curseurGL)
connexion.commit()
connexion,curseur=connectSQL(guild,option,"Jeux","TO",strftime("%y"))
compteurJeuxSQL(curseur,"to"+strftime("%y"),id,(0,id,"TO",strftime("%y"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime("%d"),strftime("%m"),strftime("%y")),("TO",strftime("%y")),"persoA",False,state,4,curseurGL)
if idObj!=None:
compteurJeuxSQL(curseur,"to"+strftime("%y")+str(idObj),id,(0,id,idObj,"TO",strftime("%y"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime("%d"),strftime("%m"),strftime("%y")),("TO",strftime("%y")),"persoA",True,state,5,curseurGL)
connexion.commit()
compteurJeuxSQL(curseurGL,"glob",id,(0,id,"TO","GL",dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime("%d"),strftime("%m"),strftime("%y")),("TO","GL"),"persoA",False,state,4,curseurGL)
if idObj!=None:
compteurJeuxSQL(curseurGL,"glob"+str(idObj),id,(0,id,idObj,"TO","GL",dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime("%d"),strftime("%m"),strftime("%y")),("TO","GL"),"persoA",True,state,5,curseurGL)
histoSQLJeux(curseurGL,id,tours,strftime("%d")+"/"+strftime("%m")+"/"+strftime("%y"),idObj,state)
connexionGL.commit()
dailySQL(int(strftime("%y")+strftime("%m")+strftime("%d")),(strftime("%d"),strftime("%m"),strftime("%y")),option,curseurGuild,guild,"Jeux")
|
normal
|
{
"blob_id": "19ff064f8c27b9796eb435c7d2b9ebf87ee90ad6",
"index": 7982,
"step-1": "<mask token>\n\n\ndef exeObj(count, idObj, id, obj, guild, nom):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str\n (idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),\n count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(\n '%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime\n ('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:\n curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(\n '%m')], strftime('%y'), idObj))\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,\n idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',\n obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)\n ).fetchone()['Count'] < 25:\n curseur.execute('DROP TABLE to{0}{1}'.format(strftime('%y'), idObj))\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),\n strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n if curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(\n idObj)).fetchone()['Count'] < 50:\n curseurGL.execute('DROP TABLE glob{0}'.format(idObj))\n if curseurGL.execute('SELECT Rank FROM glob WHERE ID={0}'.format(idObj)\n ).fetchone()['Rank'] > 400:\n for i in liste:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.\n format(i['ID'], idObj))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.\n format(i['ID'], idObj))\n connexionGL.commit()\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,\n strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,\n nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):\n dictCount = {'W': 2, 'L': -1}\n dictW = {'W': 1, 'L': 0}\n dictL = {'W': 0, 'L': 1}\n connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')\n connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),\n strftime('%y'))\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),\n id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[\n state], dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),\n 'persoM', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(\n '%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime\n ('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,\n 5, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')\n )\n compteurJeuxSQL(curseur, 'to' + 
strftime('%y'), id, (0, id, 'TO',\n strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime('%y')),\n ('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,\n id, idObj, 'TO', strftime('%y'), dictW[state], dictL[state],\n dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')),\n 'persoA', True, state, 5, curseurGL)\n connexion.commit()\n compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],\n dictL[state], dictCount[state], 0), dictCount[state], (strftime(\n '%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA', \n False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)\n histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(\n '%m') + '/' + strftime('%y'), idObj, state)\n connexionGL.commit()\n dailySQL(int(strftime('%y') + strftime('%m') + strftime('%d')), (\n strftime('%d'), strftime('%m'), strftime('%y')), option,\n curseurGuild, guild, 'Jeux')\n",
"step-2": "<mask token>\n\n\ndef exeClassic(count, id, nom, curseurGuild, guild):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'), id,\n (0, id, strftime('%m'), strftime('%y'), count, 0), count, (strftime\n ('%d'), strftime('%m'), strftime('%y')), (strftime('%m'), strftime(\n '%y')), 'persoM', False, True, 1, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO', strftime(\n '%y'), count, 0), count, (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', strftime('%y')), 'persoA', False, True, 1, curseurGL)\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', count, 0\n ), count, (strftime('%d'), strftime('%m'), strftime('%y')), ('TO',\n 'GL'), 'persoA', False, True, 1, curseurGL)\n if nom in ('Messages', 'Voice'):\n compteurSQL(curseurGL, 'dayRank', int(strftime('%y') + strftime(\n '%m') + strftime('%d')), (0, int(strftime('%y') + strftime('%m'\n ) + strftime('%d')), strftime('%d'), strftime('%m'), strftime(\n '%y'), count), count, None, None, None, None, False, 3, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n countGL = curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.\n format(id)).fetchone()['Count']\n for i in liste:\n if i['Rank'] > 400:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}'.format(i\n ['ID']))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}'.format(i\n ['ID']))\n connexionGL.commit()\n dailySQL(dateID, (strftime('%d'), strftime('%m'), strftime('%y')), nom,\n curseurGuild, guild.id, 'Stats')\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'ranks', id, None, count, (0, id, strftime('%d'),\n strftime('%m'), strftime('%y'), dateID, count, nom), strftime(\n '%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeObj(count, idObj, id, obj, guild, nom):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str\n (idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),\n count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(\n '%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime\n ('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:\n curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(\n '%m')], strftime('%y'), idObj))\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,\n idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',\n obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)\n ).fetchone()['Count'] < 25:\n curseur.execute('DROP TABLE to{0}{1}'.format(strftime('%y'), idObj))\n connexion.commit()\n liste = compteurSQL(curseurGL, 
'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),\n strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n if curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(\n idObj)).fetchone()['Count'] < 50:\n curseurGL.execute('DROP TABLE glob{0}'.format(idObj))\n if curseurGL.execute('SELECT Rank FROM glob WHERE ID={0}'.format(idObj)\n ).fetchone()['Rank'] > 400:\n for i in liste:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.\n format(i['ID'], idObj))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.\n format(i['ID'], idObj))\n connexionGL.commit()\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,\n strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,\n nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):\n dictCount = {'W': 2, 'L': -1}\n dictW = {'W': 1, 'L': 0}\n dictL = {'W': 0, 'L': 1}\n connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')\n connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),\n strftime('%y'))\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),\n id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[\n state], dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),\n 'persoM', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(\n '%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime\n ('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,\n 5, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')\n )\n compteurJeuxSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO',\n strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime('%y')),\n ('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,\n id, idObj, 'TO', strftime('%y'), dictW[state], dictL[state],\n dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')),\n 'persoA', True, state, 5, curseurGL)\n connexion.commit()\n compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],\n dictL[state], dictCount[state], 0), dictCount[state], (strftime(\n '%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA', \n False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)\n histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(\n '%m') + '/' + strftime('%y'), idObj, state)\n connexionGL.commit()\n dailySQL(int(strftime('%y') + strftime('%m') + strftime('%d')), (\n strftime('%d'), strftime('%m'), strftime('%y')), option,\n curseurGuild, guild, 'Jeux')\n",
"step-3": "<mask token>\ntableauMois = {'01': 'janvier', '02': 'février', '03': 'mars', '04':\n 'avril', '05': 'mai', '06': 'juin', '07': 'juillet', '08': 'aout', '09':\n 'septembre', '10': 'octobre', '11': 'novembre', '12': 'décembre', 'TO':\n 'TOTAL'}\n\n\ndef exeClassic(count, id, nom, curseurGuild, guild):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'), id,\n (0, id, strftime('%m'), strftime('%y'), count, 0), count, (strftime\n ('%d'), strftime('%m'), strftime('%y')), (strftime('%m'), strftime(\n '%y')), 'persoM', False, True, 1, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO', strftime(\n '%y'), count, 0), count, (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', strftime('%y')), 'persoA', False, True, 1, curseurGL)\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', count, 0\n ), count, (strftime('%d'), strftime('%m'), strftime('%y')), ('TO',\n 'GL'), 'persoA', False, True, 1, curseurGL)\n if nom in ('Messages', 'Voice'):\n compteurSQL(curseurGL, 'dayRank', int(strftime('%y') + strftime(\n '%m') + strftime('%d')), (0, int(strftime('%y') + strftime('%m'\n ) + strftime('%d')), strftime('%d'), strftime('%m'), strftime(\n '%y'), count), count, None, None, None, None, False, 3, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n countGL = curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.\n format(id)).fetchone()['Count']\n for i in liste:\n if i['Rank'] > 400:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}'.format(i\n ['ID']))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}'.format(i\n ['ID']))\n connexionGL.commit()\n dailySQL(dateID, (strftime('%d'), strftime('%m'), strftime('%y')), nom,\n curseurGuild, guild.id, 'Stats')\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'ranks', id, None, count, (0, id, strftime('%d'),\n strftime('%m'), strftime('%y'), dateID, count, nom), strftime(\n '%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeObj(count, idObj, id, obj, guild, nom):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str\n (idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),\n count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(\n '%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime\n ('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:\n curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(\n '%m')], strftime('%y'), idObj))\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,\n idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',\n obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and 
curseur.execute(\n 'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)\n ).fetchone()['Count'] < 25:\n curseur.execute('DROP TABLE to{0}{1}'.format(strftime('%y'), idObj))\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),\n strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n if curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(\n idObj)).fetchone()['Count'] < 50:\n curseurGL.execute('DROP TABLE glob{0}'.format(idObj))\n if curseurGL.execute('SELECT Rank FROM glob WHERE ID={0}'.format(idObj)\n ).fetchone()['Rank'] > 400:\n for i in liste:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.\n format(i['ID'], idObj))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.\n format(i['ID'], idObj))\n connexionGL.commit()\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,\n strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,\n nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):\n dictCount = {'W': 2, 'L': -1}\n dictW = {'W': 1, 'L': 0}\n dictL = {'W': 0, 'L': 1}\n connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')\n connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),\n strftime('%y'))\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),\n id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[\n state], dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),\n 'persoM', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(\n '%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime\n ('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,\n 5, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')\n )\n compteurJeuxSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO',\n strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime('%y')),\n ('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,\n id, idObj, 'TO', strftime('%y'), dictW[state], dictL[state],\n dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')),\n 'persoA', True, state, 5, curseurGL)\n connexion.commit()\n compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],\n dictL[state], dictCount[state], 0), dictCount[state], (strftime(\n '%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA', \n False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)\n histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(\n '%m') + '/' + strftime('%y'), idObj, state)\n connexionGL.commit()\n dailySQL(int(strftime('%y') + 
strftime('%m') + strftime('%d')), (\n strftime('%d'), strftime('%m'), strftime('%y')), option,\n curseurGuild, guild, 'Jeux')\n",
"step-4": "from time import strftime\nfrom Stats.SQL.Compteur import compteurSQL\nfrom Stats.SQL.Rapports import rapportsSQL\nfrom Stats.SQL.Daily import dailySQL\nfrom Stats.SQL.CompteurP4 import compteurJeuxSQL\nfrom Stats.SQL.Historique import histoSQL, histoSQLJeux\nfrom Stats.SQL.ConnectSQL import connectSQL\ntableauMois = {'01': 'janvier', '02': 'février', '03': 'mars', '04':\n 'avril', '05': 'mai', '06': 'juin', '07': 'juillet', '08': 'aout', '09':\n 'septembre', '10': 'octobre', '11': 'novembre', '12': 'décembre', 'TO':\n 'TOTAL'}\n\n\ndef exeClassic(count, id, nom, curseurGuild, guild):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'), id,\n (0, id, strftime('%m'), strftime('%y'), count, 0), count, (strftime\n ('%d'), strftime('%m'), strftime('%y')), (strftime('%m'), strftime(\n '%y')), 'persoM', False, True, 1, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO', strftime(\n '%y'), count, 0), count, (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', strftime('%y')), 'persoA', False, True, 1, curseurGL)\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', count, 0\n ), count, (strftime('%d'), strftime('%m'), strftime('%y')), ('TO',\n 'GL'), 'persoA', False, True, 1, curseurGL)\n if nom in ('Messages', 'Voice'):\n compteurSQL(curseurGL, 'dayRank', int(strftime('%y') + strftime(\n '%m') + strftime('%d')), (0, int(strftime('%y') + strftime('%m'\n ) + strftime('%d')), strftime('%d'), strftime('%m'), strftime(\n '%y'), count), count, None, None, None, None, False, 3, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n countGL = curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.\n format(id)).fetchone()['Count']\n for i in liste:\n if i['Rank'] > 400:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}'.format(i\n ['ID']))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}'.format(i\n ['ID']))\n connexionGL.commit()\n dailySQL(dateID, (strftime('%d'), strftime('%m'), strftime('%y')), nom,\n curseurGuild, guild.id, 'Stats')\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'ranks', id, None, count, (0, id, strftime('%d'),\n strftime('%m'), strftime('%y'), dateID, count, nom), strftime(\n '%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeObj(count, idObj, id, obj, guild, nom):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str\n (idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),\n count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(\n '%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime\n ('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:\n curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(\n '%m')], strftime('%y'), idObj))\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n 
'%y'))\n compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,\n idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',\n obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)\n ).fetchone()['Count'] < 25:\n curseur.execute('DROP TABLE to{0}{1}'.format(strftime('%y'), idObj))\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),\n strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n if curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(\n idObj)).fetchone()['Count'] < 50:\n curseurGL.execute('DROP TABLE glob{0}'.format(idObj))\n if curseurGL.execute('SELECT Rank FROM glob WHERE ID={0}'.format(idObj)\n ).fetchone()['Rank'] > 400:\n for i in liste:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.\n format(i['ID'], idObj))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.\n format(i['ID'], idObj))\n connexionGL.commit()\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,\n strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,\n nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):\n dictCount = {'W': 2, 'L': -1}\n dictW = {'W': 1, 'L': 0}\n dictL = {'W': 0, 'L': 1}\n connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')\n connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),\n strftime('%y'))\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),\n id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[\n state], dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),\n 'persoM', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(\n '%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime\n ('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,\n 5, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')\n )\n compteurJeuxSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO',\n strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime('%y')),\n ('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,\n id, idObj, 'TO', strftime('%y'), dictW[state], dictL[state],\n dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')),\n 'persoA', True, state, 5, curseurGL)\n connexion.commit()\n compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],\n dictL[state], dictCount[state], 0), dictCount[state], (strftime(\n '%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA', \n False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], 
(strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)\n histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(\n '%m') + '/' + strftime('%y'), idObj, state)\n connexionGL.commit()\n dailySQL(int(strftime('%y') + strftime('%m') + strftime('%d')), (\n strftime('%d'), strftime('%m'), strftime('%y')), option,\n curseurGuild, guild, 'Jeux')\n",
"step-5": "from time import strftime\nfrom Stats.SQL.Compteur import compteurSQL\nfrom Stats.SQL.Rapports import rapportsSQL\nfrom Stats.SQL.Daily import dailySQL\nfrom Stats.SQL.CompteurP4 import compteurJeuxSQL\nfrom Stats.SQL.Historique import histoSQL, histoSQLJeux\nfrom Stats.SQL.ConnectSQL import connectSQL\n\ntableauMois={\"01\":\"janvier\",\"02\":\"février\",\"03\":\"mars\",\"04\":\"avril\",\"05\":\"mai\",\"06\":\"juin\",\"07\":\"juillet\",\"08\":\"aout\",\"09\":\"septembre\",\"10\":\"octobre\",\"11\":\"novembre\",\"12\":\"décembre\",\"TO\":\"TOTAL\"}\n\ndef exeClassic(count,id,nom,curseurGuild,guild):\n dateID=int(strftime(\"%y\")+strftime(\"%m\")+strftime(\"%d\"))\n connexionGL,curseurGL=connectSQL(guild.id,nom,\"Stats\",\"GL\",\"\")\n\n connexion,curseur=connectSQL(guild.id,nom,\"Stats\",strftime(\"%m\"),strftime(\"%y\"))\n compteurSQL(curseur,tableauMois[strftime(\"%m\")]+strftime(\"%y\"),id,(0,id,strftime(\"%m\"),strftime(\"%y\"),count,0),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(strftime(\"%m\"),strftime(\"%y\")),\"persoM\",False,True,1,curseurGL)\n connexion.commit()\n\n connexion,curseur=connectSQL(guild.id,nom,\"Stats\",\"TO\",strftime(\"%y\"))\n compteurSQL(curseur,\"to\"+strftime(\"%y\"),id,(0,id,\"TO\",strftime(\"%y\"),count,0),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",strftime(\"%y\")),\"persoA\",False,True,1,curseurGL)\n connexion.commit()\n\n liste=compteurSQL(curseurGL,\"glob\",id,(0,id,\"TO\",\"GL\",count,0),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",\"GL\"),\"persoA\",False,True,1,curseurGL)\n if nom in (\"Messages\",\"Voice\"):\n compteurSQL(curseurGL,\"dayRank\",int(strftime(\"%y\")+strftime(\"%m\")+strftime(\"%d\")),(0,int(strftime(\"%y\")+strftime(\"%m\")+strftime(\"%d\")),strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\"),count),count,None,None,None,None,False,3,curseurGL)\n \n if nom in (\"Emotes\",\"Reactions\"):\n countGL=curseurGL.execute(\"SELECT Count FROM glob WHERE ID={0}\".format(id)).fetchone()[\"Count\"]\n for i in liste:\n if i[\"Rank\"]>400:\n curseurGL.execute(\"DROP TABLE IF EXISTS persoM{0}\".format(i[\"ID\"]))\n curseurGL.execute(\"DROP TABLE IF EXISTS persoA{0}\".format(i[\"ID\"]))\n connexionGL.commit()\n\n dailySQL(dateID,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),nom,curseurGuild,guild.id,\"Stats\")\n if nom not in (\"Mentions\",\"Mentionne\"):\n rapportsSQL(guild,\"ranks\",id,None,count,(0,id,strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\"),dateID,count,nom),strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\"),nom)\n\ndef exeObj(count,idObj,id,obj,guild,nom):\n dateID=int(strftime(\"%y\")+strftime(\"%m\")+strftime(\"%d\"))\n connexionGL,curseurGL=connectSQL(guild.id,nom,\"Stats\",\"GL\",\"\")\n\n connexion,curseur=connectSQL(guild.id,nom,\"Stats\",strftime(\"%m\"),strftime(\"%y\"))\n compteurSQL(curseur,tableauMois[strftime(\"%m\")]+strftime(\"%y\")+str(idObj),id,(0,id,idObj,strftime(\"%m\"),strftime(\"%y\"),count),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(strftime(\"%m\"),strftime(\"%y\")),\"persoM\",obj,False,2,curseurGL)\n if nom in (\"Emotes\",\"Reactions\") and curseur.execute(\"SELECT Count FROM {0}{1} WHERE ID={2}\".format(tableauMois[strftime(\"%m\")],strftime(\"%y\"),idObj)).fetchone()[\"Count\"]<10:\n curseur.execute(\"DROP TABLE {0}{1}{2}\".format(tableauMois[strftime(\"%m\")],strftime(\"%y\"),idObj))\n connexion.commit()\n\n connexion,curseur=connectSQL(guild.id,nom,\"Stats\",\"TO\",strftime(\"%y\"))\n 
compteurSQL(curseur,\"to\"+strftime(\"%y\")+str(idObj),id,(0,id,idObj,\"TO\",strftime(\"%y\"),count),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",strftime(\"%y\")),\"persoA\",obj,False,2,curseurGL)\n if nom in (\"Emotes\",\"Reactions\") and curseur.execute(\"SELECT Count FROM to{0} WHERE ID={1}\".format(strftime(\"%y\"),idObj)).fetchone()[\"Count\"]<25:\n curseur.execute(\"DROP TABLE to{0}{1}\".format(strftime(\"%y\"),idObj))\n connexion.commit()\n\n liste=compteurSQL(curseurGL,\"glob\"+str(idObj),id,(0,id,idObj,\"TO\",\"GL\",count),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",\"GL\"),\"persoA\",obj,False,2,curseurGL)\n if nom in (\"Emotes\",\"Reactions\"):\n if curseurGL.execute(\"SELECT Count FROM glob WHERE ID={0}\".format(idObj)).fetchone()[\"Count\"]<50:\n curseurGL.execute(\"DROP TABLE glob{0}\".format(idObj))\n if curseurGL.execute(\"SELECT Rank FROM glob WHERE ID={0}\".format(idObj)).fetchone()[\"Rank\"]>400:\n for i in liste:\n curseurGL.execute(\"DROP TABLE IF EXISTS persoM{0}{1}\".format(i[\"ID\"],idObj))\n curseurGL.execute(\"DROP TABLE IF EXISTS persoA{0}{1}\".format(i[\"ID\"],idObj))\n connexionGL.commit()\n\n if nom not in (\"Mentions\",\"Mentionne\"):\n rapportsSQL(guild,\"objs\",idObj,id,count,(0,id,idObj,strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\"),dateID,count,nom),strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\"),nom)\n\ndef exeJeuxSQL(id,idObj,state,guild,curseurGuild,count,option,tours):\n dictCount={\"W\":2,\"L\":-1}\n dictW={\"W\":1,\"L\":0}\n dictL={\"W\":0,\"L\":1}\n connexionGL,curseurGL=connectSQL(guild,option,\"Jeux\",\"GL\",\"\")\n\n connexion,curseur=connectSQL(guild,option,\"Jeux\",strftime(\"%m\"),strftime(\"%y\"))\n compteurJeuxSQL(curseur,tableauMois[strftime(\"%m\")]+strftime(\"%y\"),id,(0,id,strftime(\"%m\"),strftime(\"%y\"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(strftime(\"%m\"),strftime(\"%y\")),\"persoM\",False,state,4,curseurGL)\n if idObj!=None:\n compteurJeuxSQL(curseur,tableauMois[strftime(\"%m\")]+strftime(\"%y\")+str(idObj),id,(0,id,idObj,strftime(\"%m\"),strftime(\"%y\"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(strftime(\"%m\"),strftime(\"%y\")),\"persoM\",True,state,5,curseurGL)\n connexion.commit()\n\n connexion,curseur=connectSQL(guild,option,\"Jeux\",\"TO\",strftime(\"%y\"))\n compteurJeuxSQL(curseur,\"to\"+strftime(\"%y\"),id,(0,id,\"TO\",strftime(\"%y\"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",strftime(\"%y\")),\"persoA\",False,state,4,curseurGL)\n if idObj!=None:\n compteurJeuxSQL(curseur,\"to\"+strftime(\"%y\")+str(idObj),id,(0,id,idObj,\"TO\",strftime(\"%y\"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",strftime(\"%y\")),\"persoA\",True,state,5,curseurGL)\n connexion.commit()\n\n compteurJeuxSQL(curseurGL,\"glob\",id,(0,id,\"TO\",\"GL\",dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",\"GL\"),\"persoA\",False,state,4,curseurGL)\n if idObj!=None:\n compteurJeuxSQL(curseurGL,\"glob\"+str(idObj),id,(0,id,idObj,\"TO\",\"GL\",dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",\"GL\"),\"persoA\",True,state,5,curseurGL)\n 
histoSQLJeux(curseurGL,id,tours,strftime(\"%d\")+\"/\"+strftime(\"%m\")+\"/\"+strftime(\"%y\"),idObj,state)\n connexionGL.commit()\n\n dailySQL(int(strftime(\"%y\")+strftime(\"%m\")+strftime(\"%d\")),(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),option,curseurGuild,guild,\"Jeux\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import sys
import time
import math
from neopixel import *
count = int(sys.argv[1])
percent = int(sys.argv[2])
# LED strip configuration:
LED_COUNT = count # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0
LED_STRIP = ws.WS2811_STRIP_GRB
#LED_STRIP = ws.SK6812W_STRIP
# number of LEDs to light for the requested percentage of the strip
lightUp = math.floor(count * percent / 100)
# Initialize the library (must be called once before other functions).
def setPixel(strip):
for i in range(count):
if(i<lightUp):
strip.setPixelColor(i, Color(0, 255, 0))
strip.show()
else:
strip.setPixelColor(i, Color(255, 0, 0))
strip.show()
if __name__ == '__main__':
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
strip.begin()
setPixel(strip)
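# Minimal usage sketch (the script name and argument values are assumed):
#   sudo python led_bar.py 30 75
# lights about 75% of a 30-pixel strip green and the rest red.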
|
normal
|
{
"blob_id": "5ff7a3843314dfd3914c5e96164385d61fbe7fa5",
"index": 684,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef setPixel(strip):\n for i in range(count):\n if i < lightUp:\n strip.setPixelColor(i, Color(0, 255, 0))\n strip.show()\n else:\n strip.setPixelColor(i, Color(255, 0, 0))\n strip.show()\n\n\nif __name__ == '__main__':\n strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,\n LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)\n strip.begin()\n setPixel(strip)\n",
"step-3": "<mask token>\ncount = int(sys.argv[1])\npercent = int(sys.argv[2])\nLED_COUNT = count\nLED_PIN = 18\nLED_FREQ_HZ = 800000\nLED_DMA = 5\nLED_BRIGHTNESS = 255\nLED_INVERT = False\nLED_CHANNEL = 0\nLED_STRIP = ws.WS2811_STRIP_GRB\nlightUp = math.floor(percent / count)\n\n\ndef setPixel(strip):\n for i in range(count):\n if i < lightUp:\n strip.setPixelColor(i, Color(0, 255, 0))\n strip.show()\n else:\n strip.setPixelColor(i, Color(255, 0, 0))\n strip.show()\n\n\nif __name__ == '__main__':\n strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,\n LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)\n strip.begin()\n setPixel(strip)\n",
"step-4": "import sys\nimport time\nimport math\nfrom neopixel import *\ncount = int(sys.argv[1])\npercent = int(sys.argv[2])\nLED_COUNT = count\nLED_PIN = 18\nLED_FREQ_HZ = 800000\nLED_DMA = 5\nLED_BRIGHTNESS = 255\nLED_INVERT = False\nLED_CHANNEL = 0\nLED_STRIP = ws.WS2811_STRIP_GRB\nlightUp = math.floor(percent / count)\n\n\ndef setPixel(strip):\n for i in range(count):\n if i < lightUp:\n strip.setPixelColor(i, Color(0, 255, 0))\n strip.show()\n else:\n strip.setPixelColor(i, Color(255, 0, 0))\n strip.show()\n\n\nif __name__ == '__main__':\n strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,\n LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)\n strip.begin()\n setPixel(strip)\n",
"step-5": "import sys\nimport time\nimport math\nfrom neopixel import *\ncount = int(sys.argv[1])\npercent = int(sys.argv[2])\n# LED strip configuration:\nLED_COUNT = count # Number of LED pixels.\nLED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 5 # DMA channel to use for generating signal (try 5)\nLED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED_CHANNEL = 0\nLED_STRIP = ws.WS2811_STRIP_GRB\t\n#LED_STRIP = ws.SK6812W_STRIP\nlightUp = math.floor(percent/count)\n# Intialize the library (must be called once before other functions).\ndef setPixel(strip):\n\tfor i in range(count):\n\t\tif(i<lightUp):\n\t\t\tstrip.setPixelColor(i, Color(0, 255, 0))\n\t\t\tstrip.show()\n\t\telse:\n\t\t\tstrip.setPixelColor(i, Color(255, 0, 0))\n\t\t\tstrip.show()\nif __name__ == '__main__':\n\tstrip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)\t\t\n\tstrip.begin()\n\tsetPixel(strip)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
from .log_config import LogBase
import os
__all__ = ['MyLog']
class MyLog(LogBase):
"""
功能:
将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz
参数:
:param dir_path: 日志记录的路径,默认是当前路径下的log文件夹
:param logger_name: logger对象的名字
:param info_name: 保存info等级的文件名字
:param error_name:
:param warning_name:
:param debug_name:
:param interval: 压缩日志的频率,默认是7天
:param detail: bool值,记录日志是否为详细记录
:param debug: 是否记录debug,默认不记录
:param info: 是否记录info,默认记录
:param error:
:param warning:
实例方法:
get_logger()-->logger
使用举例:
# 记录四种类型的日志
logger = MyLog(debug=True).get_logger()
logger.info('info')
logger.debug('debug')
logger.error('error')
logger.warning('warning')
# # # # # # # # # # # # # # # # # # # # # # # # #
# 只记录错误日志
logger = MyLog(info=False,warning=False).get_logger()
logger.info('info')
logger.debug('debug')
logger.error('error')
logger.warning('warning')
注意:
MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.
例如:
mylog = MyLog('./logs/logs/')
mylog2 = MyLog()
logger = mylog.get_logger()
logger2 = mylog2.get_logger()
logger.info('info')
logger2 = MyLog('./logs/logs2/').get_logger()
logger2.info('info2')
以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下
"""
def __init__(self, log_path: str = './logs/', **kwargs):
self.type_need(log_path, str)
if not log_path.endswith('/'):
log_path += '/'
if not os.path.exists(log_path):
os.makedirs(log_path)
super(MyLog, self).__init__(dir_path=log_path, **kwargs)
def get_logger(self):
return self._get_logger()
@staticmethod
def type_need(parm, type_):
if not isinstance(parm, type_):
            raise TypeError(f'expected {type_}, but got {type(parm)}')
|
normal
|
{
"blob_id": "3a9987ac326131878b80cb819e3d06ce2f4cb054",
"index": 8461,
"step-1": "<mask token>\n\n\nclass MyLog(LogBase):\n <mask token>\n <mask token>\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n",
"step-2": "<mask token>\n\n\nclass MyLog(LogBase):\n \"\"\"\n 功能:\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\n\n 参数:\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\n :param logger_name: logger对象的名字\n :param info_name: 保存info等级的文件名字\n :param error_name:\n :param warning_name:\n :param debug_name:\n :param interval: 压缩日志的频率,默认是7天\n :param detail: bool值,记录日志是否为详细记录\n :param debug: 是否记录debug,默认不记录\n :param info: 是否记录info,默认记录\n :param error:\n :param warning:\n 实例方法:\n get_logger()-->logger\n\n 使用举例:\n # 记录四种类型的日志\n logger = MyLog(debug=True).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n\n # # # # # # # # # # # # # # # # # # # # # # # # #\n\n # 只记录错误日志\n logger = MyLog(info=False,warning=False).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n 注意:\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\n 例如:\n\n mylog = MyLog('./logs/logs/')\n mylog2 = MyLog()\n logger = mylog.get_logger()\n logger2 = mylog2.get_logger()\n logger.info('info')\n\n logger2 = MyLog('./logs/logs2/').get_logger()\n logger2.info('info2')\n\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\n\n\n\n \"\"\"\n\n def __init__(self, log_path: str='./logs/', **kwargs):\n self.type_need(log_path, str)\n if not log_path.endswith('/'):\n log_path += '/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n",
"step-3": "<mask token>\n__all__ = ['MyLog']\n\n\nclass MyLog(LogBase):\n \"\"\"\n 功能:\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\n\n 参数:\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\n :param logger_name: logger对象的名字\n :param info_name: 保存info等级的文件名字\n :param error_name:\n :param warning_name:\n :param debug_name:\n :param interval: 压缩日志的频率,默认是7天\n :param detail: bool值,记录日志是否为详细记录\n :param debug: 是否记录debug,默认不记录\n :param info: 是否记录info,默认记录\n :param error:\n :param warning:\n 实例方法:\n get_logger()-->logger\n\n 使用举例:\n # 记录四种类型的日志\n logger = MyLog(debug=True).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n\n # # # # # # # # # # # # # # # # # # # # # # # # #\n\n # 只记录错误日志\n logger = MyLog(info=False,warning=False).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n 注意:\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\n 例如:\n\n mylog = MyLog('./logs/logs/')\n mylog2 = MyLog()\n logger = mylog.get_logger()\n logger2 = mylog2.get_logger()\n logger.info('info')\n\n logger2 = MyLog('./logs/logs2/').get_logger()\n logger2.info('info2')\n\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\n\n\n\n \"\"\"\n\n def __init__(self, log_path: str='./logs/', **kwargs):\n self.type_need(log_path, str)\n if not log_path.endswith('/'):\n log_path += '/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n",
"step-4": "from .log_config import LogBase\nimport os\n__all__ = ['MyLog']\n\n\nclass MyLog(LogBase):\n \"\"\"\n 功能:\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\n\n 参数:\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\n :param logger_name: logger对象的名字\n :param info_name: 保存info等级的文件名字\n :param error_name:\n :param warning_name:\n :param debug_name:\n :param interval: 压缩日志的频率,默认是7天\n :param detail: bool值,记录日志是否为详细记录\n :param debug: 是否记录debug,默认不记录\n :param info: 是否记录info,默认记录\n :param error:\n :param warning:\n 实例方法:\n get_logger()-->logger\n\n 使用举例:\n # 记录四种类型的日志\n logger = MyLog(debug=True).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n\n # # # # # # # # # # # # # # # # # # # # # # # # #\n\n # 只记录错误日志\n logger = MyLog(info=False,warning=False).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n 注意:\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\n 例如:\n\n mylog = MyLog('./logs/logs/')\n mylog2 = MyLog()\n logger = mylog.get_logger()\n logger2 = mylog2.get_logger()\n logger.info('info')\n\n logger2 = MyLog('./logs/logs2/').get_logger()\n logger2.info('info2')\n\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\n\n\n\n \"\"\"\n\n def __init__(self, log_path: str='./logs/', **kwargs):\n self.type_need(log_path, str)\n if not log_path.endswith('/'):\n log_path += '/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\r\nfrom .log_config import LogBase\r\nimport os\r\n\r\n__all__ = ['MyLog']\r\n\r\n\r\nclass MyLog(LogBase):\r\n \"\"\"\r\n 功能:\r\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\r\n\r\n 参数:\r\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\r\n :param logger_name: logger对象的名字\r\n :param info_name: 保存info等级的文件名字\r\n :param error_name:\r\n :param warning_name:\r\n :param debug_name:\r\n :param interval: 压缩日志的频率,默认是7天\r\n :param detail: bool值,记录日志是否为详细记录\r\n :param debug: 是否记录debug,默认不记录\r\n :param info: 是否记录info,默认记录\r\n :param error:\r\n :param warning:\r\n 实例方法:\r\n get_logger()-->logger\r\n\r\n 使用举例:\r\n # 记录四种类型的日志\r\n logger = MyLog(debug=True).get_logger()\r\n logger.info('info')\r\n logger.debug('debug')\r\n logger.error('error')\r\n logger.warning('warning')\r\n\r\n # # # # # # # # # # # # # # # # # # # # # # # # #\r\n\r\n # 只记录错误日志\r\n logger = MyLog(info=False,warning=False).get_logger()\r\n logger.info('info')\r\n logger.debug('debug')\r\n logger.error('error')\r\n logger.warning('warning')\r\n 注意:\r\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\r\n 例如:\r\n\r\n mylog = MyLog('./logs/logs/')\r\n mylog2 = MyLog()\r\n logger = mylog.get_logger()\r\n logger2 = mylog2.get_logger()\r\n logger.info('info')\r\n\r\n logger2 = MyLog('./logs/logs2/').get_logger()\r\n logger2.info('info2')\r\n\r\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\r\n\r\n\r\n\r\n \"\"\"\r\n\r\n def __init__(self, log_path: str = './logs/', **kwargs):\r\n self.type_need(log_path, str)\r\n if not log_path.endswith('/'):\r\n log_path += '/'\r\n if not os.path.exists(log_path):\r\n os.makedirs(log_path)\r\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\r\n\r\n def get_logger(self):\r\n return self._get_logger()\r\n\r\n @staticmethod\r\n def type_need(parm, type_):\r\n if not isinstance(parm, type_):\r\n raise TypeError(f'expect {type_},but got {type(parm)}')\r\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import sys
heights = []
# read ten integer heights, one per line, from standard input
for i in range(10):
    line = sys.stdin.readline()
    height = int(line)
    heights.append(height)
heights.sort(reverse=True)  # tallest first
# print the three largest heights
for i in range(3):
    print(heights[i])
|
normal
|
{
"blob_id": "3e48de2e3b12965de1b3b5cb6c3cf68c90ec6212",
"index": 2274,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(10):\n line = sys.stdin.readline()\n height = int(line)\n heights.append(height)\nheights.sort()\nheights.reverse()\nfor i in range(3):\n print(heights[i])\n",
"step-3": "<mask token>\nheights = []\nfor i in range(10):\n line = sys.stdin.readline()\n height = int(line)\n heights.append(height)\nheights.sort()\nheights.reverse()\nfor i in range(3):\n print(heights[i])\n",
"step-4": "import sys\nheights = []\nfor i in range(10):\n line = sys.stdin.readline()\n height = int(line)\n heights.append(height)\nheights.sort()\nheights.reverse()\nfor i in range(3):\n print(heights[i])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def primeiras_ocorrencias(str):
    # map each character to the index of its first occurrence in the string
    dic = {}
    for i, letra in enumerate(str):
        if letra not in dic:
            dic[letra] = i
    return dic
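# A quick usage example:
#   primeiras_ocorrencias("banana")  ->  {'b': 0, 'a': 1, 'n': 2}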
|
normal
|
{
"blob_id": "bb1a6815649eb9e79e2ab1e110ea8acd8adce5aa",
"index": 3379,
"step-1": "<mask token>\n",
"step-2": "def primeiras_ocorrencias(str):\n dic = {}\n for i, letra in enumerate(str):\n if letra not in dic:\n dic[letra] = i\n return dic\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
num = int(input("Enter the number: "))
# build the multiplication table of num from 1 to 10
table = [num * i for i in range(1, 11)]
print(table)
# append the table to table.txt so repeated runs accumulate
with open("table.txt", "a") as f:
    f.write(f"{num} table is: {str(table)}")
    f.write('\n')
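# Example run (input is assumed): entering 7 prints the list and appends this
# line to table.txt:
#   7 table is: [7, 14, 21, 28, 35, 42, 49, 56, 63, 70]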
|
normal
|
{
"blob_id": "657ac500c40ddbd29f5e3736a78ed43e7d105478",
"index": 9417,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(table)\nwith open('table.txt', 'a') as f:\n f.write(f'{num} table is: {str(table)}')\n f.write('\\n')\n",
"step-3": "num = int(input('Enter the number: '))\ntable = [(num * i) for i in range(1, 11)]\nprint(table)\nwith open('table.txt', 'a') as f:\n f.write(f'{num} table is: {str(table)}')\n f.write('\\n')\n",
"step-4": "num=int(input(\"Enter the number: \"))\n\ntable=[num*i for i in range(1,11)]\nprint(table)\nwith open(\"table.txt\",\"a\") as f:\n f.write(f\"{num} table is: {str(table)}\")\n f.write('\\n')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import re
BASICPATTERN = r'[!/](%s)\s{,1}(.*)' # example "/animefind baka" -> (animefind, baka)
# returns compiled BASICPATTERN for each given string
def basicRegex(strings):
if not isinstance(strings,list):
return []
ans = []
for string in strings:
pattern = re.compile(BASICPATTERN % string.strip())
ans.append(pattern)
return ans
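# Minimal usage sketch (the command names here are hypothetical):
#   patterns = basicRegex(['animefind', 'help'])
#   m = patterns[0].match('/animefind baka')
#   if m:
#       command, argument = m.groups()  # ('animefind', 'baka')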
|
normal
|
{
"blob_id": "1a28aea824752d18cbd462693f8f8980dba4974e",
"index": 9387,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef basicRegex(strings):\n if not isinstance(strings, list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-3": "<mask token>\nBASICPATTERN = '[!/](%s)\\\\s{,1}(.*)'\n\n\ndef basicRegex(strings):\n if not isinstance(strings, list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-4": "import re\nBASICPATTERN = '[!/](%s)\\\\s{,1}(.*)'\n\n\ndef basicRegex(strings):\n if not isinstance(strings, list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-5": "import re\n\nBASICPATTERN = '[!/](%s)\\s{,1}(.*)' # example \"/animefind baka\" -> (animefind, baka)\n\n\n# returns compiled BASICPATTERN for each given string\ndef basicRegex(strings):\n if not isinstance(strings,list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# bot.py
import os
import shutil
import discord
import youtube_dl
from discord.ext import commands
import urllib.parse
import urllib.request
import re
import dotenv
from pathlib import Path # Python 3.6+ only
from dotenv import load_dotenv
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
client = discord.Client()
botCommand = commands.Bot(command_prefix='.')
token = os.getenv("DISCORD_TOKEN")
players = {}
@botCommand.event
async def on_ready():
print(
f'{client.user} is connected to the following guild:\n'
)
@botCommand.command(pass_context=True, aliases=['y'])
async def youtube(ctx, *, search):
query_string = urllib.parse.urlencode({
'search_query': search
})
htm_content = urllib.request.urlopen(
'http://www.youtube.com/results?' + query_string
)
print(r'/watch\?v=(.{11})')
search_results = re.findall(r'/watch\?v=(.{11})', htm_content.read().decode('utf-8'))
await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])
voice = None
q_num = 0
@botCommand.command(pass_context=True, aliases=['p', 'play'])
async def plays(ctx, *, url):
server = ctx.message.guild
global voice
channel = ctx.message.author.voice.channel
if not str(url).startswith('http'):
query_string = urllib.parse.urlencode({
'search_query': url
})
htm_content = urllib.request.urlopen(
'http://www.youtube.com/results?' + query_string
)
print(r'/watch\?v=(.{11})')
search_results = re.findall(r'/watch\?v=(.{11})', htm_content.read().decode('utf-8'))
url = 'http://www.youtube.com/watch?v=' + search_results[0]
if voice:
print("ok")
else:
voice = await channel.connect()
await ctx.send(f"Joined {channel}")
# if voice is None:
# voice = await channel.connect()
# song_there = os.path.isfile("song.mp3")
def check_queue():
print('Test')
Queue_infile = os.path.isdir("./Queue")
if Queue_infile is True:
DIR = os.path.abspath(os.path.realpath("Queue"))
length = len(os.listdir(DIR))
still_q = length - 1
try:
first_file = os.listdir(DIR)[0]
except:
print("No more queue\n")
queues.clear()
return
main_location = os.path.dirname(os.path.realpath(__file__))
song_path = os.path.abspath(os.path.realpath("Queue") + "\\" + first_file)
if length != 0:
print("Song done , playing next queue\n")
print(f"song still in queue: {still_q}")
song_there = os.path.isfile("song.mp3")
if song_there:
os.remove("song.mp3")
shutil.move(song_path, main_location)
for file in os.listdir("./"):
if file.endswith(".mp3"):
os.rename(file, 'song.mp3')
voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e: check_queue())
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.07
else:
queues.clear()
return
else:
queues.clear()
print("No song founds")
def add_queue():
print("Test")
Queue_infile = os.path.isdir("./Queue")
if Queue_infile is False:
os.mkdir("Queue")
DIR = os.path.abspath(os.path.realpath("Queue"))
q_num = len(os.listdir(DIR))
q_num += 1
add_queue = True
while add_queue:
if q_num in queues:
q_num += 1
else:
add_queue = False
queues[q_num] = q_num
        queue_path = os.path.abspath(os.path.realpath("Queue") + f"\\song{q_num}.%(ext)s")  # double backslash: a literal path separator, not a '\s' escape
ydl_opts = {
'format': 'bestaudio/best',
'quiet': True,
'outtmpl': queue_path,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192'
}],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
print("Downloading audio now\n")
ydl.download([url])
print("Song added to queue\n")
song_there = os.path.isfile("song.mp3")
try:
if song_there:
os.remove("song.mp3")
queues.clear()
print("remove old song file")
except PermissionError:
add_queue()
await ctx.send("Adding song to the queue")
return
Queue_infile = os.path.isdir("./Queue")
try:
Queue_folder = "./Queue"
if Queue_infile is True:
print("Removed old Queue folder")
shutil.rmtree(Queue_folder)
except:
print("No old queue folder")
await ctx.send("Getting everything ready now")
# voice = get(client.voice_clients, guild=ctx.guild)
ydl_opts = {
'format': 'bestaudio/best',
'quiet': True,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192'
}],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
print("Downloading audio now\n")
ydl.download([url])
for file in os.listdir("./"):
if file.endswith(".mp3"):
name = file
print(f"renamed file : {file}\n")
os.rename(file, "song.mp3")
voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e: check_queue())
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.07
nname = name.rsplit("-", 1)
await ctx.send(f"Playing :notes: `{nname[0]}` :notes:")
print("Playing\n")
queues = {}
@botCommand.command(pass_context=True)
async def ping(ctx):
await ctx.send('test')
@botCommand.command(pass_context=True)
async def join(ctx):
global vc
channel = ctx.message.author.voice.channel
    vc = await channel.connect()  # connect once and keep the voice client
@botCommand.event
async def on_message(message):
if message.author == client.user:
return
msg1 = '<@333863300892721152> davis kok pepe ya'
if message.content == 'command list':
await message.channel.send('- davis mah\n- davis\n- .plays + youtubeURL')
if message.content == 'davis mah':
for x in range(3):
await message.channel.send('davis mah paling jago')
if message.content == 'davis':
response = msg1
for x in range(3):
await message.channel.send(response)
if message.content == 'bel sama jessica':
response = 'jessica lah , https://imgur.com/TrtyIVa'
await message.channel.send(response)
if message.content == 'ig jessica':
response = 'https://www.instagram.com/h.yojeong/'
await message.channel.send(response)
await botCommand.process_commands(message)
botCommand.run(token)
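# The script expects a .env file in the working directory (loaded above via
# python-dotenv); the token value below is a placeholder:
#   DISCORD_TOKEN=your-bot-token-here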
|
normal
|
{
"blob_id": "94ca18088664393fdfdc68bfb8bcad8b78e9e36a",
"index": 7887,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nload_dotenv(dotenv_path=env_path)\n<mask token>\n\n\[email protected]\nasync def on_ready():\n print(f'{client.user} is connected to the following guild:\\n')\n\n\[email protected](pass_context=True, aliases=['y'])\nasync def youtube(ctx, *, search):\n query_string = urllib.parse.urlencode({'search_query': search})\n htm_content = urllib.request.urlopen('http://www.youtube.com/results?' +\n query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read().\n decode('utf-8'))\n await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])\n\n\n<mask token>\n\n\[email protected](pass_context=True, aliases=['p', 'play'])\nasync def plays(ctx, *, url):\n server = ctx.message.guild\n global voice\n channel = ctx.message.author.voice.channel\n if not str(url).startswith('http'):\n query_string = urllib.parse.urlencode({'search_query': url})\n htm_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read(\n ).decode('utf-8'))\n url = 'http://www.youtube.com/watch?v=' + search_results[0]\n if voice:\n print('ok')\n else:\n voice = await channel.connect()\n await ctx.send(f'Joined {channel}')\n\n def check_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is True:\n DIR = os.path.abspath(os.path.realpath('Queue'))\n length = len(os.listdir(DIR))\n still_q = length - 1\n try:\n first_file = os.listdir(DIR)[0]\n except:\n print('No more queue\\n')\n queues.clear()\n return\n main_location = os.path.dirname(os.path.realpath(__file__))\n song_path = os.path.abspath(os.path.realpath('Queue') + '\\\\' +\n first_file)\n if length != 0:\n print('Song done , playing next queue\\n')\n print(f'song still in queue: {still_q}')\n song_there = os.path.isfile('song.mp3')\n if song_there:\n os.remove('song.mp3')\n shutil.move(song_path, main_location)\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda\n e: check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n else:\n queues.clear()\n return\n else:\n queues.clear()\n print('No song founds')\n\n def add_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is False:\n os.mkdir('Queue')\n DIR = os.path.abspath(os.path.realpath('Queue'))\n q_num = len(os.listdir(DIR))\n q_num += 1\n add_queue = True\n while add_queue:\n if q_num in queues:\n q_num += 1\n else:\n add_queue = False\n queues[q_num] = q_num\n queue_path = os.path.abspath(os.path.realpath('Queue') +\n f'\\\\song{q_num}.%(ext)s')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'outtmpl':\n queue_path, 'postprocessors': [{'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3', 'preferredquality': '192'}]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n print('Song added to queue\\n')\n song_there = os.path.isfile('song.mp3')\n try:\n if song_there:\n os.remove('song.mp3')\n queues.clear()\n print('remove old song file')\n except PermissionError:\n add_queue()\n await ctx.send('Adding song to the queue')\n return\n Queue_infile = os.path.isdir('./Queue')\n try:\n Queue_folder = './Queue'\n if Queue_infile is True:\n print('Removed old Queue folder')\n shutil.rmtree(Queue_folder)\n except:\n print('No old queue 
folder')\n await ctx.send('Getting everything ready now')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'postprocessors':\n [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',\n 'preferredquality': '192'}]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n name = file\n print(f'renamed file : {file}\\n')\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e:\n check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n nname = name.rsplit('-', 1)\n await ctx.send(f'Playing :notes: `{nname[0]}` :notes:')\n print('Playing\\n')\n\n\n<mask token>\n\n\[email protected](pass_context=True)\nasync def ping(ctx):\n await ctx.send('test')\n\n\[email protected](pass_context=True)\nasync def join(ctx):\n global vc\n channel = ctx.message.author.voice.channel\n vc = channel.connect()\n await channel.connect()\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n msg1 = '<@333863300892721152> davis kok pepe ya'\n if message.content == 'command list':\n await message.channel.send(\n '- davis mah\\n- davis\\n- .plays + youtubeURL')\n if message.content == 'davis mah':\n for x in range(3):\n await message.channel.send('davis mah paling jago')\n if message.content == 'davis':\n response = msg1\n for x in range(3):\n await message.channel.send(response)\n if message.content == 'bel sama jessica':\n response = 'jessica lah , https://imgur.com/TrtyIVa'\n await message.channel.send(response)\n if message.content == 'ig jessica':\n response = 'https://www.instagram.com/h.yojeong/'\n await message.channel.send(response)\n await botCommand.process_commands(message)\n\n\nbotCommand.run(token)\n",
"step-3": "<mask token>\nenv_path = Path('.') / '.env'\nload_dotenv(dotenv_path=env_path)\nclient = discord.Client()\nbotCommand = commands.Bot(command_prefix='.')\ntoken = os.getenv('DISCORD_TOKEN')\nplayers = {}\n\n\[email protected]\nasync def on_ready():\n print(f'{client.user} is connected to the following guild:\\n')\n\n\[email protected](pass_context=True, aliases=['y'])\nasync def youtube(ctx, *, search):\n query_string = urllib.parse.urlencode({'search_query': search})\n htm_content = urllib.request.urlopen('http://www.youtube.com/results?' +\n query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read().\n decode('utf-8'))\n await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])\n\n\nvoice = None\nq_num = 0\n\n\[email protected](pass_context=True, aliases=['p', 'play'])\nasync def plays(ctx, *, url):\n server = ctx.message.guild\n global voice\n channel = ctx.message.author.voice.channel\n if not str(url).startswith('http'):\n query_string = urllib.parse.urlencode({'search_query': url})\n htm_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read(\n ).decode('utf-8'))\n url = 'http://www.youtube.com/watch?v=' + search_results[0]\n if voice:\n print('ok')\n else:\n voice = await channel.connect()\n await ctx.send(f'Joined {channel}')\n\n def check_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is True:\n DIR = os.path.abspath(os.path.realpath('Queue'))\n length = len(os.listdir(DIR))\n still_q = length - 1\n try:\n first_file = os.listdir(DIR)[0]\n except:\n print('No more queue\\n')\n queues.clear()\n return\n main_location = os.path.dirname(os.path.realpath(__file__))\n song_path = os.path.abspath(os.path.realpath('Queue') + '\\\\' +\n first_file)\n if length != 0:\n print('Song done , playing next queue\\n')\n print(f'song still in queue: {still_q}')\n song_there = os.path.isfile('song.mp3')\n if song_there:\n os.remove('song.mp3')\n shutil.move(song_path, main_location)\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda\n e: check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n else:\n queues.clear()\n return\n else:\n queues.clear()\n print('No song founds')\n\n def add_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is False:\n os.mkdir('Queue')\n DIR = os.path.abspath(os.path.realpath('Queue'))\n q_num = len(os.listdir(DIR))\n q_num += 1\n add_queue = True\n while add_queue:\n if q_num in queues:\n q_num += 1\n else:\n add_queue = False\n queues[q_num] = q_num\n queue_path = os.path.abspath(os.path.realpath('Queue') +\n f'\\\\song{q_num}.%(ext)s')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'outtmpl':\n queue_path, 'postprocessors': [{'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3', 'preferredquality': '192'}]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n print('Song added to queue\\n')\n song_there = os.path.isfile('song.mp3')\n try:\n if song_there:\n os.remove('song.mp3')\n queues.clear()\n print('remove old song file')\n except PermissionError:\n add_queue()\n await ctx.send('Adding song to the queue')\n return\n Queue_infile = os.path.isdir('./Queue')\n try:\n 
Queue_folder = './Queue'\n if Queue_infile is True:\n print('Removed old Queue folder')\n shutil.rmtree(Queue_folder)\n except:\n print('No old queue folder')\n await ctx.send('Getting everything ready now')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'postprocessors':\n [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',\n 'preferredquality': '192'}]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n name = file\n print(f'renamed file : {file}\\n')\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e:\n check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n nname = name.rsplit('-', 1)\n await ctx.send(f'Playing :notes: `{nname[0]}` :notes:')\n print('Playing\\n')\n\n\nqueues = {}\n\n\[email protected](pass_context=True)\nasync def ping(ctx):\n await ctx.send('test')\n\n\[email protected](pass_context=True)\nasync def join(ctx):\n global vc\n channel = ctx.message.author.voice.channel\n vc = channel.connect()\n await channel.connect()\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n msg1 = '<@333863300892721152> davis kok pepe ya'\n if message.content == 'command list':\n await message.channel.send(\n '- davis mah\\n- davis\\n- .plays + youtubeURL')\n if message.content == 'davis mah':\n for x in range(3):\n await message.channel.send('davis mah paling jago')\n if message.content == 'davis':\n response = msg1\n for x in range(3):\n await message.channel.send(response)\n if message.content == 'bel sama jessica':\n response = 'jessica lah , https://imgur.com/TrtyIVa'\n await message.channel.send(response)\n if message.content == 'ig jessica':\n response = 'https://www.instagram.com/h.yojeong/'\n await message.channel.send(response)\n await botCommand.process_commands(message)\n\n\nbotCommand.run(token)\n",
"step-4": "import os\nimport shutil\nimport discord\nimport youtube_dl\nfrom discord.ext import commands\nimport urllib.parse\nimport urllib.request\nimport re\nimport dotenv\nfrom pathlib import Path\nfrom dotenv import load_dotenv\nenv_path = Path('.') / '.env'\nload_dotenv(dotenv_path=env_path)\nclient = discord.Client()\nbotCommand = commands.Bot(command_prefix='.')\ntoken = os.getenv('DISCORD_TOKEN')\nplayers = {}\n\n\[email protected]\nasync def on_ready():\n print(f'{client.user} is connected to the following guild:\\n')\n\n\[email protected](pass_context=True, aliases=['y'])\nasync def youtube(ctx, *, search):\n query_string = urllib.parse.urlencode({'search_query': search})\n htm_content = urllib.request.urlopen('http://www.youtube.com/results?' +\n query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read().\n decode('utf-8'))\n await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])\n\n\nvoice = None\nq_num = 0\n\n\[email protected](pass_context=True, aliases=['p', 'play'])\nasync def plays(ctx, *, url):\n server = ctx.message.guild\n global voice\n channel = ctx.message.author.voice.channel\n if not str(url).startswith('http'):\n query_string = urllib.parse.urlencode({'search_query': url})\n htm_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read(\n ).decode('utf-8'))\n url = 'http://www.youtube.com/watch?v=' + search_results[0]\n if voice:\n print('ok')\n else:\n voice = await channel.connect()\n await ctx.send(f'Joined {channel}')\n\n def check_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is True:\n DIR = os.path.abspath(os.path.realpath('Queue'))\n length = len(os.listdir(DIR))\n still_q = length - 1\n try:\n first_file = os.listdir(DIR)[0]\n except:\n print('No more queue\\n')\n queues.clear()\n return\n main_location = os.path.dirname(os.path.realpath(__file__))\n song_path = os.path.abspath(os.path.realpath('Queue') + '\\\\' +\n first_file)\n if length != 0:\n print('Song done , playing next queue\\n')\n print(f'song still in queue: {still_q}')\n song_there = os.path.isfile('song.mp3')\n if song_there:\n os.remove('song.mp3')\n shutil.move(song_path, main_location)\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda\n e: check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n else:\n queues.clear()\n return\n else:\n queues.clear()\n print('No song founds')\n\n def add_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is False:\n os.mkdir('Queue')\n DIR = os.path.abspath(os.path.realpath('Queue'))\n q_num = len(os.listdir(DIR))\n q_num += 1\n add_queue = True\n while add_queue:\n if q_num in queues:\n q_num += 1\n else:\n add_queue = False\n queues[q_num] = q_num\n queue_path = os.path.abspath(os.path.realpath('Queue') +\n f'\\\\song{q_num}.%(ext)s')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'outtmpl':\n queue_path, 'postprocessors': [{'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3', 'preferredquality': '192'}]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n print('Song added to queue\\n')\n song_there = os.path.isfile('song.mp3')\n try:\n if song_there:\n 
os.remove('song.mp3')\n queues.clear()\n print('remove old song file')\n except PermissionError:\n add_queue()\n await ctx.send('Adding song to the queue')\n return\n Queue_infile = os.path.isdir('./Queue')\n try:\n Queue_folder = './Queue'\n if Queue_infile is True:\n print('Removed old Queue folder')\n shutil.rmtree(Queue_folder)\n except:\n print('No old queue folder')\n await ctx.send('Getting everything ready now')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'postprocessors':\n [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',\n 'preferredquality': '192'}]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n name = file\n print(f'renamed file : {file}\\n')\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e:\n check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n nname = name.rsplit('-', 1)\n await ctx.send(f'Playing :notes: `{nname[0]}` :notes:')\n print('Playing\\n')\n\n\nqueues = {}\n\n\[email protected](pass_context=True)\nasync def ping(ctx):\n await ctx.send('test')\n\n\[email protected](pass_context=True)\nasync def join(ctx):\n global vc\n channel = ctx.message.author.voice.channel\n vc = channel.connect()\n await channel.connect()\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n msg1 = '<@333863300892721152> davis kok pepe ya'\n if message.content == 'command list':\n await message.channel.send(\n '- davis mah\\n- davis\\n- .plays + youtubeURL')\n if message.content == 'davis mah':\n for x in range(3):\n await message.channel.send('davis mah paling jago')\n if message.content == 'davis':\n response = msg1\n for x in range(3):\n await message.channel.send(response)\n if message.content == 'bel sama jessica':\n response = 'jessica lah , https://imgur.com/TrtyIVa'\n await message.channel.send(response)\n if message.content == 'ig jessica':\n response = 'https://www.instagram.com/h.yojeong/'\n await message.channel.send(response)\n await botCommand.process_commands(message)\n\n\nbotCommand.run(token)\n",
"step-5": "# bot.py\nimport os\nimport shutil\nimport discord\nimport youtube_dl\nfrom discord.ext import commands\nimport urllib.parse\nimport urllib.request\nimport re\nimport dotenv\nfrom pathlib import Path # Python 3.6+ only\nfrom dotenv import load_dotenv\n\nenv_path = Path('.') / '.env'\nload_dotenv(dotenv_path=env_path)\n\nclient = discord.Client()\nbotCommand = commands.Bot(command_prefix='.')\ntoken = os.getenv(\"DISCORD_TOKEN\")\nplayers = {}\n\n\[email protected]\nasync def on_ready():\n print(\n f'{client.user} is connected to the following guild:\\n'\n )\n\n\[email protected](pass_context=True, aliases=['y'])\nasync def youtube(ctx, *, search):\n query_string = urllib.parse.urlencode({\n 'search_query': search\n })\n\n htm_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string\n )\n print(r'/watch\\?v=(.{11})')\n\n search_results = re.findall(r'/watch\\?v=(.{11})', htm_content.read().decode('utf-8'))\n await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])\n\n\nvoice = None\n\nq_num = 0\n\n\[email protected](pass_context=True, aliases=['p', 'play'])\nasync def plays(ctx, *, url):\n server = ctx.message.guild\n global voice\n channel = ctx.message.author.voice.channel\n if not str(url).startswith('http'):\n query_string = urllib.parse.urlencode({\n 'search_query': url\n })\n\n htm_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string\n )\n print(r'/watch\\?v=(.{11})')\n\n search_results = re.findall(r'/watch\\?v=(.{11})', htm_content.read().decode('utf-8'))\n url = 'http://www.youtube.com/watch?v=' + search_results[0]\n\n if voice:\n print(\"ok\")\n else:\n\n voice = await channel.connect()\n await ctx.send(f\"Joined {channel}\")\n\n # if voice is None:\n # voice = await channel.connect()\n # song_there = os.path.isfile(\"song.mp3\")\n\n def check_queue():\n print('Test')\n Queue_infile = os.path.isdir(\"./Queue\")\n if Queue_infile is True:\n DIR = os.path.abspath(os.path.realpath(\"Queue\"))\n length = len(os.listdir(DIR))\n still_q = length - 1\n try:\n first_file = os.listdir(DIR)[0]\n except:\n print(\"No more queue\\n\")\n queues.clear()\n return\n main_location = os.path.dirname(os.path.realpath(__file__))\n song_path = os.path.abspath(os.path.realpath(\"Queue\") + \"\\\\\" + first_file)\n if length != 0:\n print(\"Song done , playing next queue\\n\")\n print(f\"song still in queue: {still_q}\")\n song_there = os.path.isfile(\"song.mp3\")\n if song_there:\n os.remove(\"song.mp3\")\n shutil.move(song_path, main_location)\n for file in os.listdir(\"./\"):\n if file.endswith(\".mp3\"):\n os.rename(file, 'song.mp3')\n\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e: check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n else:\n queues.clear()\n return\n else:\n queues.clear()\n print(\"No song founds\")\n\n def add_queue():\n print(\"Test\")\n Queue_infile = os.path.isdir(\"./Queue\")\n if Queue_infile is False:\n os.mkdir(\"Queue\")\n DIR = os.path.abspath(os.path.realpath(\"Queue\"))\n q_num = len(os.listdir(DIR))\n q_num += 1\n add_queue = True\n while add_queue:\n if q_num in queues:\n q_num += 1\n else:\n add_queue = False\n queues[q_num] = q_num\n\n queue_path = os.path.abspath(os.path.realpath(\"Queue\") + f\"\\song{q_num}.%(ext)s\")\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'quiet': True,\n 'outtmpl': queue_path,\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': 
'192'\n }],\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print(\"Downloading audio now\\n\")\n ydl.download([url])\n\n print(\"Song added to queue\\n\")\n\n song_there = os.path.isfile(\"song.mp3\")\n try:\n if song_there:\n os.remove(\"song.mp3\")\n queues.clear()\n print(\"remove old song file\")\n except PermissionError:\n add_queue()\n await ctx.send(\"Adding song to the queue\")\n return\n\n Queue_infile = os.path.isdir(\"./Queue\")\n try:\n Queue_folder = \"./Queue\"\n if Queue_infile is True:\n print(\"Removed old Queue folder\")\n shutil.rmtree(Queue_folder)\n except:\n print(\"No old queue folder\")\n\n await ctx.send(\"Getting everything ready now\")\n\n # voice = get(client.voice_clients, guild=ctx.guild)\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'quiet': True,\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192'\n }],\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print(\"Downloading audio now\\n\")\n ydl.download([url])\n\n for file in os.listdir(\"./\"):\n if file.endswith(\".mp3\"):\n name = file\n print(f\"renamed file : {file}\\n\")\n os.rename(file, \"song.mp3\")\n\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e: check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n\n nname = name.rsplit(\"-\", 1)\n await ctx.send(f\"Playing :notes: `{nname[0]}` :notes:\")\n print(\"Playing\\n\")\n\n\nqueues = {}\n\n\[email protected](pass_context=True)\nasync def ping(ctx):\n await ctx.send('test')\n\n\[email protected](pass_context=True)\nasync def join(ctx):\n global vc\n channel = ctx.message.author.voice.channel\n vc = channel.connect()\n await channel.connect()\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n\n msg1 = '<@333863300892721152> davis kok pepe ya'\n\n if message.content == 'command list':\n await message.channel.send('- davis mah\\n- davis\\n- .plays + youtubeURL')\n\n if message.content == 'davis mah':\n for x in range(3):\n await message.channel.send('davis mah paling jago')\n if message.content == 'davis':\n response = msg1\n for x in range(3):\n await message.channel.send(response)\n if message.content == 'bel sama jessica':\n response = 'jessica lah , https://imgur.com/TrtyIVa'\n await message.channel.send(response)\n if message.content == 'ig jessica':\n response = 'https://www.instagram.com/h.yojeong/'\n await message.channel.send(response)\n await botCommand.process_commands(message)\n\n\nbotCommand.run(token)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db.models import Q, Avg
from django.http import JsonResponse
from rest_framework import permissions
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from base_backend import permissions as my_perms
from base_backend.utils import RequestDataFixer
from restaurants.models import User, Cuisine, MealType, AppVersion, RestaurantType, Restaurant, Menu, Order, OrderLine, \
Wilaya, City, Address, Phone
from restaurants.serializers import UserSerializer, SmsConfirmationSerializer, CuisineSerializer, \
RestaurantTypeSerializer, RestaurantSerializer, MenuSerializer, OrderLineSerializer, WilayaSerializer, \
CitySerializer, OrderWRestaurantSerializer, MealTypesWithMenuSerializer, MealTypeSerializer, OrderSerializer, \
AddressSerializer, PhoneSerializer
class LoginApi(ObtainAuthToken):
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data,
context=dict(request=request))
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
return Response(
dict(
token=token.key,
user_id=user.pk,
phone=user.phone,
email=user.email,
type=user.user_type,
photo=user.photo.url if user.photo else None,
address=user.address,
city=user.lives_in_id,
birth_date=user.birth_date,
username=user.username,
# is_participant=user.client.is_participant if user.client is not None else None,
# participant_id=user.client.participant.participant_id if user.client else None,
)
)
class UserViewSet(ModelViewSet):
serializer_class = UserSerializer
queryset = User.objects.filter(is_active=True)
def get_permissions(self):
if self.action == 'create' or self.action == 'register':
return [permissions.AllowAny()]
else:
return [permissions.IsAuthenticatedOrReadOnly()]
@action(methods=['post'], detail=False, url_path='register', permission_classes=[permissions.AllowAny()])
def register(self, request, *args, **kwargs):
response = super().create(request, *args, **kwargs)
if response:
response.data = dict(status=True, code=4)
return response
def create(self, request, *args, **kwargs):
return self.register(request, *args, **kwargs)
class OtpApi(APIView):
permission_classes = [permissions.AllowAny]
def get(self, request):
serializer = SmsConfirmationSerializer(data=request.GET)
result = serializer.resend()
if result:
response = dict(status=True, code=5)
else:
response = dict(status=False, code=21)
return Response(response)
def put(self, request):
serializer = SmsConfirmationSerializer(data=request.data)
result = serializer.activate()
if result:
response = dict(status=True, code=5)
else:
response = dict(status=False, code=20)
return Response(response)
class CuisineViewSet(ModelViewSet):
serializer_class = CuisineSerializer
permission_classes = [my_perms.IsAdminOrReadOnly]
queryset = Cuisine.objects.all()
class MealTypeViewSet(ModelViewSet):
permission_classes = [my_perms.IsAdminOrReadOnly]
serializer_class = MealTypeSerializer
queryset = MealType.objects.all()
def get_serializer(self, *args, **kwargs):
if self.action == "get_types_with_menus":
serializer_class = MealTypesWithMenuSerializer
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)
@action(['get'], detail=False, url_path="type-with-menus", )
def get_types_with_menus(self, request, *args, **kwargs):
types = self.get_queryset().filter(menus__offered_by=request.query_params.get('restaurant', 0))
types = self.get_serializer(types, many=True).data
return Response(types)
class RestaurantTypeViewSet(ModelViewSet):
serializer_class = RestaurantTypeSerializer
permission_classes = [my_perms.IsAdminOrReadOnly]
queryset = RestaurantType.objects.all()
class RestaurantViewSet(ModelViewSet):
serializer_class = RestaurantSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
queryset = Restaurant.objects.all()
def _get_recommended_restaurants(self) -> queryset:
queryset = self.get_queryset()
recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))
return recommended
def _get_special_restaurants(self) -> queryset:
queryset = self.get_queryset()
special_offers_restaurants = queryset.filter(Q(menus__discount__gt=0) | Q(on_special_day=True))
return special_offers_restaurants
@action(['get'], detail=False, url_path="get-home")
def home(self, request, *args, **kwargs):
recommended = self._get_recommended_restaurants().order_by('?')[:5]
special = self._get_special_restaurants().order_by('?')[:5]
all_restaurants = self.get_queryset().order_by('?')[:5]
recommended = self.get_serializer(recommended, many=True).data
special = self.get_serializer(special, many=True).data
all_restaurants = self.get_serializer(all_restaurants, many=True).data
response = {
'recommended': recommended,
'special': special,
'all': all_restaurants
}
return Response(response)
@action(['get'], detail=False, url_path="special-offers")
def special_offers(self, request, *args, **kwargs):
serializer = self.get_serializer(self._get_special_restaurants().order_by('-created_at'), many=True)
return Response(serializer.data)
@action(['get'], detail=False, url_path="recommended-offers")
def recommended_offers(self, request, *args, **kwargs):
serializer = self.get_serializer(self._get_recommended_restaurants().order_by('-rates_avg'), many=True)
return Response(serializer.data)
@action(['get'], detail=True, url_path="restaurant-menus")
def get_restaurant_menus(self, request, *args, **kwargs):
categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(restaurant_id=self.kwargs.get('pk'))
return Response(categorized_menus)
class MenuViewSet(ModelViewSet):
serializer_class = MenuSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
queryset = Menu.objects.all()
@action(['get'], detail=False, url_path="get-home")
def home(self, request, *args, **kwargs):
queryset = self.get_queryset()
special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]
recommended = queryset.all().order_by('?')[:5]
special_offers = self.get_serializer(special_offers, many=True).data
recommended = self.get_serializer(recommended, many=True).data
response = {
'recommended': recommended,
'special_offers': special_offers
}
return Response(data=response)
@action(['get'], detail=False, url_path="special-offers")
def special_offers(self, request, *args, **kwargs):
queryset = self.get_queryset()
special_offers = queryset.filter(~Q(discount=0)).order_by('-created_at')
serializer = self.get_serializer(special_offers, many=True)
return Response(serializer.data)
@action(['get'], detail=False, url_path="recommended-offers")
def recommended_offers(self, request, *args, **kwargs):
queryset = self.get_queryset()
recommended = queryset.all().order_by('-created_at')
serializer = self.get_serializer(recommended, many=True)
return Response(serializer.data)
class OrderViewSet(ModelViewSet):
serializer_class = OrderWRestaurantSerializer
permission_classes = [permissions.IsAuthenticated]
queryset = Order.objects.all().order_by('-created_at')
def get_serializer(self, *args, **kwargs):
if self.action == "create":
return OrderSerializer(*args, **kwargs)
return super(OrderViewSet, self).get_serializer(*args, **kwargs)
def get_queryset(self):
return super(OrderViewSet, self).get_queryset().filter(client=self.request.user.client)
def create(self, request, *args, **kwargs):
fixer = RequestDataFixer(request=request)
return super(OrderViewSet, self).create(fixer, *args, **kwargs)
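# RequestDataFixer comes from base_backend.utils and is not shown here; a plausible
# sketch (assumption only) is a request wrapper that normalizes incoming data before
# DRF validation while delegating every other attribute to the real request:
#
# class RequestDataFixer:
#     def __init__(self, request):
#         self._request = request
#         # drop empty values so serializer defaults can kick in
#         self.data = {k: v for k, v in request.data.items() if v not in ('', None)}
#
#     def __getattr__(self, name):
#         return getattr(self._request, name)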
class OrderLineViewSet(ModelViewSet):
serializer_class = OrderLineSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
queryset = OrderLine.objects.all()
class WilayaViewSet(ModelViewSet):
serializer_class = WilayaSerializer
permission_classes = [my_perms.IsAdminOrReadOnly]
queryset = Wilaya.objects.all()
class CityViewSet(ModelViewSet):
serializer_class = CitySerializer
permission_classes = [my_perms.IsAdminOrReadOnly]
queryset = City.objects.all()
def version(request):
if request.GET.get('code', None):
code = request.GET.get('code')
AppVersion.objects.all().update(code=code)
return JsonResponse({'updated': True})
else:
        # assumes at least one AppVersion row exists; .first() returns None on an empty table
        code = AppVersion.objects.all().first().code
return JsonResponse({'code': code})
class AddressViewSet(ModelViewSet):
serializer_class = AddressSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
queryset = Address.objects.all()
@action(['PUT'], detail=True, url_path="set-default", url_name='set-default')
def set_default(self, request, *args, **kwargs):
instance = self.get_object()
instance.default = True
instance.save()
self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.user.client).update(default=False)
return Response(self.get_serializer(instance).data)
@action(['PUT'], detail=False, url_path="set-main", url_name='set-main')
def set_main(self, request, *args, **kwargs):
self.get_queryset().filter(belongs_to=request.user.client).update(default=False)
return Response({"status": True})
def get_queryset(self):
return super(AddressViewSet, self).get_queryset().filter(belongs_to=self.request.user.client)
class PhoneViewSet(ModelViewSet):
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
serializer_class = PhoneSerializer
queryset = Phone.objects.all()
@action(['PUT'], detail=False, url_path="set-main", url_name='set-main')
def set_main(self, request, *args, **kwargs):
self.get_queryset().filter(user=request.user).update(default=False)
return Response({"status": True})
@action(['PUT'], detail=True, url_path="set-default", url_name='set-default')
def set_default(self, request, *args, **kwargs):
instance = self.get_object()
instance.default = True
instance.save()
self.get_queryset().filter(~Q(pk=instance.pk), user=request.user).update(default=False)
return Response(self.get_serializer(instance).data)
    def get_queryset(self):
        # calling self.get_queryset() here would recurse forever; defer to the parent queryset
        return super(PhoneViewSet, self).get_queryset().filter(user=self.request.user)
|
normal
|
{
"blob_id": "9e8b5cebd48b3b98e421c896d9835ada5ec4166e",
"index": 2740,
"step-1": "<mask token>\n\n\nclass RestaurantViewSet(ModelViewSet):\n serializer_class = RestaurantSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Restaurant.objects.all()\n\n def _get_recommended_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))\n return recommended\n\n def _get_special_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n special_offers_restaurants = queryset.filter(Q(menus__discount__gt=\n 0) | Q(on_special_day=True))\n return special_offers_restaurants\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n recommended = self._get_recommended_restaurants().order_by('?')[:5]\n special = self._get_special_restaurants().order_by('?')[:5]\n all_restaurants = self.get_queryset().order_by('?')[:5]\n recommended = self.get_serializer(recommended, many=True).data\n special = self.get_serializer(special, many=True).data\n all_restaurants = self.get_serializer(all_restaurants, many=True).data\n response = {'recommended': recommended, 'special': special, 'all':\n all_restaurants}\n return Response(response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_special_restaurants().\n order_by('-created_at'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_recommended_restaurants(\n ).order_by('-rates_avg'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=True, url_path='restaurant-menus')\n def get_restaurant_menus(self, request, *args, **kwargs):\n categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(\n restaurant_id=self.kwargs.get('pk'))\n return Response(categorized_menus)\n\n\nclass MenuViewSet(ModelViewSet):\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Menu.objects.all()\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]\n recommended = queryset.all().order_by('?')[:5]\n special_offers = self.get_serializer(special_offers, many=True).data\n recommended = self.get_serializer(recommended, many=True).data\n response = {'recommended': recommended, 'special_offers':\n special_offers}\n return Response(data=response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('-created_at'\n )\n serializer = self.get_serializer(special_offers, many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n recommended = queryset.all().order_by('-created_at')\n serializer = self.get_serializer(recommended, many=True)\n return Response(serializer.data)\n\n\nclass OrderViewSet(ModelViewSet):\n serializer_class = OrderWRestaurantSerializer\n permission_classes = [permissions.IsAuthenticated]\n queryset = Order.objects.all().order_by('-created_at')\n\n def get_serializer(self, *args, **kwargs):\n 
if self.action == 'create':\n return OrderSerializer(*args, **kwargs)\n return super(OrderViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n return super(OrderViewSet, self).get_queryset().filter(client=self.\n request.user.client)\n\n def create(self, request, *args, **kwargs):\n fixer = RequestDataFixer(request=request)\n return super(OrderViewSet, self).create(fixer, *args, **kwargs)\n\n\nclass OrderLineViewSet(ModelViewSet):\n serializer_class = OrderLineSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = OrderLine.objects.all()\n\n\nclass WilayaViewSet(ModelViewSet):\n serializer_class = WilayaSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Wilaya.objects.all()\n\n\nclass CityViewSet(ModelViewSet):\n serializer_class = CitySerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = City.objects.all()\n\n\n<mask token>\n\n\nclass AddressViewSet(ModelViewSet):\n serializer_class = AddressSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Address.objects.all()\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.\n user.client).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(belongs_to=request.user.client).update(\n default=False)\n return Response({'status': True})\n\n def get_queryset(self):\n return super(AddressViewSet, self).get_queryset().filter(belongs_to\n =self.request.user.client)\n\n\nclass PhoneViewSet(ModelViewSet):\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n serializer_class = PhoneSerializer\n queryset = Phone.objects.all()\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(user=request.user).update(default=False)\n return Response({'status': True})\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), user=request.user\n ).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n def get_queryset(self):\n return self.get_queryset().filter(user=self.request.user)\n",
"step-2": "<mask token>\n\n\nclass MealTypeViewSet(ModelViewSet):\n permission_classes = [my_perms.IsAdminOrReadOnly]\n serializer_class = MealTypeSerializer\n queryset = MealType.objects.all()\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'get_types_with_menus':\n serializer_class = MealTypesWithMenuSerializer\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)\n\n @action(['get'], detail=False, url_path='type-with-menus')\n def get_types_with_menus(self, request, *args, **kwargs):\n types = self.get_queryset().filter(menus__offered_by=request.\n query_params.get('restaurant', 0))\n types = self.get_serializer(types, many=True).data\n return Response(types)\n\n\nclass RestaurantTypeViewSet(ModelViewSet):\n serializer_class = RestaurantTypeSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = RestaurantType.objects.all()\n\n\nclass RestaurantViewSet(ModelViewSet):\n serializer_class = RestaurantSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Restaurant.objects.all()\n\n def _get_recommended_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))\n return recommended\n\n def _get_special_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n special_offers_restaurants = queryset.filter(Q(menus__discount__gt=\n 0) | Q(on_special_day=True))\n return special_offers_restaurants\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n recommended = self._get_recommended_restaurants().order_by('?')[:5]\n special = self._get_special_restaurants().order_by('?')[:5]\n all_restaurants = self.get_queryset().order_by('?')[:5]\n recommended = self.get_serializer(recommended, many=True).data\n special = self.get_serializer(special, many=True).data\n all_restaurants = self.get_serializer(all_restaurants, many=True).data\n response = {'recommended': recommended, 'special': special, 'all':\n all_restaurants}\n return Response(response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_special_restaurants().\n order_by('-created_at'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_recommended_restaurants(\n ).order_by('-rates_avg'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=True, url_path='restaurant-menus')\n def get_restaurant_menus(self, request, *args, **kwargs):\n categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(\n restaurant_id=self.kwargs.get('pk'))\n return Response(categorized_menus)\n\n\nclass MenuViewSet(ModelViewSet):\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Menu.objects.all()\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]\n recommended = queryset.all().order_by('?')[:5]\n special_offers = self.get_serializer(special_offers, many=True).data\n recommended = self.get_serializer(recommended, many=True).data\n response = 
{'recommended': recommended, 'special_offers':\n special_offers}\n return Response(data=response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('-created_at'\n )\n serializer = self.get_serializer(special_offers, many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n recommended = queryset.all().order_by('-created_at')\n serializer = self.get_serializer(recommended, many=True)\n return Response(serializer.data)\n\n\nclass OrderViewSet(ModelViewSet):\n serializer_class = OrderWRestaurantSerializer\n permission_classes = [permissions.IsAuthenticated]\n queryset = Order.objects.all().order_by('-created_at')\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'create':\n return OrderSerializer(*args, **kwargs)\n return super(OrderViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n return super(OrderViewSet, self).get_queryset().filter(client=self.\n request.user.client)\n\n def create(self, request, *args, **kwargs):\n fixer = RequestDataFixer(request=request)\n return super(OrderViewSet, self).create(fixer, *args, **kwargs)\n\n\nclass OrderLineViewSet(ModelViewSet):\n serializer_class = OrderLineSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = OrderLine.objects.all()\n\n\nclass WilayaViewSet(ModelViewSet):\n serializer_class = WilayaSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Wilaya.objects.all()\n\n\nclass CityViewSet(ModelViewSet):\n serializer_class = CitySerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = City.objects.all()\n\n\n<mask token>\n\n\nclass AddressViewSet(ModelViewSet):\n serializer_class = AddressSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Address.objects.all()\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.\n user.client).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(belongs_to=request.user.client).update(\n default=False)\n return Response({'status': True})\n\n def get_queryset(self):\n return super(AddressViewSet, self).get_queryset().filter(belongs_to\n =self.request.user.client)\n\n\nclass PhoneViewSet(ModelViewSet):\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n serializer_class = PhoneSerializer\n queryset = Phone.objects.all()\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(user=request.user).update(default=False)\n return Response({'status': True})\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), user=request.user\n ).update(default=False)\n return 
Response(self.get_serializer(instance).data)\n\n def get_queryset(self):\n return self.get_queryset().filter(user=self.request.user)\n",
"step-3": "<mask token>\n\n\nclass CuisineViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MealTypeViewSet(ModelViewSet):\n permission_classes = [my_perms.IsAdminOrReadOnly]\n serializer_class = MealTypeSerializer\n queryset = MealType.objects.all()\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'get_types_with_menus':\n serializer_class = MealTypesWithMenuSerializer\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)\n\n @action(['get'], detail=False, url_path='type-with-menus')\n def get_types_with_menus(self, request, *args, **kwargs):\n types = self.get_queryset().filter(menus__offered_by=request.\n query_params.get('restaurant', 0))\n types = self.get_serializer(types, many=True).data\n return Response(types)\n\n\nclass RestaurantTypeViewSet(ModelViewSet):\n serializer_class = RestaurantTypeSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = RestaurantType.objects.all()\n\n\nclass RestaurantViewSet(ModelViewSet):\n serializer_class = RestaurantSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Restaurant.objects.all()\n\n def _get_recommended_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))\n return recommended\n\n def _get_special_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n special_offers_restaurants = queryset.filter(Q(menus__discount__gt=\n 0) | Q(on_special_day=True))\n return special_offers_restaurants\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n recommended = self._get_recommended_restaurants().order_by('?')[:5]\n special = self._get_special_restaurants().order_by('?')[:5]\n all_restaurants = self.get_queryset().order_by('?')[:5]\n recommended = self.get_serializer(recommended, many=True).data\n special = self.get_serializer(special, many=True).data\n all_restaurants = self.get_serializer(all_restaurants, many=True).data\n response = {'recommended': recommended, 'special': special, 'all':\n all_restaurants}\n return Response(response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_special_restaurants().\n order_by('-created_at'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_recommended_restaurants(\n ).order_by('-rates_avg'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=True, url_path='restaurant-menus')\n def get_restaurant_menus(self, request, *args, **kwargs):\n categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(\n restaurant_id=self.kwargs.get('pk'))\n return Response(categorized_menus)\n\n\nclass MenuViewSet(ModelViewSet):\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Menu.objects.all()\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]\n recommended = queryset.all().order_by('?')[:5]\n special_offers = self.get_serializer(special_offers, many=True).data\n 
recommended = self.get_serializer(recommended, many=True).data\n response = {'recommended': recommended, 'special_offers':\n special_offers}\n return Response(data=response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('-created_at'\n )\n serializer = self.get_serializer(special_offers, many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n recommended = queryset.all().order_by('-created_at')\n serializer = self.get_serializer(recommended, many=True)\n return Response(serializer.data)\n\n\nclass OrderViewSet(ModelViewSet):\n serializer_class = OrderWRestaurantSerializer\n permission_classes = [permissions.IsAuthenticated]\n queryset = Order.objects.all().order_by('-created_at')\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'create':\n return OrderSerializer(*args, **kwargs)\n return super(OrderViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n return super(OrderViewSet, self).get_queryset().filter(client=self.\n request.user.client)\n\n def create(self, request, *args, **kwargs):\n fixer = RequestDataFixer(request=request)\n return super(OrderViewSet, self).create(fixer, *args, **kwargs)\n\n\nclass OrderLineViewSet(ModelViewSet):\n serializer_class = OrderLineSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = OrderLine.objects.all()\n\n\nclass WilayaViewSet(ModelViewSet):\n serializer_class = WilayaSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Wilaya.objects.all()\n\n\nclass CityViewSet(ModelViewSet):\n serializer_class = CitySerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = City.objects.all()\n\n\n<mask token>\n\n\nclass AddressViewSet(ModelViewSet):\n serializer_class = AddressSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Address.objects.all()\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.\n user.client).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(belongs_to=request.user.client).update(\n default=False)\n return Response({'status': True})\n\n def get_queryset(self):\n return super(AddressViewSet, self).get_queryset().filter(belongs_to\n =self.request.user.client)\n\n\nclass PhoneViewSet(ModelViewSet):\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n serializer_class = PhoneSerializer\n queryset = Phone.objects.all()\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(user=request.user).update(default=False)\n return Response({'status': True})\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n 
self.get_queryset().filter(~Q(pk=instance.pk), user=request.user\n ).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n def get_queryset(self):\n return self.get_queryset().filter(user=self.request.user)\n",
"step-4": "<mask token>\n\n\nclass CuisineViewSet(ModelViewSet):\n serializer_class = CuisineSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Cuisine.objects.all()\n\n\nclass MealTypeViewSet(ModelViewSet):\n permission_classes = [my_perms.IsAdminOrReadOnly]\n serializer_class = MealTypeSerializer\n queryset = MealType.objects.all()\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'get_types_with_menus':\n serializer_class = MealTypesWithMenuSerializer\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)\n\n @action(['get'], detail=False, url_path='type-with-menus')\n def get_types_with_menus(self, request, *args, **kwargs):\n types = self.get_queryset().filter(menus__offered_by=request.\n query_params.get('restaurant', 0))\n types = self.get_serializer(types, many=True).data\n return Response(types)\n\n\nclass RestaurantTypeViewSet(ModelViewSet):\n serializer_class = RestaurantTypeSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = RestaurantType.objects.all()\n\n\nclass RestaurantViewSet(ModelViewSet):\n serializer_class = RestaurantSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Restaurant.objects.all()\n\n def _get_recommended_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))\n return recommended\n\n def _get_special_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n special_offers_restaurants = queryset.filter(Q(menus__discount__gt=\n 0) | Q(on_special_day=True))\n return special_offers_restaurants\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n recommended = self._get_recommended_restaurants().order_by('?')[:5]\n special = self._get_special_restaurants().order_by('?')[:5]\n all_restaurants = self.get_queryset().order_by('?')[:5]\n recommended = self.get_serializer(recommended, many=True).data\n special = self.get_serializer(special, many=True).data\n all_restaurants = self.get_serializer(all_restaurants, many=True).data\n response = {'recommended': recommended, 'special': special, 'all':\n all_restaurants}\n return Response(response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_special_restaurants().\n order_by('-created_at'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_recommended_restaurants(\n ).order_by('-rates_avg'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=True, url_path='restaurant-menus')\n def get_restaurant_menus(self, request, *args, **kwargs):\n categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(\n restaurant_id=self.kwargs.get('pk'))\n return Response(categorized_menus)\n\n\nclass MenuViewSet(ModelViewSet):\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Menu.objects.all()\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]\n recommended = 
queryset.all().order_by('?')[:5]\n special_offers = self.get_serializer(special_offers, many=True).data\n recommended = self.get_serializer(recommended, many=True).data\n response = {'recommended': recommended, 'special_offers':\n special_offers}\n return Response(data=response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('-created_at'\n )\n serializer = self.get_serializer(special_offers, many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n recommended = queryset.all().order_by('-created_at')\n serializer = self.get_serializer(recommended, many=True)\n return Response(serializer.data)\n\n\nclass OrderViewSet(ModelViewSet):\n serializer_class = OrderWRestaurantSerializer\n permission_classes = [permissions.IsAuthenticated]\n queryset = Order.objects.all().order_by('-created_at')\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'create':\n return OrderSerializer(*args, **kwargs)\n return super(OrderViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n return super(OrderViewSet, self).get_queryset().filter(client=self.\n request.user.client)\n\n def create(self, request, *args, **kwargs):\n fixer = RequestDataFixer(request=request)\n return super(OrderViewSet, self).create(fixer, *args, **kwargs)\n\n\nclass OrderLineViewSet(ModelViewSet):\n serializer_class = OrderLineSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = OrderLine.objects.all()\n\n\nclass WilayaViewSet(ModelViewSet):\n serializer_class = WilayaSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Wilaya.objects.all()\n\n\nclass CityViewSet(ModelViewSet):\n serializer_class = CitySerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = City.objects.all()\n\n\n<mask token>\n\n\nclass AddressViewSet(ModelViewSet):\n serializer_class = AddressSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Address.objects.all()\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.\n user.client).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(belongs_to=request.user.client).update(\n default=False)\n return Response({'status': True})\n\n def get_queryset(self):\n return super(AddressViewSet, self).get_queryset().filter(belongs_to\n =self.request.user.client)\n\n\nclass PhoneViewSet(ModelViewSet):\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n serializer_class = PhoneSerializer\n queryset = Phone.objects.all()\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(user=request.user).update(default=False)\n return Response({'status': True})\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, 
**kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), user=request.user\n ).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n def get_queryset(self):\n return self.get_queryset().filter(user=self.request.user)\n",
"step-5": "from django.db.models import Q, Avg\nfrom django.http import JsonResponse\nfrom rest_framework import permissions\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom base_backend import permissions as my_perms\nfrom base_backend.utils import RequestDataFixer\nfrom restaurants.models import User, Cuisine, MealType, AppVersion, RestaurantType, Restaurant, Menu, Order, OrderLine, \\\n Wilaya, City, Address, Phone\nfrom restaurants.serializers import UserSerializer, SmsConfirmationSerializer, CuisineSerializer, \\\n RestaurantTypeSerializer, RestaurantSerializer, MenuSerializer, OrderLineSerializer, WilayaSerializer, \\\n CitySerializer, OrderWRestaurantSerializer, MealTypesWithMenuSerializer, MealTypeSerializer, OrderSerializer, \\\n AddressSerializer, PhoneSerializer\n\n\nclass LoginApi(ObtainAuthToken):\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data,\n context=dict(request=request))\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n\n return Response(\n dict(\n token=token.key,\n user_id=user.pk,\n phone=user.phone,\n email=user.email,\n type=user.user_type,\n photo=user.photo.url if user.photo else None,\n address=user.address,\n city=user.lives_in_id,\n birth_date=user.birth_date,\n username=user.username,\n # is_participant=user.client.is_participant if user.client is not None else None,\n # participant_id=user.client.participant.participant_id if user.client else None,\n )\n )\n\n\nclass UserViewSet(ModelViewSet):\n serializer_class = UserSerializer\n queryset = User.objects.filter(is_active=True)\n\n def get_permissions(self):\n if self.action == 'create' or self.action == 'register':\n return [permissions.AllowAny()]\n else:\n return [permissions.IsAuthenticatedOrReadOnly()]\n\n @action(methods=['post'], detail=False, url_path='register', permission_classes=[permissions.AllowAny()])\n def register(self, request, *args, **kwargs):\n response = super().create(request, *args, **kwargs)\n if response:\n response.data = dict(status=True, code=4)\n return response\n\n def create(self, request, *args, **kwargs):\n return self.register(request, *args, **kwargs)\n\n\nclass OtpApi(APIView):\n permission_classes = [permissions.AllowAny]\n\n def get(self, request):\n serializer = SmsConfirmationSerializer(data=request.GET)\n result = serializer.resend()\n if result:\n response = dict(status=True, code=5)\n else:\n response = dict(status=False, code=21)\n return Response(response)\n\n def put(self, request):\n serializer = SmsConfirmationSerializer(data=request.data)\n result = serializer.activate()\n if result:\n response = dict(status=True, code=5)\n else:\n response = dict(status=False, code=20)\n return Response(response)\n\n\nclass CuisineViewSet(ModelViewSet):\n serializer_class = CuisineSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Cuisine.objects.all()\n\n\nclass MealTypeViewSet(ModelViewSet):\n permission_classes = [my_perms.IsAdminOrReadOnly]\n serializer_class = MealTypeSerializer\n queryset = MealType.objects.all()\n\n def get_serializer(self, *args, **kwargs):\n if self.action == \"get_types_with_menus\":\n serializer_class = 
MealTypesWithMenuSerializer\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)\n\n @action(['get'], detail=False, url_path=\"type-with-menus\", )\n def get_types_with_menus(self, request, *args, **kwargs):\n types = self.get_queryset().filter(menus__offered_by=request.query_params.get('restaurant', 0))\n types = self.get_serializer(types, many=True).data\n return Response(types)\n\n\nclass RestaurantTypeViewSet(ModelViewSet):\n serializer_class = RestaurantTypeSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = RestaurantType.objects.all()\n\n\nclass RestaurantViewSet(ModelViewSet):\n serializer_class = RestaurantSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Restaurant.objects.all()\n\n def _get_recommended_restaurants(self) -> queryset:\n queryset = self.get_queryset()\n recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))\n return recommended\n\n def _get_special_restaurants(self) -> queryset:\n queryset = self.get_queryset()\n special_offers_restaurants = queryset.filter(Q(menus__discount__gt=0) | Q(on_special_day=True))\n return special_offers_restaurants\n\n @action(['get'], detail=False, url_path=\"get-home\")\n def home(self, request, *args, **kwargs):\n recommended = self._get_recommended_restaurants().order_by('?')[:5]\n special = self._get_special_restaurants().order_by('?')[:5]\n all_restaurants = self.get_queryset().order_by('?')[:5]\n recommended = self.get_serializer(recommended, many=True).data\n special = self.get_serializer(special, many=True).data\n all_restaurants = self.get_serializer(all_restaurants, many=True).data\n response = {\n 'recommended': recommended,\n 'special': special,\n 'all': all_restaurants\n }\n return Response(response)\n\n @action(['get'], detail=False, url_path=\"special-offers\")\n def special_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_special_restaurants().order_by('-created_at'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path=\"recommended-offers\")\n def recommended_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_recommended_restaurants().order_by('-rates_avg'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=True, url_path=\"restaurant-menus\")\n def get_restaurant_menus(self, request, *args, **kwargs):\n categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(restaurant_id=self.kwargs.get('pk'))\n return Response(categorized_menus)\n\n\nclass MenuViewSet(ModelViewSet):\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Menu.objects.all()\n\n @action(['get'], detail=False, url_path=\"get-home\")\n def home(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]\n recommended = queryset.all().order_by('?')[:5]\n special_offers = self.get_serializer(special_offers, many=True).data\n recommended = self.get_serializer(recommended, many=True).data\n response = {\n 'recommended': recommended,\n 'special_offers': special_offers\n }\n return Response(data=response)\n\n @action(['get'], detail=False, url_path=\"special-offers\")\n def special_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = 
queryset.filter(~Q(discount=0)).order_by('-created_at')\n serializer = self.get_serializer(special_offers, many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path=\"recommended-offers\")\n def recommended_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n recommended = queryset.all().order_by('-created_at')\n serializer = self.get_serializer(recommended, many=True)\n return Response(serializer.data)\n\n\nclass OrderViewSet(ModelViewSet):\n serializer_class = OrderWRestaurantSerializer\n permission_classes = [permissions.IsAuthenticated]\n queryset = Order.objects.all().order_by('-created_at')\n\n def get_serializer(self, *args, **kwargs):\n if self.action == \"create\":\n return OrderSerializer(*args, **kwargs)\n return super(OrderViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n return super(OrderViewSet, self).get_queryset().filter(client=self.request.user.client)\n\n def create(self, request, *args, **kwargs):\n fixer = RequestDataFixer(request=request)\n return super(OrderViewSet, self).create(fixer, *args, **kwargs)\n\n\nclass OrderLineViewSet(ModelViewSet):\n serializer_class = OrderLineSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = OrderLine.objects.all()\n\n\nclass WilayaViewSet(ModelViewSet):\n serializer_class = WilayaSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Wilaya.objects.all()\n\n\nclass CityViewSet(ModelViewSet):\n serializer_class = CitySerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = City.objects.all()\n\n\ndef version(request):\n print('inside this')\n if request.GET.get('code', None):\n code = request.GET.get('code')\n AppVersion.objects.all().update(code=code)\n return JsonResponse({'updated': True})\n else:\n code = AppVersion.objects.all().first().code\n return JsonResponse({'code': code})\n\n\nclass AddressViewSet(ModelViewSet):\n serializer_class = AddressSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Address.objects.all()\n\n @action(['PUT'], detail=True, url_path=\"set-default\", url_name='set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.user.client).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n @action(['PUT'], detail=False, url_path=\"set-main\", url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(belongs_to=request.user.client).update(default=False)\n return Response({\"status\": True})\n\n def get_queryset(self):\n return super(AddressViewSet, self).get_queryset().filter(belongs_to=self.request.user.client)\n\n\nclass PhoneViewSet(ModelViewSet):\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n serializer_class = PhoneSerializer\n queryset = Phone.objects.all()\n\n @action(['PUT'], detail=False, url_path=\"set-main\", url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(user=request.user).update(default=False)\n return Response({\"status\": True})\n\n @action(['PUT'], detail=True, url_path=\"set-default\", url_name='set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), user=request.user).update(default=False)\n 
return Response(self.get_serializer(instance).data)\n\n def get_queryset(self):\n return self.get_queryset().filter(user=self.request.user)\n",
"step-ids": [
34,
40,
41,
42,
56
]
}
|
[
34,
40,
41,
42,
56
] |
# https://github.com/jscancella/NYTribuneOCRExperiments/blob/master/findText_usingSums.py
import os
import io
from pathlib import Path
import sys
os.environ['OPENCV_IO_ENABLE_JASPER']='True' # has to be set before importing cv2 otherwise it won't read the variable
import numpy as np
import cv2
import subprocess
from multiprocessing import Pool
from scipy.signal import find_peaks, find_peaks_cwt
import scipy.ndimage as ndimage
from IPython.display import Image as KImage
#custom kernel that is used to blend together text in the Y axis
DILATE_KERNEL = np.array([
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0]], dtype=np.uint8)
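# The 9x9 kernel above is just a single vertical line of ones through the center
# column; an equivalent construction (an aside, not from the original code) using
# OpenCV's structuring-element helper would be the 9x1 vertical line:
# DILATE_KERNEL = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 9))  # (width=1, height=9)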
# Run adaptive thresholding (much slower than the rest of the pipeline, which is why it is disabled by default below)
def adaptative_thresholding(img, threshold):
# Load image
I = img
# Convert image to grayscale
gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
# Original image size
orignrows, origncols = gray.shape
# Windows size
M = int(np.floor(orignrows/16) + 1)
N = int(np.floor(origncols/16) + 1)
# Image border padding related to windows size
Mextend = round(M/2)-1
Nextend = round(N/2)-1
# Padding image
aux =cv2.copyMakeBorder(gray, top=Mextend, bottom=Mextend, left=Nextend,
right=Nextend, borderType=cv2.BORDER_REFLECT)
windows = np.zeros((M,N),np.int32)
# Image integral calculation
imageIntegral = cv2.integral(aux, windows,-1)
# Integral image size
nrows, ncols = imageIntegral.shape
# Memory allocation for cumulative region image
result = np.zeros((orignrows, origncols))
# Image cumulative pixels in windows size calculation
for i in range(nrows-M):
for j in range(ncols-N):
result[i, j] = imageIntegral[i+M, j+N] - imageIntegral[i, j+N]+ imageIntegral[i, j] - imageIntegral[i+M,j]
# Output binary image memory allocation
    binar = np.ones((orignrows, origncols), dtype=bool)  # np.bool was removed in NumPy 1.24+; use the builtin bool
# Gray image weighted by windows size
graymult = (gray).astype('float64')*M*N
# Output image binarization
binar[graymult <= result*(100.0 - threshold)/100.0] = False
# binary image to UINT8 conversion
binar = (255*binar).astype(np.uint8)
return binar
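# A minimal alternative sketch (assumption, not part of the original pipeline):
# OpenCV ships a built-in adaptive threshold that approximates the integral-image
# approach above and runs much faster; block_size and c below are illustrative guesses.
def adaptative_thresholding_cv2(img, block_size=31, c=10):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # per-pixel threshold = mean over a block_size x block_size neighborhood minus c
    return cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY, block_size, c)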
def Q_test(sorted_data):
    """Dixon's Q test at 95% confidence: returns True when the smallest value in
    sorted_data is a statistical outlier. Critical values cover sample sizes 3-9."""
    conf95_level = {3: .97, 4: .829, 5: .71, 6: .625, 7: .568, 8: .526, 9: .493}
q_exp = abs(sorted_data[1] - sorted_data[0]) / abs(sorted_data[-1] - sorted_data[0])
print(str(abs(sorted_data[1] - sorted_data[0])) + ' / ' + str(abs(sorted_data[-1] - sorted_data[0])))
print("q_exp : " + str(q_exp))
return q_exp > conf95_level[min(9, len(sorted_data))]
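# A quick worked example (illustrative numbers only): for sorted data
# [100, 700, 720, 740], q_exp = |700 - 100| / |740 - 100| = 600/640 ~= 0.94,
# which exceeds the n=4 critical value of 0.829, so the low value 100 would be
# rejected as an outlier:
# Q_test([100, 700, 720, 740])  # -> True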
# static variables for clarity
COLUMNS = 0
GREEN = (0, 255, 0)
# parameters that can be tweaked
LINE_THICKNESS = 3 # how thick to make the line around the found contours in the debug output
PADDING = 10 # padding to add around the found possible column to help account for image skew and such
CREATE_COLUMN_OUTLINE_IMAGES = True # if we detect that we didn't find all the columns. Create a debug image (tiff) showing the columns that were found
def columnIndexes(a):
    """
    creates pairs of left and right indexes for the image columns.
    For example [13, 1257, 2474, 3695, 4907, 6149]
    becomes: [[13 1257], [1257 2474], [2474 3695], [3695 4907], [4907 6149]]
    """
    nrows = a.size - 1
    return a[np.arange(nrows)[:, None] + np.arange(2)]
def convertToGrayscale(img):
temp_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
return temp_img
def invert(img):
""" Black -> White | White -> Black """
print("invert image")
    # 3/18/21 - experimented with the threshold value; 140 works well for these scans
    _, temp_img = cv2.threshold(img, 140, 255, cv2.THRESH_BINARY_INV)
return temp_img
def dilateDirection(img, debug=False):
"""
    It is just the opposite of erosion: a pixel element is '1' if at least one pixel under the kernel is '1',
    so it increases the white region in the image (the foreground object grows).
    Normally, in cases like noise removal, erosion is followed by dilation,
    because erosion removes white noise but also shrinks our object,
    so we dilate it afterwards. Since the noise is gone, it won't come back, but our object area increases.
    Dilation is also useful for joining broken parts of an object.
"""
print("applying dilation morph")
temp_img = cv2.dilate(img, DILATE_KERNEL, iterations=15) #the more iterations the more the text gets stretched in the Y axis, 15 seems about right.
'''
if debug:
filepath = os.path.join(debugOutputDirectory, '%s-dilation.tiff' % basename)
cv2.imwrite(filepath, temp_img)
'''
return temp_img
def createColumnImages(img, basename, directory):
"""
we sum each column of the inverted image. The columns should show up as peaks in the sums
uses scipy.signal.find_peaks to find those peaks and use them as column indexes
"""
files = []
temp_img = convertToGrayscale(img)
temp_img = invert(temp_img)
temp_img = dilateDirection(temp_img)
sums = np.sum(temp_img, axis = COLUMNS)
sums[0] = 1000 # some random value so that find_peaks properly detects the peak for the left most column
    sums = sums * -4 # invert so that minimums become maximums and exaggerate the data so it is more clear what the peaks are
    peaks, _ = find_peaks(sums, distance=600) # the column indexes of the img array, spaced at least 600 away from the previous peak
sum_to_index = dict((sums[peaks[i]], peaks[i]) for i in range(len(peaks)))
sorted_sums = sorted(sum_to_index.keys())
'''
qr = Q_test(sorted_sums)
if qr:
peaks = peaks[peaks != sum_to_index[sorted_sums[0]]]
'''
print("PeakNum, Sum, QRemove for " + basename)
for x in peaks:
print(str(x) + ', ' + str(sums[x]))
print("----------")
if peaks.size == 0:
with open('troublesomeImages.txt', 'a') as f:
print("ERROR: something went wrong with finding the peaks for image: ", os.path.join(directory, basename))
f.write(os.path.join(directory, basename) + ".jpg 0\n")
return files
    peaks[0] = 0 # automatically make the left most column index the start of the image
    peaks[-1] = sums.size - 1 # automatically make the right most column index the end of the image
boxed = np.copy(img)
if peaks.size < 6:
with open('troublesomeImages.txt', 'a') as f:
print("found image that is causing problems: ", os.path.join(directory, basename))
f.write(os.path.join(directory, basename) + ".jpg " + str(peaks.size) + "\n")
columnIndexPairs = columnIndexes(peaks)
ystart = 0
yend = img.shape[0]
for columnIndexPair in columnIndexPairs:
xstart = max(columnIndexPair[0]-PADDING, 0)
xend = min(columnIndexPair[1]+PADDING, img.shape[1])
if not os.path.exists(directory):
os.makedirs(directory)
filepath = os.path.join(directory, '%s_xStart%s_xEnd%s.jpg' % (basename, xstart,xend))
files.append(filepath)
crop_img = img[ystart:yend, xstart:xend]
print("writing out cropped image: ", filepath)
# Apply adaptative thresholding to the image with a threshold of 25/100
#crop_img = adaptative_thresholding(crop_img, 25)
        if not cv2.imwrite(filepath, crop_img):
            print('ERROR: failed to write out cropped image: ', filepath)
if CREATE_COLUMN_OUTLINE_IMAGES:
cv2.rectangle(boxed,(xstart,ystart),(xend,yend), GREEN, LINE_THICKNESS)
if CREATE_COLUMN_OUTLINE_IMAGES:
filepath = os.path.join(directory, '%s-contours.jpeg' % basename)
cv2.imwrite(filepath, boxed, [cv2.IMWRITE_JPEG_QUALITY, 50])
# For removing the old image?
# os.remove(os.path.join(directory, basename + ".jp2"))
return files
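# A tiny worked illustration (made-up numbers, not from the original code) of the
# peak logic above: white gutters between columns have near-zero column sums in the
# inverted image, so after multiplying by -4 they become the largest values and
# find_peaks picks them out:
# sums = np.array([0, 900, 880, 5, 870, 860, 3, 890]) * -4
# find_peaks(sums)  # -> peaks at indexes 3 and 6 (the gutters)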
def invert_experiment():
test_img = cv2.imread('./ocr/data/8k71pf94q/1_commonwealth_8k71pf94q_accessFull.jpg')
for thresh in range(1, 200, 20):
print('writing thresh= ' + str(thresh))
_,temp_img = cv2.threshold(test_img, thresh, 255, cv2.THRESH_BINARY_INV)
cv2.imwrite('./ocr/test_images/thresh='+str(thresh)+'.jpg', temp_img)
def test(img, basename):
#h, w, _ = img.shape
#test_img = cv2.imread('./ocr/data/8k71pf94q/2_commonwealth_8k71pf94q_accessFull.jpg')
test_img = convertToGrayscale(img)
#ret,test_img = cv2.threshold(test_img,25,255,0)
#cv2.imwrite('./ocr/test_images/contours/'+basename+'prepixelcrop.jpg', test_img)
#test_img = test_img[10:h-10, 10: w-10]
#y_nonzero, x_nonzero = np.nonzero(test_img)
#test_img = test_img[np.min(y_nonzero):np.max(y_nonzero), np.min(x_nonzero):np.max(x_nonzero)]
test_img = invert(test_img)
test_img = dilateDirection(test_img)
#contours,hierarchy = cv2.findContours(test_img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#cnt = contours[0]
#x,y,w,h = cv2.boundingRect(cnt)
#test_img = cv2.rectangle(img,(10,10),(w-10, h-10), GREEN, LINE_THICKNESS)
#test_img = cv2.drawContours(test_img, contours, -1, GREEN, LINE_THICKNESS)
#crop = test_img[y:y+h,x:x+w]
cv2.imwrite('./ocr/test_images/contours/'+basename+'dilated.jpg', test_img)
'''
for r in range(0, 40, 5):
name = 'rank=' + str(r) + ".jpg"
path = './ocr/test_images/' + name
new_img = ndimage.rank_filter(test_img, rank=r, size=20)
print("writing " + name)
cv2.imwrite(path, new_img)
'''
#cv2.imwrite('./ocr/test_images/inverted.jpg', test_img)
if __name__ == "__main__":
print("STARTING")
    # run the same column-extraction pass over each newspaper issue folder
    for folder in ('gb19gw39h', '8k71pf94q', 'mc87rq85m'):
        data_dir = os.path.join('./ocr/data/', folder)
        for f in os.listdir(data_dir):
            if f.endswith(".jpg"):
                createColumnImages(cv2.imread(os.path.join(data_dir, f)), folder + '-' + f[0], './ocr/columns/' + folder + '/')
'''
data_folder = './ocr/data/'
for folder in os.listdir(data_folder):
if folder == ".DS_Store":
continue
for file in os.listdir(os.path.join(data_folder, folder)):
if file.endswith(".jpg"):
print("calling test() on " + file)
#test(cv2.imread(os.path.join(data_folder, folder, file)),folder+'-'+file[0])
createColumnImages(cv2.imread(os.path.join(data_folder, folder, file)), folder+'-'+file[0], './ocr/columns/'+folder+'/')
for f in os.listdir('./ocr/data/8k71pr786/'):
if f.endswith(".jpg"):
for d in range(550, 850, 50):
createColumnImages(cv2.imread(os.path.join('./ocr/data/8k71pr786/', f)), '8k71pr786-'+f[0]+'-d=' + str(d), './ocr/test_images/test_contour/8k71pr786/', d)
#createColumnImages(cv2.imread('./ocr/data/8k71pr786/'), 'tester2', './ocr/data/columns/tester/')
'''
|
normal
|
{
"blob_id": "91d240b02b9d7a6c569656337521482d57918754",
"index": 4333,
"step-1": "<mask token>\n\n\ndef adaptative_thresholding(img, threshold):\n I = img\n gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\n orignrows, origncols = gray.shape\n M = int(np.floor(orignrows / 16) + 1)\n N = int(np.floor(origncols / 16) + 1)\n Mextend = round(M / 2) - 1\n Nextend = round(N / 2) - 1\n aux = cv2.copyMakeBorder(gray, top=Mextend, bottom=Mextend, left=\n Nextend, right=Nextend, borderType=cv2.BORDER_REFLECT)\n windows = np.zeros((M, N), np.int32)\n imageIntegral = cv2.integral(aux, windows, -1)\n nrows, ncols = imageIntegral.shape\n result = np.zeros((orignrows, origncols))\n for i in range(nrows - M):\n for j in range(ncols - N):\n result[i, j] = imageIntegral[i + M, j + N] - imageIntegral[i, j + N\n ] + imageIntegral[i, j] - imageIntegral[i + M, j]\n binar = np.ones((orignrows, origncols), dtype=np.bool)\n graymult = gray.astype('float64') * M * N\n binar[graymult <= result * (100.0 - threshold) / 100.0] = False\n binar = (255 * binar).astype(np.uint8)\n return binar\n\n\n<mask token>\n\n\ndef convertToGrayscale(img):\n temp_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n return temp_img\n\n\ndef invert(img):\n \"\"\" Black -> White | White -> Black \"\"\"\n print('invert image')\n _, temp_img = cv2.threshold(img, 140, 255, cv2.THRESH_BINARY_INV)\n return temp_img\n\n\ndef dilateDirection(img, debug=False):\n \"\"\"\n It is just opposite of erosion. Here, a pixel element is '1' if atleast one pixel under the kernel is '1'. \n So it increases the white region in the image or size of foreground object increases. \n Normally, in cases like noise removal, erosion is followed by dilation. \n Because, erosion removes white noises, but it also shrinks our object. \n So we dilate it. Since noise is gone, they won't come back, but our object area increases. \n It is also useful in joining broken parts of an object. \n \"\"\"\n print('applying dilation morph')\n temp_img = cv2.dilate(img, DILATE_KERNEL, iterations=15)\n \"\"\"\n if debug:\n filepath = os.path.join(debugOutputDirectory, '%s-dilation.tiff' % basename)\n cv2.imwrite(filepath, temp_img)\n \"\"\"\n return temp_img\n\n\ndef createColumnImages(img, basename, directory):\n \"\"\"\n we sum each column of the inverted image. 
The columns should show up as peaks in the sums\n uses scipy.signal.find_peaks to find those peaks and use them as column indexes\n \"\"\"\n files = []\n temp_img = convertToGrayscale(img)\n temp_img = invert(temp_img)\n temp_img = dilateDirection(temp_img)\n sums = np.sum(temp_img, axis=COLUMNS)\n sums[0] = 1000\n sums = sums * -4\n peaks, _ = find_peaks(sums, distance=600)\n sum_to_index = dict((sums[peaks[i]], peaks[i]) for i in range(len(peaks)))\n sorted_sums = sorted(sum_to_index.keys())\n \"\"\"\n qr = Q_test(sorted_sums)\n if qr:\n peaks = peaks[peaks != sum_to_index[sorted_sums[0]]]\n \"\"\"\n print('PeakNum, Sum, QRemove for ' + basename)\n for x in peaks:\n print(str(x) + ', ' + str(sums[x]))\n print('----------')\n if peaks.size == 0:\n with open('troublesomeImages.txt', 'a') as f:\n print(\n 'ERROR: something went wrong with finding the peaks for image: '\n , os.path.join(directory, basename))\n f.write(os.path.join(directory, basename) + '.jpg 0\\n')\n return files\n peaks[0] = 0\n peaks[-1] = sums.size - 1\n boxed = np.copy(img)\n if peaks.size < 6:\n with open('troublesomeImages.txt', 'a') as f:\n print('found image that is causing problems: ', os.path.join(\n directory, basename))\n f.write(os.path.join(directory, basename) + '.jpg ' + str(peaks\n .size) + '\\n')\n columnIndexPairs = columnIndexes(peaks)\n ystart = 0\n yend = img.shape[0]\n for columnIndexPair in columnIndexPairs:\n xstart = max(columnIndexPair[0] - PADDING, 0)\n xend = min(columnIndexPair[1] + PADDING, img.shape[1])\n if not os.path.exists(directory):\n os.makedirs(directory)\n filepath = os.path.join(directory, '%s_xStart%s_xEnd%s.jpg' % (\n basename, xstart, xend))\n files.append(filepath)\n crop_img = img[ystart:yend, xstart:xend]\n print('writing out cropped image: ', filepath)\n if not cv2.imwrite(filepath, crop_img):\n print('failed')\n if CREATE_COLUMN_OUTLINE_IMAGES:\n cv2.rectangle(boxed, (xstart, ystart), (xend, yend), GREEN,\n LINE_THICKNESS)\n if CREATE_COLUMN_OUTLINE_IMAGES:\n filepath = os.path.join(directory, '%s-contours.jpeg' % basename)\n cv2.imwrite(filepath, boxed, [cv2.IMWRITE_JPEG_QUALITY, 50])\n return files\n\n\n<mask token>\n\n\ndef test(img, basename):\n test_img = convertToGrayscale(img)\n test_img = invert(test_img)\n test_img = dilateDirection(test_img)\n cv2.imwrite('./ocr/test_images/contours/' + basename + 'dilated.jpg',\n test_img)\n \"\"\"\n for r in range(0, 40, 5):\n name = 'rank=' + str(r) + \".jpg\"\n path = './ocr/test_images/' + name\n\n new_img = ndimage.rank_filter(test_img, rank=r, size=20)\n print(\"writing \" + name)\n cv2.imwrite(path, new_img)\n \"\"\"\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef adaptative_thresholding(img, threshold):\n I = img\n gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\n orignrows, origncols = gray.shape\n M = int(np.floor(orignrows / 16) + 1)\n N = int(np.floor(origncols / 16) + 1)\n Mextend = round(M / 2) - 1\n Nextend = round(N / 2) - 1\n aux = cv2.copyMakeBorder(gray, top=Mextend, bottom=Mextend, left=\n Nextend, right=Nextend, borderType=cv2.BORDER_REFLECT)\n windows = np.zeros((M, N), np.int32)\n imageIntegral = cv2.integral(aux, windows, -1)\n nrows, ncols = imageIntegral.shape\n result = np.zeros((orignrows, origncols))\n for i in range(nrows - M):\n for j in range(ncols - N):\n result[i, j] = imageIntegral[i + M, j + N] - imageIntegral[i, j + N\n ] + imageIntegral[i, j] - imageIntegral[i + M, j]\n binar = np.ones((orignrows, origncols), dtype=np.bool)\n graymult = gray.astype('float64') * M * N\n binar[graymult <= result * (100.0 - threshold) / 100.0] = False\n binar = (255 * binar).astype(np.uint8)\n return binar\n\n\n<mask token>\n\n\ndef convertToGrayscale(img):\n temp_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n return temp_img\n\n\ndef invert(img):\n \"\"\" Black -> White | White -> Black \"\"\"\n print('invert image')\n _, temp_img = cv2.threshold(img, 140, 255, cv2.THRESH_BINARY_INV)\n return temp_img\n\n\ndef dilateDirection(img, debug=False):\n \"\"\"\n It is just opposite of erosion. Here, a pixel element is '1' if atleast one pixel under the kernel is '1'. \n So it increases the white region in the image or size of foreground object increases. \n Normally, in cases like noise removal, erosion is followed by dilation. \n Because, erosion removes white noises, but it also shrinks our object. \n So we dilate it. Since noise is gone, they won't come back, but our object area increases. \n It is also useful in joining broken parts of an object. \n \"\"\"\n print('applying dilation morph')\n temp_img = cv2.dilate(img, DILATE_KERNEL, iterations=15)\n \"\"\"\n if debug:\n filepath = os.path.join(debugOutputDirectory, '%s-dilation.tiff' % basename)\n cv2.imwrite(filepath, temp_img)\n \"\"\"\n return temp_img\n\n\ndef createColumnImages(img, basename, directory):\n \"\"\"\n we sum each column of the inverted image. 
The columns should show up as peaks in the sums\n uses scipy.signal.find_peaks to find those peaks and use them as column indexes\n \"\"\"\n files = []\n temp_img = convertToGrayscale(img)\n temp_img = invert(temp_img)\n temp_img = dilateDirection(temp_img)\n sums = np.sum(temp_img, axis=COLUMNS)\n sums[0] = 1000\n sums = sums * -4\n peaks, _ = find_peaks(sums, distance=600)\n sum_to_index = dict((sums[peaks[i]], peaks[i]) for i in range(len(peaks)))\n sorted_sums = sorted(sum_to_index.keys())\n \"\"\"\n qr = Q_test(sorted_sums)\n if qr:\n peaks = peaks[peaks != sum_to_index[sorted_sums[0]]]\n \"\"\"\n print('PeakNum, Sum, QRemove for ' + basename)\n for x in peaks:\n print(str(x) + ', ' + str(sums[x]))\n print('----------')\n if peaks.size == 0:\n with open('troublesomeImages.txt', 'a') as f:\n print(\n 'ERROR: something went wrong with finding the peaks for image: '\n , os.path.join(directory, basename))\n f.write(os.path.join(directory, basename) + '.jpg 0\\n')\n return files\n peaks[0] = 0\n peaks[-1] = sums.size - 1\n boxed = np.copy(img)\n if peaks.size < 6:\n with open('troublesomeImages.txt', 'a') as f:\n print('found image that is causing problems: ', os.path.join(\n directory, basename))\n f.write(os.path.join(directory, basename) + '.jpg ' + str(peaks\n .size) + '\\n')\n columnIndexPairs = columnIndexes(peaks)\n ystart = 0\n yend = img.shape[0]\n for columnIndexPair in columnIndexPairs:\n xstart = max(columnIndexPair[0] - PADDING, 0)\n xend = min(columnIndexPair[1] + PADDING, img.shape[1])\n if not os.path.exists(directory):\n os.makedirs(directory)\n filepath = os.path.join(directory, '%s_xStart%s_xEnd%s.jpg' % (\n basename, xstart, xend))\n files.append(filepath)\n crop_img = img[ystart:yend, xstart:xend]\n print('writing out cropped image: ', filepath)\n if not cv2.imwrite(filepath, crop_img):\n print('failed')\n if CREATE_COLUMN_OUTLINE_IMAGES:\n cv2.rectangle(boxed, (xstart, ystart), (xend, yend), GREEN,\n LINE_THICKNESS)\n if CREATE_COLUMN_OUTLINE_IMAGES:\n filepath = os.path.join(directory, '%s-contours.jpeg' % basename)\n cv2.imwrite(filepath, boxed, [cv2.IMWRITE_JPEG_QUALITY, 50])\n return files\n\n\ndef invert_experiment():\n test_img = cv2.imread(\n './ocr/data/8k71pf94q/1_commonwealth_8k71pf94q_accessFull.jpg')\n for thresh in range(1, 200, 20):\n print('writing thresh= ' + str(thresh))\n _, temp_img = cv2.threshold(test_img, thresh, 255, cv2.\n THRESH_BINARY_INV)\n cv2.imwrite('./ocr/test_images/thresh=' + str(thresh) + '.jpg',\n temp_img)\n\n\ndef test(img, basename):\n test_img = convertToGrayscale(img)\n test_img = invert(test_img)\n test_img = dilateDirection(test_img)\n cv2.imwrite('./ocr/test_images/contours/' + basename + 'dilated.jpg',\n test_img)\n \"\"\"\n for r in range(0, 40, 5):\n name = 'rank=' + str(r) + \".jpg\"\n path = './ocr/test_images/' + name\n\n new_img = ndimage.rank_filter(test_img, rank=r, size=20)\n print(\"writing \" + name)\n cv2.imwrite(path, new_img)\n \"\"\"\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef adaptative_thresholding(img, threshold):\n I = img\n gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\n orignrows, origncols = gray.shape\n M = int(np.floor(orignrows / 16) + 1)\n N = int(np.floor(origncols / 16) + 1)\n Mextend = round(M / 2) - 1\n Nextend = round(N / 2) - 1\n aux = cv2.copyMakeBorder(gray, top=Mextend, bottom=Mextend, left=\n Nextend, right=Nextend, borderType=cv2.BORDER_REFLECT)\n windows = np.zeros((M, N), np.int32)\n imageIntegral = cv2.integral(aux, windows, -1)\n nrows, ncols = imageIntegral.shape\n result = np.zeros((orignrows, origncols))\n for i in range(nrows - M):\n for j in range(ncols - N):\n result[i, j] = imageIntegral[i + M, j + N] - imageIntegral[i, j + N\n ] + imageIntegral[i, j] - imageIntegral[i + M, j]\n binar = np.ones((orignrows, origncols), dtype=np.bool)\n graymult = gray.astype('float64') * M * N\n binar[graymult <= result * (100.0 - threshold) / 100.0] = False\n binar = (255 * binar).astype(np.uint8)\n return binar\n\n\n<mask token>\n\n\ndef columnIndexes(a):\n \"\"\"\n creates pair of indexes for left and right index of the image column\n For example [13, 1257, 2474, 3695, 4907, 6149]\n becomes: [[13 1257], [1257 2474], [2474 3695], [3695 4907], [4907 6149]]\n \"\"\"\n nrows = a.size - 2 + 1\n return a[1 * np.arange(nrows)[:, None] + np.arange(2)]\n\n\ndef convertToGrayscale(img):\n temp_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n return temp_img\n\n\ndef invert(img):\n \"\"\" Black -> White | White -> Black \"\"\"\n print('invert image')\n _, temp_img = cv2.threshold(img, 140, 255, cv2.THRESH_BINARY_INV)\n return temp_img\n\n\ndef dilateDirection(img, debug=False):\n \"\"\"\n It is just opposite of erosion. Here, a pixel element is '1' if atleast one pixel under the kernel is '1'. \n So it increases the white region in the image or size of foreground object increases. \n Normally, in cases like noise removal, erosion is followed by dilation. \n Because, erosion removes white noises, but it also shrinks our object. \n So we dilate it. Since noise is gone, they won't come back, but our object area increases. \n It is also useful in joining broken parts of an object. \n \"\"\"\n print('applying dilation morph')\n temp_img = cv2.dilate(img, DILATE_KERNEL, iterations=15)\n \"\"\"\n if debug:\n filepath = os.path.join(debugOutputDirectory, '%s-dilation.tiff' % basename)\n cv2.imwrite(filepath, temp_img)\n \"\"\"\n return temp_img\n\n\ndef createColumnImages(img, basename, directory):\n \"\"\"\n we sum each column of the inverted image. 
The columns should show up as peaks in the sums\n uses scipy.signal.find_peaks to find those peaks and use them as column indexes\n \"\"\"\n files = []\n temp_img = convertToGrayscale(img)\n temp_img = invert(temp_img)\n temp_img = dilateDirection(temp_img)\n sums = np.sum(temp_img, axis=COLUMNS)\n sums[0] = 1000\n sums = sums * -4\n peaks, _ = find_peaks(sums, distance=600)\n sum_to_index = dict((sums[peaks[i]], peaks[i]) for i in range(len(peaks)))\n sorted_sums = sorted(sum_to_index.keys())\n \"\"\"\n qr = Q_test(sorted_sums)\n if qr:\n peaks = peaks[peaks != sum_to_index[sorted_sums[0]]]\n \"\"\"\n print('PeakNum, Sum, QRemove for ' + basename)\n for x in peaks:\n print(str(x) + ', ' + str(sums[x]))\n print('----------')\n if peaks.size == 0:\n with open('troublesomeImages.txt', 'a') as f:\n print(\n 'ERROR: something went wrong with finding the peaks for image: '\n , os.path.join(directory, basename))\n f.write(os.path.join(directory, basename) + '.jpg 0\\n')\n return files\n peaks[0] = 0\n peaks[-1] = sums.size - 1\n boxed = np.copy(img)\n if peaks.size < 6:\n with open('troublesomeImages.txt', 'a') as f:\n print('found image that is causing problems: ', os.path.join(\n directory, basename))\n f.write(os.path.join(directory, basename) + '.jpg ' + str(peaks\n .size) + '\\n')\n columnIndexPairs = columnIndexes(peaks)\n ystart = 0\n yend = img.shape[0]\n for columnIndexPair in columnIndexPairs:\n xstart = max(columnIndexPair[0] - PADDING, 0)\n xend = min(columnIndexPair[1] + PADDING, img.shape[1])\n if not os.path.exists(directory):\n os.makedirs(directory)\n filepath = os.path.join(directory, '%s_xStart%s_xEnd%s.jpg' % (\n basename, xstart, xend))\n files.append(filepath)\n crop_img = img[ystart:yend, xstart:xend]\n print('writing out cropped image: ', filepath)\n if not cv2.imwrite(filepath, crop_img):\n print('failed')\n if CREATE_COLUMN_OUTLINE_IMAGES:\n cv2.rectangle(boxed, (xstart, ystart), (xend, yend), GREEN,\n LINE_THICKNESS)\n if CREATE_COLUMN_OUTLINE_IMAGES:\n filepath = os.path.join(directory, '%s-contours.jpeg' % basename)\n cv2.imwrite(filepath, boxed, [cv2.IMWRITE_JPEG_QUALITY, 50])\n return files\n\n\ndef invert_experiment():\n test_img = cv2.imread(\n './ocr/data/8k71pf94q/1_commonwealth_8k71pf94q_accessFull.jpg')\n for thresh in range(1, 200, 20):\n print('writing thresh= ' + str(thresh))\n _, temp_img = cv2.threshold(test_img, thresh, 255, cv2.\n THRESH_BINARY_INV)\n cv2.imwrite('./ocr/test_images/thresh=' + str(thresh) + '.jpg',\n temp_img)\n\n\ndef test(img, basename):\n test_img = convertToGrayscale(img)\n test_img = invert(test_img)\n test_img = dilateDirection(test_img)\n cv2.imwrite('./ocr/test_images/contours/' + basename + 'dilated.jpg',\n test_img)\n \"\"\"\n for r in range(0, 40, 5):\n name = 'rank=' + str(r) + \".jpg\"\n path = './ocr/test_images/' + name\n\n new_img = ndimage.rank_filter(test_img, rank=r, size=20)\n print(\"writing \" + name)\n cv2.imwrite(path, new_img)\n \"\"\"\n\n\n<mask token>\n",
"step-4": "<mask token>\nos.environ['OPENCV_IO_ENABLE_JASPER'] = 'True'\n<mask token>\nDILATE_KERNEL = np.array([[0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0,\n 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0,\n 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0,\n 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0]], dtype\n =np.uint8)\n\n\ndef adaptative_thresholding(img, threshold):\n I = img\n gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\n orignrows, origncols = gray.shape\n M = int(np.floor(orignrows / 16) + 1)\n N = int(np.floor(origncols / 16) + 1)\n Mextend = round(M / 2) - 1\n Nextend = round(N / 2) - 1\n aux = cv2.copyMakeBorder(gray, top=Mextend, bottom=Mextend, left=\n Nextend, right=Nextend, borderType=cv2.BORDER_REFLECT)\n windows = np.zeros((M, N), np.int32)\n imageIntegral = cv2.integral(aux, windows, -1)\n nrows, ncols = imageIntegral.shape\n result = np.zeros((orignrows, origncols))\n for i in range(nrows - M):\n for j in range(ncols - N):\n result[i, j] = imageIntegral[i + M, j + N] - imageIntegral[i, j + N\n ] + imageIntegral[i, j] - imageIntegral[i + M, j]\n binar = np.ones((orignrows, origncols), dtype=np.bool)\n graymult = gray.astype('float64') * M * N\n binar[graymult <= result * (100.0 - threshold) / 100.0] = False\n binar = (255 * binar).astype(np.uint8)\n return binar\n\n\ndef Q_test(sorted_data):\n conf95_level = {(3): 0.97, (4): 0.829, (5): 0.71, (6): 0.625, (7): \n 0.568, (8): 0.526, (9): 0.493}\n q_exp = abs(sorted_data[1] - sorted_data[0]) / abs(sorted_data[-1] -\n sorted_data[0])\n print(str(abs(sorted_data[1] - sorted_data[0])) + ' / ' + str(abs(\n sorted_data[-1] - sorted_data[0])))\n print('q_exp : ' + str(q_exp))\n return q_exp > conf95_level[min(9, len(sorted_data))]\n\n\nCOLUMNS = 0\nGREEN = 0, 255, 0\nLINE_THICKNESS = 3\nPADDING = 10\nCREATE_COLUMN_OUTLINE_IMAGES = True\n\n\ndef columnIndexes(a):\n \"\"\"\n creates pair of indexes for left and right index of the image column\n For example [13, 1257, 2474, 3695, 4907, 6149]\n becomes: [[13 1257], [1257 2474], [2474 3695], [3695 4907], [4907 6149]]\n \"\"\"\n nrows = a.size - 2 + 1\n return a[1 * np.arange(nrows)[:, None] + np.arange(2)]\n\n\ndef convertToGrayscale(img):\n temp_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n return temp_img\n\n\ndef invert(img):\n \"\"\" Black -> White | White -> Black \"\"\"\n print('invert image')\n _, temp_img = cv2.threshold(img, 140, 255, cv2.THRESH_BINARY_INV)\n return temp_img\n\n\ndef dilateDirection(img, debug=False):\n \"\"\"\n It is just opposite of erosion. Here, a pixel element is '1' if atleast one pixel under the kernel is '1'. \n So it increases the white region in the image or size of foreground object increases. \n Normally, in cases like noise removal, erosion is followed by dilation. \n Because, erosion removes white noises, but it also shrinks our object. \n So we dilate it. Since noise is gone, they won't come back, but our object area increases. \n It is also useful in joining broken parts of an object. \n \"\"\"\n print('applying dilation morph')\n temp_img = cv2.dilate(img, DILATE_KERNEL, iterations=15)\n \"\"\"\n if debug:\n filepath = os.path.join(debugOutputDirectory, '%s-dilation.tiff' % basename)\n cv2.imwrite(filepath, temp_img)\n \"\"\"\n return temp_img\n\n\ndef createColumnImages(img, basename, directory):\n \"\"\"\n we sum each column of the inverted image. 
The columns should show up as peaks in the sums\n uses scipy.signal.find_peaks to find those peaks and use them as column indexes\n \"\"\"\n files = []\n temp_img = convertToGrayscale(img)\n temp_img = invert(temp_img)\n temp_img = dilateDirection(temp_img)\n sums = np.sum(temp_img, axis=COLUMNS)\n sums[0] = 1000\n sums = sums * -4\n peaks, _ = find_peaks(sums, distance=600)\n sum_to_index = dict((sums[peaks[i]], peaks[i]) for i in range(len(peaks)))\n sorted_sums = sorted(sum_to_index.keys())\n \"\"\"\n qr = Q_test(sorted_sums)\n if qr:\n peaks = peaks[peaks != sum_to_index[sorted_sums[0]]]\n \"\"\"\n print('PeakNum, Sum, QRemove for ' + basename)\n for x in peaks:\n print(str(x) + ', ' + str(sums[x]))\n print('----------')\n if peaks.size == 0:\n with open('troublesomeImages.txt', 'a') as f:\n print(\n 'ERROR: something went wrong with finding the peaks for image: '\n , os.path.join(directory, basename))\n f.write(os.path.join(directory, basename) + '.jpg 0\\n')\n return files\n peaks[0] = 0\n peaks[-1] = sums.size - 1\n boxed = np.copy(img)\n if peaks.size < 6:\n with open('troublesomeImages.txt', 'a') as f:\n print('found image that is causing problems: ', os.path.join(\n directory, basename))\n f.write(os.path.join(directory, basename) + '.jpg ' + str(peaks\n .size) + '\\n')\n columnIndexPairs = columnIndexes(peaks)\n ystart = 0\n yend = img.shape[0]\n for columnIndexPair in columnIndexPairs:\n xstart = max(columnIndexPair[0] - PADDING, 0)\n xend = min(columnIndexPair[1] + PADDING, img.shape[1])\n if not os.path.exists(directory):\n os.makedirs(directory)\n filepath = os.path.join(directory, '%s_xStart%s_xEnd%s.jpg' % (\n basename, xstart, xend))\n files.append(filepath)\n crop_img = img[ystart:yend, xstart:xend]\n print('writing out cropped image: ', filepath)\n if not cv2.imwrite(filepath, crop_img):\n print('failed')\n if CREATE_COLUMN_OUTLINE_IMAGES:\n cv2.rectangle(boxed, (xstart, ystart), (xend, yend), GREEN,\n LINE_THICKNESS)\n if CREATE_COLUMN_OUTLINE_IMAGES:\n filepath = os.path.join(directory, '%s-contours.jpeg' % basename)\n cv2.imwrite(filepath, boxed, [cv2.IMWRITE_JPEG_QUALITY, 50])\n return files\n\n\ndef invert_experiment():\n test_img = cv2.imread(\n './ocr/data/8k71pf94q/1_commonwealth_8k71pf94q_accessFull.jpg')\n for thresh in range(1, 200, 20):\n print('writing thresh= ' + str(thresh))\n _, temp_img = cv2.threshold(test_img, thresh, 255, cv2.\n THRESH_BINARY_INV)\n cv2.imwrite('./ocr/test_images/thresh=' + str(thresh) + '.jpg',\n temp_img)\n\n\ndef test(img, basename):\n test_img = convertToGrayscale(img)\n test_img = invert(test_img)\n test_img = dilateDirection(test_img)\n cv2.imwrite('./ocr/test_images/contours/' + basename + 'dilated.jpg',\n test_img)\n \"\"\"\n for r in range(0, 40, 5):\n name = 'rank=' + str(r) + \".jpg\"\n path = './ocr/test_images/' + name\n\n new_img = ndimage.rank_filter(test_img, rank=r, size=20)\n print(\"writing \" + name)\n cv2.imwrite(path, new_img)\n \"\"\"\n\n\nif __name__ == '__main__':\n print('STARTING')\n for f in os.listdir('./ocr/data/gb19gw39h/'):\n if f.endswith('.jpg'):\n createColumnImages(cv2.imread(os.path.join(\n './ocr/data/gb19gw39h/', f)), 'gb19gw39h-' + f[0],\n './ocr/columns/gb19gw39h/')\n for f in os.listdir('./ocr/data/8k71pf94q/'):\n if f.endswith('.jpg'):\n createColumnImages(cv2.imread(os.path.join(\n './ocr/data/8k71pf94q/', f)), '8k71pf94q-' + f[0],\n './ocr/columns/8k71pf94q/')\n for f in os.listdir('./ocr/data/mc87rq85m/'):\n if f.endswith('.jpg'):\n createColumnImages(cv2.imread(os.path.join(\n 
'./ocr/data/mc87rq85m/', f)), 'mc87rq85m-' + f[0],\n './ocr/columns/mc87rq85m/')\n \"\"\"\n data_folder = './ocr/data/'\n for folder in os.listdir(data_folder):\n if folder == \".DS_Store\":\n continue\n for file in os.listdir(os.path.join(data_folder, folder)):\n if file.endswith(\".jpg\"):\n print(\"calling test() on \" + file)\n #test(cv2.imread(os.path.join(data_folder, folder, file)),folder+'-'+file[0])\n createColumnImages(cv2.imread(os.path.join(data_folder, folder, file)), folder+'-'+file[0], './ocr/columns/'+folder+'/')\n \n for f in os.listdir('./ocr/data/8k71pr786/'):\n if f.endswith(\".jpg\"):\n for d in range(550, 850, 50):\n createColumnImages(cv2.imread(os.path.join('./ocr/data/8k71pr786/', f)), '8k71pr786-'+f[0]+'-d=' + str(d), './ocr/test_images/test_contour/8k71pr786/', d)\n #createColumnImages(cv2.imread('./ocr/data/8k71pr786/'), 'tester2', './ocr/data/columns/tester/')\n \"\"\"\n",
"step-5": "\n# https://github.com/jscancella/NYTribuneOCRExperiments/blob/master/findText_usingSums.py\nimport os\nimport io\nfrom pathlib import Path\nimport sys\nos.environ['OPENCV_IO_ENABLE_JASPER']='True' # has to be set before importing cv2 otherwise it won't read the variable\nimport numpy as np\nimport cv2\n\nimport subprocess\nfrom multiprocessing import Pool\nfrom scipy.signal import find_peaks, find_peaks_cwt\n\nimport scipy.ndimage as ndimage\nfrom IPython.display import Image as KImage\n\n#custom kernel that is used to blend together text in the Y axis\nDILATE_KERNEL = np.array([\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0]], dtype=np.uint8)\n\n\n# Run adaptative thresholding (is slow af compared to not using it in pipeline)\ndef adaptative_thresholding(img, threshold):\n # Load image\n I = img\n # Convert image to grayscale\n gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\n # Original image size\n orignrows, origncols = gray.shape\n # Windows size\n M = int(np.floor(orignrows/16) + 1)\n N = int(np.floor(origncols/16) + 1)\n # Image border padding related to windows size\n Mextend = round(M/2)-1\n Nextend = round(N/2)-1\n # Padding image\n aux =cv2.copyMakeBorder(gray, top=Mextend, bottom=Mextend, left=Nextend,\n right=Nextend, borderType=cv2.BORDER_REFLECT)\n windows = np.zeros((M,N),np.int32)\n # Image integral calculation\n imageIntegral = cv2.integral(aux, windows,-1)\n # Integral image size\n nrows, ncols = imageIntegral.shape\n # Memory allocation for cumulative region image\n result = np.zeros((orignrows, origncols))\n # Image cumulative pixels in windows size calculation\n for i in range(nrows-M):\n for j in range(ncols-N):\n result[i, j] = imageIntegral[i+M, j+N] - imageIntegral[i, j+N]+ imageIntegral[i, j] - imageIntegral[i+M,j]\n\n # Output binary image memory allocation\n binar = np.ones((orignrows, origncols), dtype=np.bool)\n # Gray image weighted by windows size\n graymult = (gray).astype('float64')*M*N\n # Output image binarization\n binar[graymult <= result*(100.0 - threshold)/100.0] = False\n # binary image to UINT8 conversion\n binar = (255*binar).astype(np.uint8)\n\n return binar\n\ndef Q_test(sorted_data):\n conf95_level = {3: .97, 4: .829, 5: .71, 6: .625, 7: .568, 8: .526, 9: .493}\n q_exp = abs(sorted_data[1] - sorted_data[0]) / abs(sorted_data[-1] - sorted_data[0])\n print(str(abs(sorted_data[1] - sorted_data[0])) + ' / ' + str(abs(sorted_data[-1] - sorted_data[0])))\n print(\"q_exp : \" + str(q_exp))\n return q_exp > conf95_level[min(9, len(sorted_data))]\n\n\n# static variables for clarity\nCOLUMNS = 0\nGREEN = (0, 255, 0)\n\n# parameters that can be tweaked\nLINE_THICKNESS = 3 # how thick to make the line around the found contours in the debug output\nPADDING = 10 # padding to add around the found possible column to help account for image skew and such\nCREATE_COLUMN_OUTLINE_IMAGES = True # if we detect that we didn't find all the columns. 
Create a debug image (tiff) showing the columns that were found\n\ndef columnIndexes(a):\n \"\"\"\n creates pair of indexes for left and right index of the image column\n For example [13, 1257, 2474, 3695, 4907, 6149]\n becomes: [[13 1257], [1257 2474], [2474 3695], [3695 4907], [4907 6149]]\n \"\"\"\n nrows = (a.size-2)+1\n return a[1*np.arange(nrows)[:,None] + np.arange(2)]\n\ndef convertToGrayscale(img):\n temp_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n return temp_img\n\ndef invert(img):\n \"\"\" Black -> White | White -> Black \"\"\"\n print(\"invert image\")\n # Should we edit these parameters?\n #3/18/21 - experimented on threshold, 140 is good.\n _,temp_img = cv2.threshold(img, 140, 255, cv2.THRESH_BINARY_INV)\n return temp_img\n\ndef dilateDirection(img, debug=False):\n \"\"\"\n It is just opposite of erosion. Here, a pixel element is '1' if atleast one pixel under the kernel is '1'. \n So it increases the white region in the image or size of foreground object increases. \n Normally, in cases like noise removal, erosion is followed by dilation. \n Because, erosion removes white noises, but it also shrinks our object. \n So we dilate it. Since noise is gone, they won't come back, but our object area increases. \n It is also useful in joining broken parts of an object. \n \"\"\"\n print(\"applying dilation morph\")\n temp_img = cv2.dilate(img, DILATE_KERNEL, iterations=15) #the more iterations the more the text gets stretched in the Y axis, 15 seems about right.\n '''\n if debug:\n filepath = os.path.join(debugOutputDirectory, '%s-dilation.tiff' % basename)\n cv2.imwrite(filepath, temp_img)\n '''\n return temp_img\n\ndef createColumnImages(img, basename, directory):\n \"\"\"\n we sum each column of the inverted image. The columns should show up as peaks in the sums\n uses scipy.signal.find_peaks to find those peaks and use them as column indexes\n \"\"\"\n files = []\n temp_img = convertToGrayscale(img)\n temp_img = invert(temp_img)\n temp_img = dilateDirection(temp_img)\n \n sums = np.sum(temp_img, axis = COLUMNS)\n \n sums[0] = 1000 # some random value so that find_peaks properly detects the peak for the left most column\n sums = sums * -4 # invert so that minimums become maximums and exagerate the data so it is more clear what the peaks are \n peaks, _ = find_peaks(sums, distance=600) # the column indexs of the img array, spaced at least 800 away from the previous peak\n\n sum_to_index = dict((sums[peaks[i]], peaks[i]) for i in range(len(peaks)))\n sorted_sums = sorted(sum_to_index.keys())\n '''\n qr = Q_test(sorted_sums)\n if qr:\n peaks = peaks[peaks != sum_to_index[sorted_sums[0]]]\n '''\n print(\"PeakNum, Sum, QRemove for \" + basename)\n for x in peaks:\n print(str(x) + ', ' + str(sums[x]))\n print(\"----------\")\n\n if peaks.size == 0:\n with open('troublesomeImages.txt', 'a') as f:\n print(\"ERROR: something went wrong with finding the peaks for image: \", os.path.join(directory, basename))\n f.write(os.path.join(directory, basename) + \".jpg 0\\n\")\n return files\n\n peaks[0] = 0 # automatically make the left most column index the start of the image\n peaks[-1] =sums.size -1 # automatically make the right most column index the end of the image\n\n boxed = np.copy(img)\n if peaks.size < 6:\n with open('troublesomeImages.txt', 'a') as f:\n print(\"found image that is causing problems: \", os.path.join(directory, basename))\n f.write(os.path.join(directory, basename) + \".jpg \" + str(peaks.size) + \"\\n\")\n\n columnIndexPairs = columnIndexes(peaks)\n\n ystart = 0\n 
yend = img.shape[0]\n for columnIndexPair in columnIndexPairs:\n xstart = max(columnIndexPair[0]-PADDING, 0)\n xend = min(columnIndexPair[1]+PADDING, img.shape[1])\n if not os.path.exists(directory):\n os.makedirs(directory)\n filepath = os.path.join(directory, '%s_xStart%s_xEnd%s.jpg' % (basename, xstart,xend))\n files.append(filepath)\n crop_img = img[ystart:yend, xstart:xend]\n \n print(\"writing out cropped image: \", filepath)\n # Apply adaptative thresholding to the image with a threshold of 25/100\n #crop_img = adaptative_thresholding(crop_img, 25)\n if not cv2.imwrite(filepath, crop_img):\n print('failed')\n\n if CREATE_COLUMN_OUTLINE_IMAGES:\n cv2.rectangle(boxed,(xstart,ystart),(xend,yend), GREEN, LINE_THICKNESS)\n\n if CREATE_COLUMN_OUTLINE_IMAGES:\n filepath = os.path.join(directory, '%s-contours.jpeg' % basename)\n cv2.imwrite(filepath, boxed, [cv2.IMWRITE_JPEG_QUALITY, 50])\n # For removing the old image?\n # os.remove(os.path.join(directory, basename + \".jp2\"))\n\n return files\n\ndef invert_experiment():\n test_img = cv2.imread('./ocr/data/8k71pf94q/1_commonwealth_8k71pf94q_accessFull.jpg')\n for thresh in range(1, 200, 20):\n print('writing thresh= ' + str(thresh))\n _,temp_img = cv2.threshold(test_img, thresh, 255, cv2.THRESH_BINARY_INV)\n cv2.imwrite('./ocr/test_images/thresh='+str(thresh)+'.jpg', temp_img)\n\n\n\ndef test(img, basename):\n #h, w, _ = img.shape\n #test_img = cv2.imread('./ocr/data/8k71pf94q/2_commonwealth_8k71pf94q_accessFull.jpg')\n test_img = convertToGrayscale(img)\n #ret,test_img = cv2.threshold(test_img,25,255,0)\n #cv2.imwrite('./ocr/test_images/contours/'+basename+'prepixelcrop.jpg', test_img)\n #test_img = test_img[10:h-10, 10: w-10]\n #y_nonzero, x_nonzero = np.nonzero(test_img)\n #test_img = test_img[np.min(y_nonzero):np.max(y_nonzero), np.min(x_nonzero):np.max(x_nonzero)]\n test_img = invert(test_img)\n test_img = dilateDirection(test_img)\n\n #contours,hierarchy = cv2.findContours(test_img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n #cnt = contours[0]\n #x,y,w,h = cv2.boundingRect(cnt)\n #test_img = cv2.rectangle(img,(10,10),(w-10, h-10), GREEN, LINE_THICKNESS)\n #test_img = cv2.drawContours(test_img, contours, -1, GREEN, LINE_THICKNESS)\n #crop = test_img[y:y+h,x:x+w]\n cv2.imwrite('./ocr/test_images/contours/'+basename+'dilated.jpg', test_img)\n '''\n for r in range(0, 40, 5):\n name = 'rank=' + str(r) + \".jpg\"\n path = './ocr/test_images/' + name\n\n new_img = ndimage.rank_filter(test_img, rank=r, size=20)\n print(\"writing \" + name)\n cv2.imwrite(path, new_img)\n '''\n #cv2.imwrite('./ocr/test_images/inverted.jpg', test_img)\n\n \n\n\nif __name__ == \"__main__\":\n print(\"STARTING\")\n for f in os.listdir('./ocr/data/gb19gw39h/'):\n if f.endswith(\".jpg\"):\n #test(cv2.imread(os.path.join('./ocr/data/gb19gw39h/', f)), 'gb19gw39h-' + f[0])\n createColumnImages(cv2.imread(os.path.join('./ocr/data/gb19gw39h/', f)), 'gb19gw39h-' + f[0], './ocr/columns/gb19gw39h/')\n\n for f in os.listdir('./ocr/data/8k71pf94q/'):\n if f.endswith(\".jpg\"):\n #test(cv2.imread(os.path.join('./ocr/data/gb19gw39h/', f)), 'gb19gw39h-' + f[0])\n createColumnImages(cv2.imread(os.path.join('./ocr/data/8k71pf94q/', f)), '8k71pf94q-' + f[0], './ocr/columns/8k71pf94q/')\n\n for f in os.listdir('./ocr/data/mc87rq85m/'):\n if f.endswith(\".jpg\"):\n #test(cv2.imread(os.path.join('./ocr/data/gb19gw39h/', f)), 'gb19gw39h-' + f[0])\n createColumnImages(cv2.imread(os.path.join('./ocr/data/mc87rq85m/', f)), 'mc87rq85m-' + f[0], './ocr/columns/mc87rq85m/')\n\n '''\n 
data_folder = './ocr/data/'\n for folder in os.listdir(data_folder):\n if folder == \".DS_Store\":\n continue\n for file in os.listdir(os.path.join(data_folder, folder)):\n if file.endswith(\".jpg\"):\n print(\"calling test() on \" + file)\n #test(cv2.imread(os.path.join(data_folder, folder, file)),folder+'-'+file[0])\n createColumnImages(cv2.imread(os.path.join(data_folder, folder, file)), folder+'-'+file[0], './ocr/columns/'+folder+'/')\n \n for f in os.listdir('./ocr/data/8k71pr786/'):\n if f.endswith(\".jpg\"):\n for d in range(550, 850, 50):\n createColumnImages(cv2.imread(os.path.join('./ocr/data/8k71pr786/', f)), '8k71pr786-'+f[0]+'-d=' + str(d), './ocr/test_images/test_contour/8k71pr786/', d)\n #createColumnImages(cv2.imread('./ocr/data/8k71pr786/'), 'tester2', './ocr/data/columns/tester/')\n '''\n\n",
"step-ids": [
6,
7,
8,
11,
13
]
}
|
[
6,
7,
8,
11,
13
] |
from django.test import TestCase
# Create your tests here.
def Add_course(self, user):
    # Placeholder: the signature suggests this should create a course for
    # the given user; no body was written.
    pass
|
normal
|
{
"blob_id": "7fc239e7f44c5f6a8e5bebe3e4910aee4d8e4af3",
"index": 9266,
"step-1": "from django.test import TestCase\n\n# Create your tests here.\n\ndef Add_course(self,user):\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python3
experiment_name = "nodes10"
wall = "wall2"
wall_image = "irati_110"
mr_dif_policy = True
spn_dif_policy = True
destination_ip = "2001:40b0:7500:286:84:88:81:57"
|
normal
|
{
"blob_id": "78db25586f742b0a20bc3fad382b0d4f1a271841",
"index": 3970,
"step-1": "<mask token>\n",
"step-2": "experiment_name = 'nodes10'\nwall = 'wall2'\nwall_image = 'irati_110'\nmr_dif_policy = True\nspn_dif_policy = True\ndestination_ip = '2001:40b0:7500:286:84:88:81:57'\n",
"step-3": "#!/usr/bin/python3\n\nexperiment_name = \"nodes10\"\nwall = \"wall2\"\nwall_image = \"irati_110\"\nmr_dif_policy = True\nspn_dif_policy = True\ndestination_ip = \"2001:40b0:7500:286:84:88:81:57\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from fgpio import GPIO
import boards
|
normal
|
{
"blob_id": "f66f79cd4132b23c082149a3a1d887f661fd7ee5",
"index": 7247,
"step-1": "<mask token>\n",
"step-2": "from fgpio import GPIO\nimport boards\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# %%
import pandas as pd
import numpy as np
from dataprep.eda import plot
from dataprep.eda import plot_correlation
from dataprep.eda import plot_missing
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="whitegrid", color_codes=True, font_scale=1)
# %%
# Minimal Processing
wines = pd.read_csv("winemag-data-130k-v2.csv")
wines.columns
wines.drop(columns='Unnamed: 0', inplace=True)
wines.dropna(axis='index', subset=['price'], inplace=True)
wines.drop_duplicates(inplace=True)
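# %%
# Minimal sanity-check sketch (plain pandas, not part of the original flow)
# of what the cleaning above guarantees: no missing prices and no duplicate
# rows remain.
assert wines["price"].notna().all()
assert not wines.duplicated().any()
wines.shape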
# %%
# Overall Distribution
plot(wines)
# %% # Price Dist -> Clean
plot(wines, "price")
# %%
plot(wines, "points")
# %%
plot(wines, "price", "points")
# %%
plot_correlation(wines, "price", "points")
# %%
plot_missing(wines)
# %%
plot_missing(wines, "price", "points")
# %%
plot_correlation(wines, "price")
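# %%
# Numeric cross-check of the price/points relationship with plain pandas —
# a minimal sketch; taking log(price) is an assumption made here to tame the
# heavy right tail of the price distribution.
wines_check = wines[["price", "points"]].copy()
wines_check["log_price"] = np.log(wines_check["price"])
wines_check[["log_price", "points"]].corr(method="pearson")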
# %%
# END EDA
|
normal
|
{
"blob_id": "79e8ed64058dda6c8d7bacc08727bc978088ad2d",
"index": 4963,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsns.set(style='whitegrid', color_codes=True)\nsns.set(font_scale=1)\n<mask token>\nwines.columns\nwines.drop(columns='Unnamed: 0', inplace=True)\nwines.dropna(axis='index', subset=['price'], inplace=True)\nwines.drop_duplicates(inplace=True)\nplot(wines)\nplot(wines, 'price')\nplot(wines, 'points')\nplot(wines, 'price', 'points')\nplot_correlation(wines, 'price', 'points')\nplot_missing(wines)\nplot_missing(wines, 'price', 'points')\nplot_correlation(wines, 'price')\n",
"step-3": "<mask token>\nsns.set(style='whitegrid', color_codes=True)\nsns.set(font_scale=1)\nwines = pd.read_csv('winemag-data-130k-v2.csv')\nwines.columns\nwines.drop(columns='Unnamed: 0', inplace=True)\nwines.dropna(axis='index', subset=['price'], inplace=True)\nwines.drop_duplicates(inplace=True)\nplot(wines)\nplot(wines, 'price')\nplot(wines, 'points')\nplot(wines, 'price', 'points')\nplot_correlation(wines, 'price', 'points')\nplot_missing(wines)\nplot_missing(wines, 'price', 'points')\nplot_correlation(wines, 'price')\n",
"step-4": "import pandas as pd\nimport numpy as np\nfrom dataprep.eda import plot\nfrom dataprep.eda import plot_correlation\nfrom dataprep.eda import plot_missing\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style='whitegrid', color_codes=True)\nsns.set(font_scale=1)\nwines = pd.read_csv('winemag-data-130k-v2.csv')\nwines.columns\nwines.drop(columns='Unnamed: 0', inplace=True)\nwines.dropna(axis='index', subset=['price'], inplace=True)\nwines.drop_duplicates(inplace=True)\nplot(wines)\nplot(wines, 'price')\nplot(wines, 'points')\nplot(wines, 'price', 'points')\nplot_correlation(wines, 'price', 'points')\nplot_missing(wines)\nplot_missing(wines, 'price', 'points')\nplot_correlation(wines, 'price')\n",
"step-5": "# %%\nimport pandas as pd\nimport numpy as np\n\nfrom dataprep.eda import plot\nfrom dataprep.eda import plot_correlation\nfrom dataprep.eda import plot_missing\n\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style=\"whitegrid\", color_codes=True)\nsns.set(font_scale=1)\n\n# %%\n# Minimal Processing\nwines = pd.read_csv(\"winemag-data-130k-v2.csv\")\nwines.columns\nwines.drop(columns='Unnamed: 0', inplace=True)\nwines.dropna(axis='index', subset=['price'], inplace=True)\nwines.drop_duplicates(inplace=True)\n# %%\n# Overall Distribution\nplot(wines)\n# %% # Price Dist -> Clean\nplot(wines, \"price\")\n\n# %%\nplot(wines, \"points\")\n\n# %%\nplot(wines, \"price\", \"points\")\n\n# %%\nplot_correlation(wines, \"price\", \"points\")\n# %%\nplot_missing(wines)\n# %%\nplot_missing(wines, \"price\", \"points\")\n\n# %%\nplot_correlation(wines, \"price\")\n# %%\n# END EDA\n# %\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import os
from appia.processors.core import normalizer
from math import ceil
class Experiment:
def __init__(self, id) -> None:
self.id = id
self.version = 4
self._hplc = None
self._fplc = None
@property
def hplc(self):
try:
return self._hplc
except AttributeError:
return None
@hplc.setter
def hplc(self, df):
if isinstance(df, pd.DataFrame) or df is None:
try:
self._hplc = df.sort_values(by=["Normalization", "Channel", "mL"])
except AttributeError:
self._hplc = df
else:
raise TypeError("HPLC input is not a pandas dataframe")
@property
def fplc(self):
try:
return self._fplc
except AttributeError:
return None
@fplc.setter
def fplc(self, df):
if isinstance(df, pd.DataFrame) or df is None:
self._fplc = df
else:
raise TypeError("FPLC input is not a pandas dataframe")
@property
def wide(self):
wide = self.hplc.copy()
wide = wide.loc[wide["Normalization"] == "Signal"]
wide["Sample"] = wide["Sample"].astype(str) + " " + wide["Channel"]
        wide = wide.drop(["Channel", "Normalization"], axis=1)
wide = wide.pivot_table(index="Time", columns="Sample", values="Value")
return wide
def __repr__(self):
to_return = f'Experiment "{self.id}" with '
if self.hplc is not None:
to_return += "HPLC "
if self.hplc is not None and self.fplc is not None:
to_return += "and "
if self.fplc is not None:
to_return += "FPLC "
if self.hplc is None and self.fplc is None:
to_return += "no "
to_return += "data"
return to_return
def extend_hplc(self, hplc):
if not isinstance(hplc, pd.DataFrame):
raise TypeError(f"Tried to extend experiment hplc with {type(hplc)}")
self.hplc = pd.concat([self.hplc, hplc])
def show_tables(self):
print("HPLC:")
print(self.hplc)
print("FPLC:")
print(self.fplc)
def jsonify(self):
if self.hplc is not None:
hplc_json = (
self.hplc.pivot_table(
index=["mL", "Channel", "Time", "Normalization"],
columns="Sample",
values="Value",
)
.reset_index()
.to_json()
)
else:
hplc_json = ""
if self.fplc is not None:
fplc_json = self.fplc.to_json()
else:
fplc_json = ""
doc = {
"_id": self.id,
"version": self.version,
"hplc": hplc_json,
"fplc": fplc_json,
}
return doc
def renormalize_hplc(self, norm_range, strict):
if self.hplc is None:
raise ValueError("No HPLC data")
# this arcane string of pandas commands is the equivalent of pivot_wider from tidyverse
        # from https://medium.com/@durgaswaroop/reshaping-pandas-dataframes-melt-and-unmelt-9f57518c7738
hplc = self.hplc.pivot(
index=["mL", "Sample", "Channel", "Time"], columns=["Normalization"]
)["Value"].reset_index()
hplc = hplc.groupby(["Sample", "Channel"], group_keys=False).apply(
lambda x: normalizer(x, norm_range, strict)
)
hplc = hplc.melt(
id_vars=["mL", "Sample", "Channel", "Time"],
value_vars=["Signal", "Normalized"],
var_name="Normalization",
value_name="Value",
)
self.hplc = hplc
def renormalize_fplc(self, norm_range, strict):
if self.fplc is None:
raise ValueError("No FPLC data")
fplc = self.fplc.pivot(
index=["mL", "CV", "Fraction", "Channel", "Sample"],
columns=["Normalization"],
)["Value"].reset_index()
fplc = fplc.groupby(["Sample", "Channel"], group_keys=False).apply(
lambda x: normalizer(x, norm_range, strict)
)
fplc = fplc.melt(
id_vars=["mL", "CV", "Channel", "Fraction", "Sample"],
value_vars=["Signal", "Normalized"],
var_name="Normalization",
value_name="Value",
)
self.fplc = fplc
def reduce_hplc(self, num_points):
# reduce the number of points in the hplc trace to num_points per sample/channel/norm
        def reduction_factor(df, final_points):
            step = ceil(df.shape[0] / final_points)
            return df[::step]
try:
self.hplc = self.hplc.groupby(
["Channel", "Sample", "Normalization"], group_keys=False, as_index=False
).apply(lambda x: reduction_factor(x, num_points))
self.hplc = self.hplc.reset_index(drop=True)
except AttributeError:
return
def rename_channels(self, channel_name_dict):
self.hplc = self.hplc.replace({"Channel": channel_name_dict})
def hplc_csv(self, outfile):
if outfile[-4:] == ".csv":
outfile = outfile[:-4]
if self.hplc is not None:
self.hplc.to_csv(outfile + "-long.csv", index=False)
self.wide.to_csv(outfile + "-wide.csv", index=True)
return outfile + "-long.csv"
def fplc_csv(self, outfile):
if outfile[-4:] != ".csv":
outfile = outfile + ".csv"
if self.fplc is not None:
self.fplc.to_csv(outfile, index=False)
return outfile
def save_csvs(self, path):
hplc_csv = self.hplc_csv(os.path.join(path, f"{self.id}_hplc"))
fplc_csv = self.fplc_csv(os.path.join(path, f"{self.id}_fplc"))
return hplc_csv, fplc_csv
def concat_experiments(exp_list):
hplcs = []
fplcs = []
for exp in [x for x in exp_list if x.hplc is not None]:
hplc = exp.hplc
hplc["Sample"] = f"{exp.id}: " + hplc["Sample"].astype(str)
hplcs.append(hplc)
for exp in [x for x in exp_list if x.fplc is not None]:
fplc = exp.fplc
fplc["Sample"] = exp.id
fplcs.append(fplc)
concat_exp = Experiment("concat")
try:
concat_exp.hplc = pd.concat(hplcs)
except ValueError:
pass
try:
concat_exp.fplc = pd.concat(fplcs)
except ValueError:
pass
return concat_exp
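

# Minimal usage sketch of the classes above. The inline DataFrame values are
# illustrative assumptions; only the column names (mL, Sample, Channel, Time,
# Normalization, Value) come from the code itself.
if __name__ == "__main__":
    exp = Experiment("demo")
    exp.hplc = pd.DataFrame(
        {
            "mL": [0.0, 0.1, 0.2] * 2,
            "Sample": ["A"] * 6,
            "Channel": ["UV"] * 6,
            "Time": [0, 1, 2] * 2,
            "Normalization": ["Signal"] * 3 + ["Normalized"] * 3,
            "Value": [1.0, 5.0, 2.0, 0.2, 1.0, 0.4],
        }
    )
    exp.reduce_hplc(1000)  # no-op on six rows; caps points per trace on real data
    print(exp)  # -> Experiment "demo" with HPLC data
    exp.save_csvs(".")  # writes demo_hplc-long.csv and demo_hplc-wide.csv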
|
normal
|
{
"blob_id": "754b34028780231c7eccb98cdf3e83bd615d843f",
"index": 5276,
"step-1": "<mask token>\n\n\nclass Experiment:\n <mask token>\n <mask token>\n\n @hplc.setter\n def hplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n try:\n self._hplc = df.sort_values(by=['Normalization', 'Channel',\n 'mL'])\n except AttributeError:\n self._hplc = df\n else:\n raise TypeError('HPLC input is not a pandas dataframe')\n\n @property\n def fplc(self):\n try:\n return self._fplc\n except AttributeError:\n return None\n <mask token>\n\n @property\n def wide(self):\n wide = self.hplc.copy()\n wide = wide.loc[wide['Normalization'] == 'Signal']\n wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']\n wide.drop(['Channel', 'Normalization'], axis=1)\n wide = wide.pivot_table(index='Time', columns='Sample', values='Value')\n return wide\n\n def __repr__(self):\n to_return = f'Experiment \"{self.id}\" with '\n if self.hplc is not None:\n to_return += 'HPLC '\n if self.hplc is not None and self.fplc is not None:\n to_return += 'and '\n if self.fplc is not None:\n to_return += 'FPLC '\n if self.hplc is None and self.fplc is None:\n to_return += 'no '\n to_return += 'data'\n return to_return\n\n def extend_hplc(self, hplc):\n if not isinstance(hplc, pd.DataFrame):\n raise TypeError(\n f'Tried to extend experiment hplc with {type(hplc)}')\n self.hplc = pd.concat([self.hplc, hplc])\n <mask token>\n\n def jsonify(self):\n if self.hplc is not None:\n hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',\n 'Time', 'Normalization'], columns='Sample', values='Value'\n ).reset_index().to_json()\n else:\n hplc_json = ''\n if self.fplc is not None:\n fplc_json = self.fplc.to_json()\n else:\n fplc_json = ''\n doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,\n 'fplc': fplc_json}\n return doc\n <mask token>\n\n def renormalize_fplc(self, norm_range, strict):\n if self.fplc is None:\n raise ValueError('No FPLC data')\n fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',\n 'Sample'], columns=['Normalization'])['Value'].reset_index()\n fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',\n 'Sample'], value_vars=['Signal', 'Normalized'], var_name=\n 'Normalization', value_name='Value')\n self.fplc = fplc\n\n def reduce_hplc(self, num_points):\n\n def reduction_factor(df, final_points):\n reduction_factor = ceil(df.shape[0] / final_points)\n return df[::reduction_factor]\n try:\n self.hplc = self.hplc.groupby(['Channel', 'Sample',\n 'Normalization'], group_keys=False, as_index=False).apply(\n lambda x: reduction_factor(x, num_points))\n self.hplc = self.hplc.reset_index(drop=True)\n except AttributeError:\n return\n\n def rename_channels(self, channel_name_dict):\n self.hplc = self.hplc.replace({'Channel': channel_name_dict})\n\n def hplc_csv(self, outfile):\n if outfile[-4:] == '.csv':\n outfile = outfile[:-4]\n if self.hplc is not None:\n self.hplc.to_csv(outfile + '-long.csv', index=False)\n self.wide.to_csv(outfile + '-wide.csv', index=True)\n return outfile + '-long.csv'\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Experiment:\n\n def __init__(self, id) ->None:\n self.id = id\n self.version = 4\n self._hplc = None\n self._fplc = None\n\n @property\n def hplc(self):\n try:\n return self._hplc\n except AttributeError:\n return None\n\n @hplc.setter\n def hplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n try:\n self._hplc = df.sort_values(by=['Normalization', 'Channel',\n 'mL'])\n except AttributeError:\n self._hplc = df\n else:\n raise TypeError('HPLC input is not a pandas dataframe')\n\n @property\n def fplc(self):\n try:\n return self._fplc\n except AttributeError:\n return None\n <mask token>\n\n @property\n def wide(self):\n wide = self.hplc.copy()\n wide = wide.loc[wide['Normalization'] == 'Signal']\n wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']\n wide.drop(['Channel', 'Normalization'], axis=1)\n wide = wide.pivot_table(index='Time', columns='Sample', values='Value')\n return wide\n\n def __repr__(self):\n to_return = f'Experiment \"{self.id}\" with '\n if self.hplc is not None:\n to_return += 'HPLC '\n if self.hplc is not None and self.fplc is not None:\n to_return += 'and '\n if self.fplc is not None:\n to_return += 'FPLC '\n if self.hplc is None and self.fplc is None:\n to_return += 'no '\n to_return += 'data'\n return to_return\n\n def extend_hplc(self, hplc):\n if not isinstance(hplc, pd.DataFrame):\n raise TypeError(\n f'Tried to extend experiment hplc with {type(hplc)}')\n self.hplc = pd.concat([self.hplc, hplc])\n <mask token>\n\n def jsonify(self):\n if self.hplc is not None:\n hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',\n 'Time', 'Normalization'], columns='Sample', values='Value'\n ).reset_index().to_json()\n else:\n hplc_json = ''\n if self.fplc is not None:\n fplc_json = self.fplc.to_json()\n else:\n fplc_json = ''\n doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,\n 'fplc': fplc_json}\n return doc\n\n def renormalize_hplc(self, norm_range, strict):\n if self.hplc is None:\n raise ValueError('No HPLC data')\n hplc = self.hplc.pivot(index=['mL', 'Sample', 'Channel', 'Time'],\n columns=['Normalization'])['Value'].reset_index()\n hplc = hplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n hplc = hplc.melt(id_vars=['mL', 'Sample', 'Channel', 'Time'],\n value_vars=['Signal', 'Normalized'], var_name='Normalization',\n value_name='Value')\n self.hplc = hplc\n\n def renormalize_fplc(self, norm_range, strict):\n if self.fplc is None:\n raise ValueError('No FPLC data')\n fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',\n 'Sample'], columns=['Normalization'])['Value'].reset_index()\n fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',\n 'Sample'], value_vars=['Signal', 'Normalized'], var_name=\n 'Normalization', value_name='Value')\n self.fplc = fplc\n\n def reduce_hplc(self, num_points):\n\n def reduction_factor(df, final_points):\n reduction_factor = ceil(df.shape[0] / final_points)\n return df[::reduction_factor]\n try:\n self.hplc = self.hplc.groupby(['Channel', 'Sample',\n 'Normalization'], group_keys=False, as_index=False).apply(\n lambda x: reduction_factor(x, num_points))\n self.hplc = self.hplc.reset_index(drop=True)\n except AttributeError:\n return\n\n def rename_channels(self, channel_name_dict):\n self.hplc = self.hplc.replace({'Channel': channel_name_dict})\n\n def hplc_csv(self, 
outfile):\n if outfile[-4:] == '.csv':\n outfile = outfile[:-4]\n if self.hplc is not None:\n self.hplc.to_csv(outfile + '-long.csv', index=False)\n self.wide.to_csv(outfile + '-wide.csv', index=True)\n return outfile + '-long.csv'\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Experiment:\n\n def __init__(self, id) ->None:\n self.id = id\n self.version = 4\n self._hplc = None\n self._fplc = None\n\n @property\n def hplc(self):\n try:\n return self._hplc\n except AttributeError:\n return None\n\n @hplc.setter\n def hplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n try:\n self._hplc = df.sort_values(by=['Normalization', 'Channel',\n 'mL'])\n except AttributeError:\n self._hplc = df\n else:\n raise TypeError('HPLC input is not a pandas dataframe')\n\n @property\n def fplc(self):\n try:\n return self._fplc\n except AttributeError:\n return None\n\n @fplc.setter\n def fplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n self._fplc = df\n else:\n raise TypeError('FPLC input is not a pandas dataframe')\n\n @property\n def wide(self):\n wide = self.hplc.copy()\n wide = wide.loc[wide['Normalization'] == 'Signal']\n wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']\n wide.drop(['Channel', 'Normalization'], axis=1)\n wide = wide.pivot_table(index='Time', columns='Sample', values='Value')\n return wide\n\n def __repr__(self):\n to_return = f'Experiment \"{self.id}\" with '\n if self.hplc is not None:\n to_return += 'HPLC '\n if self.hplc is not None and self.fplc is not None:\n to_return += 'and '\n if self.fplc is not None:\n to_return += 'FPLC '\n if self.hplc is None and self.fplc is None:\n to_return += 'no '\n to_return += 'data'\n return to_return\n\n def extend_hplc(self, hplc):\n if not isinstance(hplc, pd.DataFrame):\n raise TypeError(\n f'Tried to extend experiment hplc with {type(hplc)}')\n self.hplc = pd.concat([self.hplc, hplc])\n\n def show_tables(self):\n print('HPLC:')\n print(self.hplc)\n print('FPLC:')\n print(self.fplc)\n\n def jsonify(self):\n if self.hplc is not None:\n hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',\n 'Time', 'Normalization'], columns='Sample', values='Value'\n ).reset_index().to_json()\n else:\n hplc_json = ''\n if self.fplc is not None:\n fplc_json = self.fplc.to_json()\n else:\n fplc_json = ''\n doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,\n 'fplc': fplc_json}\n return doc\n\n def renormalize_hplc(self, norm_range, strict):\n if self.hplc is None:\n raise ValueError('No HPLC data')\n hplc = self.hplc.pivot(index=['mL', 'Sample', 'Channel', 'Time'],\n columns=['Normalization'])['Value'].reset_index()\n hplc = hplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n hplc = hplc.melt(id_vars=['mL', 'Sample', 'Channel', 'Time'],\n value_vars=['Signal', 'Normalized'], var_name='Normalization',\n value_name='Value')\n self.hplc = hplc\n\n def renormalize_fplc(self, norm_range, strict):\n if self.fplc is None:\n raise ValueError('No FPLC data')\n fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',\n 'Sample'], columns=['Normalization'])['Value'].reset_index()\n fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',\n 'Sample'], value_vars=['Signal', 'Normalized'], var_name=\n 'Normalization', value_name='Value')\n self.fplc = fplc\n\n def reduce_hplc(self, num_points):\n\n def reduction_factor(df, final_points):\n reduction_factor = ceil(df.shape[0] / final_points)\n return df[::reduction_factor]\n try:\n self.hplc = self.hplc.groupby(['Channel', 'Sample',\n 'Normalization'], group_keys=False, as_index=False).apply(\n lambda x: 
reduction_factor(x, num_points))\n self.hplc = self.hplc.reset_index(drop=True)\n except AttributeError:\n return\n\n def rename_channels(self, channel_name_dict):\n self.hplc = self.hplc.replace({'Channel': channel_name_dict})\n\n def hplc_csv(self, outfile):\n if outfile[-4:] == '.csv':\n outfile = outfile[:-4]\n if self.hplc is not None:\n self.hplc.to_csv(outfile + '-long.csv', index=False)\n self.wide.to_csv(outfile + '-wide.csv', index=True)\n return outfile + '-long.csv'\n\n def fplc_csv(self, outfile):\n if outfile[-4:] != '.csv':\n outfile = outfile + '.csv'\n if self.fplc is not None:\n self.fplc.to_csv(outfile, index=False)\n return outfile\n\n def save_csvs(self, path):\n hplc_csv = self.hplc_csv(os.path.join(path, f'{self.id}_hplc'))\n fplc_csv = self.fplc_csv(os.path.join(path, f'{self.id}_fplc'))\n return hplc_csv, fplc_csv\n\n\ndef concat_experiments(exp_list):\n hplcs = []\n fplcs = []\n for exp in [x for x in exp_list if x.hplc is not None]:\n hplc = exp.hplc\n hplc['Sample'] = f'{exp.id}: ' + hplc['Sample'].astype(str)\n hplcs.append(hplc)\n for exp in [x for x in exp_list if x.fplc is not None]:\n fplc = exp.fplc\n fplc['Sample'] = exp.id\n fplcs.append(fplc)\n concat_exp = Experiment('concat')\n try:\n concat_exp.hplc = pd.concat(hplcs)\n except ValueError:\n pass\n try:\n concat_exp.fplc = pd.concat(fplcs)\n except ValueError:\n pass\n return concat_exp\n",
"step-4": "import pandas as pd\nimport os\nfrom appia.processors.core import normalizer\nfrom math import ceil\n\n\nclass Experiment:\n\n def __init__(self, id) ->None:\n self.id = id\n self.version = 4\n self._hplc = None\n self._fplc = None\n\n @property\n def hplc(self):\n try:\n return self._hplc\n except AttributeError:\n return None\n\n @hplc.setter\n def hplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n try:\n self._hplc = df.sort_values(by=['Normalization', 'Channel',\n 'mL'])\n except AttributeError:\n self._hplc = df\n else:\n raise TypeError('HPLC input is not a pandas dataframe')\n\n @property\n def fplc(self):\n try:\n return self._fplc\n except AttributeError:\n return None\n\n @fplc.setter\n def fplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n self._fplc = df\n else:\n raise TypeError('FPLC input is not a pandas dataframe')\n\n @property\n def wide(self):\n wide = self.hplc.copy()\n wide = wide.loc[wide['Normalization'] == 'Signal']\n wide['Sample'] = wide['Sample'].astype(str) + ' ' + wide['Channel']\n wide.drop(['Channel', 'Normalization'], axis=1)\n wide = wide.pivot_table(index='Time', columns='Sample', values='Value')\n return wide\n\n def __repr__(self):\n to_return = f'Experiment \"{self.id}\" with '\n if self.hplc is not None:\n to_return += 'HPLC '\n if self.hplc is not None and self.fplc is not None:\n to_return += 'and '\n if self.fplc is not None:\n to_return += 'FPLC '\n if self.hplc is None and self.fplc is None:\n to_return += 'no '\n to_return += 'data'\n return to_return\n\n def extend_hplc(self, hplc):\n if not isinstance(hplc, pd.DataFrame):\n raise TypeError(\n f'Tried to extend experiment hplc with {type(hplc)}')\n self.hplc = pd.concat([self.hplc, hplc])\n\n def show_tables(self):\n print('HPLC:')\n print(self.hplc)\n print('FPLC:')\n print(self.fplc)\n\n def jsonify(self):\n if self.hplc is not None:\n hplc_json = self.hplc.pivot_table(index=['mL', 'Channel',\n 'Time', 'Normalization'], columns='Sample', values='Value'\n ).reset_index().to_json()\n else:\n hplc_json = ''\n if self.fplc is not None:\n fplc_json = self.fplc.to_json()\n else:\n fplc_json = ''\n doc = {'_id': self.id, 'version': self.version, 'hplc': hplc_json,\n 'fplc': fplc_json}\n return doc\n\n def renormalize_hplc(self, norm_range, strict):\n if self.hplc is None:\n raise ValueError('No HPLC data')\n hplc = self.hplc.pivot(index=['mL', 'Sample', 'Channel', 'Time'],\n columns=['Normalization'])['Value'].reset_index()\n hplc = hplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n hplc = hplc.melt(id_vars=['mL', 'Sample', 'Channel', 'Time'],\n value_vars=['Signal', 'Normalized'], var_name='Normalization',\n value_name='Value')\n self.hplc = hplc\n\n def renormalize_fplc(self, norm_range, strict):\n if self.fplc is None:\n raise ValueError('No FPLC data')\n fplc = self.fplc.pivot(index=['mL', 'CV', 'Fraction', 'Channel',\n 'Sample'], columns=['Normalization'])['Value'].reset_index()\n fplc = fplc.groupby(['Sample', 'Channel'], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict))\n fplc = fplc.melt(id_vars=['mL', 'CV', 'Channel', 'Fraction',\n 'Sample'], value_vars=['Signal', 'Normalized'], var_name=\n 'Normalization', value_name='Value')\n self.fplc = fplc\n\n def reduce_hplc(self, num_points):\n\n def reduction_factor(df, final_points):\n reduction_factor = ceil(df.shape[0] / final_points)\n return df[::reduction_factor]\n try:\n self.hplc = self.hplc.groupby(['Channel', 
'Sample',\n 'Normalization'], group_keys=False, as_index=False).apply(\n lambda x: reduction_factor(x, num_points))\n self.hplc = self.hplc.reset_index(drop=True)\n except AttributeError:\n return\n\n def rename_channels(self, channel_name_dict):\n self.hplc = self.hplc.replace({'Channel': channel_name_dict})\n\n def hplc_csv(self, outfile):\n if outfile[-4:] == '.csv':\n outfile = outfile[:-4]\n if self.hplc is not None:\n self.hplc.to_csv(outfile + '-long.csv', index=False)\n self.wide.to_csv(outfile + '-wide.csv', index=True)\n return outfile + '-long.csv'\n\n def fplc_csv(self, outfile):\n if outfile[-4:] != '.csv':\n outfile = outfile + '.csv'\n if self.fplc is not None:\n self.fplc.to_csv(outfile, index=False)\n return outfile\n\n def save_csvs(self, path):\n hplc_csv = self.hplc_csv(os.path.join(path, f'{self.id}_hplc'))\n fplc_csv = self.fplc_csv(os.path.join(path, f'{self.id}_fplc'))\n return hplc_csv, fplc_csv\n\n\ndef concat_experiments(exp_list):\n hplcs = []\n fplcs = []\n for exp in [x for x in exp_list if x.hplc is not None]:\n hplc = exp.hplc\n hplc['Sample'] = f'{exp.id}: ' + hplc['Sample'].astype(str)\n hplcs.append(hplc)\n for exp in [x for x in exp_list if x.fplc is not None]:\n fplc = exp.fplc\n fplc['Sample'] = exp.id\n fplcs.append(fplc)\n concat_exp = Experiment('concat')\n try:\n concat_exp.hplc = pd.concat(hplcs)\n except ValueError:\n pass\n try:\n concat_exp.fplc = pd.concat(fplcs)\n except ValueError:\n pass\n return concat_exp\n",
"step-5": "import pandas as pd\nimport os\nfrom appia.processors.core import normalizer\nfrom math import ceil\n\n\nclass Experiment:\n def __init__(self, id) -> None:\n self.id = id\n self.version = 4\n self._hplc = None\n self._fplc = None\n\n @property\n def hplc(self):\n try:\n return self._hplc\n except AttributeError:\n return None\n\n @hplc.setter\n def hplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n try:\n self._hplc = df.sort_values(by=[\"Normalization\", \"Channel\", \"mL\"])\n except AttributeError:\n self._hplc = df\n else:\n raise TypeError(\"HPLC input is not a pandas dataframe\")\n\n @property\n def fplc(self):\n try:\n return self._fplc\n except AttributeError:\n return None\n\n @fplc.setter\n def fplc(self, df):\n if isinstance(df, pd.DataFrame) or df is None:\n self._fplc = df\n else:\n raise TypeError(\"FPLC input is not a pandas dataframe\")\n\n @property\n def wide(self):\n wide = self.hplc.copy()\n wide = wide.loc[wide[\"Normalization\"] == \"Signal\"]\n wide[\"Sample\"] = wide[\"Sample\"].astype(str) + \" \" + wide[\"Channel\"]\n wide.drop([\"Channel\", \"Normalization\"], axis=1)\n wide = wide.pivot_table(index=\"Time\", columns=\"Sample\", values=\"Value\")\n return wide\n\n def __repr__(self):\n to_return = f'Experiment \"{self.id}\" with '\n if self.hplc is not None:\n to_return += \"HPLC \"\n if self.hplc is not None and self.fplc is not None:\n to_return += \"and \"\n if self.fplc is not None:\n to_return += \"FPLC \"\n if self.hplc is None and self.fplc is None:\n to_return += \"no \"\n to_return += \"data\"\n\n return to_return\n\n def extend_hplc(self, hplc):\n if not isinstance(hplc, pd.DataFrame):\n raise TypeError(f\"Tried to extend experiment hplc with {type(hplc)}\")\n\n self.hplc = pd.concat([self.hplc, hplc])\n\n def show_tables(self):\n print(\"HPLC:\")\n print(self.hplc)\n print(\"FPLC:\")\n print(self.fplc)\n\n def jsonify(self):\n if self.hplc is not None:\n hplc_json = (\n self.hplc.pivot_table(\n index=[\"mL\", \"Channel\", \"Time\", \"Normalization\"],\n columns=\"Sample\",\n values=\"Value\",\n )\n .reset_index()\n .to_json()\n )\n else:\n hplc_json = \"\"\n\n if self.fplc is not None:\n fplc_json = self.fplc.to_json()\n else:\n fplc_json = \"\"\n\n doc = {\n \"_id\": self.id,\n \"version\": self.version,\n \"hplc\": hplc_json,\n \"fplc\": fplc_json,\n }\n\n return doc\n\n def renormalize_hplc(self, norm_range, strict):\n if self.hplc is None:\n raise ValueError(\"No HPLC data\")\n\n # this arcane string of pandas commands is the equivalent of pivot_wider from tidyverse\n # from https://medium.com/@durgaswaroop/reshaping-pandas-dataframes-melt-and-unmelt-9f57518c7738;.'/\n hplc = self.hplc.pivot(\n index=[\"mL\", \"Sample\", \"Channel\", \"Time\"], columns=[\"Normalization\"]\n )[\"Value\"].reset_index()\n hplc = hplc.groupby([\"Sample\", \"Channel\"], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict)\n )\n hplc = hplc.melt(\n id_vars=[\"mL\", \"Sample\", \"Channel\", \"Time\"],\n value_vars=[\"Signal\", \"Normalized\"],\n var_name=\"Normalization\",\n value_name=\"Value\",\n )\n self.hplc = hplc\n\n def renormalize_fplc(self, norm_range, strict):\n if self.fplc is None:\n raise ValueError(\"No FPLC data\")\n\n fplc = self.fplc.pivot(\n index=[\"mL\", \"CV\", \"Fraction\", \"Channel\", \"Sample\"],\n columns=[\"Normalization\"],\n )[\"Value\"].reset_index()\n fplc = fplc.groupby([\"Sample\", \"Channel\"], group_keys=False).apply(\n lambda x: normalizer(x, norm_range, strict)\n )\n fplc = fplc.melt(\n 
id_vars=[\"mL\", \"CV\", \"Channel\", \"Fraction\", \"Sample\"],\n value_vars=[\"Signal\", \"Normalized\"],\n var_name=\"Normalization\",\n value_name=\"Value\",\n )\n self.fplc = fplc\n\n def reduce_hplc(self, num_points):\n # reduce the number of points in the hplc trace to num_points per sample/channel/norm\n\n def reduction_factor(df, final_points):\n reduction_factor = ceil(df.shape[0] / final_points)\n return df[::reduction_factor]\n\n try:\n self.hplc = self.hplc.groupby(\n [\"Channel\", \"Sample\", \"Normalization\"], group_keys=False, as_index=False\n ).apply(lambda x: reduction_factor(x, num_points))\n self.hplc = self.hplc.reset_index(drop=True)\n except AttributeError:\n return\n\n def rename_channels(self, channel_name_dict):\n self.hplc = self.hplc.replace({\"Channel\": channel_name_dict})\n\n def hplc_csv(self, outfile):\n if outfile[-4:] == \".csv\":\n outfile = outfile[:-4]\n if self.hplc is not None:\n self.hplc.to_csv(outfile + \"-long.csv\", index=False)\n self.wide.to_csv(outfile + \"-wide.csv\", index=True)\n\n return outfile + \"-long.csv\"\n\n def fplc_csv(self, outfile):\n if outfile[-4:] != \".csv\":\n outfile = outfile + \".csv\"\n\n if self.fplc is not None:\n self.fplc.to_csv(outfile, index=False)\n return outfile\n\n def save_csvs(self, path):\n hplc_csv = self.hplc_csv(os.path.join(path, f\"{self.id}_hplc\"))\n fplc_csv = self.fplc_csv(os.path.join(path, f\"{self.id}_fplc\"))\n\n return hplc_csv, fplc_csv\n\n\ndef concat_experiments(exp_list):\n hplcs = []\n fplcs = []\n\n for exp in [x for x in exp_list if x.hplc is not None]:\n hplc = exp.hplc\n hplc[\"Sample\"] = f\"{exp.id}: \" + hplc[\"Sample\"].astype(str)\n hplcs.append(hplc)\n\n for exp in [x for x in exp_list if x.fplc is not None]:\n fplc = exp.fplc\n fplc[\"Sample\"] = exp.id\n fplcs.append(fplc)\n\n concat_exp = Experiment(\"concat\")\n try:\n concat_exp.hplc = pd.concat(hplcs)\n except ValueError:\n pass\n\n try:\n concat_exp.fplc = pd.concat(fplcs)\n except ValueError:\n pass\n\n return concat_exp\n",
"step-ids": [
11,
14,
19,
20,
21
]
}
|
[
11,
14,
19,
20,
21
] |
try:
from setuptools import setup
from setuptools import find_packages
    has_setup_tools = True
except ImportError:
from distutils.core import setup
    has_setup_tools = False
with open("README.md", "r") as fh:
long_description = fh.read()
if has_setup_tools is True:
    packages = find_packages()
else:
packages = ["otmux"]
setup(
name="otmux",
version="__version",
description="multiple remote activities using ssh and tmux",
long_description=long_description,
url="https://github.com/rda3mon/otmux",
author="Mallikarjun",
author_email="[email protected]",
license="Apache License 2.0",
    packages=packages,
classifiers=[
'Topic :: tmux :: ssh',
'Development Status :: 2 - Experimental/Unstable',
'Environment :: Console',
'License :: Apache License 2.0',
'Programming Language :: Python :: 2.7',
"Operating System :: OS Independent"
]
)
|
normal
|
{
"blob_id": "5d988d159902e4a4cb17ee0ec61153de2dda4691",
"index": 9120,
"step-1": "<mask token>\n",
"step-2": "try:\n from setuptools import setup\n from setuptools import find_packages\n has_setup_tools = true\nexcept ImportError:\n from distutils.core import setup\n has_setup_tools = false\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\nif has_setup_tools is True:\n packages = setuptools.find_packages()\nelse:\n packages = ['otmux']\nsetup(name='otmux', version='__version', description=\n 'multiple remote activities using ssh and tmux', long_description=\n long_description, url='https://github.com/rda3mon/otmux', author=\n 'Mallikarjun', author_email='[email protected]', license=\n 'Apache License 2.0', packages=['otmux'], classifiers=[\n 'Topic :: tmux :: ssh',\n 'Development Status :: 2 - Experimental/Unstable',\n 'Environment :: Console', 'License :: Apache License 2.0',\n 'Programming Language :: Python :: 2.7',\n 'Operating System :: OS Independent'])\n",
"step-3": "try:\n from setuptools import setup\n from setuptools import find_packages\n has_setup_tools = true\nexcept ImportError:\n from distutils.core import setup\n has_setup_tools = false\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nif has_setup_tools is True:\n packages = setuptools.find_packages()\nelse:\n packages = [\"otmux\"]\n\nsetup(\n name=\"otmux\",\n version=\"__version\",\n description=\"multiple remote activities using ssh and tmux\",\n long_description=long_description,\n url=\"https://github.com/rda3mon/otmux\",\n author=\"Mallikarjun\",\n author_email=\"[email protected]\",\n license=\"Apache License 2.0\",\n packages=[\"otmux\"],\n classifiers=[\n 'Topic :: tmux :: ssh',\n 'Development Status :: 2 - Experimental/Unstable',\n 'Environment :: Console',\n 'License :: Apache License 2.0',\n 'Programming Language :: Python :: 2.7',\n \"Operating System :: OS Independent\"\n ]\n)\n\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
import numpy as np
from . import tmp_dir_fixture
from . import TEST_SAMPLE_DATA
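# Functional tests for dtoolai's dataset wrappers, run against bundled sample
# data and a small remote example dataset.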
def test_tensor_dataset_functional():
from dtoolai.data import TensorDataSet
tds_uri = os.path.join(TEST_SAMPLE_DATA, "example_tensor_dataset")
tds = TensorDataSet(tds_uri)
assert tds.name == "example_tensor_dataset"
assert tds.uuid == "6b6f9a0e-8547-4903-9090-6dcfc6abdf83"
assert len(tds) == 100
data, label = tds[0]
assert data.shape == (1, 9, 9)
assert data[0][0][0] == 0
assert label == 0
assert tds.input_channels == 1
assert tds.dim == 9
def test_image_dataset_functional():
from dtoolai.data import ImageDataSet
ids_uri = "http://bit.ly/2Uho6tN"
ids = ImageDataSet(ids_uri)
assert ids.name == "tiny.image.dataset.example"
assert ids.uuid == "839ae396-74a7-44f9-9b08-436be53b1090"
assert len(ids) == 6
assert ids.input_channels == 3
assert ids.dim == 256
im, label = ids[0]
assert isinstance(im, np.ndarray)
assert label == 0
def test_create_tensor_dataset_from_arrays(tmp_dir_fixture):
pass
|
normal
|
{
"blob_id": "97dfcce6e82ef33334b49de72bb126150dfef196",
"index": 2844,
"step-1": "<mask token>\n\n\ndef test_create_tensor_dataset_from_arrays(tmp_dir_fixture):\n pass\n",
"step-2": "<mask token>\n\n\ndef test_image_dataset_functional():\n from dtoolai.data import ImageDataSet\n ids_uri = 'http://bit.ly/2Uho6tN'\n ids = ImageDataSet(ids_uri)\n assert ids.name == 'tiny.image.dataset.example'\n assert ids.uuid == '839ae396-74a7-44f9-9b08-436be53b1090'\n assert len(ids) == 6\n assert ids.input_channels == 3\n assert ids.dim == 256\n im, label = ids[0]\n assert isinstance(im, np.ndarray)\n assert label == 0\n\n\ndef test_create_tensor_dataset_from_arrays(tmp_dir_fixture):\n pass\n",
"step-3": "<mask token>\n\n\ndef test_tensor_dataset_functional():\n from dtoolai.data import TensorDataSet\n tds_uri = os.path.join(TEST_SAMPLE_DATA, 'example_tensor_dataset')\n tds = TensorDataSet(tds_uri)\n assert tds.name == 'example_tensor_dataset'\n assert tds.uuid == '6b6f9a0e-8547-4903-9090-6dcfc6abdf83'\n assert len(tds) == 100\n data, label = tds[0]\n assert data.shape == (1, 9, 9)\n assert data[0][0][0] == 0\n assert label == 0\n assert tds.input_channels == 1\n assert tds.dim == 9\n\n\ndef test_image_dataset_functional():\n from dtoolai.data import ImageDataSet\n ids_uri = 'http://bit.ly/2Uho6tN'\n ids = ImageDataSet(ids_uri)\n assert ids.name == 'tiny.image.dataset.example'\n assert ids.uuid == '839ae396-74a7-44f9-9b08-436be53b1090'\n assert len(ids) == 6\n assert ids.input_channels == 3\n assert ids.dim == 256\n im, label = ids[0]\n assert isinstance(im, np.ndarray)\n assert label == 0\n\n\ndef test_create_tensor_dataset_from_arrays(tmp_dir_fixture):\n pass\n",
"step-4": "import os\nimport numpy as np\nfrom . import tmp_dir_fixture\nfrom . import TEST_SAMPLE_DATA\n\n\ndef test_tensor_dataset_functional():\n from dtoolai.data import TensorDataSet\n tds_uri = os.path.join(TEST_SAMPLE_DATA, 'example_tensor_dataset')\n tds = TensorDataSet(tds_uri)\n assert tds.name == 'example_tensor_dataset'\n assert tds.uuid == '6b6f9a0e-8547-4903-9090-6dcfc6abdf83'\n assert len(tds) == 100\n data, label = tds[0]\n assert data.shape == (1, 9, 9)\n assert data[0][0][0] == 0\n assert label == 0\n assert tds.input_channels == 1\n assert tds.dim == 9\n\n\ndef test_image_dataset_functional():\n from dtoolai.data import ImageDataSet\n ids_uri = 'http://bit.ly/2Uho6tN'\n ids = ImageDataSet(ids_uri)\n assert ids.name == 'tiny.image.dataset.example'\n assert ids.uuid == '839ae396-74a7-44f9-9b08-436be53b1090'\n assert len(ids) == 6\n assert ids.input_channels == 3\n assert ids.dim == 256\n im, label = ids[0]\n assert isinstance(im, np.ndarray)\n assert label == 0\n\n\ndef test_create_tensor_dataset_from_arrays(tmp_dir_fixture):\n pass\n",
"step-5": "import os\n\nimport numpy as np\n\nfrom . import tmp_dir_fixture\nfrom . import TEST_SAMPLE_DATA\n\n\n\ndef test_tensor_dataset_functional():\n\n from dtoolai.data import TensorDataSet\n\n tds_uri = os.path.join(TEST_SAMPLE_DATA, \"example_tensor_dataset\")\n\n tds = TensorDataSet(tds_uri)\n assert tds.name == \"example_tensor_dataset\"\n assert tds.uuid == \"6b6f9a0e-8547-4903-9090-6dcfc6abdf83\"\n assert len(tds) == 100\n\n data, label = tds[0]\n assert data.shape == (1, 9, 9)\n assert data[0][0][0] == 0\n assert label == 0\n\n assert tds.input_channels == 1\n assert tds.dim == 9\n\n\ndef test_image_dataset_functional():\n\n from dtoolai.data import ImageDataSet\n\n ids_uri = \"http://bit.ly/2Uho6tN\"\n\n ids = ImageDataSet(ids_uri)\n assert ids.name == \"tiny.image.dataset.example\"\n assert ids.uuid == \"839ae396-74a7-44f9-9b08-436be53b1090\"\n assert len(ids) == 6\n\n assert ids.input_channels == 3\n assert ids.dim == 256\n\n im, label = ids[0]\n assert isinstance(im, np.ndarray)\n assert label == 0\n \n\ndef test_create_tensor_dataset_from_arrays(tmp_dir_fixture):\n pass\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pytest
from debbiedowner import make_it_negative, complain_about
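# Smoke tests for the toy debbiedowner helpers: numeric negation and canned complaints.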
def test_negativity():
assert make_it_negative(8) == -8
assert complain_about('enthusiasm') == "I hate enthusiasm. Totally boring."
def test_easy():
assert 1 == 1
def test_cleverness():
assert make_it_negative(-3) == 3
|
normal
|
{
"blob_id": "e73e40a63b67ee1a6cca53a328af05e3eb3d8519",
"index": 703,
"step-1": "<mask token>\n\n\ndef test_negativity():\n assert make_it_negative(8) == -8\n assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_negativity():\n assert make_it_negative(8) == -8\n assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'\n\n\n<mask token>\n\n\ndef test_cleverness():\n assert make_it_negative(-3) == 3\n",
"step-3": "<mask token>\n\n\ndef test_negativity():\n assert make_it_negative(8) == -8\n assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'\n\n\ndef test_easy():\n assert 1 == 1\n\n\ndef test_cleverness():\n assert make_it_negative(-3) == 3\n",
"step-4": "import pytest\nfrom debbiedowner import make_it_negative, complain_about\n\n\ndef test_negativity():\n assert make_it_negative(8) == -8\n assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'\n\n\ndef test_easy():\n assert 1 == 1\n\n\ndef test_cleverness():\n assert make_it_negative(-3) == 3\n",
"step-5": "import pytest\n\nfrom debbiedowner import make_it_negative, complain_about\n\ndef test_negativity():\n assert make_it_negative(8) == -8\n assert complain_about('enthusiasm') == \"I hate enthusiasm. Totally boring.\"\n\ndef test_easy():\n assert 1 == 1\n\ndef test_cleverness():\n assert make_it_negative(-3) == 3",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
name = input("Введите ваше имя ")  # prompt translates to "Enter your name"
print("Добрый день,", name)  # prints "Good day, <name>"
|
normal
|
{
"blob_id": "e44c4b2c3b60d34d4540ec2d3a782c777c52fbc0",
"index": 8662,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Добрый день,', name)\n",
"step-3": "name = input('Введите ваше имя ')\nprint('Добрый день,', name)\n",
"step-4": "name = input(\"Введите ваше имя \")\nprint(\"Добрый день,\", name)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
import os
import sys
import textwrap
COMMAND = (
'convert -size 1920x1080 canvas:"rgb(149, 1, 1)" '
'-font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none '
'-fill white -annotate 0 "{1}" -size 1920x1080 "{2}.png"'
)
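# ImageMagick 'convert' template: paint a 1920x1080 dark-red canvas and
# annotate it with the wrapped text, saving the result as "<filename>.png".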
def makeimage(text, point_size=100, width=30):
tw = textwrap.TextWrapper(width=width)
text = "\n".join(
a.replace("\\n", "\n") for a in tw.wrap(text)
)
filename = "".join(
c
for c in text.replace(" ", "-")
if c.isalpha() or c.isdigit() or c in ["-", "_"]
)
os.system(COMMAND.format(point_size, text, filename))
def main():
text = None
if len(sys.argv) > 1:
pt = int(sys.argv[1])
width = int(-0.3 * float(sys.argv[1]) + 60)
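        # Heuristic: the wrap width shrinks linearly as the point size grows.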
if width < 10:
print("Too large.")
sys.exit(2)
if len(sys.argv) > 2:
text = " ".join(sys.argv[2:])
else:
pt = 100
width = 30
if not text:
text = input("Text: ")
makeimage(text, pt, width)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "a486ec6b27a6b84e454a1bed096be9fe22d91612",
"index": 1561,
"step-1": "<mask token>\n\n\ndef makeimage(text, point_size=100, width=30):\n tw = textwrap.TextWrapper(width=width)\n text = '\\n'.join(a.replace('\\\\n', '\\n') for a in tw.wrap(text))\n filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or\n c.isdigit() or c in ['-', '_'])\n os.system(COMMAND.format(point_size, text, filename))\n\n\ndef main():\n text = None\n if len(sys.argv) > 1:\n pt = int(sys.argv[1])\n width = int(-0.3 * float(sys.argv[1]) + 60)\n if width < 10:\n print('Too large.')\n sys.exit(2)\n if len(sys.argv) > 2:\n text = ' '.join(sys.argv[2:])\n else:\n pt = 100\n width = 30\n if not text:\n text = input('Text: ')\n makeimage(text, pt, width)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef makeimage(text, point_size=100, width=30):\n tw = textwrap.TextWrapper(width=width)\n text = '\\n'.join(a.replace('\\\\n', '\\n') for a in tw.wrap(text))\n filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or\n c.isdigit() or c in ['-', '_'])\n os.system(COMMAND.format(point_size, text, filename))\n\n\ndef main():\n text = None\n if len(sys.argv) > 1:\n pt = int(sys.argv[1])\n width = int(-0.3 * float(sys.argv[1]) + 60)\n if width < 10:\n print('Too large.')\n sys.exit(2)\n if len(sys.argv) > 2:\n text = ' '.join(sys.argv[2:])\n else:\n pt = 100\n width = 30\n if not text:\n text = input('Text: ')\n makeimage(text, pt, width)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nCOMMAND = (\n 'convert -size 1920x1080 canvas:\"rgb(149, 1, 1)\" -font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none -fill white -annotate 0 \"{1}\" -size 1920x1080 \"{2}.png\"'\n )\n\n\ndef makeimage(text, point_size=100, width=30):\n tw = textwrap.TextWrapper(width=width)\n text = '\\n'.join(a.replace('\\\\n', '\\n') for a in tw.wrap(text))\n filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or\n c.isdigit() or c in ['-', '_'])\n os.system(COMMAND.format(point_size, text, filename))\n\n\ndef main():\n text = None\n if len(sys.argv) > 1:\n pt = int(sys.argv[1])\n width = int(-0.3 * float(sys.argv[1]) + 60)\n if width < 10:\n print('Too large.')\n sys.exit(2)\n if len(sys.argv) > 2:\n text = ' '.join(sys.argv[2:])\n else:\n pt = 100\n width = 30\n if not text:\n text = input('Text: ')\n makeimage(text, pt, width)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport sys\nimport textwrap\nCOMMAND = (\n 'convert -size 1920x1080 canvas:\"rgb(149, 1, 1)\" -font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none -fill white -annotate 0 \"{1}\" -size 1920x1080 \"{2}.png\"'\n )\n\n\ndef makeimage(text, point_size=100, width=30):\n tw = textwrap.TextWrapper(width=width)\n text = '\\n'.join(a.replace('\\\\n', '\\n') for a in tw.wrap(text))\n filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or\n c.isdigit() or c in ['-', '_'])\n os.system(COMMAND.format(point_size, text, filename))\n\n\ndef main():\n text = None\n if len(sys.argv) > 1:\n pt = int(sys.argv[1])\n width = int(-0.3 * float(sys.argv[1]) + 60)\n if width < 10:\n print('Too large.')\n sys.exit(2)\n if len(sys.argv) > 2:\n text = ' '.join(sys.argv[2:])\n else:\n pt = 100\n width = 30\n if not text:\n text = input('Text: ')\n makeimage(text, pt, width)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python2\nimport os\nimport sys\nimport textwrap\n\nCOMMAND = (\n 'convert -size 1920x1080 canvas:\"rgb(149, 1, 1)\" '\n '-font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none '\n '-fill white -annotate 0 \"{1}\" -size 1920x1080 \"{2}.png\"'\n)\n\n\ndef makeimage(text, point_size=100, width=30):\n tw = textwrap.TextWrapper(width=width)\n text = \"\\n\".join(\n a.replace(\"\\\\n\", \"\\n\") for a in tw.wrap(text)\n )\n\n filename = \"\".join(\n c\n for c in text.replace(\" \", \"-\")\n if c.isalpha() or c.isdigit() or c in [\"-\", \"_\"]\n )\n\n\n os.system(COMMAND.format(point_size, text, filename))\n\n\ndef main():\n text = None\n if len(sys.argv) > 1:\n pt = int(sys.argv[1])\n width = int(-0.3 * float(sys.argv[1]) + 60)\n\n if width < 10:\n print(\"Too large.\")\n sys.exit(2)\n\n if len(sys.argv) > 2:\n text = \" \".join(sys.argv[2:])\n else:\n pt = 100\n width = 30\n\n if not text:\n text = input(\"Text: \")\n\n makeimage(text, pt, width)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import smtplib
from email.message import EmailMessage
from functools import wraps
from threading import Thread
import flask_login
from flask import flash, current_app
from togger import db
from togger.auth.models import User, Role
from togger.calendar.models import Calendar
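# Account helpers: user lookup/registration, email verification, password
# reset, and role-based access control.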
def get_user(username):
if username is None:
return
user = User.query.filter(User.username == username).first()
return user
def get_user_by_id(id):
if id is None:
return
user = User.query.filter(User.alias_id == id).first()
return user
def add_user(username, password, first_name, last_name):
if username is None or password is None:
return
calendar = Calendar(name=username)
role = Role(type=Role.OWNER, calendar=calendar, is_default=True)
user = User(username=username, first_name=first_name, last_name=last_name, roles=[role])
user.set_password(password)
verify_email(user)
db.session.add(user)
db.session.commit()
return user
def update_user(first_name, last_name):
user = flask_login.current_user
user.first_name = first_name
user.last_name = last_name
db.session.merge(user)
db.session.commit()
return True
def verify_email(user):
token = user.generate_validate_token()
url = current_app.config['APP_URL'] + "/auth/verify/" + token
subject = "[Togger] Welcome to Togger. Verify your email"
prepare_email(user.username, subject, url)
def restore_password(token, new_password):
user = User()
if user.check_password_token(token):
user = get_user(user.username)
user.set_password(new_password)
db.session.merge(user)
db.session.commit()
flask_login.login_user(user, remember=True)
return True
else:
flash("Restoration link got expired. Please request a new one.", 'danger')
return False
def password_email(username):
user = get_user(username)
if user and user.is_verified:
token = user.generate_password_token()
url = current_app.config['APP_URL'] + "/auth/restore/" + token
subject = "[Togger] Forgot your password? The restoration link is inside"
prepare_email(user.username, subject, url)
def prepare_email(address, subject, content):
thread = Thread(target=send_email,
args=(address, subject, content, current_app.config,))
thread.daemon = True
thread.start()
def send_email(username, subject, content, config):
msg = EmailMessage()
msg.set_content(content)
msg['Subject'] = subject
msg['From'] = config['SMTP_MAILBOX']
msg['To'] = username
s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])
s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])
s.send_message(msg)
s.quit()
def confirm_verify_email(token):
user = User()
if user.check_validate_token(token):
user = get_user(user.username)
user.is_verified = True
db.session.merge(user)
db.session.commit()
else:
flash('Verification link got expired. Please request a new one.', 'danger')
def change_password(old_password, new_password):
if flask_login.current_user.check_password(old_password):
flask_login.current_user.set_password(new_password)
db.session.merge(flask_login.current_user)
db.session.commit()
flash('Password was changed. Please sign in using new password.', 'success')
return True
flash('Current password is incorrect.', 'danger')
return False
def get_roles():
try:
return flask_login.current_user.roles
except AttributeError:
return []
def get_role():
for role in get_roles():
if role.is_default:
return role
return None
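# Decorator factory: the wrapped view runs only when the current default role
# is at least role_type; otherwise Flask-Login's unauthorized handler is used.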
def has_role(role_type):
def decorator(function):
@wraps(function)
def wrapper(*args, **kwargs):
role = get_role()
if role and role.type >= role_type:
result = function(*args, **kwargs)
else:
result = current_app.login_manager.unauthorized()
return result
return wrapper
return decorator
|
normal
|
{
"blob_id": "fab3e524edf6783775fabf402f9148bf31ac06d6",
"index": 2914,
"step-1": "<mask token>\n\n\ndef get_user_by_id(id):\n if id is None:\n return\n user = User.query.filter(User.alias_id == id).first()\n return user\n\n\n<mask token>\n\n\ndef update_user(first_name, last_name):\n user = flask_login.current_user\n user.first_name = first_name\n user.last_name = last_name\n db.session.merge(user)\n db.session.commit()\n return True\n\n\ndef verify_email(user):\n token = user.generate_validate_token()\n url = current_app.config['APP_URL'] + '/auth/verify/' + token\n subject = '[Togger] Welcome to Togger. Verify your email'\n prepare_email(user.username, subject, url)\n\n\ndef restore_password(token, new_password):\n user = User()\n if user.check_password_token(token):\n user = get_user(user.username)\n user.set_password(new_password)\n db.session.merge(user)\n db.session.commit()\n flask_login.login_user(user, remember=True)\n return True\n else:\n flash('Restoration link got expired. Please request a new one.',\n 'danger')\n return False\n\n\ndef password_email(username):\n user = get_user(username)\n if user and user.is_verified:\n token = user.generate_password_token()\n url = current_app.config['APP_URL'] + '/auth/restore/' + token\n subject = (\n '[Togger] Forgot your password? The restoration link is inside')\n prepare_email(user.username, subject, url)\n\n\n<mask token>\n\n\ndef send_email(username, subject, content, config):\n msg = EmailMessage()\n msg.set_content(content)\n msg['Subject'] = subject\n msg['From'] = config['SMTP_MAILBOX']\n msg['To'] = username\n s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])\n s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])\n s.send_message(msg)\n s.quit()\n\n\ndef confirm_verify_email(token):\n user = User()\n if user.check_validate_token(token):\n user = get_user(user.username)\n user.is_verified = True\n db.session.merge(user)\n db.session.commit()\n else:\n flash('Verification link got expired. Please request a new one.',\n 'danger')\n\n\ndef change_password(old_password, new_password):\n if flask_login.current_user.check_password(old_password):\n flask_login.current_user.set_password(new_password)\n db.session.merge(flask_login.current_user)\n db.session.commit()\n flash('Password was changed. Please sign in using new password.',\n 'success')\n return True\n flash('Current password is incorrect.', 'danger')\n return False\n\n\ndef get_roles():\n try:\n return flask_login.current_user.roles\n except AttributeError:\n return []\n\n\ndef get_role():\n for role in get_roles():\n if role.is_default:\n return role\n return None\n\n\ndef has_role(role_type):\n\n def decorator(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n role = get_role()\n if role and role.type >= role_type:\n result = function(*args, **kwargs)\n else:\n result = current_app.login_manager.unauthorized()\n return result\n return wrapper\n return decorator\n",
"step-2": "<mask token>\n\n\ndef get_user(username):\n if username is None:\n return\n user = User.query.filter(User.username == username).first()\n return user\n\n\ndef get_user_by_id(id):\n if id is None:\n return\n user = User.query.filter(User.alias_id == id).first()\n return user\n\n\n<mask token>\n\n\ndef update_user(first_name, last_name):\n user = flask_login.current_user\n user.first_name = first_name\n user.last_name = last_name\n db.session.merge(user)\n db.session.commit()\n return True\n\n\ndef verify_email(user):\n token = user.generate_validate_token()\n url = current_app.config['APP_URL'] + '/auth/verify/' + token\n subject = '[Togger] Welcome to Togger. Verify your email'\n prepare_email(user.username, subject, url)\n\n\ndef restore_password(token, new_password):\n user = User()\n if user.check_password_token(token):\n user = get_user(user.username)\n user.set_password(new_password)\n db.session.merge(user)\n db.session.commit()\n flask_login.login_user(user, remember=True)\n return True\n else:\n flash('Restoration link got expired. Please request a new one.',\n 'danger')\n return False\n\n\ndef password_email(username):\n user = get_user(username)\n if user and user.is_verified:\n token = user.generate_password_token()\n url = current_app.config['APP_URL'] + '/auth/restore/' + token\n subject = (\n '[Togger] Forgot your password? The restoration link is inside')\n prepare_email(user.username, subject, url)\n\n\n<mask token>\n\n\ndef send_email(username, subject, content, config):\n msg = EmailMessage()\n msg.set_content(content)\n msg['Subject'] = subject\n msg['From'] = config['SMTP_MAILBOX']\n msg['To'] = username\n s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])\n s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])\n s.send_message(msg)\n s.quit()\n\n\ndef confirm_verify_email(token):\n user = User()\n if user.check_validate_token(token):\n user = get_user(user.username)\n user.is_verified = True\n db.session.merge(user)\n db.session.commit()\n else:\n flash('Verification link got expired. Please request a new one.',\n 'danger')\n\n\ndef change_password(old_password, new_password):\n if flask_login.current_user.check_password(old_password):\n flask_login.current_user.set_password(new_password)\n db.session.merge(flask_login.current_user)\n db.session.commit()\n flash('Password was changed. Please sign in using new password.',\n 'success')\n return True\n flash('Current password is incorrect.', 'danger')\n return False\n\n\ndef get_roles():\n try:\n return flask_login.current_user.roles\n except AttributeError:\n return []\n\n\ndef get_role():\n for role in get_roles():\n if role.is_default:\n return role\n return None\n\n\ndef has_role(role_type):\n\n def decorator(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n role = get_role()\n if role and role.type >= role_type:\n result = function(*args, **kwargs)\n else:\n result = current_app.login_manager.unauthorized()\n return result\n return wrapper\n return decorator\n",
"step-3": "<mask token>\n\n\ndef get_user(username):\n if username is None:\n return\n user = User.query.filter(User.username == username).first()\n return user\n\n\ndef get_user_by_id(id):\n if id is None:\n return\n user = User.query.filter(User.alias_id == id).first()\n return user\n\n\ndef add_user(username, password, first_name, last_name):\n if username is None or password is None:\n return\n calendar = Calendar(name=username)\n role = Role(type=Role.OWNER, calendar=calendar, is_default=True)\n user = User(username=username, first_name=first_name, last_name=\n last_name, roles=[role])\n user.set_password(password)\n verify_email(user)\n db.session.add(user)\n db.session.commit()\n return user\n\n\ndef update_user(first_name, last_name):\n user = flask_login.current_user\n user.first_name = first_name\n user.last_name = last_name\n db.session.merge(user)\n db.session.commit()\n return True\n\n\ndef verify_email(user):\n token = user.generate_validate_token()\n url = current_app.config['APP_URL'] + '/auth/verify/' + token\n subject = '[Togger] Welcome to Togger. Verify your email'\n prepare_email(user.username, subject, url)\n\n\ndef restore_password(token, new_password):\n user = User()\n if user.check_password_token(token):\n user = get_user(user.username)\n user.set_password(new_password)\n db.session.merge(user)\n db.session.commit()\n flask_login.login_user(user, remember=True)\n return True\n else:\n flash('Restoration link got expired. Please request a new one.',\n 'danger')\n return False\n\n\ndef password_email(username):\n user = get_user(username)\n if user and user.is_verified:\n token = user.generate_password_token()\n url = current_app.config['APP_URL'] + '/auth/restore/' + token\n subject = (\n '[Togger] Forgot your password? The restoration link is inside')\n prepare_email(user.username, subject, url)\n\n\n<mask token>\n\n\ndef send_email(username, subject, content, config):\n msg = EmailMessage()\n msg.set_content(content)\n msg['Subject'] = subject\n msg['From'] = config['SMTP_MAILBOX']\n msg['To'] = username\n s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])\n s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])\n s.send_message(msg)\n s.quit()\n\n\ndef confirm_verify_email(token):\n user = User()\n if user.check_validate_token(token):\n user = get_user(user.username)\n user.is_verified = True\n db.session.merge(user)\n db.session.commit()\n else:\n flash('Verification link got expired. Please request a new one.',\n 'danger')\n\n\ndef change_password(old_password, new_password):\n if flask_login.current_user.check_password(old_password):\n flask_login.current_user.set_password(new_password)\n db.session.merge(flask_login.current_user)\n db.session.commit()\n flash('Password was changed. Please sign in using new password.',\n 'success')\n return True\n flash('Current password is incorrect.', 'danger')\n return False\n\n\ndef get_roles():\n try:\n return flask_login.current_user.roles\n except AttributeError:\n return []\n\n\ndef get_role():\n for role in get_roles():\n if role.is_default:\n return role\n return None\n\n\ndef has_role(role_type):\n\n def decorator(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n role = get_role()\n if role and role.type >= role_type:\n result = function(*args, **kwargs)\n else:\n result = current_app.login_manager.unauthorized()\n return result\n return wrapper\n return decorator\n",
"step-4": "import smtplib\nfrom email.message import EmailMessage\nfrom functools import wraps\nfrom threading import Thread\nimport flask_login\nfrom flask import flash, current_app\nfrom togger import db\nfrom togger.auth.models import User, Role\nfrom togger.calendar.models import Calendar\n\n\ndef get_user(username):\n if username is None:\n return\n user = User.query.filter(User.username == username).first()\n return user\n\n\ndef get_user_by_id(id):\n if id is None:\n return\n user = User.query.filter(User.alias_id == id).first()\n return user\n\n\ndef add_user(username, password, first_name, last_name):\n if username is None or password is None:\n return\n calendar = Calendar(name=username)\n role = Role(type=Role.OWNER, calendar=calendar, is_default=True)\n user = User(username=username, first_name=first_name, last_name=\n last_name, roles=[role])\n user.set_password(password)\n verify_email(user)\n db.session.add(user)\n db.session.commit()\n return user\n\n\ndef update_user(first_name, last_name):\n user = flask_login.current_user\n user.first_name = first_name\n user.last_name = last_name\n db.session.merge(user)\n db.session.commit()\n return True\n\n\ndef verify_email(user):\n token = user.generate_validate_token()\n url = current_app.config['APP_URL'] + '/auth/verify/' + token\n subject = '[Togger] Welcome to Togger. Verify your email'\n prepare_email(user.username, subject, url)\n\n\ndef restore_password(token, new_password):\n user = User()\n if user.check_password_token(token):\n user = get_user(user.username)\n user.set_password(new_password)\n db.session.merge(user)\n db.session.commit()\n flask_login.login_user(user, remember=True)\n return True\n else:\n flash('Restoration link got expired. Please request a new one.',\n 'danger')\n return False\n\n\ndef password_email(username):\n user = get_user(username)\n if user and user.is_verified:\n token = user.generate_password_token()\n url = current_app.config['APP_URL'] + '/auth/restore/' + token\n subject = (\n '[Togger] Forgot your password? The restoration link is inside')\n prepare_email(user.username, subject, url)\n\n\ndef prepare_email(address, subject, content):\n thread = Thread(target=send_email, args=(address, subject, content,\n current_app.config))\n thread.daemon = True\n thread.start()\n\n\ndef send_email(username, subject, content, config):\n msg = EmailMessage()\n msg.set_content(content)\n msg['Subject'] = subject\n msg['From'] = config['SMTP_MAILBOX']\n msg['To'] = username\n s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])\n s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])\n s.send_message(msg)\n s.quit()\n\n\ndef confirm_verify_email(token):\n user = User()\n if user.check_validate_token(token):\n user = get_user(user.username)\n user.is_verified = True\n db.session.merge(user)\n db.session.commit()\n else:\n flash('Verification link got expired. Please request a new one.',\n 'danger')\n\n\ndef change_password(old_password, new_password):\n if flask_login.current_user.check_password(old_password):\n flask_login.current_user.set_password(new_password)\n db.session.merge(flask_login.current_user)\n db.session.commit()\n flash('Password was changed. 
Please sign in using new password.',\n 'success')\n return True\n flash('Current password is incorrect.', 'danger')\n return False\n\n\ndef get_roles():\n try:\n return flask_login.current_user.roles\n except AttributeError:\n return []\n\n\ndef get_role():\n for role in get_roles():\n if role.is_default:\n return role\n return None\n\n\ndef has_role(role_type):\n\n def decorator(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n role = get_role()\n if role and role.type >= role_type:\n result = function(*args, **kwargs)\n else:\n result = current_app.login_manager.unauthorized()\n return result\n return wrapper\n return decorator\n",
"step-5": "import smtplib\nfrom email.message import EmailMessage\nfrom functools import wraps\nfrom threading import Thread\n\nimport flask_login\nfrom flask import flash, current_app\n\nfrom togger import db\nfrom togger.auth.models import User, Role\nfrom togger.calendar.models import Calendar\n\n\ndef get_user(username):\n if username is None:\n return\n user = User.query.filter(User.username == username).first()\n return user\n\n\ndef get_user_by_id(id):\n if id is None:\n return\n user = User.query.filter(User.alias_id == id).first()\n return user\n\n\ndef add_user(username, password, first_name, last_name):\n if username is None or password is None:\n return\n calendar = Calendar(name=username)\n role = Role(type=Role.OWNER, calendar=calendar, is_default=True)\n user = User(username=username, first_name=first_name, last_name=last_name, roles=[role])\n user.set_password(password)\n verify_email(user)\n db.session.add(user)\n db.session.commit()\n return user\n\n\ndef update_user(first_name, last_name):\n user = flask_login.current_user\n user.first_name = first_name\n user.last_name = last_name\n db.session.merge(user)\n db.session.commit()\n return True\n\n\ndef verify_email(user):\n token = user.generate_validate_token()\n url = current_app.config['APP_URL'] + \"/auth/verify/\" + token\n subject = \"[Togger] Welcome to Togger. Verify your email\"\n prepare_email(user.username, subject, url)\n\n\ndef restore_password(token, new_password):\n user = User()\n if user.check_password_token(token):\n user = get_user(user.username)\n user.set_password(new_password)\n db.session.merge(user)\n db.session.commit()\n flask_login.login_user(user, remember=True)\n return True\n else:\n flash(\"Restoration link got expired. Please request a new one.\", 'danger')\n return False\n\n\ndef password_email(username):\n user = get_user(username)\n if user and user.is_verified:\n token = user.generate_password_token()\n url = current_app.config['APP_URL'] + \"/auth/restore/\" + token\n subject = \"[Togger] Forgot your password? The restoration link is inside\"\n prepare_email(user.username, subject, url)\n\n\ndef prepare_email(address, subject, content):\n thread = Thread(target=send_email,\n args=(address, subject, content, current_app.config,))\n thread.daemon = True\n thread.start()\n\n\ndef send_email(username, subject, content, config):\n msg = EmailMessage()\n msg.set_content(content)\n msg['Subject'] = subject\n msg['From'] = config['SMTP_MAILBOX']\n msg['To'] = username\n s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])\n s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])\n s.send_message(msg)\n s.quit()\n\n\ndef confirm_verify_email(token):\n user = User()\n if user.check_validate_token(token):\n user = get_user(user.username)\n user.is_verified = True\n db.session.merge(user)\n db.session.commit()\n else:\n flash('Verification link got expired. Please request a new one.', 'danger')\n\n\ndef change_password(old_password, new_password):\n if flask_login.current_user.check_password(old_password):\n flask_login.current_user.set_password(new_password)\n db.session.merge(flask_login.current_user)\n db.session.commit()\n flash('Password was changed. 
Please sign in using new password.', 'success')\n return True\n flash('Current password is incorrect.', 'danger')\n return False\n\n\ndef get_roles():\n try:\n return flask_login.current_user.roles\n except AttributeError:\n return []\n\n\ndef get_role():\n for role in get_roles():\n if role.is_default:\n return role\n return None\n\n\ndef has_role(role_type):\n def decorator(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n role = get_role()\n if role and role.type >= role_type:\n result = function(*args, **kwargs)\n else:\n result = current_app.login_manager.unauthorized()\n return result\n return wrapper\n return decorator\n",
"step-ids": [
11,
12,
13,
15,
16
]
}
|
[
11,
12,
13,
15,
16
] |
import os
import pytest
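# Both fixtures build a Flask test client; is_caching_enabled() presumably
# keys off FLASK_ENV, which each fixture sets before constructing the client.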
def get_client():
from apiserver import app, is_caching_enabled
app.config['TESTING'] = True
app.enable_cache(is_caching_enabled())
return app.test_client()
@pytest.fixture
def client():
os.environ['FLASK_ENV'] = 'testing'
yield get_client()
@pytest.fixture
def client_with_caching():
os.environ['FLASK_ENV'] = 'production'
yield get_client()
|
normal
|
{
"blob_id": "c0b5a0605bdfcb7cb84211d3ad0d24f78f838cdf",
"index": 5421,
"step-1": "<mask token>\n\n\[email protected]\ndef client():\n os.environ['FLASK_ENV'] = 'testing'\n yield get_client()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_client():\n from apiserver import app, is_caching_enabled\n app.config['TESTING'] = True\n app.enable_cache(is_caching_enabled())\n return app.test_client()\n\n\[email protected]\ndef client():\n os.environ['FLASK_ENV'] = 'testing'\n yield get_client()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_client():\n from apiserver import app, is_caching_enabled\n app.config['TESTING'] = True\n app.enable_cache(is_caching_enabled())\n return app.test_client()\n\n\[email protected]\ndef client():\n os.environ['FLASK_ENV'] = 'testing'\n yield get_client()\n\n\[email protected]\ndef client_with_caching():\n os.environ['FLASK_ENV'] = 'production'\n yield get_client()\n",
"step-4": "import os\nimport pytest\n\n\ndef get_client():\n from apiserver import app, is_caching_enabled\n app.config['TESTING'] = True\n app.enable_cache(is_caching_enabled())\n return app.test_client()\n\n\[email protected]\ndef client():\n os.environ['FLASK_ENV'] = 'testing'\n yield get_client()\n\n\[email protected]\ndef client_with_caching():\n os.environ['FLASK_ENV'] = 'production'\n yield get_client()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
:Author :weijinlong
:Time: :2020/1/10 17:22
:File :graph.py
:content :layer classes for building and compiling TensorFlow models
"""
import tensorflow as tf
from .base import TFLayer
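# These layer classes record tensor/op *names* rather than the objects themselves,
# presumably so a separate session runner can look them up and fetch them later.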
class TFModel(TFLayer):
def build_model(self):
raise NotImplementedError
    def add_outputs(self, *args, **kwargs):
        """Register the model's output values.
:param args:
:param kwargs:
:return:
"""
outputs = {}
for value in args:
assert isinstance(value, tf.Tensor), "function add_outputs parameter's value must be tf.Tensor"
name = value.name
outputs[name.split(':')[0]] = name
for key, value in kwargs.items():
assert isinstance(value, tf.Tensor), "function add_outputs parameter's value must be tf.Tensor"
outputs[key] = value.name
self.update_outputs(outputs)
class TFCompile(TFLayer):
def compile(self):
raise NotImplementedError
    def add_metrics(self, *args, **kwargs):
        """Register the model's evaluation metrics and optimization ops (e.g. loss and accuracy tensors, training operations).
:param args:
:param kwargs:
:return:
"""
metrics = {}
for value in args:
assert isinstance(value, (tf.Operation, tf.Tensor)), \
"function add_metrics parameter's value must be tf.Operation"
name = value.name
metrics[name.split(':')[0]] = name
for key, value in kwargs.items():
assert isinstance(value, (tf.Operation, tf.Tensor)), \
"function add_metrics parameter's value must be tf.Operation"
metrics[key] = value.name
self.update_metrics(metrics)
@property
    def fetches(self):
        """Return the model's output or evaluation values, used to drive optimization and training.
:return:
"""
return self.metrics
class TFComModel(TFModel, TFCompile):
"""
    A composite TensorFlow model: a single operator both builds the model and compiles it.
"""
def build_model(self):
raise NotImplementedError
def compile(self):
pass
|
normal
|
{
"blob_id": "cdabb4a118cb0ef55c271a446fa190a457ebe142",
"index": 7383,
"step-1": "<mask token>\n\n\nclass TFCompile(TFLayer):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TFComModel(TFModel, TFCompile):\n \"\"\"\n 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译\n \"\"\"\n\n def build_model(self):\n raise NotImplementedError\n\n def compile(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass TFCompile(TFLayer):\n\n def compile(self):\n raise NotImplementedError\n\n def add_metrics(self, *args, **kwargs):\n \"\"\"加入模型的评估指标、优化操作等,例如损失值,正确率等张量或者操作\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n metrics = {}\n for value in args:\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n name = value.name\n metrics[name.split(':')[0]] = name\n for key, value in kwargs.items():\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n metrics[key] = value.name\n self.update_metrics(metrics)\n\n @property\n def fetches(self):\n \"\"\" 获取模型输出值或者评估值, 来优化训练模型\n\n :return:\n \"\"\"\n return self.metrics\n\n\nclass TFComModel(TFModel, TFCompile):\n \"\"\"\n 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译\n \"\"\"\n\n def build_model(self):\n raise NotImplementedError\n\n def compile(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass TFModel(TFLayer):\n <mask token>\n\n def add_outputs(self, *args, **kwargs):\n \"\"\"模型的输出值\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n outputs = {}\n for value in args:\n assert isinstance(value, tf.Tensor\n ), \"function add_outputs parameter's value must be tf.Tensor\"\n name = value.name\n outputs[name.split(':')[0]] = name\n for key, value in kwargs.items():\n assert isinstance(value, tf.Tensor\n ), \"function add_outputs parameter's value must be tf.Tensor\"\n outputs[key] = value.name\n self.update_outputs(outputs)\n\n\nclass TFCompile(TFLayer):\n\n def compile(self):\n raise NotImplementedError\n\n def add_metrics(self, *args, **kwargs):\n \"\"\"加入模型的评估指标、优化操作等,例如损失值,正确率等张量或者操作\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n metrics = {}\n for value in args:\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n name = value.name\n metrics[name.split(':')[0]] = name\n for key, value in kwargs.items():\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n metrics[key] = value.name\n self.update_metrics(metrics)\n\n @property\n def fetches(self):\n \"\"\" 获取模型输出值或者评估值, 来优化训练模型\n\n :return:\n \"\"\"\n return self.metrics\n\n\nclass TFComModel(TFModel, TFCompile):\n \"\"\"\n 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译\n \"\"\"\n\n def build_model(self):\n raise NotImplementedError\n\n def compile(self):\n pass\n",
"step-4": "<mask token>\n\n\nclass TFModel(TFLayer):\n\n def build_model(self):\n raise NotImplementedError\n\n def add_outputs(self, *args, **kwargs):\n \"\"\"模型的输出值\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n outputs = {}\n for value in args:\n assert isinstance(value, tf.Tensor\n ), \"function add_outputs parameter's value must be tf.Tensor\"\n name = value.name\n outputs[name.split(':')[0]] = name\n for key, value in kwargs.items():\n assert isinstance(value, tf.Tensor\n ), \"function add_outputs parameter's value must be tf.Tensor\"\n outputs[key] = value.name\n self.update_outputs(outputs)\n\n\nclass TFCompile(TFLayer):\n\n def compile(self):\n raise NotImplementedError\n\n def add_metrics(self, *args, **kwargs):\n \"\"\"加入模型的评估指标、优化操作等,例如损失值,正确率等张量或者操作\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n metrics = {}\n for value in args:\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n name = value.name\n metrics[name.split(':')[0]] = name\n for key, value in kwargs.items():\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n metrics[key] = value.name\n self.update_metrics(metrics)\n\n @property\n def fetches(self):\n \"\"\" 获取模型输出值或者评估值, 来优化训练模型\n\n :return:\n \"\"\"\n return self.metrics\n\n\nclass TFComModel(TFModel, TFCompile):\n \"\"\"\n 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译\n \"\"\"\n\n def build_model(self):\n raise NotImplementedError\n\n def compile(self):\n pass\n",
"step-5": "#!/usr/bin/env python\r\n# -*- coding:utf-8 _*-\r\n\r\n\"\"\"\r\n:Author :weijinlong\r\n:Time: :2020/1/10 17:22\r\n:File :graph.py\r\n:content:\r\n \r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\n\r\nfrom .base import TFLayer\r\n\r\n\r\nclass TFModel(TFLayer):\r\n\r\n def build_model(self):\r\n raise NotImplementedError\r\n\r\n def add_outputs(self, *args, **kwargs):\r\n \"\"\"模型的输出值\r\n\r\n :param args:\r\n :param kwargs:\r\n :return:\r\n \"\"\"\r\n outputs = {}\r\n for value in args:\r\n assert isinstance(value, tf.Tensor), \"function add_outputs parameter's value must be tf.Tensor\"\r\n name = value.name\r\n outputs[name.split(':')[0]] = name\r\n for key, value in kwargs.items():\r\n assert isinstance(value, tf.Tensor), \"function add_outputs parameter's value must be tf.Tensor\"\r\n outputs[key] = value.name\r\n self.update_outputs(outputs)\r\n\r\n\r\nclass TFCompile(TFLayer):\r\n\r\n def compile(self):\r\n raise NotImplementedError\r\n\r\n def add_metrics(self, *args, **kwargs):\r\n \"\"\"加入模型的评估指标、优化操作等,例如损失值,正确率等张量或者操作\r\n\r\n :param args:\r\n :param kwargs:\r\n :return:\r\n \"\"\"\r\n metrics = {}\r\n for value in args:\r\n assert isinstance(value, (tf.Operation, tf.Tensor)), \\\r\n \"function add_metrics parameter's value must be tf.Operation\"\r\n name = value.name\r\n metrics[name.split(':')[0]] = name\r\n for key, value in kwargs.items():\r\n assert isinstance(value, (tf.Operation, tf.Tensor)), \\\r\n \"function add_metrics parameter's value must be tf.Operation\"\r\n metrics[key] = value.name\r\n self.update_metrics(metrics)\r\n\r\n @property\r\n def fetches(self):\r\n \"\"\" 获取模型输出值或者评估值, 来优化训练模型\r\n\r\n :return:\r\n \"\"\"\r\n return self.metrics\r\n\r\n\r\nclass TFComModel(TFModel, TFCompile):\r\n \"\"\"\r\n 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译\r\n \"\"\"\r\n\r\n def build_model(self):\r\n raise NotImplementedError\r\n\r\n def compile(self):\r\n pass\r\n",
"step-ids": [
5,
8,
10,
11,
13
]
}
|
[
5,
8,
10,
11,
13
] |
import math
def sieve(limit):
ans = []
a = [1] * limit
a[0] = a[1] = 0
for i in range(2, limit):
if a[i] == 0:
continue
ans.append(i)
for j in range(i*i, limit, i):
            a[j] = 0
return ans
is_square = lambda x: int(math.sqrt(x) + 1e-9) ** 2 == x
N = 10 ** 6
p = sieve(N)
ps = set(p)
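# Scan odd composites for one that cannot be written as prime + 2*k^2
# (Project Euler 46, "Goldbach's other conjecture").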
for i in range(9, N, 2):
if i in ps:
continue
found = False
for j in p[1:]:
if j > i:
break
q = (i - j) // 2
if is_square(q):
found = True
break
if not found:
print(i)
break
|
normal
|
{
"blob_id": "ff6dc347637a81c9f6a541775646b4901d719790",
"index": 9478,
"step-1": "<mask token>\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\n<mask token>\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n",
"step-3": "<mask token>\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\nis_square = lambda x: int(math.sqrt(x) + 1e-09) ** 2 == x\nN = 10 ** 6\np = sieve(N)\nps = set(p)\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n",
"step-4": "import math\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\nis_square = lambda x: int(math.sqrt(x) + 1e-09) ** 2 == x\nN = 10 ** 6\np = sieve(N)\nps = set(p)\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n",
"step-5": "import math\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i*i, limit, i):\n a[j] = 0;\n return ans\n\nis_square = lambda x: int(math.sqrt(x) + 1e-9) ** 2 == x\n\nN = 10 ** 6\n\np = sieve(N)\nps = set(p)\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Score isoSegmenter domain predictions against ground-truth segmentations,
counting a predicted domain as correct when its boundaries fall within a
user-supplied tolerance of the true ones.
"""
import os
import json
import csv
cutoff = float(input("Tolerance (decimal)? "))
docpath = "C:/Users/RackS/Documents/"
out = open("isosegmenter_scoring_error"+str(cutoff*100)+".csv", 'w', encoding='UTF-8')
summary = open("isosegmenter_score_summary_error"+str(cutoff*100)+".txt", 'w', encoding='UTF-8')
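# Per-sequence rows go to the CSV; aggregate statistics go to the summary text file.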
out.write("SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\n")
tp_eq = 0
fp_eq = 0
fn_eq = 0
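# First pass: equal-length domains ("E" files); true boundaries are multiples of domain_length.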
for file in os.listdir(docpath+"isoSegmenter100"):
if file.endswith(".csv") and "E" in file:
predict_data = csv.DictReader(open(docpath+"isoSegmenter100/"+file, 'r', encoding='UTF-8'))
seqid = file.replace(".csv", "")
with open(docpath+"ground_truth100/"+seqid+".json", 'r', encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = []
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data['domain_length'])):
true_boundaries.append(i)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i+1]
tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print("START MATCH: " + str(true_boundaries[i]) + ", " + pred_domain['Start'])
print("END MATCH: " + str(true_boundaries[i+1]) + ", " + pred_domain['End'])
print("DIFFERENCES: " + str(startdiff) + ", " + str(enddiff) + ", TOLERANCE = " + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_eq += tp_seq
fp_eq += fp_seq
fn_eq += fn_seq
sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)
ppv = round(tp_seq/(tp_seq+fp_seq), 5)
jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)
out.write(seqid+",E,"+str(truth_data['domains'])+","+str(tp_seq)+","+str(fp_seq)+","+str(fn_seq)+","+str(sensitivity)+","+str(ppv)+","+str(jaccard)+"\n")
summary.write("EQUAL-LENGTH STATISTICS\n")
summary.write("TP equal domain: " + str(tp_eq) + "\n")
summary.write("FP equal domain: " + str(fp_eq) + "\n")
summary.write("FN equal domain: " + str(fn_eq) + "\n")
summary.write("Sensitivity: " + str(round(tp_eq/(tp_eq + fn_eq),5)) + "\n")
summary.write("Precision(PPV): " + str(round(tp_eq/(tp_eq + fp_eq),5)) + "\n")
summary.write("Jaccard Index: " + str(round(tp_eq/(tp_eq + fp_eq + fn_eq),5)) + "\n\n")
tp_var = 0
fp_var = 0
fn_var = 0
for file in os.listdir(docpath+"isoSegmenter100"):
if file.endswith(".csv") and "V" in file:
predict_data = csv.DictReader(open(docpath+"isoSegmenter100/"+file, 'r', encoding='UTF-8'))
seqid = file.replace(".csv", "")
with open(docpath+"ground_truth100/"+seqid+".json", 'r', encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = [1]
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(1, int(truth_data['domains']) + 1):
b_next = true_boundaries[i-1] + int(truth_data['length_'+str(i)])
true_boundaries.append(b_next)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i+1]
tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print("START MATCH: " + str(true_boundaries[i]) + ", " + pred_domain['Start'])
print("END MATCH: " + str(true_boundaries[i+1]) + ", " + pred_domain['End'])
print("DIFFERENCES: " + str(startdiff) + ", " + str(enddiff) + ", TOLERANCE = " + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_var += tp_seq
fp_var += fp_seq
fn_var += fn_seq
sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)
ppv = round(tp_seq/(tp_seq+fp_seq), 5)
jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)
out.write(seqid+",V,"+str(truth_data['domains'])+","+str(tp_seq)+","+str(fp_seq)+","+str(fn_seq)+","+str(sensitivity)+","+str(ppv)+","+str(jaccard)+"\n")
summary.write("VARIABLE-LENGTH STATISTICS\n")
summary.write("TP equal domain: " + str(tp_var) + "\n")
summary.write("FP equal domain: " + str(fp_var) + "\n")
summary.write("FN equal domain: " + str(fn_var) + "\n")
summary.write("Sensitivity: " + str(round(tp_var/(tp_var + fn_var),5)) + "\n")
summary.write("Precision(PPV): " + str(round(tp_var/(tp_var + fp_var),5)) + "\n")
summary.write("Jaccard Index: " + str(round(tp_var/(tp_var + fp_var + fn_var),5)) + "\n\n")
summary.write("OVERALL STATISTICS\n")
summary.write("TP: " + str(tp_var + tp_eq) + "\n")
summary.write("FP: " + str(fp_var + fp_eq) + "\n")
summary.write("FN: " + str(fn_var + fn_eq) + "\n")
summary.write("Sensitivity: " + str(round((tp_var + tp_eq)/(tp_var + fn_var + tp_eq + fn_eq),5)) + "\n")
summary.write("Precision(PPV): " + str(round((tp_var + tp_eq)/(tp_var + fp_var + tp_eq + fp_eq),5)) + "\n")
summary.write("Jaccard Index: " + str(round((tp_var + tp_eq)/(tp_var + fp_var + fn_var + tp_eq + fp_eq + fn_eq),5)) + "\n")
|
normal
|
{
"blob_id": "af2aa236f6bfc582093faf868a374be1ebdfabf2",
"index": 1235,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nout.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n')\n<mask token>\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'E' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data\n ['domain_length'])):\n true_boundaries.append(i)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('EQUAL-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_eq) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_eq) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +\n '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),\n 5)) + '\\n\\n')\n<mask token>\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'V' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i - 1] + int(truth_data['length_' +\n str(i)])\n true_boundaries.append(b_next)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + 
', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('VARIABLE-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_var) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_var) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_var) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +\n '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)\n ) + '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +\n fn_var), 5)) + '\\n\\n')\nsummary.write('OVERALL STATISTICS\\n')\nsummary.write('TP: ' + str(tp_var + tp_eq) + '\\n')\nsummary.write('FP: ' + str(fp_var + fp_eq) + '\\n')\nsummary.write('FN: ' + str(fn_var + fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +\n fn_var + tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + tp_eq + fp_eq), 5)) + '\\n')\nsummary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\\n')\n",
"step-3": "<mask token>\ncutoff = float(input('Tolerance (decimal)? '))\ndocpath = 'C:/Users/RackS/Documents/'\nout = open('isosegmenter_scoring_error' + str(cutoff * 100) + '.csv', 'w',\n encoding='UTF-8')\nsummary = open('isosegmenter_score_summary_error' + str(cutoff * 100) +\n '.txt', 'w', encoding='UTF-8')\nout.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n')\ntp_eq = 0\nfp_eq = 0\nfn_eq = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'E' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data\n ['domain_length'])):\n true_boundaries.append(i)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('EQUAL-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_eq) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_eq) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +\n '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),\n 5)) + '\\n\\n')\ntp_var = 0\nfp_var = 0\nfn_var = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'V' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i - 1] + int(truth_data['length_' +\n str(i)])\n true_boundaries.append(b_next)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if 
abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('VARIABLE-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_var) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_var) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_var) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +\n '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)\n ) + '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +\n fn_var), 5)) + '\\n\\n')\nsummary.write('OVERALL STATISTICS\\n')\nsummary.write('TP: ' + str(tp_var + tp_eq) + '\\n')\nsummary.write('FP: ' + str(fp_var + fp_eq) + '\\n')\nsummary.write('FN: ' + str(fn_var + fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +\n fn_var + tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + tp_eq + fp_eq), 5)) + '\\n')\nsummary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\\n')\n",
"step-4": "<mask token>\nimport os\nimport json\nimport csv\ncutoff = float(input('Tolerance (decimal)? '))\ndocpath = 'C:/Users/RackS/Documents/'\nout = open('isosegmenter_scoring_error' + str(cutoff * 100) + '.csv', 'w',\n encoding='UTF-8')\nsummary = open('isosegmenter_score_summary_error' + str(cutoff * 100) +\n '.txt', 'w', encoding='UTF-8')\nout.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n')\ntp_eq = 0\nfp_eq = 0\nfn_eq = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'E' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data\n ['domain_length'])):\n true_boundaries.append(i)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('EQUAL-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_eq) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_eq) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +\n '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),\n 5)) + '\\n\\n')\ntp_var = 0\nfp_var = 0\nfn_var = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'V' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i - 1] + int(truth_data['length_' +\n str(i)])\n true_boundaries.append(b_next)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n 
true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('VARIABLE-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_var) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_var) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_var) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +\n '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)\n ) + '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +\n fn_var), 5)) + '\\n\\n')\nsummary.write('OVERALL STATISTICS\\n')\nsummary.write('TP: ' + str(tp_var + tp_eq) + '\\n')\nsummary.write('FP: ' + str(fp_var + fp_eq) + '\\n')\nsummary.write('FN: ' + str(fn_var + fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +\n fn_var + tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + tp_eq + fp_eq), 5)) + '\\n')\nsummary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\\n')\n",
"step-5": "\"\"\"\n\"\"\"\nimport os\nimport json\nimport csv\n\ncutoff = float(input(\"Tolerance (decimal)? \"))\ndocpath = \"C:/Users/RackS/Documents/\"\nout = open(\"isosegmenter_scoring_error\"+str(cutoff*100)+\".csv\", 'w', encoding='UTF-8')\nsummary = open(\"isosegmenter_score_summary_error\"+str(cutoff*100)+\".txt\", 'w', encoding='UTF-8')\nout.write(\"SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n\")\n\ntp_eq = 0\nfp_eq = 0\nfn_eq = 0\n\nfor file in os.listdir(docpath+\"isoSegmenter100\"):\n if file.endswith(\".csv\") and \"E\" in file:\n predict_data = csv.DictReader(open(docpath+\"isoSegmenter100/\"+file, 'r', encoding='UTF-8'))\n seqid = file.replace(\".csv\", \"\")\n with open(docpath+\"ground_truth100/\"+seqid+\".json\", 'r', encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data['domain_length'])):\n true_boundaries.append(i)\n\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i+1]\n tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print(\"START MATCH: \" + str(true_boundaries[i]) + \", \" + pred_domain['Start'])\n print(\"END MATCH: \" + str(true_boundaries[i+1]) + \", \" + pred_domain['End'])\n print(\"DIFFERENCES: \" + str(startdiff) + \", \" + str(enddiff) + \", TOLERANCE = \" + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)\n ppv = round(tp_seq/(tp_seq+fp_seq), 5)\n jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid+\",E,\"+str(truth_data['domains'])+\",\"+str(tp_seq)+\",\"+str(fp_seq)+\",\"+str(fn_seq)+\",\"+str(sensitivity)+\",\"+str(ppv)+\",\"+str(jaccard)+\"\\n\")\n\nsummary.write(\"EQUAL-LENGTH STATISTICS\\n\")\nsummary.write(\"TP equal domain: \" + str(tp_eq) + \"\\n\")\nsummary.write(\"FP equal domain: \" + str(fp_eq) + \"\\n\")\nsummary.write(\"FN equal domain: \" + str(fn_eq) + \"\\n\")\nsummary.write(\"Sensitivity: \" + str(round(tp_eq/(tp_eq + fn_eq),5)) + \"\\n\")\nsummary.write(\"Precision(PPV): \" + str(round(tp_eq/(tp_eq + fp_eq),5)) + \"\\n\")\nsummary.write(\"Jaccard Index: \" + str(round(tp_eq/(tp_eq + fp_eq + fn_eq),5)) + \"\\n\\n\")\n\ntp_var = 0\nfp_var = 0\nfn_var = 0\nfor file in os.listdir(docpath+\"isoSegmenter100\"):\n if file.endswith(\".csv\") and \"V\" in file:\n predict_data = csv.DictReader(open(docpath+\"isoSegmenter100/\"+file, 'r', encoding='UTF-8'))\n seqid = file.replace(\".csv\", \"\")\n with open(docpath+\"ground_truth100/\"+seqid+\".json\", 'r', encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i-1] + int(truth_data['length_'+str(i)])\n true_boundaries.append(b_next)\n\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i+1]\n tolerance = cutoff*(true_boundaries[i+1] - 
true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print(\"START MATCH: \" + str(true_boundaries[i]) + \", \" + pred_domain['Start'])\n print(\"END MATCH: \" + str(true_boundaries[i+1]) + \", \" + pred_domain['End'])\n print(\"DIFFERENCES: \" + str(startdiff) + \", \" + str(enddiff) + \", TOLERANCE = \" + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)\n ppv = round(tp_seq/(tp_seq+fp_seq), 5)\n jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid+\",V,\"+str(truth_data['domains'])+\",\"+str(tp_seq)+\",\"+str(fp_seq)+\",\"+str(fn_seq)+\",\"+str(sensitivity)+\",\"+str(ppv)+\",\"+str(jaccard)+\"\\n\")\n\nsummary.write(\"VARIABLE-LENGTH STATISTICS\\n\")\nsummary.write(\"TP equal domain: \" + str(tp_var) + \"\\n\")\nsummary.write(\"FP equal domain: \" + str(fp_var) + \"\\n\")\nsummary.write(\"FN equal domain: \" + str(fn_var) + \"\\n\")\nsummary.write(\"Sensitivity: \" + str(round(tp_var/(tp_var + fn_var),5)) + \"\\n\")\nsummary.write(\"Precision(PPV): \" + str(round(tp_var/(tp_var + fp_var),5)) + \"\\n\")\nsummary.write(\"Jaccard Index: \" + str(round(tp_var/(tp_var + fp_var + fn_var),5)) + \"\\n\\n\")\n \n\nsummary.write(\"OVERALL STATISTICS\\n\")\nsummary.write(\"TP: \" + str(tp_var + tp_eq) + \"\\n\")\nsummary.write(\"FP: \" + str(fp_var + fp_eq) + \"\\n\")\nsummary.write(\"FN: \" + str(fn_var + fn_eq) + \"\\n\")\nsummary.write(\"Sensitivity: \" + str(round((tp_var + tp_eq)/(tp_var + fn_var + tp_eq + fn_eq),5)) + \"\\n\")\nsummary.write(\"Precision(PPV): \" + str(round((tp_var + tp_eq)/(tp_var + fp_var + tp_eq + fp_eq),5)) + \"\\n\")\nsummary.write(\"Jaccard Index: \" + str(round((tp_var + tp_eq)/(tp_var + fp_var + fn_var + tp_eq + fp_eq + fn_eq),5)) + \"\\n\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import renderdoc as rd
from typing import List
import rdtest
class D3D12_Resource_Mapping_Zoo(rdtest.TestCase):
demos_test_name = 'D3D12_Resource_Mapping_Zoo'
def test_debug_pixel(self, x, y, test_name):
pipe: rd.PipeState = self.controller.GetPipelineState()
if not pipe.GetShaderReflection(rd.ShaderStage.Pixel).debugInfo.debuggable:
rdtest.log.print("Skipping undebuggable shader at {}.".format(test_name))
return
# Debug the shader
trace: rd.ShaderDebugTrace = self.controller.DebugPixel(x, y, rd.ReplayController.NoPreference,
rd.ReplayController.NoPreference)
cycles, variables = self.process_trace(trace)
output = self.find_output_source_var(trace, rd.ShaderBuiltin.ColorOutput, 0)
debugged = self.evaluate_source_var(output, variables)
try:
self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, x, y, debugged.value.f32v[0:4])
except rdtest.TestFailureException as ex:
rdtest.log.error("Test {} did not match. {}".format(test_name, str(ex)))
return False
finally:
self.controller.FreeTrace(trace)
rdtest.log.success("Test {} matched as expected".format(test_name))
return True
def check_capture(self):
if not self.controller.GetAPIProperties().shaderDebugging:
rdtest.log.success("Shader debugging not enabled, skipping test")
return
failed = False
test_marker: rd.ActionDescription = self.find_action("sm_5_0")
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, "sm_5_0") or failed
test_marker: rd.ActionDescription = self.find_action("sm_5_1")
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, "sm_5_1") or failed
rdtest.log.begin_section("Resource array tests")
test_marker: rd.ActionDescription = self.find_action("ResArray")
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y, "ResArray({},{})".format(x, y)) or failed
rdtest.log.end_section("Resource array tests")
rdtest.log.begin_section("Bindless tests")
test_marker: rd.ActionDescription = self.find_action("Bindless")
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y, "Bindless({},{})".format(x, y)) or failed
rdtest.log.end_section("Bindless tests")
if failed:
raise rdtest.TestFailureException("Some tests were not as expected")
rdtest.log.success("All tests matched")
|
normal
|
{
"blob_id": "565888d771f53934805555390e48d4886a43bdb6",
"index": 189,
"step-1": "<mask token>\n\n\nclass D3D12_Resource_Mapping_Zoo(rdtest.TestCase):\n <mask token>\n <mask token>\n\n def check_capture(self):\n if not self.controller.GetAPIProperties().shaderDebugging:\n rdtest.log.success('Shader debugging not enabled, skipping test')\n return\n failed = False\n test_marker: rd.ActionDescription = self.find_action('sm_5_0')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n failed = not self.test_debug_pixel(200, 200, 'sm_5_0') or failed\n test_marker: rd.ActionDescription = self.find_action('sm_5_1')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n failed = not self.test_debug_pixel(200, 200, 'sm_5_1') or failed\n rdtest.log.begin_section('Resource array tests')\n test_marker: rd.ActionDescription = self.find_action('ResArray')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n for y in range(4):\n for x in range(4):\n failed = not self.test_debug_pixel(200 + x, 200 + y,\n 'ResArray({},{})'.format(x, y)) or failed\n rdtest.log.end_section('Resource array tests')\n rdtest.log.begin_section('Bindless tests')\n test_marker: rd.ActionDescription = self.find_action('Bindless')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n for y in range(4):\n for x in range(4):\n failed = not self.test_debug_pixel(200 + x, 200 + y,\n 'Bindless({},{})'.format(x, y)) or failed\n rdtest.log.end_section('Bindless tests')\n if failed:\n raise rdtest.TestFailureException('Some tests were not as expected'\n )\n rdtest.log.success('All tests matched')\n",
"step-2": "<mask token>\n\n\nclass D3D12_Resource_Mapping_Zoo(rdtest.TestCase):\n <mask token>\n\n def test_debug_pixel(self, x, y, test_name):\n pipe: rd.PipeState = self.controller.GetPipelineState()\n if not pipe.GetShaderReflection(rd.ShaderStage.Pixel\n ).debugInfo.debuggable:\n rdtest.log.print('Skipping undebuggable shader at {}.'.format(\n test_name))\n return\n trace: rd.ShaderDebugTrace = self.controller.DebugPixel(x, y, rd.\n ReplayController.NoPreference, rd.ReplayController.NoPreference)\n cycles, variables = self.process_trace(trace)\n output = self.find_output_source_var(trace, rd.ShaderBuiltin.\n ColorOutput, 0)\n debugged = self.evaluate_source_var(output, variables)\n try:\n self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, x,\n y, debugged.value.f32v[0:4])\n except rdtest.TestFailureException as ex:\n rdtest.log.error('Test {} did not match. {}'.format(test_name,\n str(ex)))\n return False\n finally:\n self.controller.FreeTrace(trace)\n rdtest.log.success('Test {} matched as expected'.format(test_name))\n return True\n\n def check_capture(self):\n if not self.controller.GetAPIProperties().shaderDebugging:\n rdtest.log.success('Shader debugging not enabled, skipping test')\n return\n failed = False\n test_marker: rd.ActionDescription = self.find_action('sm_5_0')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n failed = not self.test_debug_pixel(200, 200, 'sm_5_0') or failed\n test_marker: rd.ActionDescription = self.find_action('sm_5_1')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n failed = not self.test_debug_pixel(200, 200, 'sm_5_1') or failed\n rdtest.log.begin_section('Resource array tests')\n test_marker: rd.ActionDescription = self.find_action('ResArray')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n for y in range(4):\n for x in range(4):\n failed = not self.test_debug_pixel(200 + x, 200 + y,\n 'ResArray({},{})'.format(x, y)) or failed\n rdtest.log.end_section('Resource array tests')\n rdtest.log.begin_section('Bindless tests')\n test_marker: rd.ActionDescription = self.find_action('Bindless')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n for y in range(4):\n for x in range(4):\n failed = not self.test_debug_pixel(200 + x, 200 + y,\n 'Bindless({},{})'.format(x, y)) or failed\n rdtest.log.end_section('Bindless tests')\n if failed:\n raise rdtest.TestFailureException('Some tests were not as expected'\n )\n rdtest.log.success('All tests matched')\n",
"step-3": "<mask token>\n\n\nclass D3D12_Resource_Mapping_Zoo(rdtest.TestCase):\n demos_test_name = 'D3D12_Resource_Mapping_Zoo'\n\n def test_debug_pixel(self, x, y, test_name):\n pipe: rd.PipeState = self.controller.GetPipelineState()\n if not pipe.GetShaderReflection(rd.ShaderStage.Pixel\n ).debugInfo.debuggable:\n rdtest.log.print('Skipping undebuggable shader at {}.'.format(\n test_name))\n return\n trace: rd.ShaderDebugTrace = self.controller.DebugPixel(x, y, rd.\n ReplayController.NoPreference, rd.ReplayController.NoPreference)\n cycles, variables = self.process_trace(trace)\n output = self.find_output_source_var(trace, rd.ShaderBuiltin.\n ColorOutput, 0)\n debugged = self.evaluate_source_var(output, variables)\n try:\n self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, x,\n y, debugged.value.f32v[0:4])\n except rdtest.TestFailureException as ex:\n rdtest.log.error('Test {} did not match. {}'.format(test_name,\n str(ex)))\n return False\n finally:\n self.controller.FreeTrace(trace)\n rdtest.log.success('Test {} matched as expected'.format(test_name))\n return True\n\n def check_capture(self):\n if not self.controller.GetAPIProperties().shaderDebugging:\n rdtest.log.success('Shader debugging not enabled, skipping test')\n return\n failed = False\n test_marker: rd.ActionDescription = self.find_action('sm_5_0')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n failed = not self.test_debug_pixel(200, 200, 'sm_5_0') or failed\n test_marker: rd.ActionDescription = self.find_action('sm_5_1')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n failed = not self.test_debug_pixel(200, 200, 'sm_5_1') or failed\n rdtest.log.begin_section('Resource array tests')\n test_marker: rd.ActionDescription = self.find_action('ResArray')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n for y in range(4):\n for x in range(4):\n failed = not self.test_debug_pixel(200 + x, 200 + y,\n 'ResArray({},{})'.format(x, y)) or failed\n rdtest.log.end_section('Resource array tests')\n rdtest.log.begin_section('Bindless tests')\n test_marker: rd.ActionDescription = self.find_action('Bindless')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n for y in range(4):\n for x in range(4):\n failed = not self.test_debug_pixel(200 + x, 200 + y,\n 'Bindless({},{})'.format(x, y)) or failed\n rdtest.log.end_section('Bindless tests')\n if failed:\n raise rdtest.TestFailureException('Some tests were not as expected'\n )\n rdtest.log.success('All tests matched')\n",
"step-4": "import renderdoc as rd\nfrom typing import List\nimport rdtest\n\n\nclass D3D12_Resource_Mapping_Zoo(rdtest.TestCase):\n demos_test_name = 'D3D12_Resource_Mapping_Zoo'\n\n def test_debug_pixel(self, x, y, test_name):\n pipe: rd.PipeState = self.controller.GetPipelineState()\n if not pipe.GetShaderReflection(rd.ShaderStage.Pixel\n ).debugInfo.debuggable:\n rdtest.log.print('Skipping undebuggable shader at {}.'.format(\n test_name))\n return\n trace: rd.ShaderDebugTrace = self.controller.DebugPixel(x, y, rd.\n ReplayController.NoPreference, rd.ReplayController.NoPreference)\n cycles, variables = self.process_trace(trace)\n output = self.find_output_source_var(trace, rd.ShaderBuiltin.\n ColorOutput, 0)\n debugged = self.evaluate_source_var(output, variables)\n try:\n self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, x,\n y, debugged.value.f32v[0:4])\n except rdtest.TestFailureException as ex:\n rdtest.log.error('Test {} did not match. {}'.format(test_name,\n str(ex)))\n return False\n finally:\n self.controller.FreeTrace(trace)\n rdtest.log.success('Test {} matched as expected'.format(test_name))\n return True\n\n def check_capture(self):\n if not self.controller.GetAPIProperties().shaderDebugging:\n rdtest.log.success('Shader debugging not enabled, skipping test')\n return\n failed = False\n test_marker: rd.ActionDescription = self.find_action('sm_5_0')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n failed = not self.test_debug_pixel(200, 200, 'sm_5_0') or failed\n test_marker: rd.ActionDescription = self.find_action('sm_5_1')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n failed = not self.test_debug_pixel(200, 200, 'sm_5_1') or failed\n rdtest.log.begin_section('Resource array tests')\n test_marker: rd.ActionDescription = self.find_action('ResArray')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n for y in range(4):\n for x in range(4):\n failed = not self.test_debug_pixel(200 + x, 200 + y,\n 'ResArray({},{})'.format(x, y)) or failed\n rdtest.log.end_section('Resource array tests')\n rdtest.log.begin_section('Bindless tests')\n test_marker: rd.ActionDescription = self.find_action('Bindless')\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n for y in range(4):\n for x in range(4):\n failed = not self.test_debug_pixel(200 + x, 200 + y,\n 'Bindless({},{})'.format(x, y)) or failed\n rdtest.log.end_section('Bindless tests')\n if failed:\n raise rdtest.TestFailureException('Some tests were not as expected'\n )\n rdtest.log.success('All tests matched')\n",
"step-5": "import renderdoc as rd\nfrom typing import List\nimport rdtest\n\n\nclass D3D12_Resource_Mapping_Zoo(rdtest.TestCase):\n demos_test_name = 'D3D12_Resource_Mapping_Zoo'\n\n def test_debug_pixel(self, x, y, test_name):\n pipe: rd.PipeState = self.controller.GetPipelineState()\n\n if not pipe.GetShaderReflection(rd.ShaderStage.Pixel).debugInfo.debuggable:\n rdtest.log.print(\"Skipping undebuggable shader at {}.\".format(test_name))\n return\n\n # Debug the shader\n trace: rd.ShaderDebugTrace = self.controller.DebugPixel(x, y, rd.ReplayController.NoPreference,\n rd.ReplayController.NoPreference)\n\n cycles, variables = self.process_trace(trace)\n\n output = self.find_output_source_var(trace, rd.ShaderBuiltin.ColorOutput, 0)\n\n debugged = self.evaluate_source_var(output, variables)\n\n try:\n self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, x, y, debugged.value.f32v[0:4])\n except rdtest.TestFailureException as ex:\n rdtest.log.error(\"Test {} did not match. {}\".format(test_name, str(ex)))\n return False\n finally:\n self.controller.FreeTrace(trace)\n\n rdtest.log.success(\"Test {} matched as expected\".format(test_name))\n return True\n\n def check_capture(self):\n if not self.controller.GetAPIProperties().shaderDebugging:\n rdtest.log.success(\"Shader debugging not enabled, skipping test\")\n return\n\n failed = False\n\n test_marker: rd.ActionDescription = self.find_action(\"sm_5_0\")\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n failed = not self.test_debug_pixel(200, 200, \"sm_5_0\") or failed\n\n test_marker: rd.ActionDescription = self.find_action(\"sm_5_1\")\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n failed = not self.test_debug_pixel(200, 200, \"sm_5_1\") or failed\n\n rdtest.log.begin_section(\"Resource array tests\")\n test_marker: rd.ActionDescription = self.find_action(\"ResArray\")\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n\n for y in range(4):\n for x in range(4):\n failed = not self.test_debug_pixel(200 + x, 200 + y, \"ResArray({},{})\".format(x, y)) or failed\n\n rdtest.log.end_section(\"Resource array tests\")\n\n rdtest.log.begin_section(\"Bindless tests\")\n test_marker: rd.ActionDescription = self.find_action(\"Bindless\")\n action = test_marker.next\n self.controller.SetFrameEvent(action.eventId, False)\n\n for y in range(4):\n for x in range(4):\n failed = not self.test_debug_pixel(200 + x, 200 + y, \"Bindless({},{})\".format(x, y)) or failed\n\n rdtest.log.end_section(\"Bindless tests\")\n\n if failed:\n raise rdtest.TestFailureException(\"Some tests were not as expected\")\n\n rdtest.log.success(\"All tests matched\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
Platformer Game
"""
import arcade
import os
from Toad_arcade import Toad
# Constants
SCREEN_WIDTH = 1920
SCREEN_HEIGHT = 1080
SCREEN_TITLE = "PyToads - Battletoads reimplementation"
# Constants used to scale our sprites from their original size
CHARACTER_SCALING = 1
TILE_SCALING = 0.5
COIN_SCALING = 0.5
MOVEMENT_SPEED = 5
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
# Sprite lists
self.player_list = None
# Set up the player
self.score = 0
self.player = None
def setup(self):
self.player_list = arcade.SpriteList()
# Set up the player
self.score = 0
self.player = Toad()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
#self.player.scale = 0.8
self.player_list.append(self.player)
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw all the sprites.
self.player_list.draw()
# Put the text on the screen.
output = f"Score: {self.score}"
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
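
# Toad is imported from Toad_arcade and not shown here. A minimal stand-in,
# assuming it is an arcade.Sprite subclass; the texture below is a built-in
# arcade resource chosen purely for illustration:
class ToadSketch(arcade.Sprite):
    def __init__(self):
        super().__init__(":resources:images/animated_characters/female_adventurer/femaleAdventurer_idle.png",
                         CHARACTER_SCALING)

    def update(self):
        # apply the velocity set by MyGame.on_key_press
        self.center_x += self.change_x
        self.center_y += self.change_y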
|
normal
|
{
"blob_id": "28d8f9d9b39c40c43a362e57a7907c0a38a6bd05",
"index": 748,
"step-1": "<mask token>\n\n\nclass MyGame(arcade.Window):\n <mask token>\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n <mask token>\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyGame(arcade.Window):\n <mask token>\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n self.score = 0\n self.player = Toad()\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n self.player_list.append(self.player)\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n self.score = 0\n self.player = Toad()\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n self.player_list.append(self.player)\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport arcade\nimport os\nfrom Toad_arcade import Toad\nSCREEN_WIDTH = 1920\nSCREEN_HEIGHT = 1080\nSCREEN_TITLE = 'PyToads - Battletoads reimplementation'\nCHARACTER_SCALING = 1\nTILE_SCALING = 0.5\nCOIN_SCALING = 0.5\nMOVEMENT_SPEED = 5\n\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n self.score = 0\n self.player = Toad()\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n self.player_list.append(self.player)\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nPlatformer Game\n\"\"\"\nimport arcade\nimport os\nfrom Toad_arcade import Toad\n# Constants\nSCREEN_WIDTH = 1920\nSCREEN_HEIGHT = 1080\nSCREEN_TITLE = \"PyToads - Battletoads reimplementation\"\n\n# Constants used to scale our sprites from their original size\nCHARACTER_SCALING = 1\nTILE_SCALING = 0.5\nCOIN_SCALING = 0.5\nMOVEMENT_SPEED = 5\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n\n # Set the working directory (where we expect to find files) to the same\n # directory this .py file is in. You can leave this out of your own\n # code, but it is needed to easily run the examples using \"python -m\"\n # as mentioned at the top of this program.\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n\n \"\"\" Set up the game and initialize the variables. \"\"\"\n\n # Sprite lists\n self.player_list = None\n\n # Set up the player\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n # Set up the player\n self.score = 0\n self.player = Toad()\n\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n #self.player.scale = 0.8\n\n self.player_list.append(self.player)\n # Set the background color\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n # This command has to happen before we start drawing\n arcade.start_render()\n\n # Draw all the sprites.\n self.player_list.draw()\n\n # Put the text on the screen.\n output = f\"Score: {self.score}\"\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n\n self.player_list.update()\n self.player_list.update_animation()\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
6,
7,
9,
12,
13
]
}
|
[
6,
7,
9,
12,
13
] |
# coding: utf-8
'''
Program : py02_variavel.py
Homepage : http://www
Author : Helber Palheta <[email protected]>

Execution:
    python py02_variavel.py
'''
# the variable `curso` and its assignment
curso = "Introdução a Biopython!"

# the print function
print("Nome do Curso: "+curso)
|
normal
|
{
"blob_id": "ad59c1f0038294144b1c63db5f048b0a6b5ebb89",
"index": 4654,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Nome do Curso: ' + curso)\n",
"step-3": "<mask token>\ncurso = 'Introdução a Biopython!'\nprint('Nome do Curso: ' + curso)\n",
"step-4": "# coding: utf-8\n'''\n \n Programa : py02_variavel.py\n Homepage : http://www\n Autor : Helber Palheta <[email protected]>\n\n Execução:\n python py02_variavel.py\n\n''' \n#variável curso e sua atribuição\ncurso = \"Introdução a Biopython!\"\n\n#função print\nprint(\"Nome do Curso: \"+curso)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from trac.db import DatabaseManager
def do_upgrade(env, ver, cursor):
"""Change schema name from taskboard_schema to agiletools_version
"""
cursor.execute('UPDATE system SET name=%s WHERE name=%s',
("agiletools_version", "taskboard_schema"))
|
normal
|
{
"blob_id": "56ed5bb22d77f4d8c061f97d832a60ed9a106549",
"index": 5231,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef do_upgrade(env, ver, cursor):\n \"\"\"Change schema name from taskboard_schema to agiletools_version\n \"\"\"\n cursor.execute('UPDATE system SET name=%s WHERE name=%s', (\n 'agiletools_version', 'taskboard_schema'))\n",
"step-3": "from trac.db import DatabaseManager\n\n\ndef do_upgrade(env, ver, cursor):\n \"\"\"Change schema name from taskboard_schema to agiletools_version\n \"\"\"\n cursor.execute('UPDATE system SET name=%s WHERE name=%s', (\n 'agiletools_version', 'taskboard_schema'))\n",
"step-4": "from trac.db import DatabaseManager\n\ndef do_upgrade(env, ver, cursor):\n \"\"\"Change schema name from taskboard_schema to agiletools_version\n \"\"\"\n cursor.execute('UPDATE system SET name=%s WHERE name=%s',\n (\"agiletools_version\", \"taskboard_schema\"))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
from typing import List, Tuple

from sqlalchemy import func, text

from .model import BaseMixin
from ..core.db import db

# each order is a (column, direction) pair, e.g. ("id", "desc")
Orders = List[Tuple[str, str]]
class BaseDBMgr:
def get_page(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), page:int=1, per_page:int=10)->dict:
        '''Fetch one page of rows.
        @param BaseMixin cls_ database model class
        @param set filters query conditions
        @param Orders orders ordering, (column, direction) pairs
        @param tuple field fields to return
        @param int page page number
        @param int per_page rows per page
        @return dict
        '''
res = {
'page': {
'current_page': page,
'per_page': per_page,
'total_page': 0,
'count': 0,
},
'items': []
}
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at==0)
res['page']['count'] = query.count()
res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)
        for order in orders:
            # unpack into a fresh name so the `field` parameter is not clobbered
            col, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{col} {sort}'))
data = query.offset((page-1)*per_page).limit(per_page)
if not field:
res['items'] = [item.to_dict() for item in data]
else:
res['items'] = [item.to_dict(only=field) for item in data]
return res
def get_all(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), limit:int=0)->list:
        '''Fetch all rows matching the filters.
        @param BaseMixin cls_ database model class
        @param set filters query conditions
        @param Orders orders ordering, (column, direction) pairs
        @param tuple field fields to return
        @param int limit maximum number of rows (0 means no limit)
        @return list
        '''
query = db.query(cls_)
if filters:
query = query.filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at==0)
        for order in orders:
            # unpack into a fresh name so the `field` parameter is not clobbered
            col, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{col} {sort}'))
        if limit != 0:
            query = query.limit(limit)
        items = query.all()
        if not field:
            items = [item.to_dict() for item in items]
        else:
            items = [item.to_dict(only=field) for item in items]
return items
def get_first(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=())->dict:
        '''Fetch the first row matching the filters.
        @param BaseMixin cls_ database model class
        @param set filters query conditions
        @param Orders orders ordering, (column, direction) pairs
        @param tuple field fields to return
        @return dict
        '''
items = self.get_all(cls_, filters, orders, field, limit=1)
return items[0] if items else None
def add(self, cls_:BaseMixin, data:dict)->int:
        '''Insert one row.
        @param BaseMixin cls_ database model class
        @param dict data column values
        @return int primary key of the inserted row
        '''
item = cls_(**data)
db.add(item)
db.flush()
return item.id
def update(self, cls_:BaseMixin, data:dict, filters:set)->int:
        '''Update rows matching the filters.
        @param BaseMixin cls_ database model class
        @param dict data column values to set
        @param set filters query conditions
        @return int number of affected rows
        '''
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at==0)
return query.update(data, synchronize_session=False)
def delete(self, cls_:BaseMixin, filters:set)->int:
        '''Delete rows matching the filters (soft delete when supported).
        @param BaseMixin cls_ database model class
        @param set filters query conditions
        @return int number of affected rows
        '''
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
items = query.filter(cls_.deleted_at==0).all()
for item in items:
item.delete()
affect_rows = len(items)
else:
            affect_rows = query.delete(synchronize_session=False)
db.commit()
return affect_rows
def count(self, cls_:BaseMixin, filters:set, field=None)->int:
        '''Count rows matching the filters.
        @param BaseMixin cls_ model class
        @param set filters filter conditions
        @param Column|None field column to count; None counts whole rows
        @return int
        '''
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at==0)
        if field is None:
            return query.count()
        return query.with_entities(func.count(field)).scalar()
|
normal
|
{
"blob_id": "2c90c4e0b42a75d6d387b9b2d0118d8e991b5a08",
"index": 39,
"step-1": "<mask token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n\n def count(self, cls_: BaseMixin, filters: set, field=None) ->int:\n \"\"\"获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if 
hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n if field is None:\n return query.count()\n else:\n return query.count(field)\n",
"step-4": "<mask token>\nOrders = List[Set(str, Union(str, int, decimal.Decimal))]\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n\n def count(self, cls_: BaseMixin, filters: set, field=None) ->int:\n \"\"\"获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n 
\"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n if field is None:\n return query.count()\n else:\n return query.count(field)\n",
"step-5": "import math\nimport decimal\nfrom typing import Union, List, Set\n\nfrom sqlalchemy import text\n\nfrom .model import BaseMixin\nfrom ..core.db import db\n\n\nOrders = List[Set(str, Union(str, int, decimal.Decimal))]\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), page:int=1, per_page:int=10)->dict:\n '''获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n '''\n res = {\n 'page': {\n 'current_page': page,\n 'per_page': per_page,\n 'total_page': 0,\n 'count': 0,\n },\n 'items': []\n }\n query = db.query(cls_).filter(*filters)\n \n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n\n data = query.offset((page-1)*per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n \n return res\n\n\n def get_all(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), limit:int=0)->list:\n '''获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n '''\n query = db.query(cls_)\n \n if filters:\n query = query.filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n\n if limit != 0:\n query = query.limit(limit)\n \n query = query.all()\n\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n \n return items\n\n\n def get_first(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=())->dict:\n '''获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n '''\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n\n def add(self, cls_:BaseMixin, data:dict)->int:\n '''插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n '''\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n\n def update(self, cls_:BaseMixin, data:dict, filters:set)->int:\n '''更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n return query.update(data, synchronize_session=False)\n\n\n def delete(self, cls_:BaseMixin, filters:set)->int:\n '''更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at==0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session=False)\n db.commit()\n return affect_rows\n\n\n def count(self, cls_:BaseMixin, filters:set, field=None)->int:\n 
'''获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n \n if field is None:\n return query.count()\n else:\n return query.count(field)\n",
"step-ids": [
3,
7,
8,
9,
11
]
}
|
[
3,
7,
8,
9,
11
] |
#!/usr/bin/python
#Author: Jesus Fabian Cubas <[email protected]>
#if
sesion = 2
if sesion == 1 :
	print 'we are in session 01'
elif sesion == 2 :
	print 'we are in session 02'
else :
	print 'we are not in session 01'
#while
edad = 0
while edad < 18 :
	edad = edad + 1
print edad
#for
lista = ["a", "b", "c", "d"]
for elemento in lista :
	print elemento
|
normal
|
{
"blob_id": "64c4b64b6fb0cfa25c17f66243c60a5dc0166017",
"index": 7698,
"step-1": "#!/usr/bin/python\n#Autor: Jesus Fabian Cubas <[email protected]>\n\n#if\nsesion = 2\nif sesion == 1 :\n\tprint 'estamos en la sesion 01'\nelif sesion == 2 :\n\tprint 'estamos en la sesion 02'\nelse :\n\tprint 'no estamos en la sesion 01'\n\n#while\nedad = 0\nwhile edad < 18 :\n\tedad = edad + 1\nprint edad\n\n#for\nlista = [\"a\", \"b\", \"c\", \"d\"]\nfor elemento in lista :\n\tprint elemento\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.db import models
from datetime import datetime
# Message model for testing purposes
class Message(models.Model):
type = models.CharField(max_length=10)
body = models.CharField(max_length=50)
def __str__(self):
return self.type + ":" + self.body
# Company model
class Company(models.Model):
name = models.CharField(max_length=10)
@classmethod
def create(cls, name):
company = cls(name=name)
return company
def __str__(self):
return self.name
# model for storing message and its prediction
class Entry(models.Model):
fetched_date = models.DateTimeField()
message = models.CharField(max_length=200)
prediction = models.CharField(max_length=10)
parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)
@classmethod
def create(cls, message, prediction, company):
entry = cls(message=message, prediction=prediction, parent_company=company)
entry.fetched_date = datetime.now()
return entry
def __str__(self):
return self.fetched_date.strftime("%m/%d/%Y, %H:%M:%S") + " " + self.prediction + ":" + self.message
|
normal
|
{
"blob_id": "47f6c4b3c279a065b8f21dab2faa71271db8d6ab",
"index": 6680,
"step-1": "<mask token>\n\n\nclass Company(models.Model):\n <mask token>\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-2": "<mask token>\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=10)\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-3": "<mask token>\n\n\nclass Message(models.Model):\n type = models.CharField(max_length=10)\n body = models.CharField(max_length=50)\n\n def __str__(self):\n return self.type + ':' + self.body\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=10)\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-4": "from django.db import models\nfrom datetime import datetime\n\n\nclass Message(models.Model):\n type = models.CharField(max_length=10)\n body = models.CharField(max_length=50)\n\n def __str__(self):\n return self.type + ':' + self.body\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=10)\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-5": "from django.db import models\r\nfrom datetime import datetime\r\n\r\n\r\n# Message model for testing purposes\r\nclass Message(models.Model):\r\n type = models.CharField(max_length=10)\r\n body = models.CharField(max_length=50)\r\n\r\n def __str__(self):\r\n return self.type + \":\" + self.body\r\n\r\n\r\n# Company model\r\nclass Company(models.Model):\r\n name = models.CharField(max_length=10)\r\n\r\n @classmethod\r\n def create(cls, name):\r\n company = cls(name=name)\r\n return company\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\n# model for storing message and its prediction\r\nclass Entry(models.Model):\r\n fetched_date = models.DateTimeField()\r\n message = models.CharField(max_length=200)\r\n prediction = models.CharField(max_length=10)\r\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\r\n\r\n @classmethod\r\n def create(cls, message, prediction, company):\r\n entry = cls(message=message, prediction=prediction, parent_company=company)\r\n entry.fetched_date = datetime.now()\r\n return entry\r\n\r\n def __str__(self):\r\n return self.fetched_date.strftime(\"%m/%d/%Y, %H:%M:%S\") + \" \" + self.prediction + \":\" + self.message\r\n",
"step-ids": [
7,
8,
11,
12,
13
]
}
|
[
7,
8,
11,
12,
13
] |
class NumMatrix(object):
def __init__(self, matrix):
if matrix:
self.dp = [[0] * (len(matrix[0]) + 1) for i in range(len(matrix)+1)]
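            # dp[i][j] holds the sum of matrix[0..i-1][0..j-1] (2-D prefix sums; row/col 0 stay 0)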
for i in xrange(1,len(matrix)+1):
for j in xrange(1,len(matrix[0])+1):
self.dp[i][j] = self.dp[i-1][j] + self.dp[i][j-1] + matrix[i-1][j-1] - self.dp[i-1][j-1]
def sumRegion(self, row1, col1, row2, col2):
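        # inclusion-exclusion over the prefix sums gives the sum of the rectangle (row1, col1)..(row2, col2)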
return self.dp[row2+1][col2+1] + self.dp[row1][col1] - self.dp[row1][col2+1] - self.dp[row2+1][col1]
# Your NumMatrix object will be instantiated and called as such:
matrix = [[3,0,1,4,2],[5,6,3,2,1],[1,2,0,1,5],[4,1,0,1,7],[1,0,3,0,5]]
for m in matrix:
print m
print
numMatrix = NumMatrix(matrix)
print numMatrix.sumRegion(2, 1, 4, 3)
print numMatrix.sumRegion(1, 2, 3, 4)
|
normal
|
{
"blob_id": "443ce5c2ec86b9f89ad39ef2ac6772fa002e7e16",
"index": 8377,
"step-1": "class NumMatrix(object):\n\n def __init__(self, matrix):\n if matrix:\n self.dp = [[0] * (len(matrix[0]) + 1) for i in range(len(matrix)+1)]\n for i in xrange(1,len(matrix)+1):\n for j in xrange(1,len(matrix[0])+1):\n self.dp[i][j] = self.dp[i-1][j] + self.dp[i][j-1] + matrix[i-1][j-1] - self.dp[i-1][j-1]\n\n\n\n def sumRegion(self, row1, col1, row2, col2):\n\n return self.dp[row2+1][col2+1] + self.dp[row1][col1] - self.dp[row1][col2+1] - self.dp[row2+1][col1]\n\n\n\n# Your NumMatrix object will be instantiated and called as such:\nmatrix = [[3,0,1,4,2],[5,6,3,2,1],[1,2,0,1,5],[4,1,0,1,7],[1,0,3,0,5]]\nfor m in matrix:\n print m\nprint\nnumMatrix = NumMatrix(matrix)\nprint numMatrix.sumRegion(2, 1, 4, 3)\nprint numMatrix.sumRegion(1, 2, 3, 4)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
conf = {'PROJECT': 'WCCIA', 'NAS_FOLDER':
'Q:\\GROUPS\\CORP_JGS_DSE\\ATI\\quotations', 'DB_SERVER': '10.0.36.129',
'DB_PORT': '34000/'}
|
normal
|
{
"blob_id": "fbce185671267bd70cf7b91696867b72dfcc8d5b",
"index": 1585,
"step-1": "<mask token>\n",
"step-2": "conf = {'PROJECT': 'WCCIA', 'NAS_FOLDER':\n 'Q:\\\\GROUPS\\\\CORP_JGS_DSE\\\\ATI\\\\quotations', 'DB_SERVER': '10.0.36.129',\n 'DB_PORT': '34000/'}\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import urllib3
import json
def download(url):
print('Downloading ', url)
    userAgent = 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36'
    AcceptLanguage = 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'
    AcceptEncoding = 'gzip, deflate'
    Accept = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
Cookie = 'JSESSIONID=A58B0B1DC96828832B92EE91D9E92605.7; tuNQaYE2WCOr80S=O43ziCfC7BLZm.F5edsUL84qX_T8DekwZhjFvL0AXMCYWDFH2_2qqyIQwdLwjfJb; tuNQaYE2WCOr80T=4zC94ZgkJ7NBDRsPXe.HrtFd3tXcvwudE41SSD4iUqL2TMsVQSF_QZ8LinHlNDmqOg_SeNEwr7NLRVyTJ7tG81Q310tSQQPTX0GJJDgefw7pPhWCn2BTVLKZ.MM_8iydxo1hNiKsmf7t9C5h3dn5b0DwZgfFZIzR1Ji4dsQdfhFkYTG5rdPQUPR5Y9.SG8jXjtXLxhv98Jx9DkyPYf2HWMJSWhjZlSe1sjjzACwcCozHaqBCvc_6F9mVCbKTdW44GKor91iD_VU2yaig6LwIHC5lVS0hSMTZQVlYPRJiQPf9AdA'
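    # pooled client that re-uses connections and sends the browser-like headers above with every request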
    http = urllib3.PoolManager(num_pools=5, headers={'User-Agent': userAgent,
                                                     'Accept-Language': AcceptLanguage,
                                                     'Accept-Encoding': AcceptEncoding,
                                                     'Accept': Accept,
                                                     'Proxy-Connection': 'keep-alive',
                                                     'Cache-Control': 'max-age=0',
                                                     'Cookie': Cookie})
r = http.request('GET', url)
print(r.status)
html = r.data.decode()
return html
if __name__ == '__main__':
demoURL = 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex=1&pageSize=1500'
    # full example: http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK=109228
    demoDetailUrl = 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK='
for i in range(1,10):
demoURL = 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex='+str(i)+'&pageSize=1500'
ss = download(demoURL)
print(ss)
data = json.loads(ss)
for item in data:
# searchK = item['COUNT']
searchK = item['ID']
print(item['CONTENT'])
detailInfoJson = download(demoDetailUrl + str(searchK))
detailInfo = json.loads(detailInfoJson)
detailJson = '{'
for detail in detailInfo:
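                # skip the '注' ("note") entry; it is an annotation rather than a data field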
if detail['NAME'] != '注':
detailJson = detailJson + '"' + detail['NAME'] + '":"' + detail['CONTENT'] + '",'
detailJson = detailJson[:-1]
detailJson = detailJson + '}'
print(detailJson)
detailData = json.loads(detailJson)
# print(item['CONTENT'])
|
normal
|
{
"blob_id": "9d302ff2de8280bd8786794cdd533107d2a458bc",
"index": 5611,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef download(url):\n print('Downloading ', url)\n userAgent = (\n 'Mozilla/5.0 (Linux; U; Android 10; zh-cn; MI 9 Build/QKQ1.190825.002) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1'\n )\n userAgent = (\n 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36'\n )\n AcceptLanguage = 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'\n AcceptEncoding = 'gzip, deflate'\n Accept = (\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n )\n Cookie = (\n 'JSESSIONID=A58B0B1DC96828832B92EE91D9E92605.7; tuNQaYE2WCOr80S=O43ziCfC7BLZm.F5edsUL84qX_T8DekwZhjFvL0AXMCYWDFH2_2qqyIQwdLwjfJb; tuNQaYE2WCOr80T=4zC94ZgkJ7NBDRsPXe.HrtFd3tXcvwudE41SSD4iUqL2TMsVQSF_QZ8LinHlNDmqOg_SeNEwr7NLRVyTJ7tG81Q310tSQQPTX0GJJDgefw7pPhWCn2BTVLKZ.MM_8iydxo1hNiKsmf7t9C5h3dn5b0DwZgfFZIzR1Ji4dsQdfhFkYTG5rdPQUPR5Y9.SG8jXjtXLxhv98Jx9DkyPYf2HWMJSWhjZlSe1sjjzACwcCozHaqBCvc_6F9mVCbKTdW44GKor91iD_VU2yaig6LwIHC5lVS0hSMTZQVlYPRJiQPf9AdA'\n )\n http = urllib3.PoolManager(num_pools=5, headers={'User-Agent':\n userAgent, 'Accept - Language': AcceptLanguage, 'Accept-Encoding':\n AcceptEncoding, 'Accept': Accept, 'Proxy-Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0', 'Cookie': Cookie})\n r = http.request('GET', url)\n print(r.status)\n html = r.data.decode()\n return html\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef download(url):\n print('Downloading ', url)\n userAgent = (\n 'Mozilla/5.0 (Linux; U; Android 10; zh-cn; MI 9 Build/QKQ1.190825.002) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1'\n )\n userAgent = (\n 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36'\n )\n AcceptLanguage = 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'\n AcceptEncoding = 'gzip, deflate'\n Accept = (\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n )\n Cookie = (\n 'JSESSIONID=A58B0B1DC96828832B92EE91D9E92605.7; tuNQaYE2WCOr80S=O43ziCfC7BLZm.F5edsUL84qX_T8DekwZhjFvL0AXMCYWDFH2_2qqyIQwdLwjfJb; tuNQaYE2WCOr80T=4zC94ZgkJ7NBDRsPXe.HrtFd3tXcvwudE41SSD4iUqL2TMsVQSF_QZ8LinHlNDmqOg_SeNEwr7NLRVyTJ7tG81Q310tSQQPTX0GJJDgefw7pPhWCn2BTVLKZ.MM_8iydxo1hNiKsmf7t9C5h3dn5b0DwZgfFZIzR1Ji4dsQdfhFkYTG5rdPQUPR5Y9.SG8jXjtXLxhv98Jx9DkyPYf2HWMJSWhjZlSe1sjjzACwcCozHaqBCvc_6F9mVCbKTdW44GKor91iD_VU2yaig6LwIHC5lVS0hSMTZQVlYPRJiQPf9AdA'\n )\n http = urllib3.PoolManager(num_pools=5, headers={'User-Agent':\n userAgent, 'Accept - Language': AcceptLanguage, 'Accept-Encoding':\n AcceptEncoding, 'Accept': Accept, 'Proxy-Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0', 'Cookie': Cookie})\n r = http.request('GET', url)\n print(r.status)\n html = r.data.decode()\n return html\n\n\nif __name__ == '__main__':\n demoURL = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex=1&pageSize=1500'\n )\n demoDetailUrl = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK=109228'\n )\n demoDetailUrl = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK='\n )\n for i in range(1, 10):\n demoURL = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex='\n + str(i) + '&pageSize=1500')\n ss = download(demoURL)\n print(ss)\n data = json.loads(ss)\n for item in data:\n searchK = item['ID']\n print(item['CONTENT'])\n detailInfoJson = download(demoDetailUrl + str(searchK))\n detailInfo = json.loads(detailInfoJson)\n detailJson = '{'\n for detail in detailInfo:\n if detail['NAME'] != '注':\n detailJson = detailJson + '\"' + detail['NAME'\n ] + '\":\"' + detail['CONTENT'] + '\",'\n detailJson = detailJson[:-1]\n detailJson = detailJson + '}'\n print(detailJson)\n detailData = json.loads(detailJson)\n",
"step-4": "import urllib3\nimport json\n\n\ndef download(url):\n print('Downloading ', url)\n userAgent = (\n 'Mozilla/5.0 (Linux; U; Android 10; zh-cn; MI 9 Build/QKQ1.190825.002) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1'\n )\n userAgent = (\n 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36'\n )\n AcceptLanguage = 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'\n AcceptEncoding = 'gzip, deflate'\n Accept = (\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n )\n Cookie = (\n 'JSESSIONID=A58B0B1DC96828832B92EE91D9E92605.7; tuNQaYE2WCOr80S=O43ziCfC7BLZm.F5edsUL84qX_T8DekwZhjFvL0AXMCYWDFH2_2qqyIQwdLwjfJb; tuNQaYE2WCOr80T=4zC94ZgkJ7NBDRsPXe.HrtFd3tXcvwudE41SSD4iUqL2TMsVQSF_QZ8LinHlNDmqOg_SeNEwr7NLRVyTJ7tG81Q310tSQQPTX0GJJDgefw7pPhWCn2BTVLKZ.MM_8iydxo1hNiKsmf7t9C5h3dn5b0DwZgfFZIzR1Ji4dsQdfhFkYTG5rdPQUPR5Y9.SG8jXjtXLxhv98Jx9DkyPYf2HWMJSWhjZlSe1sjjzACwcCozHaqBCvc_6F9mVCbKTdW44GKor91iD_VU2yaig6LwIHC5lVS0hSMTZQVlYPRJiQPf9AdA'\n )\n http = urllib3.PoolManager(num_pools=5, headers={'User-Agent':\n userAgent, 'Accept - Language': AcceptLanguage, 'Accept-Encoding':\n AcceptEncoding, 'Accept': Accept, 'Proxy-Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0', 'Cookie': Cookie})\n r = http.request('GET', url)\n print(r.status)\n html = r.data.decode()\n return html\n\n\nif __name__ == '__main__':\n demoURL = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex=1&pageSize=1500'\n )\n demoDetailUrl = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK=109228'\n )\n demoDetailUrl = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK='\n )\n for i in range(1, 10):\n demoURL = (\n 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex='\n + str(i) + '&pageSize=1500')\n ss = download(demoURL)\n print(ss)\n data = json.loads(ss)\n for item in data:\n searchK = item['ID']\n print(item['CONTENT'])\n detailInfoJson = download(demoDetailUrl + str(searchK))\n detailInfo = json.loads(detailInfoJson)\n detailJson = '{'\n for detail in detailInfo:\n if detail['NAME'] != '注':\n detailJson = detailJson + '\"' + detail['NAME'\n ] + '\":\"' + detail['CONTENT'] + '\",'\n detailJson = detailJson[:-1]\n detailJson = detailJson + '}'\n print(detailJson)\n detailData = json.loads(detailJson)\n",
"step-5": "import urllib3\nimport json\ndef download(url):\n print('Downloading ', url)\n userAgent = 'Mozilla/5.0 (Linux; U; Android 10; zh-cn; MI 9 Build/QKQ1.190825.002) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1'\n userAgent = 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36'\n AcceptLanguage ='zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'\n AcceptEncoding= 'gzip, deflate'\n Accept = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n\n Cookie = 'JSESSIONID=A58B0B1DC96828832B92EE91D9E92605.7; tuNQaYE2WCOr80S=O43ziCfC7BLZm.F5edsUL84qX_T8DekwZhjFvL0AXMCYWDFH2_2qqyIQwdLwjfJb; tuNQaYE2WCOr80T=4zC94ZgkJ7NBDRsPXe.HrtFd3tXcvwudE41SSD4iUqL2TMsVQSF_QZ8LinHlNDmqOg_SeNEwr7NLRVyTJ7tG81Q310tSQQPTX0GJJDgefw7pPhWCn2BTVLKZ.MM_8iydxo1hNiKsmf7t9C5h3dn5b0DwZgfFZIzR1Ji4dsQdfhFkYTG5rdPQUPR5Y9.SG8jXjtXLxhv98Jx9DkyPYf2HWMJSWhjZlSe1sjjzACwcCozHaqBCvc_6F9mVCbKTdW44GKor91iD_VU2yaig6LwIHC5lVS0hSMTZQVlYPRJiQPf9AdA'\n\n http = urllib3.PoolManager(num_pools=5, headers={'User-Agent': userAgent,'Accept - Language': AcceptLanguage,\n 'Accept-Encoding': AcceptEncoding ,'Accept':Accept,\n 'Proxy-Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0',\n 'Cookie':Cookie})\n r = http.request('GET', url)\n print(r.status)\n html = r.data.decode()\n return html\n\n\nif __name__ == '__main__':\n demoURL = 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex=1&pageSize=1500'\n demoDetailUrl = 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK=109228'\n demoDetailUrl = 'http://mobile.nmpa.gov.cn/datasearch/QueryRecord?tableId=25&searchF=ID&searchK='\n\n for i in range(1,10):\n demoURL = 'http://mobile.nmpa.gov.cn/datasearch/QueryList?tableId=25&searchF=Quick%20SearchK&pageIndex='+str(i)+'&pageSize=1500'\n ss = download(demoURL)\n\n print(ss)\n data = json.loads(ss)\n for item in data:\n # searchK = item['COUNT']\n searchK = item['ID']\n print(item['CONTENT'])\n detailInfoJson = download(demoDetailUrl + str(searchK))\n detailInfo = json.loads(detailInfoJson)\n detailJson = '{'\n for detail in detailInfo:\n if detail['NAME'] != '注':\n detailJson = detailJson + '\"' + detail['NAME'] + '\":\"' + detail['CONTENT'] + '\",'\n detailJson = detailJson[:-1]\n detailJson = detailJson + '}'\n print(detailJson)\n detailData = json.loads(detailJson)\n # print(item['CONTENT'])\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Proper parenthetics extra credit kata."""
from _que_structure import Q
def proper_parenthetics(string):
"""Return if parentheses are matching or not."""
if isinstance(string, str):
paren_q = Q()
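        # load each character into a FIFO queue, then consume it left to right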
for i in range(len(string)):
paren_q.enqueue(string[i])
opening_parens = 0
closing_parens = 0
while paren_q.size() > 0 and paren_q.queue.head is not None:
i = paren_q.dequeue()
if i != '(' and i != ')':
raise TypeError('proper_parenthetics takes only parentheses.')
if i == '(' and closing_parens == 0:
opening_parens += 1
elif i == '(' and closing_parens > 0:
closing_parens -= 1
elif i == ')' and opening_parens == 0:
return -1
elif i == ')' and opening_parens > 0:
opening_parens -= 1
if opening_parens - closing_parens == 0:
return 0
if opening_parens - closing_parens > 0:
return 1
raise TypeError('proper_parenthetics takes only strings')
|
normal
|
{
"blob_id": "a28ece0db9bf0d4c3ab26207216b1da45f7aaa0f",
"index": 7582,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef proper_parenthetics(string):\n \"\"\"Return if parentheses are matching or not.\"\"\"\n if isinstance(string, str):\n paren_q = Q()\n for i in range(len(string)):\n paren_q.enqueue(string[i])\n opening_parens = 0\n closing_parens = 0\n while paren_q.size() > 0 and paren_q.queue.head is not None:\n i = paren_q.dequeue()\n if i != '(' and i != ')':\n raise TypeError('proper_parenthetics takes only parentheses.')\n if i == '(' and closing_parens == 0:\n opening_parens += 1\n elif i == '(' and closing_parens > 0:\n closing_parens -= 1\n elif i == ')' and opening_parens == 0:\n return -1\n elif i == ')' and opening_parens > 0:\n opening_parens -= 1\n if opening_parens - closing_parens == 0:\n return 0\n if opening_parens - closing_parens > 0:\n return 1\n raise TypeError('proper_parenthetics takes only strings')\n",
"step-3": "<mask token>\nfrom _que_structure import Q\n\n\ndef proper_parenthetics(string):\n \"\"\"Return if parentheses are matching or not.\"\"\"\n if isinstance(string, str):\n paren_q = Q()\n for i in range(len(string)):\n paren_q.enqueue(string[i])\n opening_parens = 0\n closing_parens = 0\n while paren_q.size() > 0 and paren_q.queue.head is not None:\n i = paren_q.dequeue()\n if i != '(' and i != ')':\n raise TypeError('proper_parenthetics takes only parentheses.')\n if i == '(' and closing_parens == 0:\n opening_parens += 1\n elif i == '(' and closing_parens > 0:\n closing_parens -= 1\n elif i == ')' and opening_parens == 0:\n return -1\n elif i == ')' and opening_parens > 0:\n opening_parens -= 1\n if opening_parens - closing_parens == 0:\n return 0\n if opening_parens - closing_parens > 0:\n return 1\n raise TypeError('proper_parenthetics takes only strings')\n",
"step-4": "\"\"\"Proper parenthetics extra credit kata.\"\"\"\n\nfrom _que_structure import Q\n\n\ndef proper_parenthetics(string):\n \"\"\"Return if parentheses are matching or not.\"\"\"\n if isinstance(string, str):\n paren_q = Q()\n for i in range(len(string)):\n paren_q.enqueue(string[i])\n opening_parens = 0\n closing_parens = 0\n while paren_q.size() > 0 and paren_q.queue.head is not None:\n i = paren_q.dequeue()\n if i != '(' and i != ')':\n raise TypeError('proper_parenthetics takes only parentheses.')\n if i == '(' and closing_parens == 0:\n opening_parens += 1\n elif i == '(' and closing_parens > 0:\n closing_parens -= 1\n elif i == ')' and opening_parens == 0:\n return -1\n elif i == ')' and opening_parens > 0:\n opening_parens -= 1\n if opening_parens - closing_parens == 0:\n return 0\n if opening_parens - closing_parens > 0:\n return 1\n raise TypeError('proper_parenthetics takes only strings')\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import FWCore.ParameterSet.Config as cms
from RecoTracker.MeasurementDet.UpdaterService_cfi import *
from RecoTracker.MeasurementDet.MeasurementTrackerESProducer_cfi import *
|
normal
|
{
"blob_id": "e79505e802a06f091bbb12708c45e04c4e80da60",
"index": 7618,
"step-1": "<mask token>\n",
"step-2": "import FWCore.ParameterSet.Config as cms\nfrom RecoTracker.MeasurementDet.UpdaterService_cfi import *\nfrom RecoTracker.MeasurementDet.MeasurementTrackerESProducer_cfi import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""
CP1404 Practical
unreliable car test
"""
from unreliable_car import UnreliableCar
def main():
good_car = UnreliableCar("good car", 100, 80)
bad_car = UnreliableCar("bad car", 100, 10)
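    # the third argument is presumably the reliability percentage: 80 for the good car, 10 for the bad one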
for i in range(10):
print("try to drive {} km".format(i))
print("{:10} drove {:2}km".format(good_car.name, good_car.drive(i)))
print("{:10} drove {:2}km".format(bad_car.name, bad_car.drive(i)))
print(good_car)
print(bad_car)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "f29ad02f3781c7a7d2a1f0c97626dd5c7ea2417e",
"index": 7867,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n good_car = UnreliableCar('good car', 100, 80)\n bad_car = UnreliableCar('bad car', 100, 10)\n for i in range(10):\n print('try to drive {} km'.format(i))\n print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))\n print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n good_car = UnreliableCar('good car', 100, 80)\n bad_car = UnreliableCar('bad car', 100, 10)\n for i in range(10):\n print('try to drive {} km'.format(i))\n print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))\n print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom unreliable_car import UnreliableCar\n\n\ndef main():\n good_car = UnreliableCar('good car', 100, 80)\n bad_car = UnreliableCar('bad car', 100, 10)\n for i in range(10):\n print('try to drive {} km'.format(i))\n print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))\n print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nCP1404 Practical\nunreliable car test\n\"\"\"\nfrom unreliable_car import UnreliableCar\n\n\ndef main():\n good_car = UnreliableCar(\"good car\", 100, 80)\n bad_car = UnreliableCar(\"bad car\", 100, 10)\n\n for i in range(10):\n print(\"try to drive {} km\".format(i))\n print(\"{:10} drove {:2}km\".format(good_car.name, good_car.drive(i)))\n print(\"{:10} drove {:2}km\".format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
archivo = open("salida2.csv", "a+")
startTime = datetime.now()
def mergeSort(alist):
print("Splitting ",alist)
if len(alist)>1:
mid = len(alist)//2
lefthalf = alist[:mid]
righthalf = alist[mid:]
mergeSort(lefthalf)
mergeSort(righthalf)
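        # merge the two sorted halves back into alist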
a=0
b=0
k=0
while a < len(lefthalf) and b < len(righthalf):
if lefthalf[a] < righthalf[b]:
alist[k]=lefthalf[a]
a=a+1
else:
alist[k]=righthalf[b]
b=b+1
k=k+1
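        # copy any leftover elements from whichever half was not exhausted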
while a < len(lefthalf):
alist[k]=lefthalf[a]
a=a+1
k=k+1
while b < len(righthalf):
alist[k]=righthalf[b]
b=b+1
k=k+1
alist = []
N = int(input(""))
nums = input("").split()
for a in nums:
alist.append(int(a))
mergeSort(alist)
print(' '.join(str(a) for a in alist)+' \n')
tiempo = datetime.now() - startTime
archivo.write(str(N)+",")
archivo.write(str(tiempo)+"\n")
archivo.close()
|
normal
|
{
"blob_id": "9e98c6b59433369bca3d4f7ae261f7e7ab3aae6b",
"index": 4161,
"step-1": "<mask token>\n\n\ndef mergeSort(alist):\n print('Splitting ', alist)\n if len(alist) > 1:\n mid = len(alist) // 2\n lefthalf = alist[:mid]\n righthalf = alist[mid:]\n mergeSort(lefthalf)\n mergeSort(righthalf)\n a = 0\n b = 0\n k = 0\n while a < len(lefthalf) and b < len(righthalf):\n if lefthalf[a] < righthalf[b]:\n alist[k] = lefthalf[a]\n a = a + 1\n else:\n alist[k] = righthalf[b]\n b = b + 1\n k = k + 1\n while a < len(lefthalf):\n alist[k] = lefthalf[a]\n a = a + 1\n k = k + 1\n while b < len(righthalf):\n alist[k] = righthalf[b]\n b = b + 1\n k = k + 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mergeSort(alist):\n print('Splitting ', alist)\n if len(alist) > 1:\n mid = len(alist) // 2\n lefthalf = alist[:mid]\n righthalf = alist[mid:]\n mergeSort(lefthalf)\n mergeSort(righthalf)\n a = 0\n b = 0\n k = 0\n while a < len(lefthalf) and b < len(righthalf):\n if lefthalf[a] < righthalf[b]:\n alist[k] = lefthalf[a]\n a = a + 1\n else:\n alist[k] = righthalf[b]\n b = b + 1\n k = k + 1\n while a < len(lefthalf):\n alist[k] = lefthalf[a]\n a = a + 1\n k = k + 1\n while b < len(righthalf):\n alist[k] = righthalf[b]\n b = b + 1\n k = k + 1\n\n\n<mask token>\nfor a in nums:\n alist.append(int(a))\nmergeSort(alist)\nprint(' '.join(str(a) for a in alist) + ' \\n')\n<mask token>\narchivo.write(str(N) + ',')\narchivo.write(str(tiempo) + '\\n')\narchivo.close()\n",
"step-3": "<mask token>\narchivo = open('salida2.csv', 'a+')\nstartTime = datetime.now()\n\n\ndef mergeSort(alist):\n print('Splitting ', alist)\n if len(alist) > 1:\n mid = len(alist) // 2\n lefthalf = alist[:mid]\n righthalf = alist[mid:]\n mergeSort(lefthalf)\n mergeSort(righthalf)\n a = 0\n b = 0\n k = 0\n while a < len(lefthalf) and b < len(righthalf):\n if lefthalf[a] < righthalf[b]:\n alist[k] = lefthalf[a]\n a = a + 1\n else:\n alist[k] = righthalf[b]\n b = b + 1\n k = k + 1\n while a < len(lefthalf):\n alist[k] = lefthalf[a]\n a = a + 1\n k = k + 1\n while b < len(righthalf):\n alist[k] = righthalf[b]\n b = b + 1\n k = k + 1\n\n\nalist = []\nN = int(input(''))\nnums = input('').split()\nfor a in nums:\n alist.append(int(a))\nmergeSort(alist)\nprint(' '.join(str(a) for a in alist) + ' \\n')\ntiempo = datetime.now() - startTime\narchivo.write(str(N) + ',')\narchivo.write(str(tiempo) + '\\n')\narchivo.close()\n",
"step-4": "from datetime import datetime\narchivo = open('salida2.csv', 'a+')\nstartTime = datetime.now()\n\n\ndef mergeSort(alist):\n print('Splitting ', alist)\n if len(alist) > 1:\n mid = len(alist) // 2\n lefthalf = alist[:mid]\n righthalf = alist[mid:]\n mergeSort(lefthalf)\n mergeSort(righthalf)\n a = 0\n b = 0\n k = 0\n while a < len(lefthalf) and b < len(righthalf):\n if lefthalf[a] < righthalf[b]:\n alist[k] = lefthalf[a]\n a = a + 1\n else:\n alist[k] = righthalf[b]\n b = b + 1\n k = k + 1\n while a < len(lefthalf):\n alist[k] = lefthalf[a]\n a = a + 1\n k = k + 1\n while b < len(righthalf):\n alist[k] = righthalf[b]\n b = b + 1\n k = k + 1\n\n\nalist = []\nN = int(input(''))\nnums = input('').split()\nfor a in nums:\n alist.append(int(a))\nmergeSort(alist)\nprint(' '.join(str(a) for a in alist) + ' \\n')\ntiempo = datetime.now() - startTime\narchivo.write(str(N) + ',')\narchivo.write(str(tiempo) + '\\n')\narchivo.close()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\n\narchivo = open(\"salida2.csv\", \"a+\")\n\nstartTime = datetime.now()\ndef mergeSort(alist):\n print(\"Splitting \",alist)\n if len(alist)>1:\n mid = len(alist)//2\n lefthalf = alist[:mid]\n righthalf = alist[mid:]\n\n mergeSort(lefthalf)\n mergeSort(righthalf)\n\n a=0\n b=0\n k=0\n while a < len(lefthalf) and b < len(righthalf):\n if lefthalf[a] < righthalf[b]:\n alist[k]=lefthalf[a]\n a=a+1\n else:\n alist[k]=righthalf[b]\n b=b+1\n k=k+1\n\n while a < len(lefthalf):\n alist[k]=lefthalf[a]\n a=a+1\n k=k+1\n\n while b < len(righthalf):\n alist[k]=righthalf[b]\n b=b+1\n k=k+1\n\nalist = []\nN = int(input(\"\"))\nnums = input(\"\").split()\nfor a in nums:\n alist.append(int(a))\nmergeSort(alist)\nprint(' '.join(str(a) for a in alist)+' \\n')\ntiempo = datetime.now() - startTime\n\narchivo.write(str(N)+\",\")\narchivo.write(str(tiempo)+\"\\n\")\narchivo.close()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from UI.Window import Window
class PolygonApplication:
def __init__(self):
self.window = Window(
"Détermination des périmètre, surface et centre de gravité d'un polygone"
)
self.window.addMouseClickListener(self.window.onClick)
def start(self):
self.window.show()
|
normal
|
{
"blob_id": "795bd22fb805069b342915638c52900ea52a4939",
"index": 9321,
"step-1": "<mask token>\n\n\nclass PolygonApplication:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PolygonApplication:\n <mask token>\n\n def start(self):\n self.window.show()\n",
"step-3": "<mask token>\n\n\nclass PolygonApplication:\n\n def __init__(self):\n self.window = Window(\n \"Détermination des périmètre, surface et centre de gravité d'un polygone\"\n )\n self.window.addMouseClickListener(self.window.onClick)\n\n def start(self):\n self.window.show()\n",
"step-4": "from UI.Window import Window\n\n\nclass PolygonApplication:\n\n def __init__(self):\n self.window = Window(\n \"Détermination des périmètre, surface et centre de gravité d'un polygone\"\n )\n self.window.addMouseClickListener(self.window.onClick)\n\n def start(self):\n self.window.show()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |