__author__ = 'GazouillisTeam'
import numpy as np
import os
import sys
import time
from keras.callbacks import Callback
def save_architecture(model, path_out):
"""
Based on the keras utils 'model.summary()'
"""
    # Redirect the print output to a textfile
    orig_stdout = sys.stdout
    # and store the architecture
    f = open(os.path.join(path_out, "architecture.txt"), 'w')
sys.stdout = f
model.summary()
# Reset the print output direction
sys.stdout = orig_stdout
f.close()
    with open(os.path.join(path_out, "config.json"), 'w') as f:
        f.write(model.to_json())
def create_log(path, settings, filename="log.txt"):
f = open(os.path.join(path, filename), "w")
f.writelines(str(settings))
f.writelines("\n####\nStarted on %s at %s\n" % (time.strftime("%d/%m/%Y"), time.strftime("%H:%M:%S")))
f.close()
def write_log(path, string, filename="log.txt"):
"""
Add a line at the end of a textfile.
:param path: textfile location
:param string: line to add
"""
# Open and Read
f = open(os.path.join(path, filename), "r")
lines = f.readlines()
f.close()
# Adding a line
lines.append(string)
# Write
f = open(os.path.join(path, filename), "w")
f.writelines(lines)
f.close()
class ModelSaver(Callback):
"""
    Keras callback subclass which defines the saving procedure for the model being
    trained: the weights are saved once just after initialization (as 'after_initialization'),
    then after each epoch either as 'best_model' (when the monitored quantity improves)
    or as 'last_epoch' (otherwise), using np.save or HDF5 (see save_weights).
"""
def __init__(self, path, path_weights, monitor, verbose=1, h5py=False):
        super(ModelSaver, self).__init__()
self.verbose = verbose
self.path = path
self.path_weights = path_weights
self.monitor = monitor
        self.best = np.inf  # lowest monitored value seen so far
self.h5py = h5py
def save_weights(self, path):
if not self.h5py: # H5PY not available : save weights using np.save
w = self.model.get_weights()
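            # np.save stores this ragged list of arrays as an object array;
            # on modern NumPy, loading it back later requires allow_pickle=True.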
np.save(path+".npy", w)
else:
self.model.save_weights(path + ".h5py", overwrite=True)
def on_epoch_begin(self, epoch, logs={}):
self.epoch_start = time.time()
# Saving weights just after initialization
if epoch == 0:
save_path = os.path.join(self.path_weights, "after_initialization")
self.save_weights(save_path)
def on_epoch_end(self, epoch, logs={}):
self.epoch_end = time.time()
# get loss
monitor = logs.get(self.monitor)
# condition = True if loss decreased
condition = monitor < self.best
if condition:
# Save weights as "best_model.weights"
self.best = monitor
save_path = os.path.join(self.path_weights, "best_model")
self.save_weights(save_path)
else:
# Save weights as "last_epoch.weights"
save_path = os.path.join(self.path_weights, "last_epoch")
self.save_weights(save_path)
# Log file management
if self.verbose > 0:
log_string = "####\nEpoch %d took %d s: " % (epoch, int(self.epoch_end-self.epoch_start))
for k in logs.keys():
log_string += "%s : %.4f # " % (k, logs.get(k))
if condition:
log_string += "\tBEST"
write_log(self.path, log_string)
def trainargs2strings(path, model, dataset, index_train, index_valid, D, batch_size,
nsamples_per_epoch, nepoch, patience, lr):
settings = ""
settings += "Path : %s"%path
settings += "\nDataset shape :" + str(dataset.shape)
settings += "\nNtrain : %d"%len(index_train)
settings += "\nNvalid : %d"%len(index_valid)
settings += "\nDim : %d"%D
settings += "\nBatch size : %d"%batch_size
settings += "\nNb samples per epoch : %d"%nsamples_per_epoch
settings += "\nNb epochs : %d"%nepoch
settings += "\nPatience : %d"%patience
settings += "\nLR : %.5f"%lr
return settings
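
# Usage sketch (illustrative, not part of the original module): one way the
# helpers above could be wired around a training run. The compiled Keras
# `model`, the `dataset`/`labels` arrays, the index lists, and every
# hyperparameter value below are assumptions.
def example_training_run(model, dataset, labels, index_train, index_valid):
    path = "/tmp/experiment"  # assumed output directory for logs and weights
    settings = trainargs2strings(path, model, dataset, index_train, index_valid,
                                 D=140, batch_size=64, nsamples_per_epoch=10000,
                                 nepoch=100, patience=10, lr=0.001)
    save_architecture(model, path)   # writes architecture.txt and config.json
    create_log(path, settings)       # writes log.txt with the settings header
    saver = ModelSaver(path, path, monitor="loss", verbose=1, h5py=False)
    model.fit(dataset[index_train], labels[index_train],
              validation_data=(dataset[index_valid], labels[index_valid]),
              callbacks=[saver])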
__all__ = ["loading"]
from . import loading
import os

from ament_index_python.packages import get_package_share_directory

import launch
import launch_ros.actions
def generate_launch_description():
cart_sdf = os.path.join(get_package_share_directory('crs_support'),
'sdf', 'cart.sdf')
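    # Spawn the cart SDF model into Gazebo at a fixed pose. Note that
    # node_name/node_executable are the pre-Foxy launch_ros argument names
    # (later ROS 2 releases renamed them to name/executable).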
cart_spawner = launch_ros.actions.Node(node_name='spawn_node', package=
'gazebo_ros', node_executable='spawn_entity.py', arguments=[
'-entity', 'cart', '-x', '0', '-y', '0.2', '-z', '0.05', '-file',
cart_sdf])
return launch.LaunchDescription([cart_spawner])
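
# Usage note (package and file names here are assumptions): if this file were
# installed as launch/spawn_cart.launch.py in the crs_support package, it could
# be started with:
#   ros2 launch crs_support spawn_cart.launch.py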
import datetime
class Schedule:
def __init__(self, start, end, name, other): # Constructor
self.start = self.str_convert(start) # Schedule start time (ex. 9:00)
self.end = self.str_convert(end) # Schedule end time (ex. 22:00)
self.name = name # Schedule name (ex. member name, final schedule, etc)
self.other = other # Schedule exceptions/"other"
self.array = self.create_array() # Schedule array (2D array of days of week (7) x half hour blocks)
def str_convert(self, str_time):
        # Converts start/end time to datetime if entered as a string
if isinstance(str_time, str):
str_time = datetime.datetime.strptime(str_time, '%H:%M')
return datetime.time(str_time.hour, str_time.minute)
return str_time
def create_array(self):
# Generate array from number of (30 minute) blocks
num_blocks = self.calculate_num_blocks(self.start, self.end)
return [[True for x in range(num_blocks)] for y in range(7)]
@staticmethod
def calculate_num_blocks(start, end):
# Determining size of array: get difference
total_hrs = end.hour - start.hour
total_mins = end.minute - start.minute
# Determining size of array: in 30 min blocks (rounded)
num_half_hr = int(total_mins/30)
num_blocks = 2 * total_hrs + num_half_hr
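        # Worked example: start=9:00 and end=22:00 give total_hrs=13 and
        # total_mins=0, so num_blocks = 2*13 + 0 = 26 half-hour slots (the
        # 0-26 index range referenced in ExSchedule.visualize below).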
return num_blocks
def prep_visualize(self):
# Banner
print("\n######### VISUALIZING WEEK: " + self.name + " #########")
print(self.start, "-", self.end, "\n")
num_blocks = self.calculate_num_blocks(self.start, self.end)
days = ["S", "M", "T", "W", "R", "F", "S" ]
times = []
# Fill times column (from datetime obj)
# Convert to datetime.datetime object, add timedelta, convert back - arbitrary datetime.date(1, 1, 1)
dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)
for i in range(num_blocks):
num_blocks_i = datetime.timedelta(minutes=30*i)
combined = (dtdt + num_blocks_i).time()
times.append(combined.strftime("%H:%M"))
return days, times
def visualize(self):
days, times = self.prep_visualize()
# HEADER:
print("#####", end=" ")
for d in days: print("(" + d + ") ", end="")
print("#####")
# SCHEDULE:
for t in range(len(times)):
print(times[t], end=" ")
for d in range(7):
slot = self.array[d][t]
if slot is True: slot = " "
elif slot is False: slot = " x "
print(slot, end=" ")
print(times[t])
print()
def print_other(self):
print(self.name + "\t ", self.other.replace("\n", "; "))
class ExSchedule(Schedule):
def __init__(self, start, end, num_members, list_membs):
Schedule.__init__(self, start, end, "ExSched", None)
self.num_members = num_members
self.list_membs = list_membs
self.exarray = self.create_exarray()
def create_exarray(self):
num_blocks = Schedule.calculate_num_blocks(self.start, self.end)
return [[[True for z in range(self.num_members)] for x in range(num_blocks)] for y in range(7)]
def visualize(self):
days, times = Schedule.prep_visualize(self)
print("Members: "+ self.list_membs[:-2])
# HEADER:
print("##### ", end="")
for d in days:
num_spaces = len(self.exarray[0][1]) - 1
left_half = int(num_spaces / 2)
right_half = num_spaces - left_half
print("(", end="")
print(''.join([" " for x in range(left_half)]), end=d)
print(''.join([" " for x in range(right_half)]), end=")")
print(" #####")
# SCHEDULE:
for i in range(len(times)): # i: 0-26 (9:00) = m: 0-26 ([T,T,T])
print(times[i], end=" ")
for d in range(len(self.exarray)): # d: 0-6 (sun)
array = self.exarray[d][i]
print("[", end="")
for memb_avail in array:
print("-", end="") if memb_avail is True else print("*", end="")
print("]", end="")
print(" ", end=times[i]+"\n")
|
normal
|
{
"blob_id": "f56978d5738c2f8cb4ed5ce4f11d3aae6a9689b1",
"index": 4604,
"step-1": "<mask token>\n\n\nclass Schedule:\n\n def __init__(self, start, end, name, other):\n self.start = self.str_convert(start)\n self.end = self.str_convert(end)\n self.name = name\n self.other = other\n self.array = self.create_array()\n <mask token>\n <mask token>\n\n @staticmethod\n def calculate_num_blocks(start, end):\n total_hrs = end.hour - start.hour\n total_mins = end.minute - start.minute\n num_half_hr = int(total_mins / 30)\n num_blocks = 2 * total_hrs + num_half_hr\n return num_blocks\n\n def prep_visualize(self):\n print('\\n######### VISUALIZING WEEK: ' + self.name + ' #########')\n print(self.start, '-', self.end, '\\n')\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n days = ['S', 'M', 'T', 'W', 'R', 'F', 'S']\n times = []\n dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)\n for i in range(num_blocks):\n num_blocks_i = datetime.timedelta(minutes=30 * i)\n combined = (dtdt + num_blocks_i).time()\n times.append(combined.strftime('%H:%M'))\n return days, times\n\n def visualize(self):\n days, times = self.prep_visualize()\n print('#####', end=' ')\n for d in days:\n print('(' + d + ') ', end='')\n print('#####')\n for t in range(len(times)):\n print(times[t], end=' ')\n for d in range(7):\n slot = self.array[d][t]\n if slot is True:\n slot = ' '\n elif slot is False:\n slot = ' x '\n print(slot, end=' ')\n print(times[t])\n print()\n <mask token>\n\n\nclass ExSchedule(Schedule):\n\n def __init__(self, start, end, num_members, list_membs):\n Schedule.__init__(self, start, end, 'ExSched', None)\n self.num_members = num_members\n self.list_membs = list_membs\n self.exarray = self.create_exarray()\n\n def create_exarray(self):\n num_blocks = Schedule.calculate_num_blocks(self.start, self.end)\n return [[[(True) for z in range(self.num_members)] for x in range(\n num_blocks)] for y in range(7)]\n\n def visualize(self):\n days, times = Schedule.prep_visualize(self)\n print('Members: ' + self.list_membs[:-2])\n print('##### ', end='')\n for d in days:\n num_spaces = len(self.exarray[0][1]) - 1\n left_half = int(num_spaces / 2)\n right_half = num_spaces - left_half\n print('(', end='')\n print(''.join([' ' for x in range(left_half)]), end=d)\n print(''.join([' ' for x in range(right_half)]), end=')')\n print(' #####')\n for i in range(len(times)):\n print(times[i], end=' ')\n for d in range(len(self.exarray)):\n array = self.exarray[d][i]\n print('[', end='')\n for memb_avail in array:\n print('-', end='') if memb_avail is True else print('*',\n end='')\n print(']', end='')\n print(' ', end=times[i] + '\\n')\n",
"step-2": "<mask token>\n\n\nclass Schedule:\n\n def __init__(self, start, end, name, other):\n self.start = self.str_convert(start)\n self.end = self.str_convert(end)\n self.name = name\n self.other = other\n self.array = self.create_array()\n <mask token>\n <mask token>\n\n @staticmethod\n def calculate_num_blocks(start, end):\n total_hrs = end.hour - start.hour\n total_mins = end.minute - start.minute\n num_half_hr = int(total_mins / 30)\n num_blocks = 2 * total_hrs + num_half_hr\n return num_blocks\n\n def prep_visualize(self):\n print('\\n######### VISUALIZING WEEK: ' + self.name + ' #########')\n print(self.start, '-', self.end, '\\n')\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n days = ['S', 'M', 'T', 'W', 'R', 'F', 'S']\n times = []\n dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)\n for i in range(num_blocks):\n num_blocks_i = datetime.timedelta(minutes=30 * i)\n combined = (dtdt + num_blocks_i).time()\n times.append(combined.strftime('%H:%M'))\n return days, times\n\n def visualize(self):\n days, times = self.prep_visualize()\n print('#####', end=' ')\n for d in days:\n print('(' + d + ') ', end='')\n print('#####')\n for t in range(len(times)):\n print(times[t], end=' ')\n for d in range(7):\n slot = self.array[d][t]\n if slot is True:\n slot = ' '\n elif slot is False:\n slot = ' x '\n print(slot, end=' ')\n print(times[t])\n print()\n\n def print_other(self):\n print(self.name + '\\t ', self.other.replace('\\n', '; '))\n\n\nclass ExSchedule(Schedule):\n\n def __init__(self, start, end, num_members, list_membs):\n Schedule.__init__(self, start, end, 'ExSched', None)\n self.num_members = num_members\n self.list_membs = list_membs\n self.exarray = self.create_exarray()\n\n def create_exarray(self):\n num_blocks = Schedule.calculate_num_blocks(self.start, self.end)\n return [[[(True) for z in range(self.num_members)] for x in range(\n num_blocks)] for y in range(7)]\n\n def visualize(self):\n days, times = Schedule.prep_visualize(self)\n print('Members: ' + self.list_membs[:-2])\n print('##### ', end='')\n for d in days:\n num_spaces = len(self.exarray[0][1]) - 1\n left_half = int(num_spaces / 2)\n right_half = num_spaces - left_half\n print('(', end='')\n print(''.join([' ' for x in range(left_half)]), end=d)\n print(''.join([' ' for x in range(right_half)]), end=')')\n print(' #####')\n for i in range(len(times)):\n print(times[i], end=' ')\n for d in range(len(self.exarray)):\n array = self.exarray[d][i]\n print('[', end='')\n for memb_avail in array:\n print('-', end='') if memb_avail is True else print('*',\n end='')\n print(']', end='')\n print(' ', end=times[i] + '\\n')\n",
"step-3": "<mask token>\n\n\nclass Schedule:\n\n def __init__(self, start, end, name, other):\n self.start = self.str_convert(start)\n self.end = self.str_convert(end)\n self.name = name\n self.other = other\n self.array = self.create_array()\n\n def str_convert(self, str_time):\n if isinstance(str_time, str):\n str_time = datetime.datetime.strptime(str_time, '%H:%M')\n return datetime.time(str_time.hour, str_time.minute)\n return str_time\n\n def create_array(self):\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n return [[(True) for x in range(num_blocks)] for y in range(7)]\n\n @staticmethod\n def calculate_num_blocks(start, end):\n total_hrs = end.hour - start.hour\n total_mins = end.minute - start.minute\n num_half_hr = int(total_mins / 30)\n num_blocks = 2 * total_hrs + num_half_hr\n return num_blocks\n\n def prep_visualize(self):\n print('\\n######### VISUALIZING WEEK: ' + self.name + ' #########')\n print(self.start, '-', self.end, '\\n')\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n days = ['S', 'M', 'T', 'W', 'R', 'F', 'S']\n times = []\n dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)\n for i in range(num_blocks):\n num_blocks_i = datetime.timedelta(minutes=30 * i)\n combined = (dtdt + num_blocks_i).time()\n times.append(combined.strftime('%H:%M'))\n return days, times\n\n def visualize(self):\n days, times = self.prep_visualize()\n print('#####', end=' ')\n for d in days:\n print('(' + d + ') ', end='')\n print('#####')\n for t in range(len(times)):\n print(times[t], end=' ')\n for d in range(7):\n slot = self.array[d][t]\n if slot is True:\n slot = ' '\n elif slot is False:\n slot = ' x '\n print(slot, end=' ')\n print(times[t])\n print()\n\n def print_other(self):\n print(self.name + '\\t ', self.other.replace('\\n', '; '))\n\n\nclass ExSchedule(Schedule):\n\n def __init__(self, start, end, num_members, list_membs):\n Schedule.__init__(self, start, end, 'ExSched', None)\n self.num_members = num_members\n self.list_membs = list_membs\n self.exarray = self.create_exarray()\n\n def create_exarray(self):\n num_blocks = Schedule.calculate_num_blocks(self.start, self.end)\n return [[[(True) for z in range(self.num_members)] for x in range(\n num_blocks)] for y in range(7)]\n\n def visualize(self):\n days, times = Schedule.prep_visualize(self)\n print('Members: ' + self.list_membs[:-2])\n print('##### ', end='')\n for d in days:\n num_spaces = len(self.exarray[0][1]) - 1\n left_half = int(num_spaces / 2)\n right_half = num_spaces - left_half\n print('(', end='')\n print(''.join([' ' for x in range(left_half)]), end=d)\n print(''.join([' ' for x in range(right_half)]), end=')')\n print(' #####')\n for i in range(len(times)):\n print(times[i], end=' ')\n for d in range(len(self.exarray)):\n array = self.exarray[d][i]\n print('[', end='')\n for memb_avail in array:\n print('-', end='') if memb_avail is True else print('*',\n end='')\n print(']', end='')\n print(' ', end=times[i] + '\\n')\n",
"step-4": "import datetime\n\n\nclass Schedule:\n\n def __init__(self, start, end, name, other):\n self.start = self.str_convert(start)\n self.end = self.str_convert(end)\n self.name = name\n self.other = other\n self.array = self.create_array()\n\n def str_convert(self, str_time):\n if isinstance(str_time, str):\n str_time = datetime.datetime.strptime(str_time, '%H:%M')\n return datetime.time(str_time.hour, str_time.minute)\n return str_time\n\n def create_array(self):\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n return [[(True) for x in range(num_blocks)] for y in range(7)]\n\n @staticmethod\n def calculate_num_blocks(start, end):\n total_hrs = end.hour - start.hour\n total_mins = end.minute - start.minute\n num_half_hr = int(total_mins / 30)\n num_blocks = 2 * total_hrs + num_half_hr\n return num_blocks\n\n def prep_visualize(self):\n print('\\n######### VISUALIZING WEEK: ' + self.name + ' #########')\n print(self.start, '-', self.end, '\\n')\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n days = ['S', 'M', 'T', 'W', 'R', 'F', 'S']\n times = []\n dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)\n for i in range(num_blocks):\n num_blocks_i = datetime.timedelta(minutes=30 * i)\n combined = (dtdt + num_blocks_i).time()\n times.append(combined.strftime('%H:%M'))\n return days, times\n\n def visualize(self):\n days, times = self.prep_visualize()\n print('#####', end=' ')\n for d in days:\n print('(' + d + ') ', end='')\n print('#####')\n for t in range(len(times)):\n print(times[t], end=' ')\n for d in range(7):\n slot = self.array[d][t]\n if slot is True:\n slot = ' '\n elif slot is False:\n slot = ' x '\n print(slot, end=' ')\n print(times[t])\n print()\n\n def print_other(self):\n print(self.name + '\\t ', self.other.replace('\\n', '; '))\n\n\nclass ExSchedule(Schedule):\n\n def __init__(self, start, end, num_members, list_membs):\n Schedule.__init__(self, start, end, 'ExSched', None)\n self.num_members = num_members\n self.list_membs = list_membs\n self.exarray = self.create_exarray()\n\n def create_exarray(self):\n num_blocks = Schedule.calculate_num_blocks(self.start, self.end)\n return [[[(True) for z in range(self.num_members)] for x in range(\n num_blocks)] for y in range(7)]\n\n def visualize(self):\n days, times = Schedule.prep_visualize(self)\n print('Members: ' + self.list_membs[:-2])\n print('##### ', end='')\n for d in days:\n num_spaces = len(self.exarray[0][1]) - 1\n left_half = int(num_spaces / 2)\n right_half = num_spaces - left_half\n print('(', end='')\n print(''.join([' ' for x in range(left_half)]), end=d)\n print(''.join([' ' for x in range(right_half)]), end=')')\n print(' #####')\n for i in range(len(times)):\n print(times[i], end=' ')\n for d in range(len(self.exarray)):\n array = self.exarray[d][i]\n print('[', end='')\n for memb_avail in array:\n print('-', end='') if memb_avail is True else print('*',\n end='')\n print(']', end='')\n print(' ', end=times[i] + '\\n')\n",
"step-5": "import datetime\n\nclass Schedule:\n def __init__(self, start, end, name, other): # Constructor\n self.start = self.str_convert(start) # Schedule start time (ex. 9:00)\n self.end = self.str_convert(end) # Schedule end time (ex. 22:00)\n self.name = name # Schedule name (ex. member name, final schedule, etc)\n self.other = other # Schedule exceptions/\"other\"\n self.array = self.create_array() # Schedule array (2D array of days of week (7) x half hour blocks)\n\n def str_convert(self, str_time):\n # Converts start/end time to datettime if entered as string\n if isinstance(str_time, str):\n str_time = datetime.datetime.strptime(str_time, '%H:%M')\n return datetime.time(str_time.hour, str_time.minute)\n return str_time\n\n def create_array(self):\n # Generate array from number of (30 minute) blocks\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n return [[True for x in range(num_blocks)] for y in range(7)]\n\n @staticmethod\n def calculate_num_blocks(start, end):\n # Determining size of array: get difference\n total_hrs = end.hour - start.hour\n total_mins = end.minute - start.minute\n\n # Determining size of array: in 30 min blocks (rounded)\n num_half_hr = int(total_mins/30)\n num_blocks = 2 * total_hrs + num_half_hr\n\n return num_blocks\n\n # def get_time\n\n def prep_visualize(self):\n # Banner\n print(\"\\n######### VISUALIZING WEEK: \" + self.name + \" #########\")\n print(self.start, \"-\", self.end, \"\\n\")\n\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n days = [\"S\", \"M\", \"T\", \"W\", \"R\", \"F\", \"S\" ]\n times = []\n\n # Fill times column (from datetime obj)\n # Convert to datetime.datetime object, add timedelta, convert back - arbitrary datetime.date(1, 1, 1)\n dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)\n for i in range(num_blocks):\n num_blocks_i = datetime.timedelta(minutes=30*i)\n combined = (dtdt + num_blocks_i).time()\n times.append(combined.strftime(\"%H:%M\"))\n\n return days, times\n\n def visualize(self):\n days, times = self.prep_visualize()\n\n # HEADER:\n print(\"#####\", end=\" \")\n for d in days: print(\"(\" + d + \") \", end=\"\")\n print(\"#####\")\n\n # SCHEDULE:\n for t in range(len(times)):\n print(times[t], end=\" \")\n for d in range(7):\n slot = self.array[d][t]\n if slot is True: slot = \" \"\n elif slot is False: slot = \" x \"\n print(slot, end=\" \")\n print(times[t])\n print()\n\n def print_other(self): \n print(self.name + \"\\t \", self.other.replace(\"\\n\", \"; \"))\n\n\nclass ExSchedule(Schedule):\n def __init__(self, start, end, num_members, list_membs):\n Schedule.__init__(self, start, end, \"ExSched\", None)\n self.num_members = num_members\n self.list_membs = list_membs\n self.exarray = self.create_exarray()\n\n def create_exarray(self):\n num_blocks = Schedule.calculate_num_blocks(self.start, self.end)\n return [[[True for z in range(self.num_members)] for x in range(num_blocks)] for y in range(7)]\n\n def visualize(self):\n days, times = Schedule.prep_visualize(self)\n print(\"Members: \"+ self.list_membs[:-2])\n\n # HEADER:\n print(\"##### \", end=\"\")\n # print(days)\n # print(times)\n for d in days:\n num_spaces = len(self.exarray[0][1]) - 1\n left_half = int(num_spaces / 2)\n right_half = num_spaces - left_half\n\n print(\"(\", end=\"\")\n print(''.join([\" \" for x in range(left_half)]), end=d)\n print(''.join([\" \" for x in range(right_half)]), end=\")\")\n print(\" #####\")\n\n # SCHEDULE:\n for i in range(len(times)): # i: 0-26 (9:00) = m: 0-26 
([T,T,T])\n print(times[i], end=\" \")\n for d in range(len(self.exarray)): # d: 0-6 (sun)\n array = self.exarray[d][i]\n print(\"[\", end=\"\")\n for memb_avail in array:\n print(\"-\", end=\"\") if memb_avail is True else print(\"*\", end=\"\")\n print(\"]\", end=\"\")\n print(\" \", end=times[i]+\"\\n\")\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
#-*- coding: utf-8 -*-
def print99():
    """
    Print the 9x9 multiplication table.
    :return: None
    """
    for i in range(1, 10):
        for j in range(1, i + 1):
            print('%dX%d=%2s ' % (j, i, i * j), end='')  # keep the row on one line
        print('\n')
print99()
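# For reference, the first three rows printed (each row is followed by a blank
# line from print('\n')):
#   1X1= 1
#   1X2= 2 2X2= 4
#   1X3= 3 2X3= 6 3X3= 9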
#!/usr/bin/env python
"""
maskAOI.py
Dan Fitch 20150618
"""
from __future__ import print_function
import sys, os, math, re, numpy, csv
from PIL import Image, ImageFile, ImageDraw, ImageColor, ImageOps, ImageStat
ImageFile.MAXBLOCK = 1048576
DEBUG = False
AOI_DIR='/study/reference/public/IAPS/IAPS/IAPS_2008_1-20_800x600BMP/IAPS_2008_AOIs/'
IMG_DIR='/study/midus/IAPS2005png/'
SALIENCY_DIR='/home/fitch/aoi/saliency/'
SUN_SALIENCY_DIR='/home/fitch/aoi/sunsaliency/'
MASK_NAMES = ["0", "E", "1", "2", "3", "4"]
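# Mask layout produced by createAOIMasks(): "0" = all AOIs combined, "E" = the
# "emotional" AOIs (the second shape onward), "1".."4" = the first four
# individual AOI shapes, in file order.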
# A wrapper function to check if a string is a number (and account for negatives)
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
#Function to return only the main, averaged AOI files (the .OBT) and their coordinates.
def getCoordinates(picturename):
#Load one current image
aoiName = picturename + ".OBT"
aoiList = []
obtfile = "{0}/{1}".format(AOI_DIR, aoiName)
if not os.path.exists(obtfile):
if DEBUG: print("WARNING: No OBT file found for " + picturename)
return []
with open(obtfile) as file:
stringContent = file.readlines()
for string in stringContent:
        dirtyContent = re.split(", | |=", string)
        content = [int(x) for x in dirtyContent if RepresentsInt(x)]
if content and content != [0]:
aoiList.append(content)
return aoiList
def drawAOI(aoi, img, draw):
    # aoi[0] encodes the shape type: 1 = rectangle, anything else = ellipse
    if aoi[0] == 1:
        drawOneRect(aoi[1:5], img, draw)
    else:
        drawOneEllipse(aoi[1:5], img, draw)
# Function to display the AOI as masks
def createAOIMasks(pictureName, size):
if DEBUG: print("Displaying AOIs for picture {0}".format(pictureName))
aoiList = getCoordinates(pictureName)
if aoiList == []: return None
masks = []
# L is grayscale
img = Image.new("L", size, 0)
draw = ImageDraw.Draw(img)
for aoi in aoiList:
drawAOI(aoi, img, draw)
masks.append(img)
# Now the "emotional" masks, index 2 and up theoretically
emo = Image.new("L", size, 0)
emo_draw = ImageDraw.Draw(emo)
for aoi in aoiList[1:]:
drawAOI(aoi, emo, emo_draw)
masks.append(emo)
# Now we draw each mask individually
for aoi in aoiList:
individual = Image.new("L", size, 0)
individual_draw = ImageDraw.Draw(individual)
drawAOI(aoi, individual, individual_draw)
masks.append(individual)
return masks
def drawOneEllipse(aoi, img, draw):
    # Draw one filled ellipse; aoi = [center_x, center_y, radius_x, radius_y]
    if DEBUG: print("Ellipse centered at [{0}, {1}] with {2} {3}".format(aoi[0], aoi[1], aoi[2], aoi[3]))
    cx = aoi[0]
    cy = aoi[1]
    LeftX = cx - aoi[2]
    RightX = cx + aoi[2]
    TopY = cy - aoi[3]
    BottomY = cy + aoi[3]
    draw.ellipse(((LeftX, TopY), (RightX, BottomY)), fill="white", outline="white")
def drawOneRect(aoi, img, draw):
    # Draw one filled rectangle; aoi = [left_x, bottom_y, right_x, top_y]
    if DEBUG: print("Rectangle with Coordinates {0}".format(aoi))
    TopY = aoi[3]
    BottomY = aoi[1]
    LeftX = aoi[0]
    RightX = aoi[2]
    if DEBUG: print("  Top:{0}, Bottom:{1}, Left:{2}, Right: {3}".format(TopY, BottomY, LeftX, RightX))
    draw.rectangle(((LeftX, TopY), (RightX, BottomY)), fill="white", outline="white")
def stat(img, mask=None):
    if mask is None:
return ImageStat.Stat(img)
else:
return ImageStat.Stat(img, mask)
def brightness(img, mask=None):
return stat(img,mask).rms[0]
def luminance(c):
    if len(c) < 3 or len(c) > 4:
        raise Exception("Luminance got values: ", c)
    # Rec. 709 luma coefficients applied to an (R, G, B[, A]) tuple
    r = c[0]
    g = c[1]
    b = c[2]
    lum = r*0.2126 + g*0.7152 + b*0.0722
if len(c) == 4:
# Multiply by alpha... kind of hokey but should work for most cases
result = lum * (c[3] / 255.0)
else:
result = lum
if math.isnan(result):
return 0.0
else:
return result
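# e.g. luminance((255, 255, 255)) evaluates to ~255.0, since the Rec. 709
# coefficients sum to 1; with alpha, luminance((255, 255, 255, 128)) ~= 128.0.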
def complexity(pictureName, key, img):
    # Compressed JPEG file size serves as a crude proxy for visual complexity.
    name = "masks/{0}-{1}.jpg".format(pictureName, key)
img.save(name, quality=80, format="JPEG", optimize=True, progressive=True)
size = os.path.getsize(name)
#os.remove(name)
return size
def results_for_mask(withColors, original, pictureName, key, mask):
# We also want the area outside of the mask
mask_inverted = ImageOps.invert(mask)
stats_mask = stat(mask)
stats_in = stat(original, mask)
stats_out = stat(original, mask_inverted)
# Complexity uses the resultant image saved as jpg, so we need to prepare some actual images
stats_in_image = Image.new('RGBA', original.size, "black")
stats_in_image.paste(original, mask=mask)
stats_out_image = Image.new('RGBA', original.size, "black")
stats_out_image.paste(original, mask=mask_inverted)
try:
if withColors:
return {
key + '_mask_lum': stats_mask.mean[0] / 256.0,
key + '_in_lum': luminance(stats_in.mean) / 256.0,
key + '_in_r': stats_in.mean[0] / 256.0,
key + '_in_g': stats_in.mean[1] / 256.0,
key + '_in_b': stats_in.mean[2] / 256.0,
key + '_in_complexity': complexity(pictureName, key + "in", stats_in_image),
key + '_out_lum': luminance(stats_out.mean) / 256.0,
key + '_out_r': stats_out.mean[0] / 256.0,
key + '_out_g': stats_out.mean[1] / 256.0,
key + '_out_b': stats_out.mean[2] / 256.0,
key + '_out_complexity': complexity(pictureName, key + "out", stats_out_image),
}
else:
return {
key + '_in_lum': luminance(stats_in.mean) / 256.0,
key + '_out_lum': luminance(stats_out.mean) / 256.0,
}
except ZeroDivisionError:
return {}
def do_saliency(original, masks, path, prefix, pictureName, results):
saliency = Image.open(path + pictureName + ".png")
if saliency.mode != "RGBA":
saliency = saliency.convert("RGBA")
saliency = saliency.resize(original.size)
stats_saliency = stat(saliency)
results[prefix + '_lum'] = luminance(stats_saliency.mean) / 256.0
for i, mask in zip(MASK_NAMES, masks):
stuff = results_for_mask(False, saliency, pictureName, prefix + i, mask)
results.update(stuff)
saliency_bw = saliency.convert("L")
s_array = numpy.array(saliency_bw)
m_array = numpy.array(masks[0])
dot = numpy.dot(s_array, numpy.rot90(m_array))
results[prefix + "_aoi_dotproduct_sum"] = numpy.sum(dot)
def write_stats(writer, filename, pictureName):
original = Image.open(IMG_DIR + filename)
if original.mode != "RGBA":
# P is palette. Did you know BMP *and* PNG files can have 8-bit palettes? WHAAAT
original = original.convert("RGBA")
# First, draw the AOI masks in white on black
# This returns a list, the first mask is ALL AOIs, the second is the "emotional" ones >=2, and the rest are each individual shape
masks = createAOIMasks(pictureName, original.size)
    if masks is None:
print("No masks found in: " + filename)
return False
stats_orig = stat(original)
results = {
'image_name': pictureName,
'orig_lum': luminance(stats_orig.mean) / 256.0,
'orig_r': stats_orig.mean[0] / 256.0,
'orig_g': stats_orig.mean[1] / 256.0,
'orig_b': stats_orig.mean[2] / 256.0,
'orig_complexity': complexity(pictureName, "original", original),
}
for i, mask in zip(MASK_NAMES, masks):
stuff = results_for_mask(True, original, pictureName, 'aoi' + i, mask)
results.update(stuff)
# And finally we get the saliency image and resize it and do a bunch of garbage with it and the AOI masks
do_saliency(original, masks, SALIENCY_DIR, "saliency", pictureName, results)
do_saliency(original, masks, SUN_SALIENCY_DIR, "sun_saliency", pictureName, results)
writer.writerow(results)
if DEBUG: print("Generated stats for " + filename)
return True
with open('stats.csv', 'wb') as csvfile:
per_mask_fields = [
'_mask_lum',
'_in_lum',
'_in_r',
'_in_g',
'_in_b',
'_in_complexity',
'_out_lum',
'_out_r',
'_out_g',
'_out_b',
'_out_complexity',
]
per_saliency_fields = [
'_in_lum',
'_out_lum',
]
fields = [
'image_name',
'orig_lum',
'orig_r',
'orig_g',
'orig_b',
'orig_complexity',
]
for i in MASK_NAMES:
for f in per_mask_fields:
fields.append("aoi{0}{1}".format(i,f))
fields.append("saliency_aoi_dotproduct_sum")
fields.append("saliency_lum")
for i in MASK_NAMES:
for f in per_saliency_fields:
fields.append("saliency{0}{1}".format(i,f))
fields.append("sun_saliency_aoi_dotproduct_sum")
fields.append("sun_saliency_lum")
for i in MASK_NAMES:
for f in per_saliency_fields:
fields.append("sun_saliency{0}{1}".format(i,f))
writer = csv.DictWriter(csvfile, fieldnames=fields)
    writer.writeheader()
for filename in sorted(os.listdir(IMG_DIR)):
if not ".png" in filename:
continue
pictureName = filename.replace(".png", "")
try:
write_stats(writer, filename, pictureName)
except:
print("Error on file " + pictureName, file=sys.stderr)
raise
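
# Usage note (inferred from the constants above, not documented in the source):
# run under Python 2 from a directory containing a writable masks/ subdirectory;
# the script scans IMG_DIR for .png files and writes stats.csv plus per-mask JPEGs:
#   mkdir -p masks && python maskAOI.py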
'_in_lum'): luminance(stats_in.mean) / 256.0, (key +\n '_in_r'): stats_in.mean[0] / 256.0, (key + '_in_g'): \n stats_in.mean[1] / 256.0, (key + '_in_b'): stats_in.mean[2] /\n 256.0, (key + '_in_complexity'): complexity(pictureName, \n key + 'in', stats_in_image), (key + '_out_lum'): luminance(\n stats_out.mean) / 256.0, (key + '_out_r'): stats_out.mean[0\n ] / 256.0, (key + '_out_g'): stats_out.mean[1] / 256.0, (\n key + '_out_b'): stats_out.mean[2] / 256.0, (key +\n '_out_complexity'): complexity(pictureName, key + 'out',\n stats_out_image)}\n else:\n return {(key + '_in_lum'): luminance(stats_in.mean) / 256.0, (\n key + '_out_lum'): luminance(stats_out.mean) / 256.0}\n except ZeroDivisionError:\n return {}\n\n\ndef do_saliency(original, masks, path, prefix, pictureName, results):\n saliency = Image.open(path + pictureName + '.png')\n if saliency.mode != 'RGBA':\n saliency = saliency.convert('RGBA')\n saliency = saliency.resize(original.size)\n stats_saliency = stat(saliency)\n results[prefix + '_lum'] = luminance(stats_saliency.mean) / 256.0\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(False, saliency, pictureName, prefix + i, mask\n )\n results.update(stuff)\n saliency_bw = saliency.convert('L')\n s_array = numpy.array(saliency_bw)\n m_array = numpy.array(masks[0])\n dot = numpy.dot(s_array, numpy.rot90(m_array))\n results[prefix + '_aoi_dotproduct_sum'] = numpy.sum(dot)\n\n\ndef write_stats(writer, filename, pictureName):\n original = Image.open(IMG_DIR + filename)\n if original.mode != 'RGBA':\n original = original.convert('RGBA')\n masks = createAOIMasks(pictureName, original.size)\n if masks == None:\n print('No masks found in: ' + filename)\n return False\n stats_orig = stat(original)\n results = {'image_name': pictureName, 'orig_lum': luminance(stats_orig.\n mean) / 256.0, 'orig_r': stats_orig.mean[0] / 256.0, 'orig_g': \n stats_orig.mean[1] / 256.0, 'orig_b': stats_orig.mean[2] / 256.0,\n 'orig_complexity': complexity(pictureName, 'original', original)}\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(True, original, pictureName, 'aoi' + i, mask)\n results.update(stuff)\n do_saliency(original, masks, SALIENCY_DIR, 'saliency', pictureName, results\n )\n do_saliency(original, masks, SUN_SALIENCY_DIR, 'sun_saliency',\n pictureName, results)\n writer.writerow(results)\n if DEBUG:\n print('Generated stats for ' + filename)\n return True\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\n\n\"\"\"\nmaskAOI.py\n\nDan Fitch 20150618\n\"\"\"\n\nfrom __future__ import print_function\n\nimport sys, os, glob, shutil, fnmatch, math, re, numpy, csv\nfrom PIL import Image, ImageFile, ImageDraw, ImageColor, ImageOps, ImageStat\nImageFile.MAXBLOCK = 1048576\n\nDEBUG = False\n\nAOI_DIR='/study/reference/public/IAPS/IAPS/IAPS_2008_1-20_800x600BMP/IAPS_2008_AOIs/'\nIMG_DIR='/study/midus/IAPS2005png/'\nSALIENCY_DIR='/home/fitch/aoi/saliency/'\nSUN_SALIENCY_DIR='/home/fitch/aoi/sunsaliency/'\nMASK_NAMES = [\"0\", \"E\", \"1\", \"2\", \"3\", \"4\"]\n\n\n# A wrapper function to check if a string is a number (and account for negatives)\ndef RepresentsInt(s):\n\ttry: \n\t\tint(s)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False\n\t\t\n\n#Function to return only the main, averaged AOI files (the .OBT) and their coordinates.\ndef getCoordinates(picturename):\n #Load one current image\n aoiName = picturename + \".OBT\"\n aoiList = []\n obtfile = \"{0}/{1}\".format(AOI_DIR, aoiName)\n if not os.path.exists(obtfile):\n if DEBUG: print(\"WARNING: No OBT file found for \" + picturename)\n return []\n with open(obtfile) as file:\n stringContent = file.readlines()\n for string in stringContent:\n dirtyContent = re.split(\", | |=\", string)\n content = map(int, [ x for x in dirtyContent if RepresentsInt(x) ])\n if content and content != [0]:\n aoiList.append(content)\n return aoiList\n\n\ndef drawAOI(aoi, i, d):\n if aoi[0] == 1:\n drawOneRect(aoi[1:5], i, d)\n else:\n drawOneEllipse(aoi[1:5], i, d)\n\n# Function to display the AOI as masks\ndef createAOIMasks(pictureName, size):\n if DEBUG: print(\"Displaying AOIs for picture {0}\".format(pictureName))\n aoiList = getCoordinates(pictureName)\n\n if aoiList == []: return None\n\n masks = []\n\n # L is grayscale\n img = Image.new(\"L\", size, 0)\n draw = ImageDraw.Draw(img)\n\n for aoi in aoiList:\n drawAOI(aoi, img, draw)\n\n masks.append(img)\n\n # Now the \"emotional\" masks, index 2 and up theoretically\n emo = Image.new(\"L\", size, 0)\n emo_draw = ImageDraw.Draw(emo)\n\n for aoi in aoiList[1:]:\n drawAOI(aoi, emo, emo_draw)\n\n masks.append(emo)\n\n # Now we draw each mask individually\n for aoi in aoiList:\n individual = Image.new(\"L\", size, 0)\n individual_draw = ImageDraw.Draw(individual)\n drawAOI(aoi, individual, individual_draw)\n masks.append(individual)\n\n return masks\n\n\t\t\ndef drawOneEllipse(aoi, img, draw):\n #Draw one ellipse on the figure given\n if DEBUG: print(\"Ellipse centered at [{0}, {1}] with {2} {3}\".format(aoi[0], aoi[1], aoi[2], aoi[3]))\n imgDim = img.size\n cx=aoi[0]\n cy=aoi[1]\n w=2*aoi[2]\n h=2*aoi[3]\n imgArea=imgDim[0]*imgDim[1]\n LeftX=cx-aoi[2]\n RightX=cx+aoi[2]\n TopY=cy-aoi[3]\n BottomY=cy+aoi[3]\n draw.ellipse(((LeftX,TopY),(RightX,BottomY)), fill=\"white\", outline=\"white\")\n\t\ndef drawOneRect(aoi, img, draw):\n #Draw one rectangle on the figure given\n if DEBUG: print(\"Rectangle with Coordinates {0}\".format(aoi))\n imgDim = img.size\n TopY=aoi[3]\n BottomY=aoi[1]\n LeftX=aoi[0]\n RightX=aoi[2]\n if DEBUG: print(\" Top:{0}, Bottom:{1}, Left:{2}, Right: {3}\".format(TopY, BottomY, LeftX, RightX))\n imgArea=imgDim[0]*imgDim[1]\n draw.rectangle(((LeftX,TopY),(RightX,BottomY)), fill=\"white\", outline=\"white\")\n\ndef stat(img, mask=None):\n if mask == None:\n return ImageStat.Stat(img)\n else:\n return ImageStat.Stat(img, mask)\n\ndef brightness(img, mask=None):\n return stat(img,mask).rms[0]\n\t\ndef luminance(c):\n if len(c) < 3 or len(c) > 4:\n raise 
Exception(\"Luminance got values: \", c)\n r = c[0]\n b = c[1]\n g = c[2]\n lum = r*0.2126 + g*0.7152 + b*0.0722\n if len(c) == 4:\n # Multiply by alpha... kind of hokey but should work for most cases\n result = lum * (c[3] / 255.0)\n else:\n result = lum\n\n if math.isnan(result):\n return 0.0\n else:\n return result\n\ndef complexity(pictureName, key, img):\n name = \"masks/{0}-{1}.jpg\".format(pictureName, key)\n img.save(name, quality=80, format=\"JPEG\", optimize=True, progressive=True)\n size = os.path.getsize(name)\n #os.remove(name)\n return size\n\n\n\ndef results_for_mask(withColors, original, pictureName, key, mask):\n # We also want the area outside of the mask\n mask_inverted = ImageOps.invert(mask)\n stats_mask = stat(mask)\n stats_in = stat(original, mask)\n stats_out = stat(original, mask_inverted)\n\n # Complexity uses the resultant image saved as jpg, so we need to prepare some actual images\n\n stats_in_image = Image.new('RGBA', original.size, \"black\")\n stats_in_image.paste(original, mask=mask)\n stats_out_image = Image.new('RGBA', original.size, \"black\")\n stats_out_image.paste(original, mask=mask_inverted)\n\n try:\n if withColors:\n return {\n key + '_mask_lum': stats_mask.mean[0] / 256.0,\n key + '_in_lum': luminance(stats_in.mean) / 256.0,\n key + '_in_r': stats_in.mean[0] / 256.0,\n key + '_in_g': stats_in.mean[1] / 256.0,\n key + '_in_b': stats_in.mean[2] / 256.0,\n key + '_in_complexity': complexity(pictureName, key + \"in\", stats_in_image),\n key + '_out_lum': luminance(stats_out.mean) / 256.0,\n key + '_out_r': stats_out.mean[0] / 256.0,\n key + '_out_g': stats_out.mean[1] / 256.0,\n key + '_out_b': stats_out.mean[2] / 256.0,\n key + '_out_complexity': complexity(pictureName, key + \"out\", stats_out_image),\n }\n else:\n return {\n key + '_in_lum': luminance(stats_in.mean) / 256.0,\n key + '_out_lum': luminance(stats_out.mean) / 256.0,\n }\n except ZeroDivisionError:\n return {}\n\ndef do_saliency(original, masks, path, prefix, pictureName, results):\n saliency = Image.open(path + pictureName + \".png\")\n if saliency.mode != \"RGBA\":\n saliency = saliency.convert(\"RGBA\")\n saliency = saliency.resize(original.size)\n stats_saliency = stat(saliency)\n results[prefix + '_lum'] = luminance(stats_saliency.mean) / 256.0\n\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(False, saliency, pictureName, prefix + i, mask)\n results.update(stuff)\n\n saliency_bw = saliency.convert(\"L\")\n s_array = numpy.array(saliency_bw)\n m_array = numpy.array(masks[0])\n dot = numpy.dot(s_array, numpy.rot90(m_array))\n\n results[prefix + \"_aoi_dotproduct_sum\"] = numpy.sum(dot)\n\n\ndef write_stats(writer, filename, pictureName):\n\n original = Image.open(IMG_DIR + filename)\n\n if original.mode != \"RGBA\":\n # P is palette. Did you know BMP *and* PNG files can have 8-bit palettes? 
WHAAAT\n original = original.convert(\"RGBA\")\n\n # First, draw the AOI masks in white on black\n # This returns a list, the first mask is ALL AOIs, the second is the \"emotional\" ones >=2, and the rest are each individual shape\n masks = createAOIMasks(pictureName, original.size)\n\n if masks == None:\n print(\"No masks found in: \" + filename)\n return False\n\n stats_orig = stat(original)\n\n results = {\n 'image_name': pictureName,\n 'orig_lum': luminance(stats_orig.mean) / 256.0,\n 'orig_r': stats_orig.mean[0] / 256.0,\n 'orig_g': stats_orig.mean[1] / 256.0,\n 'orig_b': stats_orig.mean[2] / 256.0,\n 'orig_complexity': complexity(pictureName, \"original\", original),\n }\n\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(True, original, pictureName, 'aoi' + i, mask)\n results.update(stuff)\n\n # And finally we get the saliency image and resize it and do a bunch of garbage with it and the AOI masks\n\n do_saliency(original, masks, SALIENCY_DIR, \"saliency\", pictureName, results)\n do_saliency(original, masks, SUN_SALIENCY_DIR, \"sun_saliency\", pictureName, results)\n\n\n writer.writerow(results)\n if DEBUG: print(\"Generated stats for \" + filename)\n return True\n\n\n\nwith open('stats.csv', 'wb') as csvfile:\n per_mask_fields = [\n '_mask_lum',\n '_in_lum',\n '_in_r',\n '_in_g',\n '_in_b',\n '_in_complexity',\n '_out_lum',\n '_out_r',\n '_out_g',\n '_out_b',\n '_out_complexity',\n ]\n\n per_saliency_fields = [\n '_in_lum',\n '_out_lum',\n ]\n\n fields = [\n 'image_name',\n 'orig_lum',\n 'orig_r',\n 'orig_g',\n 'orig_b',\n 'orig_complexity',\n ]\n\n for i in MASK_NAMES:\n for f in per_mask_fields:\n fields.append(\"aoi{0}{1}\".format(i,f))\n\n fields.append(\"saliency_aoi_dotproduct_sum\")\n fields.append(\"saliency_lum\")\n\n for i in MASK_NAMES:\n for f in per_saliency_fields:\n fields.append(\"saliency{0}{1}\".format(i,f))\n\n fields.append(\"sun_saliency_aoi_dotproduct_sum\")\n fields.append(\"sun_saliency_lum\")\n\n for i in MASK_NAMES:\n for f in per_saliency_fields:\n fields.append(\"sun_saliency{0}{1}\".format(i,f))\n\n writer = csv.DictWriter(csvfile, fieldnames=fields)\n writer.writerow(dict(zip(fields,fields)))\n\n for filename in sorted(os.listdir(IMG_DIR)):\n if not \".png\" in filename:\n continue\n\n pictureName = filename.replace(\".png\", \"\")\n\n try:\n write_stats(writer, filename, pictureName)\n\n except:\n print(\"Error on file \" + pictureName, file=sys.stderr)\n raise\n\n\n",
"step-ids": [
6,
8,
12,
13,
17
]
}
|
[
6,
8,
12,
13,
17
] |
import sys
import os
sys.path.append(os.pardir)
from ch03.softmax import softmax
from ch04.cross_entropy_error_batch import cross_entropy_error
import numpy as np
class SoftmaxWithLossLayer:
"""
x -> [Softmax] -> y -> [CrossEntropyError with t] -> out
    In the textbook, this class has a `loss` field.
"""
def __init__(self):
self.y = None # output from Softmax
self.t = None # teacher data
def forward(self, x, t):
"""
x: input to softmax
t: teacher data
"""
self.t = t
self.y = softmax(x)
loss = cross_entropy_error(self.y, self.t)
return loss
def backward(self, dout=1):
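        # For softmax followed by cross-entropy (averaged over the batch),
        # the gradient of the loss w.r.t. the softmax input simplifies to
        # (y - t) / batch_size.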
batch_size = self.t.shape[0]
doutdx = (self.y - self.t) / batch_size
return doutdx
if __name__ == '__main__':
softmax_with_loss_layer = SoftmaxWithLossLayer()
# forward(non-batch)
    x = np.array([5, 1, 0])  # x roughly matches t (largest score at index 0)
t = np.array([1, 0, 0])
loss = softmax_with_loss_layer.forward(x, t)
print('loss = {0}'.format(loss))
# backward
dout = 1
doutdx = softmax_with_loss_layer.backward(dout)
print('doutdx = {0}'.format(doutdx))
# forward(batch)
    xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]])  # rows 1 and 2 disagree strongly with t
ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])
loss = softmax_with_loss_layer.forward(xs, ts)
print('loss = {0}'.format(loss))
# backward
dout = 1
doutdx = softmax_with_loss_layer.backward(dout)
print('doutdx = {0}'.format(doutdx))
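
    # A minimal finite-difference sanity check (a sketch, not in the
    # original): the numerical slope of the loss at xs[0, 0] should match
    # the analytic gradient doutdx[0, 0] returned by backward().
    eps = 1e-4
    x_perturbed = xs.astype(float)
    x_perturbed[0, 0] += eps
    loss_plus = softmax_with_loss_layer.forward(x_perturbed, ts)
    x_perturbed[0, 0] -= 2 * eps
    loss_minus = softmax_with_loss_layer.forward(x_perturbed, ts)
    numerical = (loss_plus - loss_minus) / (2 * eps)
    print('numerical = {0:.6f}, analytic = {1:.6f}'.format(numerical, doutdx[0, 0]))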
|
normal
|
{
"blob_id": "8ae64c65d6d5dc9f2a99aeceff31657deff06c15",
"index": 5236,
"step-1": "<mask token>\n\n\nclass SoftmaxWithLossLayer:\n <mask token>\n\n def __init__(self):\n self.y = None\n self.t = None\n\n def forward(self, x, t):\n \"\"\"\n x: input to softmax\n t: teacher data\n \"\"\"\n self.t = t\n self.y = softmax(x)\n loss = cross_entropy_error(self.y, self.t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n doutdx = (self.y - self.t) / batch_size\n return doutdx\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SoftmaxWithLossLayer:\n \"\"\"\n x -> [Softmax] -> y -> [CrossEntropyError with t] -> out\n\n In the textbook, this class has `loss` field.\n \"\"\"\n\n def __init__(self):\n self.y = None\n self.t = None\n\n def forward(self, x, t):\n \"\"\"\n x: input to softmax\n t: teacher data\n \"\"\"\n self.t = t\n self.y = softmax(x)\n loss = cross_entropy_error(self.y, self.t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n doutdx = (self.y - self.t) / batch_size\n return doutdx\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append(os.pardir)\n<mask token>\n\n\nclass SoftmaxWithLossLayer:\n \"\"\"\n x -> [Softmax] -> y -> [CrossEntropyError with t] -> out\n\n In the textbook, this class has `loss` field.\n \"\"\"\n\n def __init__(self):\n self.y = None\n self.t = None\n\n def forward(self, x, t):\n \"\"\"\n x: input to softmax\n t: teacher data\n \"\"\"\n self.t = t\n self.y = softmax(x)\n loss = cross_entropy_error(self.y, self.t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n doutdx = (self.y - self.t) / batch_size\n return doutdx\n\n\nif __name__ == '__main__':\n softmax_with_loss_layer = SoftmaxWithLossLayer()\n x = np.array([5, 1, 0])\n t = np.array([1, 0, 0])\n loss = softmax_with_loss_layer.forward(x, t)\n print('loss = {0}'.format(loss))\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]])\n ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])\n loss = softmax_with_loss_layer.forward(xs, ts)\n print('loss = {0}'.format(loss))\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n",
"step-4": "import sys\nimport os\nsys.path.append(os.pardir)\nfrom ch03.softmax import softmax\nfrom ch04.cross_entropy_error_batch import cross_entropy_error\nimport numpy as np\n\n\nclass SoftmaxWithLossLayer:\n \"\"\"\n x -> [Softmax] -> y -> [CrossEntropyError with t] -> out\n\n In the textbook, this class has `loss` field.\n \"\"\"\n\n def __init__(self):\n self.y = None\n self.t = None\n\n def forward(self, x, t):\n \"\"\"\n x: input to softmax\n t: teacher data\n \"\"\"\n self.t = t\n self.y = softmax(x)\n loss = cross_entropy_error(self.y, self.t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n doutdx = (self.y - self.t) / batch_size\n return doutdx\n\n\nif __name__ == '__main__':\n softmax_with_loss_layer = SoftmaxWithLossLayer()\n x = np.array([5, 1, 0])\n t = np.array([1, 0, 0])\n loss = softmax_with_loss_layer.forward(x, t)\n print('loss = {0}'.format(loss))\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]])\n ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])\n loss = softmax_with_loss_layer.forward(xs, ts)\n print('loss = {0}'.format(loss))\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n",
"step-5": "import sys\nimport os\nsys.path.append(os.pardir)\nfrom ch03.softmax import softmax\nfrom ch04.cross_entropy_error_batch import cross_entropy_error\nimport numpy as np\n\n\nclass SoftmaxWithLossLayer:\n \"\"\"\n x -> [Softmax] -> y -> [CrossEntropyError with t] -> out\n\n In the textbook, this class has `loss` field.\n \"\"\"\n\n def __init__(self):\n self.y = None # output from Softmax\n self.t = None # teacher data\n\n def forward(self, x, t):\n \"\"\"\n x: input to softmax\n t: teacher data\n \"\"\"\n self.t = t\n self.y = softmax(x)\n loss = cross_entropy_error(self.y, self.t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n doutdx = (self.y - self.t) / batch_size\n return doutdx\n\n\nif __name__ == '__main__':\n softmax_with_loss_layer = SoftmaxWithLossLayer()\n\n # forward(non-batch)\n x = np.array([5, 1, 0]) # x is like t\n t = np.array([1, 0, 0])\n loss = softmax_with_loss_layer.forward(x, t)\n print('loss = {0}'.format(loss))\n\n # backward\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n\n # forward(batch)\n xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]]) # x[1] and x[2] have large difference with t\n ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])\n loss = softmax_with_loss_layer.forward(xs, ts)\n print('loss = {0}'.format(loss))\n\n # backward\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Counts number of dumbbell curls in the video
import cv2
import mediapipe as mp
import base
import math
import numpy as np
class PoseEstimator(base.PoseDetector):
def __init__(self, mode=False, upperBody = False, smooth=True, detectConf=.5, trackConf=.5,
outFile="output.mp4", outWidth=720, outHeight=1280):
super().__init__(mode, upperBody, smooth, detectConf, trackConf, outFile, outWidth, outHeight)
self.count = 0
self.dir = 0
def findAngle(self, img, p1, p2, p3, draw=True):
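        # Angle at joint p2 between segments p2->p1 and p2->p3: difference
        # of the two atan2 bearings, normalized below to [0, 360).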
x1,y1 = self.lms[p1][1:]
x2,y2 = self.lms[p2][1:]
x3,y3 = self.lms[p3][1:]
angle = math.degrees(math.atan2(y3-y2,x3-x2) - math.atan2(y1-y2,x1-x2))
if angle<0:
angle += 360
if draw:
cv2.line(img, (x1,y1), (x2,y2), (255,255,255) ,2)
cv2.line(img, (x3,y3), (x2,y2), (255,255,255) ,2)
cv2.circle(img, (x1,y1), 8, (0,0,255), cv2.FILLED)
cv2.circle(img, (x1,y1), 12, (0,0,255), 2)
cv2.circle(img, (x2,y2), 8, (0,0,255), cv2.FILLED)
cv2.circle(img, (x2,y2), 12, (0,0,255), 2)
cv2.circle(img, (x3,y3), 8, (0,0,255), cv2.FILLED)
cv2.circle(img, (x3,y3), 12, (0,0,255), 2)
cv2.putText(img, str(int(angle)), (x2-40,y2+50), cv2.FONT_HERSHEY_PLAIN, 2, (255,0,255), 2)
return angle
def countReps(self, img, p1, p2, p3):
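        # Map the elbow angle (empirically ~210-320 degrees in this video) to
        # 0-100% rep completion; crossing each end of the range adds half a
        # rep, so one full curl cycle counts exactly once.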
angle = self.findAngle(img, p1, p2, p3)
perc = np.interp(angle, (210,320), (0,100))
color = (0,255,0)
if perc > 95:
color = (0,0,255)
if self.dir == 0:
self.count += .5
self.dir = 1
if perc == 0:
color = (255,0,0)
if self.dir == 1:
self.count += .5
self.dir = 0
cv2.putText(img, f'{int(self.count)}', (30,120), cv2.FONT_HERSHEY_PLAIN, 9, (255,0,0), 4)
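        # Progress bar: map 0-100% onto pixel rows 800 -> 200 so it fills upward.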
bar = np.interp(perc, (0,100), (800,200))
cv2.rectangle(img, (50,200), (100,800), color, 3)
cv2.rectangle(img, (50,int(bar)), (100,800), color, cv2.FILLED)
cv2.putText(img, f'{int(perc)}%', (30,870), cv2.FONT_HERSHEY_PLAIN, 4, (255,0,0), 4)
def main():
cap = cv2.VideoCapture("media/1.mp4")
estimator = PoseEstimator()
while True:
        ret, img = cap.read()
        if not ret:  # stop cleanly when the video ends
            cap.release()
            cv2.destroyAllWindows()
            break
img = cv2.resize(img, (720, 1280))
img = estimator.findPose(img)
lms = estimator.findPosition(img, draw=False)
if len(lms)>28:
estimator.countReps(img,11,13,15)
# estimator.writeFrame(img)
cv2.imshow("Correct Pose Estimation", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
if __name__ == "__main__":
main()
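
# Possible refinement (a sketch, not part of the original): landmark jitter
# can flip `perc` across a threshold twice in a row and double-count a rep.
# Exponentially smoothing the percentage before thresholding, e.g.
#   self.perc_smooth = 0.8 * self.perc_smooth + 0.2 * perc
# (the factor 0.8 is an arbitrary assumption), would damp that out.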
|
normal
|
{
"blob_id": "4a886437727ed6b48206e12b686a59a1d2a1c489",
"index": 4948,
"step-1": "<mask token>\n\n\nclass PoseEstimator(base.PoseDetector):\n <mask token>\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PoseEstimator(base.PoseDetector):\n\n def __init__(self, mode=False, upperBody=False, smooth=True, detectConf\n =0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280\n ):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf,\n outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture('media/1.mp4')\n estimator = PoseEstimator()\n while True:\n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n img = estimator.findPose(img)\n lms = estimator.findPosition(img, draw=False)\n if len(lms) > 28:\n estimator.countReps(img, 11, 13, 15)\n cv2.imshow('Correct Pose Estimation', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PoseEstimator(base.PoseDetector):\n\n def __init__(self, mode=False, upperBody=False, smooth=True, detectConf\n =0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280\n ):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf,\n outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture('media/1.mp4')\n estimator = PoseEstimator()\n while True:\n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n img = estimator.findPose(img)\n lms = estimator.findPosition(img, draw=False)\n if len(lms) > 28:\n estimator.countReps(img, 11, 13, 15)\n cv2.imshow('Correct Pose Estimation', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import cv2\nimport mediapipe as mp\nimport base\nimport math\nimport numpy as np\n\n\nclass PoseEstimator(base.PoseDetector):\n\n def __init__(self, mode=False, upperBody=False, smooth=True, detectConf\n =0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280\n ):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf,\n outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture('media/1.mp4')\n estimator = PoseEstimator()\n while True:\n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n img = estimator.findPose(img)\n lms = estimator.findPosition(img, draw=False)\n if len(lms) > 28:\n estimator.countReps(img, 11, 13, 15)\n cv2.imshow('Correct Pose Estimation', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# Counts number of dumbbell curls in the video \n\nimport cv2 \nimport mediapipe as mp \nimport base\nimport math\nimport numpy as np\n\nclass PoseEstimator(base.PoseDetector): \n def __init__(self, mode=False, upperBody = False, smooth=True, detectConf=.5, trackConf=.5, \n outFile=\"output.mp4\", outWidth=720, outHeight=1280):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf, outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True): \n x1,y1 = self.lms[p1][1:]\n x2,y2 = self.lms[p2][1:]\n x3,y3 = self.lms[p3][1:]\n\n angle = math.degrees(math.atan2(y3-y2,x3-x2) - math.atan2(y1-y2,x1-x2))\n if angle<0: \n angle += 360\n\n if draw: \n cv2.line(img, (x1,y1), (x2,y2), (255,255,255) ,2)\n cv2.line(img, (x3,y3), (x2,y2), (255,255,255) ,2)\n cv2.circle(img, (x1,y1), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (x1,y1), 12, (0,0,255), 2)\n cv2.circle(img, (x2,y2), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (x2,y2), 12, (0,0,255), 2)\n cv2.circle(img, (x3,y3), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (x3,y3), 12, (0,0,255), 2)\n cv2.putText(img, str(int(angle)), (x2-40,y2+50), cv2.FONT_HERSHEY_PLAIN, 2, (255,0,255), 2)\n\n return angle \n \n def countReps(self, img, p1, p2, p3): \n angle = self.findAngle(img, p1, p2, p3) \n perc = np.interp(angle, (210,320), (0,100))\n \n color = (0,255,0)\n if perc > 95: \n color = (0,0,255)\n if self.dir == 0: \n self.count += .5 \n self.dir = 1\n if perc == 0: \n color = (255,0,0)\n if self.dir == 1: \n self.count += .5\n self.dir = 0 \n \n cv2.putText(img, f'{int(self.count)}', (30,120), cv2.FONT_HERSHEY_PLAIN, 9, (255,0,0), 4)\n\n bar = np.interp(perc, (0,100), (800,200))\n cv2.rectangle(img, (50,200), (100,800), color, 3)\n cv2.rectangle(img, (50,int(bar)), (100,800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30,870), cv2.FONT_HERSHEY_PLAIN, 4, (255,0,0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture(\"media/1.mp4\") \n estimator = PoseEstimator()\n\n while True: \n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n\n img = estimator.findPose(img) \n lms = estimator.findPosition(img, draw=False) \n if len(lms)>28: \n estimator.countReps(img,11,13,15)\n\n # estimator.writeFrame(img)\n\n cv2.imshow(\"Correct Pose Estimation\", img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\nif __name__ == \"__main__\": \n main() ",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn import tree
import pickle as pk
X = pk.load(file=open('../data/temp/train.pkl', 'rb'))
y = pk.load(file=open('../data/temp/label.pkl', 'rb'))
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
def train_model(model_name):
if model_name == "LinearRegression":
model = LinearRegression()
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print(score)
if model_name == "Lasso":
model = Lasso(alpha=1)
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print(score)
if model_name == "Ridge":
model = Ridge(alpha=1)
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print(score)
if model_name == "tree":
model = tree.DecisionTreeRegressor()
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print(score)
if __name__ == '__main__':
model_chosen = "Lasso"
train_model(model_chosen)
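
# A more compact registry-based variant (a sketch, not in the original; the
# MODELS dict and the name train_model_v2 are illustrative assumptions):
MODELS = {
    "LinearRegression": LinearRegression,
    "Lasso": lambda: Lasso(alpha=1),
    "Ridge": lambda: Ridge(alpha=1),
    "tree": tree.DecisionTreeRegressor,
}


def train_model_v2(model_name):
    model = MODELS[model_name]()
    model.fit(X_train, y_train)
    print(model.score(X_test, y_test))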
|
normal
|
{
"blob_id": "539726df0e631c7a8edabf50fd739ee0497e3e97",
"index": 5557,
"step-1": "<mask token>\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = 'Lasso'\n train_model(model_chosen)\n",
"step-3": "<mask token>\nX = pk.load(file=open('../data/temp/train.pkl', 'rb'))\ny = pk.load(file=open('../data/temp/label.pkl', 'rb'))\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = 'Lasso'\n train_model(model_chosen)\n",
"step-4": "from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge\nfrom sklearn import tree\nimport pickle as pk\nX = pk.load(file=open('../data/temp/train.pkl', 'rb'))\ny = pk.load(file=open('../data/temp/label.pkl', 'rb'))\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = 'Lasso'\n train_model(model_chosen)\n",
"step-5": "from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge\nfrom sklearn import tree\nimport pickle as pk\n\nX = pk.load(file=open('../data/temp/train.pkl', 'rb'))\ny = pk.load(file=open('../data/temp/label.pkl', 'rb'))\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)\n\n\ndef train_model(model_name):\n if model_name == \"LinearRegression\":\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n if model_name == \"Lasso\":\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n if model_name == \"Ridge\":\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n if model_name == \"tree\":\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = \"Lasso\"\n train_model(model_chosen)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
s = input("enter a string")
u = 0
l = 0
for i in s:
if i.isupper():
u+=1
elif i.islower():
l+=1
print(u, l, end="")
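
# Equivalent comprehension form (a sketch, not in the original):
# u = sum(1 for ch in s if ch.isupper())
# l = sum(1 for ch in s if ch.islower())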
|
normal
|
{
"blob_id": "bbb23d606b081d2591699cb6b9336c8766eea5b2",
"index": 2436,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in s:\n if i.isupper():\n u += 1\n elif i.islower():\n l += 1\nprint(u, l, end='')\n",
"step-3": "s = input('enter a string')\nu = 0\nl = 0\nfor i in s:\n if i.isupper():\n u += 1\n elif i.islower():\n l += 1\nprint(u, l, end='')\n",
"step-4": "s=input(\"enter a string\")\nu=0\nl=0\nfor i in s:\n if i.isupper():\n u+=1\n elif i.islower():\n l+=1\n \nprint(u,l,end=\"\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
sys.stdin = open("/home/shiva/Learning/1.txt", "r")
sys.stdout = open("/home/shiva/Learning/2.txt", "w")
def compute(plus,minus,total,inp):
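    # Apparent task: fill each '?' in `inp` with a positive integer so the
    # expression equals `total`.  `plus` counts added '?' terms (the first
    # term plus each '+'), `minus` counts subtracted ones.  Strategy: set
    # every subtracted term to 1 and spread total + minus over the added terms.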
if plus == 1 and minus == 0:
print(total); return
elif (plus == 1 and minus == 1):
print("Impossible"); return
temp = total
total += minus
res = []
if int(total/plus) > temp:
print("Impossible"); return
elif int(total%plus) == 0:
res = [int(total/plus) for i in range(0,plus)]
else:
res = [int(total/(plus-1)) for i in range(0,plus-1)]
res.append(total%(plus-1))
j = 0
prev = 0
for i in inp.split():
if j == 0:
print(res[j],end=' ')
j+=1
elif i == '+' or i=='-':
print(i,end=' ')
prev = i
elif i == '?':
if prev == '+':
print(res[j],end=' ')
j+=1
else:
print('1',end=' ')
else:
print(i,end=' ')
inp = input()
plus =1
minus = 0
total = 0
for i in inp.split():
if i=='?' or i=='=':
continue
elif i == '+':
plus+=1
elif i == '-':
minus +=1
else:
total = int(i)
compute(plus,minus,total,inp)
|
normal
|
{
"blob_id": "d29c8ec737b8e962d381c8fdd0999e7e01847836",
"index": 5274,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef compute(plus, minus, total, inp):\n if plus == 1 and minus == 0:\n print(total)\n return\n elif plus == 1 and minus == 1:\n print('Impossible')\n return\n elif abs(plus - minus) > total:\n plus\n temp = total\n total += minus\n res = []\n if int(total / plus) > temp:\n print('Impossible')\n return\n elif int(total % plus) == 0:\n res = [int(total / plus) for i in range(0, plus)]\n else:\n res = [int(total / (plus - 1)) for i in range(0, plus - 1)]\n res.append(total % (plus - 1))\n j = 0\n prev = 0\n for i in inp.split():\n if j == 0:\n print(res[j], end=' ')\n j += 1\n elif i == '+' or i == '-':\n print(i, end=' ')\n prev = i\n elif i == '?':\n if prev == '+':\n print(res[j], end=' ')\n j += 1\n else:\n print('1', end=' ')\n else:\n print(i, end=' ')\n\n\n<mask token>\nfor i in inp.split():\n if i == '?' or i == '=':\n continue\n elif i == '+':\n plus += 1\n elif i == '-':\n minus += 1\n else:\n total = int(i)\ncompute(plus, minus, total, inp)\n",
"step-3": "<mask token>\nsys.stdin = open('/home/shiva/Learning/1.txt', 'r')\nsys.stdout = open('/home/shiva/Learning/2.txt', 'w')\n\n\ndef compute(plus, minus, total, inp):\n if plus == 1 and minus == 0:\n print(total)\n return\n elif plus == 1 and minus == 1:\n print('Impossible')\n return\n elif abs(plus - minus) > total:\n plus\n temp = total\n total += minus\n res = []\n if int(total / plus) > temp:\n print('Impossible')\n return\n elif int(total % plus) == 0:\n res = [int(total / plus) for i in range(0, plus)]\n else:\n res = [int(total / (plus - 1)) for i in range(0, plus - 1)]\n res.append(total % (plus - 1))\n j = 0\n prev = 0\n for i in inp.split():\n if j == 0:\n print(res[j], end=' ')\n j += 1\n elif i == '+' or i == '-':\n print(i, end=' ')\n prev = i\n elif i == '?':\n if prev == '+':\n print(res[j], end=' ')\n j += 1\n else:\n print('1', end=' ')\n else:\n print(i, end=' ')\n\n\ninp = input()\nplus = 1\nminus = 0\ntotal = 0\nfor i in inp.split():\n if i == '?' or i == '=':\n continue\n elif i == '+':\n plus += 1\n elif i == '-':\n minus += 1\n else:\n total = int(i)\ncompute(plus, minus, total, inp)\n",
"step-4": "import sys\nimport psyco\nsys.stdin = open('/home/shiva/Learning/1.txt', 'r')\nsys.stdout = open('/home/shiva/Learning/2.txt', 'w')\n\n\ndef compute(plus, minus, total, inp):\n if plus == 1 and minus == 0:\n print(total)\n return\n elif plus == 1 and minus == 1:\n print('Impossible')\n return\n elif abs(plus - minus) > total:\n plus\n temp = total\n total += minus\n res = []\n if int(total / plus) > temp:\n print('Impossible')\n return\n elif int(total % plus) == 0:\n res = [int(total / plus) for i in range(0, plus)]\n else:\n res = [int(total / (plus - 1)) for i in range(0, plus - 1)]\n res.append(total % (plus - 1))\n j = 0\n prev = 0\n for i in inp.split():\n if j == 0:\n print(res[j], end=' ')\n j += 1\n elif i == '+' or i == '-':\n print(i, end=' ')\n prev = i\n elif i == '?':\n if prev == '+':\n print(res[j], end=' ')\n j += 1\n else:\n print('1', end=' ')\n else:\n print(i, end=' ')\n\n\ninp = input()\nplus = 1\nminus = 0\ntotal = 0\nfor i in inp.split():\n if i == '?' or i == '=':\n continue\n elif i == '+':\n plus += 1\n elif i == '-':\n minus += 1\n else:\n total = int(i)\ncompute(plus, minus, total, inp)\n",
"step-5": "import sys\nimport psyco\nsys.stdin = open(\"/home/shiva/Learning/1.txt\", \"r\")\nsys.stdout = open(\"/home/shiva/Learning/2.txt\", \"w\")\n\ndef compute(plus,minus,total,inp):\n\tif plus == 1 and minus == 0:\n\t\tprint(total); return\n\telif (plus == 1 and minus == 1): \n\t\tprint(\"Impossible\"); return\n\telif (abs(plus-minus) > total):\n\t\tplus\n\n\ttemp = total\n\ttotal += minus\n\tres = []\n\tif int(total/plus) > temp:\n\t\tprint(\"Impossible\"); return\n\telif int(total%plus) == 0:\n\t\tres = [int(total/plus) for i in range(0,plus)]\n\telse:\n\t\tres = [int(total/(plus-1)) for i in range(0,plus-1)]\n\t\tres.append(total%(plus-1))\n\t\n\tj = 0\n\tprev = 0\n\tfor i in inp.split():\n\t\tif j == 0:\n\t\t\tprint(res[j],end=' ')\n\t\t\tj+=1\n\t\telif i == '+' or i=='-':\n\t\t\tprint(i,end=' ')\n\t\t\tprev = i\n\t\telif i == '?':\n\t\t\tif prev == '+':\n\n\t\t\t\tprint(res[j],end=' ')\n\t\t\t\tj+=1\n\t\t\telse:\n\t\t\t\tprint('1',end=' ')\n\t\telse:\n\t\t\tprint(i,end=' ')\n\ninp = input()\nplus =1\nminus = 0\ntotal = 0\nfor i in inp.split():\n\tif i=='?' or i=='=':\n\t\tcontinue\n\telif i == '+':\n\t\tplus+=1\n\telif i == '-':\n\t\tminus +=1\n\telse:\n\t\ttotal = int(i)\n\ncompute(plus,minus,total,inp)\n\n\n\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import sqlite3
from datetime import datetime

# third-party imports inferred from usage below
import numpy as np
import pandas as pd
import requests as r
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
class announcement:
    def __init__(self, eps_df, revenue_df):
conn = sqlite3.connect("earnings.db", timeout=120)
cur = conn.cursor()
symbol_href = self.driver.find_element_by_class_name("lfkTWp")
symbol = symbol_href.text
eps_history_df = pd.read_sql(
'select * from estimize_eps where Symbol == "%s"' % symbol, conn
)
revenue_history_df = pd.read_sql("select * from estimize_revenue", conn)
price_history_df = pd.read_sql("select * from price_history", conn)
def get_combined_df(eps_df, revenue_df):
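        # Split "Date Reported" into a date and a fiscal-quarter label, index
        # both frames on (Date Reported, Time Reported, Symbol), prefix the
        # column names, and join EPS and revenue into a single frame.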
del eps_df["Historical Beat Rate"]
del revenue_df["Historical Beat Rate"]
date_reported_df = eps_df["Date Reported"].str.split(" ", n=1, expand=True)
date_reported_df = date_reported_df.rename(
columns={0: "Date Reported", 1: "Time Reported"}
)
date_reported_df["Date Reported"] = pd.to_datetime(
date_reported_df["Date Reported"]
)
eps_df["Date Reported"] = date_reported_df["Date Reported"]
eps_df["Time Reported"] = date_reported_df["Time Reported"]
date_reported_df = revenue_df["Date Reported"].str.split(" ", n=1, expand=True)
date_reported_df = date_reported_df.rename(
columns={0: "Date Reported", 1: "Time Reported"}
)
date_reported_df["Date Reported"] = pd.to_datetime(
date_reported_df["Date Reported"]
)
revenue_df["Date Reported"] = date_reported_df["Date Reported"]
revenue_df["Time Reported"] = date_reported_df["Time Reported"]
eps_df = eps_df.sort_values(by="Date Reported")
revenue_df = revenue_df.sort_values(by="Date Reported")
eps_df = eps_df.set_index(
["Date Reported", "Time Reported", "Symbol"], append=True, drop=True
)
revenue_df = revenue_df.set_index(
["Date Reported", "Time Reported", "Symbol"], append=True, drop=True
)
eps_df.columns = "EPS " + eps_df.columns
revenue_df.columns = "Revenue " + revenue_df.columns
df = eps_df.join(revenue_df)
return df
def get_historical_beat():
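        # For each announcement, look back over up to the 8 most recent prior
        # reports of the same symbol (at least 4 required) and record how
        # often, and by what percent, actual EPS beat the estimate.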
df["Historical EPS Beat Ratio"] = None
df["Historical EPS Beat Percent"] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
this_df = df[df.index.get_level_values("Symbol") == symbol]
beat_rate = this_df[
this_df.index.get_level_values("Date Reported") <= date_reported
].tail(8)
if len(beat_rate) >= 4:
beat_rate_ratio = len(beat_rate[beat_rate["EPS Surprise"] > 0]) / float(
len(beat_rate)
)
beat_rate_percent = beat_rate["EPS Surprise"] / beat_rate["EPS Actual"]
beat_rate_percent = beat_rate_percent.replace([np.inf, -np.inf], np.nan)
beat_rate_percent = beat_rate_percent.mean()
# TODO: Do the same for revenue
df.loc[index_num, ["Historical EPS Beat Ratio"]] = beat_rate_ratio
df.loc[index_num, ["Historical EPS Beat Percent"]] = beat_rate_percent
def get_average_change():
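        # Average the 5- and 10-day (raw and abnormal) post-announcement
        # price changes over up to the 8 most recent prior reports.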
df["Average Change 5 Days"] = None
df["Average Abnormal Change 5 Days"] = None
df["Average Change 10 Days"] = None
df["Average Abnormal Change 10 Days"] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
returns_df = df[
df.index.get_level_values("Date Reported") < date_reported
].tail(8)
if len(returns_df) >= 4:
df.loc[index_num, ["Average Change 5 Days"]] = returns_df[
"5 Day Change"
].mean()
df.loc[index_num, ["Average Change 10 Days"]] = returns_df[
"10 Day Change"
].mean()
df.loc[index_num, ["Average Abnormal Change 5 Days"]] = returns_df[
"5 Day Change Abnormal"
].mean()
df.loc[index_num, ["Average Abnormal Change 10 Days"]] = returns_df[
"10 Day Change Abnormal"
].mean()
def get_YoY_growth():
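        # Year-over-year growth: compare actual EPS for a quarter (e.g.
        # "Q2 '19") with the same quarter one year earlier.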
df["YoY Growth"] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
time_reported = time_reported.replace("'", "")
quarter_numer, year = time_reported.split(" ")
this_df = df["EPS Actual"]
try:
this_quarter = this_df[
this_df.index.get_level_values("Time Reported")
== quarter_numer + " '" + year
].values[0]
last_quarter = this_df[
this_df.index.get_level_values("Time Reported")
== quarter_numer + " '" + str(int(year) - 1)
].values[0]
df.loc[index_num, ["YoY Growth"]] = (
this_quarter - last_quarter
) / last_quarter
except Exception as e:
pass
def get_market_cap():
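        # Scrape market cap from Finviz, expand the K/M/B suffix into a
        # number, and bucket it into Nano/Micro/Small/Medium/Large size
        # classes at the usual 50M/300M/2B/10B cutoffs.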
finviz_page = r.get("https://finviz.com/quote.ashx?t=%s" % symbol)
soup = BeautifulSoup(finviz_page.text, features="lxml")
table_row = soup.findAll("tr", attrs={"class": "table-dark-row"})[1]
market_cap = table_row.text.replace("Market Cap", "").split("\n")[1]
if "K" in market_cap:
market_cap = float(market_cap[:-1]) * 1000
elif "M" in market_cap:
market_cap = float(market_cap[:-1]) * 1000000
elif "B" in market_cap:
market_cap = float(market_cap[:-1]) * 1000000000
market_cap = int(market_cap)
if market_cap > 10000000000:
market_cap_text = "Large"
elif market_cap > 2000000000:
market_cap_text = "Medium"
elif market_cap > 300000000:
market_cap_text = "Small"
elif market_cap > 50000000:
market_cap_text = "Micro"
else:
market_cap_text = "Nano"
df["Market Cap Text"] = market_cap_text
def get_estimize_data(self):
# request the estimize website for data
url = "https://www.estimize.com/calendar?tab=equity&date=" + datetime.now().strftime(
"%Y-%m-%d"
)
self.driver.get(url)
# check if there are no companies reporting earnings
myElem = WebDriverWait(self.driver, self.delay).until(
EC.presence_of_element_located((By.CLASS_NAME, "dAViVi"))
)
companies_reporting_div = self.driver.find_element_by_class_name("dAViVi")
if "0 Events" == companies_reporting_div.text.split("\n")[1]:
return
        # method to extract the ticker symbols from the webpage
tickers = self.get_tickers()
# method to get the historical data from yahoo
# self.get_yahoo_historical(tickers)
# TODO: update price history table with missing yahoo price data entries
# read the table and make a dataframe out of it
eps_df = pd.read_html(self.driver.page_source)[0]
eps_df["Symbol"] = tickers
eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]
eps_df.columns = [
"Date Reported",
"Num of Estimates",
"Delta",
"Surprise",
"Historical Beat Rate",
"Wall St",
"Estimize",
"Actual",
"Symbol",
]
# same as above, but for revenues table instead of EPS table
url = (
"https://www.estimize.com/calendar?tab=equity&metric=revenue&date="
+ self.read_date.strftime("%Y-%m-%d")
)
self.driver.get(url)
myElem = WebDriverWait(self.driver, self.delay).until(
EC.presence_of_element_located((By.TAG_NAME, "table"))
)
revenue_df = pd.read_html(self.driver.page_source)[0]
tickers = self.get_tickers()
revenue_df["Symbol"] = tickers
revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]
revenue_df.columns = [
"Date Reported",
"Num of Estimates",
"Delta",
"Surprise",
"Historical Beat Rate",
"Wall St",
"Estimize",
"Actual",
"Symbol",
]
return eps_df, revenue_df
def get_tickers(self):
        # extract ticker symbols from the html source
soup = BeautifulSoup(self.driver.page_source, features="lxml")
ticker_links = soup.findAll("a", attrs={"class": "lfkTWp"})
# create list of symbols that were extracted
tickers = []
for ticker in ticker_links:
tickers.append(ticker.contents[0])
return tickers
|
normal
|
{
"blob_id": "b7738c27e11e9566d90157717633312031cdffd6",
"index": 818,
"step-1": "<mask token>\n\n\nclass announcement:\n\n def __init__(eps_df, revenue_df):\n conn = sqlite3.connect('earnings.db', timeout=120)\n cur = conn.cursor()\n symbol_href = self.driver.find_element_by_class_name('lfkTWp')\n symbol = symbol_href.text\n eps_history_df = pd.read_sql(\n 'select * from estimize_eps where Symbol == \"%s\"' % symbol, conn)\n revenue_history_df = pd.read_sql('select * from estimize_revenue', conn\n )\n price_history_df = pd.read_sql('select * from price_history', conn)\n\n def get_combined_df(eps_df, revenue_df):\n del eps_df['Historical Beat Rate']\n del revenue_df['Historical Beat Rate']\n date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n eps_df['Date Reported'] = date_reported_df['Date Reported']\n eps_df['Time Reported'] = date_reported_df['Time Reported']\n date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n revenue_df['Date Reported'] = date_reported_df['Date Reported']\n revenue_df['Time Reported'] = date_reported_df['Time Reported']\n eps_df = eps_df.sort_values(by='Date Reported')\n revenue_df = revenue_df.sort_values(by='Date Reported')\n eps_df = eps_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n eps_df.columns = 'EPS ' + eps_df.columns\n revenue_df.columns = 'Revenue ' + revenue_df.columns\n df = eps_df.join(revenue_df)\n return df\n\n def get_historical_beat():\n df['Historical EPS Beat Ratio'] = None\n df['Historical EPS Beat Percent'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n this_df = df[df.index.get_level_values('Symbol') == symbol]\n beat_rate = this_df[this_df.index.get_level_values(\n 'Date Reported') <= date_reported].tail(8)\n if len(beat_rate) >= 4:\n beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]\n ) / float(len(beat_rate))\n beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[\n 'EPS Actual']\n beat_rate_percent = beat_rate_percent.replace([np.inf, -np.\n inf], np.nan)\n beat_rate_percent = beat_rate_percent.mean()\n df.loc[index_num, ['Historical EPS Beat Ratio']\n ] = beat_rate_ratio\n df.loc[index_num, ['Historical EPS Beat Percent']\n ] = beat_rate_percent\n\n def get_average_change():\n df['Average Change 5 Days'] = None\n df['Average Abnormal Change 5 Days'] = None\n df['Average Change 10 Days'] = None\n df['Average Abnormal Change 10 Days'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n returns_df = df[df.index.get_level_values('Date Reported') <\n date_reported].tail(8)\n if len(returns_df) >= 4:\n df.loc[index_num, ['Average Change 5 Days']] = returns_df[\n '5 Day Change'].mean()\n df.loc[index_num, ['Average Change 10 Days']] = returns_df[\n '10 Day Change'].mean()\n df.loc[index_num, ['Average Abnormal Change 5 Days']\n ] = returns_df['5 Day Change Abnormal'].mean()\n df.loc[index_num, ['Average Abnormal Change 10 Days']\n ] = returns_df['10 Day Change Abnormal'].mean()\n\n def get_YoY_growth():\n df['YoY 
Growth'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n time_reported = time_reported.replace(\"'\", '')\n quarter_numer, year = time_reported.split(' ')\n this_df = df['EPS Actual']\n try:\n this_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + year].values[0]\n last_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + str(int(year\n ) - 1)].values[0]\n df.loc[index_num, ['YoY Growth']] = (this_quarter -\n last_quarter) / last_quarter\n except Exception as e:\n pass\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass announcement:\n\n def __init__(eps_df, revenue_df):\n conn = sqlite3.connect('earnings.db', timeout=120)\n cur = conn.cursor()\n symbol_href = self.driver.find_element_by_class_name('lfkTWp')\n symbol = symbol_href.text\n eps_history_df = pd.read_sql(\n 'select * from estimize_eps where Symbol == \"%s\"' % symbol, conn)\n revenue_history_df = pd.read_sql('select * from estimize_revenue', conn\n )\n price_history_df = pd.read_sql('select * from price_history', conn)\n\n def get_combined_df(eps_df, revenue_df):\n del eps_df['Historical Beat Rate']\n del revenue_df['Historical Beat Rate']\n date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n eps_df['Date Reported'] = date_reported_df['Date Reported']\n eps_df['Time Reported'] = date_reported_df['Time Reported']\n date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n revenue_df['Date Reported'] = date_reported_df['Date Reported']\n revenue_df['Time Reported'] = date_reported_df['Time Reported']\n eps_df = eps_df.sort_values(by='Date Reported')\n revenue_df = revenue_df.sort_values(by='Date Reported')\n eps_df = eps_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n eps_df.columns = 'EPS ' + eps_df.columns\n revenue_df.columns = 'Revenue ' + revenue_df.columns\n df = eps_df.join(revenue_df)\n return df\n\n def get_historical_beat():\n df['Historical EPS Beat Ratio'] = None\n df['Historical EPS Beat Percent'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n this_df = df[df.index.get_level_values('Symbol') == symbol]\n beat_rate = this_df[this_df.index.get_level_values(\n 'Date Reported') <= date_reported].tail(8)\n if len(beat_rate) >= 4:\n beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]\n ) / float(len(beat_rate))\n beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[\n 'EPS Actual']\n beat_rate_percent = beat_rate_percent.replace([np.inf, -np.\n inf], np.nan)\n beat_rate_percent = beat_rate_percent.mean()\n df.loc[index_num, ['Historical EPS Beat Ratio']\n ] = beat_rate_ratio\n df.loc[index_num, ['Historical EPS Beat Percent']\n ] = beat_rate_percent\n\n def get_average_change():\n df['Average Change 5 Days'] = None\n df['Average Abnormal Change 5 Days'] = None\n df['Average Change 10 Days'] = None\n df['Average Abnormal Change 10 Days'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n returns_df = df[df.index.get_level_values('Date Reported') <\n date_reported].tail(8)\n if len(returns_df) >= 4:\n df.loc[index_num, ['Average Change 5 Days']] = returns_df[\n '5 Day Change'].mean()\n df.loc[index_num, ['Average Change 10 Days']] = returns_df[\n '10 Day Change'].mean()\n df.loc[index_num, ['Average Abnormal Change 5 Days']\n ] = returns_df['5 Day Change Abnormal'].mean()\n df.loc[index_num, ['Average Abnormal Change 10 Days']\n ] = returns_df['10 Day Change Abnormal'].mean()\n\n def get_YoY_growth():\n df['YoY 
Growth'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n time_reported = time_reported.replace(\"'\", '')\n quarter_numer, year = time_reported.split(' ')\n this_df = df['EPS Actual']\n try:\n this_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + year].values[0]\n last_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + str(int(year\n ) - 1)].values[0]\n df.loc[index_num, ['YoY Growth']] = (this_quarter -\n last_quarter) / last_quarter\n except Exception as e:\n pass\n\n def get_market_cap():\n finviz_page = r.get('https://finviz.com/quote.ashx?t=%s' % symbol)\n soup = BeautifulSoup(finviz_page.text, features='lxml')\n table_row = soup.findAll('tr', attrs={'class': 'table-dark-row'})[1]\n market_cap = table_row.text.replace('Market Cap', '').split('\\n')[1]\n if 'K' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000\n elif 'M' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000\n elif 'B' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000000\n market_cap = int(market_cap)\n if market_cap > 10000000000:\n market_cap_text = 'Large'\n elif market_cap > 2000000000:\n market_cap_text = 'Medium'\n elif market_cap > 300000000:\n market_cap_text = 'Small'\n elif market_cap > 50000000:\n market_cap_text = 'Micro'\n else:\n market_cap_text = 'Nano'\n df['Market Cap Text'] = market_cap_text\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass announcement:\n\n def __init__(eps_df, revenue_df):\n conn = sqlite3.connect('earnings.db', timeout=120)\n cur = conn.cursor()\n symbol_href = self.driver.find_element_by_class_name('lfkTWp')\n symbol = symbol_href.text\n eps_history_df = pd.read_sql(\n 'select * from estimize_eps where Symbol == \"%s\"' % symbol, conn)\n revenue_history_df = pd.read_sql('select * from estimize_revenue', conn\n )\n price_history_df = pd.read_sql('select * from price_history', conn)\n\n def get_combined_df(eps_df, revenue_df):\n del eps_df['Historical Beat Rate']\n del revenue_df['Historical Beat Rate']\n date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n eps_df['Date Reported'] = date_reported_df['Date Reported']\n eps_df['Time Reported'] = date_reported_df['Time Reported']\n date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n revenue_df['Date Reported'] = date_reported_df['Date Reported']\n revenue_df['Time Reported'] = date_reported_df['Time Reported']\n eps_df = eps_df.sort_values(by='Date Reported')\n revenue_df = revenue_df.sort_values(by='Date Reported')\n eps_df = eps_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n eps_df.columns = 'EPS ' + eps_df.columns\n revenue_df.columns = 'Revenue ' + revenue_df.columns\n df = eps_df.join(revenue_df)\n return df\n\n def get_historical_beat():\n df['Historical EPS Beat Ratio'] = None\n df['Historical EPS Beat Percent'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n this_df = df[df.index.get_level_values('Symbol') == symbol]\n beat_rate = this_df[this_df.index.get_level_values(\n 'Date Reported') <= date_reported].tail(8)\n if len(beat_rate) >= 4:\n beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]\n ) / float(len(beat_rate))\n beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[\n 'EPS Actual']\n beat_rate_percent = beat_rate_percent.replace([np.inf, -np.\n inf], np.nan)\n beat_rate_percent = beat_rate_percent.mean()\n df.loc[index_num, ['Historical EPS Beat Ratio']\n ] = beat_rate_ratio\n df.loc[index_num, ['Historical EPS Beat Percent']\n ] = beat_rate_percent\n\n def get_average_change():\n df['Average Change 5 Days'] = None\n df['Average Abnormal Change 5 Days'] = None\n df['Average Change 10 Days'] = None\n df['Average Abnormal Change 10 Days'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n returns_df = df[df.index.get_level_values('Date Reported') <\n date_reported].tail(8)\n if len(returns_df) >= 4:\n df.loc[index_num, ['Average Change 5 Days']] = returns_df[\n '5 Day Change'].mean()\n df.loc[index_num, ['Average Change 10 Days']] = returns_df[\n '10 Day Change'].mean()\n df.loc[index_num, ['Average Abnormal Change 5 Days']\n ] = returns_df['5 Day Change Abnormal'].mean()\n df.loc[index_num, ['Average Abnormal Change 10 Days']\n ] = returns_df['10 Day Change Abnormal'].mean()\n\n def get_YoY_growth():\n df['YoY 
Growth'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n time_reported = time_reported.replace(\"'\", '')\n quarter_numer, year = time_reported.split(' ')\n this_df = df['EPS Actual']\n try:\n this_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + year].values[0]\n last_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + str(int(year\n ) - 1)].values[0]\n df.loc[index_num, ['YoY Growth']] = (this_quarter -\n last_quarter) / last_quarter\n except Exception as e:\n pass\n\n def get_market_cap():\n finviz_page = r.get('https://finviz.com/quote.ashx?t=%s' % symbol)\n soup = BeautifulSoup(finviz_page.text, features='lxml')\n table_row = soup.findAll('tr', attrs={'class': 'table-dark-row'})[1]\n market_cap = table_row.text.replace('Market Cap', '').split('\\n')[1]\n if 'K' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000\n elif 'M' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000\n elif 'B' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000000\n market_cap = int(market_cap)\n if market_cap > 10000000000:\n market_cap_text = 'Large'\n elif market_cap > 2000000000:\n market_cap_text = 'Medium'\n elif market_cap > 300000000:\n market_cap_text = 'Small'\n elif market_cap > 50000000:\n market_cap_text = 'Micro'\n else:\n market_cap_text = 'Nano'\n df['Market Cap Text'] = market_cap_text\n\n\ndef get_estimize_data(self):\n url = 'https://www.estimize.com/calendar?tab=equity&date=' + datetime.now(\n ).strftime('%Y-%m-%d')\n self.driver.get(url)\n myElem = WebDriverWait(self.driver, self.delay).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'dAViVi')))\n companies_reporting_div = self.driver.find_element_by_class_name('dAViVi')\n if '0 Events' == companies_reporting_div.text.split('\\n')[1]:\n return\n tickers = self.get_tickers()\n eps_df = pd.read_html(self.driver.page_source)[0]\n eps_df['Symbol'] = tickers\n eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n eps_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',\n 'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',\n 'Symbol']\n url = (\n 'https://www.estimize.com/calendar?tab=equity&metric=revenue&date=' +\n self.read_date.strftime('%Y-%m-%d'))\n self.driver.get(url)\n myElem = WebDriverWait(self.driver, self.delay).until(EC.\n presence_of_element_located((By.TAG_NAME, 'table')))\n revenue_df = pd.read_html(self.driver.page_source)[0]\n tickers = self.get_tickers()\n revenue_df['Symbol'] = tickers\n revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n revenue_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',\n 'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',\n 'Symbol']\n return eps_df, revenue_df\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass announcement:\n\n def __init__(eps_df, revenue_df):\n conn = sqlite3.connect('earnings.db', timeout=120)\n cur = conn.cursor()\n symbol_href = self.driver.find_element_by_class_name('lfkTWp')\n symbol = symbol_href.text\n eps_history_df = pd.read_sql(\n 'select * from estimize_eps where Symbol == \"%s\"' % symbol, conn)\n revenue_history_df = pd.read_sql('select * from estimize_revenue', conn\n )\n price_history_df = pd.read_sql('select * from price_history', conn)\n\n def get_combined_df(eps_df, revenue_df):\n del eps_df['Historical Beat Rate']\n del revenue_df['Historical Beat Rate']\n date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n eps_df['Date Reported'] = date_reported_df['Date Reported']\n eps_df['Time Reported'] = date_reported_df['Time Reported']\n date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n revenue_df['Date Reported'] = date_reported_df['Date Reported']\n revenue_df['Time Reported'] = date_reported_df['Time Reported']\n eps_df = eps_df.sort_values(by='Date Reported')\n revenue_df = revenue_df.sort_values(by='Date Reported')\n eps_df = eps_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n eps_df.columns = 'EPS ' + eps_df.columns\n revenue_df.columns = 'Revenue ' + revenue_df.columns\n df = eps_df.join(revenue_df)\n return df\n\n def get_historical_beat():\n df['Historical EPS Beat Ratio'] = None\n df['Historical EPS Beat Percent'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n this_df = df[df.index.get_level_values('Symbol') == symbol]\n beat_rate = this_df[this_df.index.get_level_values(\n 'Date Reported') <= date_reported].tail(8)\n if len(beat_rate) >= 4:\n beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]\n ) / float(len(beat_rate))\n beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[\n 'EPS Actual']\n beat_rate_percent = beat_rate_percent.replace([np.inf, -np.\n inf], np.nan)\n beat_rate_percent = beat_rate_percent.mean()\n df.loc[index_num, ['Historical EPS Beat Ratio']\n ] = beat_rate_ratio\n df.loc[index_num, ['Historical EPS Beat Percent']\n ] = beat_rate_percent\n\n def get_average_change():\n df['Average Change 5 Days'] = None\n df['Average Abnormal Change 5 Days'] = None\n df['Average Change 10 Days'] = None\n df['Average Abnormal Change 10 Days'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n returns_df = df[df.index.get_level_values('Date Reported') <\n date_reported].tail(8)\n if len(returns_df) >= 4:\n df.loc[index_num, ['Average Change 5 Days']] = returns_df[\n '5 Day Change'].mean()\n df.loc[index_num, ['Average Change 10 Days']] = returns_df[\n '10 Day Change'].mean()\n df.loc[index_num, ['Average Abnormal Change 5 Days']\n ] = returns_df['5 Day Change Abnormal'].mean()\n df.loc[index_num, ['Average Abnormal Change 10 Days']\n ] = returns_df['10 Day Change Abnormal'].mean()\n\n def get_YoY_growth():\n df['YoY 
Growth'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n time_reported = time_reported.replace(\"'\", '')\n quarter_numer, year = time_reported.split(' ')\n this_df = df['EPS Actual']\n try:\n this_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + year].values[0]\n last_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + str(int(year\n ) - 1)].values[0]\n df.loc[index_num, ['YoY Growth']] = (this_quarter -\n last_quarter) / last_quarter\n except Exception as e:\n pass\n\n def get_market_cap():\n finviz_page = r.get('https://finviz.com/quote.ashx?t=%s' % symbol)\n soup = BeautifulSoup(finviz_page.text, features='lxml')\n table_row = soup.findAll('tr', attrs={'class': 'table-dark-row'})[1]\n market_cap = table_row.text.replace('Market Cap', '').split('\\n')[1]\n if 'K' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000\n elif 'M' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000\n elif 'B' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000000\n market_cap = int(market_cap)\n if market_cap > 10000000000:\n market_cap_text = 'Large'\n elif market_cap > 2000000000:\n market_cap_text = 'Medium'\n elif market_cap > 300000000:\n market_cap_text = 'Small'\n elif market_cap > 50000000:\n market_cap_text = 'Micro'\n else:\n market_cap_text = 'Nano'\n df['Market Cap Text'] = market_cap_text\n\n\ndef get_estimize_data(self):\n url = 'https://www.estimize.com/calendar?tab=equity&date=' + datetime.now(\n ).strftime('%Y-%m-%d')\n self.driver.get(url)\n myElem = WebDriverWait(self.driver, self.delay).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'dAViVi')))\n companies_reporting_div = self.driver.find_element_by_class_name('dAViVi')\n if '0 Events' == companies_reporting_div.text.split('\\n')[1]:\n return\n tickers = self.get_tickers()\n eps_df = pd.read_html(self.driver.page_source)[0]\n eps_df['Symbol'] = tickers\n eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n eps_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',\n 'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',\n 'Symbol']\n url = (\n 'https://www.estimize.com/calendar?tab=equity&metric=revenue&date=' +\n self.read_date.strftime('%Y-%m-%d'))\n self.driver.get(url)\n myElem = WebDriverWait(self.driver, self.delay).until(EC.\n presence_of_element_located((By.TAG_NAME, 'table')))\n revenue_df = pd.read_html(self.driver.page_source)[0]\n tickers = self.get_tickers()\n revenue_df['Symbol'] = tickers\n revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n revenue_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',\n 'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',\n 'Symbol']\n return eps_df, revenue_df\n\n\ndef get_tickers(self):\n soup = BeautifulSoup(self.driver.page_source, features='lxml')\n ticker_links = soup.findAll('a', attrs={'class': 'lfkTWp'})\n tickers = []\n for ticker in ticker_links:\n tickers.append(ticker.contents[0])\n return tickers\n",
"step-5": "import sqlite3\n\n\nclass announcement:\n def __init__(eps_df, revenue_df):\n conn = sqlite3.connect(\"earnings.db\", timeout=120)\n cur = conn.cursor()\n\n symbol_href = self.driver.find_element_by_class_name(\"lfkTWp\")\n symbol = symbol_href.text\n\n eps_history_df = pd.read_sql(\n 'select * from estimize_eps where Symbol == \"%s\"' % symbol, conn\n )\n revenue_history_df = pd.read_sql(\"select * from estimize_revenue\", conn)\n price_history_df = pd.read_sql(\"select * from price_history\", conn)\n\n def get_combined_df(eps_df, revenue_df):\n del eps_df[\"Historical Beat Rate\"]\n del revenue_df[\"Historical Beat Rate\"]\n\n date_reported_df = eps_df[\"Date Reported\"].str.split(\" \", n=1, expand=True)\n date_reported_df = date_reported_df.rename(\n columns={0: \"Date Reported\", 1: \"Time Reported\"}\n )\n date_reported_df[\"Date Reported\"] = pd.to_datetime(\n date_reported_df[\"Date Reported\"]\n )\n eps_df[\"Date Reported\"] = date_reported_df[\"Date Reported\"]\n eps_df[\"Time Reported\"] = date_reported_df[\"Time Reported\"]\n\n date_reported_df = revenue_df[\"Date Reported\"].str.split(\" \", n=1, expand=True)\n date_reported_df = date_reported_df.rename(\n columns={0: \"Date Reported\", 1: \"Time Reported\"}\n )\n date_reported_df[\"Date Reported\"] = pd.to_datetime(\n date_reported_df[\"Date Reported\"]\n )\n revenue_df[\"Date Reported\"] = date_reported_df[\"Date Reported\"]\n revenue_df[\"Time Reported\"] = date_reported_df[\"Time Reported\"]\n\n eps_df = eps_df.sort_values(by=\"Date Reported\")\n revenue_df = revenue_df.sort_values(by=\"Date Reported\")\n\n eps_df = eps_df.set_index(\n [\"Date Reported\", \"Time Reported\", \"Symbol\"], append=True, drop=True\n )\n revenue_df = revenue_df.set_index(\n [\"Date Reported\", \"Time Reported\", \"Symbol\"], append=True, drop=True\n )\n\n eps_df.columns = \"EPS \" + eps_df.columns\n revenue_df.columns = \"Revenue \" + revenue_df.columns\n\n df = eps_df.join(revenue_df)\n\n return df\n\n def get_historical_beat():\n df[\"Historical EPS Beat Ratio\"] = None\n df[\"Historical EPS Beat Percent\"] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n\n this_df = df[df.index.get_level_values(\"Symbol\") == symbol]\n beat_rate = this_df[\n this_df.index.get_level_values(\"Date Reported\") <= date_reported\n ].tail(8)\n\n if len(beat_rate) >= 4:\n beat_rate_ratio = len(beat_rate[beat_rate[\"EPS Surprise\"] > 0]) / float(\n len(beat_rate)\n )\n beat_rate_percent = beat_rate[\"EPS Surprise\"] / beat_rate[\"EPS Actual\"]\n beat_rate_percent = beat_rate_percent.replace([np.inf, -np.inf], np.nan)\n beat_rate_percent = beat_rate_percent.mean()\n\n # TODO: Do the same for revenue\n df.loc[index_num, [\"Historical EPS Beat Ratio\"]] = beat_rate_ratio\n df.loc[index_num, [\"Historical EPS Beat Percent\"]] = beat_rate_percent\n\n def get_average_change():\n df[\"Average Change 5 Days\"] = None\n df[\"Average Abnormal Change 5 Days\"] = None\n df[\"Average Change 10 Days\"] = None\n df[\"Average Abnormal Change 10 Days\"] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n\n returns_df = df[\n df.index.get_level_values(\"Date Reported\") < date_reported\n ].tail(8)\n\n if len(returns_df) >= 4:\n df.loc[index_num, [\"Average Change 5 Days\"]] = returns_df[\n \"5 Day Change\"\n ].mean()\n df.loc[index_num, [\"Average Change 10 Days\"]] = returns_df[\n \"10 Day Change\"\n ].mean()\n df.loc[index_num, [\"Average Abnormal Change 5 Days\"]] = 
returns_df[\n \"5 Day Change Abnormal\"\n ].mean()\n df.loc[index_num, [\"Average Abnormal Change 10 Days\"]] = returns_df[\n \"10 Day Change Abnormal\"\n ].mean()\n\n def get_YoY_growth():\n df[\"YoY Growth\"] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n time_reported = time_reported.replace(\"'\", \"\")\n quarter_numer, year = time_reported.split(\" \")\n\n this_df = df[\"EPS Actual\"]\n try:\n this_quarter = this_df[\n this_df.index.get_level_values(\"Time Reported\")\n == quarter_numer + \" '\" + year\n ].values[0]\n last_quarter = this_df[\n this_df.index.get_level_values(\"Time Reported\")\n == quarter_numer + \" '\" + str(int(year) - 1)\n ].values[0]\n df.loc[index_num, [\"YoY Growth\"]] = (\n this_quarter - last_quarter\n ) / last_quarter\n except Exception as e:\n pass\n\n def get_market_cap():\n finviz_page = r.get(\"https://finviz.com/quote.ashx?t=%s\" % symbol)\n\n soup = BeautifulSoup(finviz_page.text, features=\"lxml\")\n table_row = soup.findAll(\"tr\", attrs={\"class\": \"table-dark-row\"})[1]\n market_cap = table_row.text.replace(\"Market Cap\", \"\").split(\"\\n\")[1]\n if \"K\" in market_cap:\n market_cap = float(market_cap[:-1]) * 1000\n elif \"M\" in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000\n elif \"B\" in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000000\n\n market_cap = int(market_cap)\n if market_cap > 10000000000:\n market_cap_text = \"Large\"\n elif market_cap > 2000000000:\n market_cap_text = \"Medium\"\n elif market_cap > 300000000:\n market_cap_text = \"Small\"\n elif market_cap > 50000000:\n market_cap_text = \"Micro\"\n else:\n market_cap_text = \"Nano\"\n\n df[\"Market Cap Text\"] = market_cap_text\n\n\ndef get_estimize_data(self):\n # request the estimize website for data\n url = \"https://www.estimize.com/calendar?tab=equity&date=\" + datetime.now().strftime(\n \"%Y-%m-%d\"\n )\n self.driver.get(url)\n\n # check if there are no companies reporting earnings\n myElem = WebDriverWait(self.driver, self.delay).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"dAViVi\"))\n )\n companies_reporting_div = self.driver.find_element_by_class_name(\"dAViVi\")\n if \"0 Events\" == companies_reporting_div.text.split(\"\\n\")[1]:\n return\n\n # method to extra the ticker symbols from the webpage\n tickers = self.get_tickers()\n\n # method to get the historical data from yahoo\n # self.get_yahoo_historical(tickers)\n # TODO: update price history table with missing yahoo price data entries\n\n # read the table and make a dataframe out of it\n eps_df = pd.read_html(self.driver.page_source)[0]\n eps_df[\"Symbol\"] = tickers\n eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n eps_df.columns = [\n \"Date Reported\",\n \"Num of Estimates\",\n \"Delta\",\n \"Surprise\",\n \"Historical Beat Rate\",\n \"Wall St\",\n \"Estimize\",\n \"Actual\",\n \"Symbol\",\n ]\n\n # same as above, but for revenues table instead of EPS table\n url = (\n \"https://www.estimize.com/calendar?tab=equity&metric=revenue&date=\"\n + self.read_date.strftime(\"%Y-%m-%d\")\n )\n self.driver.get(url)\n myElem = WebDriverWait(self.driver, self.delay).until(\n EC.presence_of_element_located((By.TAG_NAME, \"table\"))\n )\n\n revenue_df = pd.read_html(self.driver.page_source)[0]\n tickers = self.get_tickers()\n revenue_df[\"Symbol\"] = tickers\n revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n revenue_df.columns = [\n \"Date Reported\",\n \"Num of Estimates\",\n \"Delta\",\n \"Surprise\",\n 
\"Historical Beat Rate\",\n \"Wall St\",\n \"Estimize\",\n \"Actual\",\n \"Symbol\",\n ]\n\n return eps_df, revenue_df\n\n\ndef get_tickers(self):\n # extract ticker symbopls from the html source\n soup = BeautifulSoup(self.driver.page_source, features=\"lxml\")\n ticker_links = soup.findAll(\"a\", attrs={\"class\": \"lfkTWp\"})\n\n # create list of symbols that were extracted\n tickers = []\n for ticker in ticker_links:\n tickers.append(ticker.contents[0])\n\n return tickers\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
#!/usr/bin/python3
def divisible_by_2(my_list=[]):
    """Return a list of booleans marking which elements are even;
    return None for a missing or empty list."""
    if my_list is None or len(my_list) == 0:
        return None
new = []
for num in my_list:
if num % 2 == 0:
new.append(True)
else:
new.append(False)
return new
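
# A brief usage sketch (illustrative values, not part of the original file):
# >>> divisible_by_2([2, 3, 4])
# [True, False, True]
# >>> divisible_by_2([])
# None  (empty or missing input short-circuits)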
|
normal
|
{
"blob_id": "17f91b612fad14200d2911e2cb14e740b239f9ff",
"index": 4894,
"step-1": "<mask token>\n",
"step-2": "def divisible_by_2(my_list=[]):\n if my_list is None or len(my_list) == 0:\n return None\n new = []\n for num in my_list:\n if num % 2 == 0:\n new.append(True)\n else:\n new.append(False)\n return new\n",
"step-3": "#!/usr/bin/python3\ndef divisible_by_2(my_list=[]):\n if my_list is None or len(my_list) == 0:\n return None\n new = []\n for num in my_list:\n if num % 2 == 0:\n new.append(True)\n else:\n new.append(False)\n return new\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from ethereum.common import mk_transaction_sha, mk_receipt_sha
from ethereum.exceptions import InsufficientBalance, BlockGasLimitReached, \
InsufficientStartGas, InvalidNonce, UnsignedTransaction
from ethereum.slogging import get_logger
from ethereum.utils import encode_hex
from sharding.receipt_consuming_tx_utils import apply_shard_transaction
from sharding.collation import Collation, CollationHeader
log = get_logger('sharding.shard_state_transition')
def mk_collation_from_prevstate(shard_chain, state, coinbase):
"""Make collation from previous state
(refer to ethereum.common.mk_block_from_prevstate)
"""
# state = state or shard_chain.state
collation = Collation(CollationHeader())
collation.header.shard_id = shard_chain.shard_id
collation.header.prev_state_root = state.trie.root_hash
collation.header.coinbase = coinbase
collation.transactions = []
return collation
def add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None):
"""Add transactions to a collation
(refer to ethereum.common.add_transactions)
"""
if not txqueue:
return
pre_txs = len(collation.transactions)
log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(txqueue.txs), pre_txs))
while 1:
tx = txqueue.pop_transaction(
max_gas=shard_state.gas_limit - shard_state.gas_used,
min_gasprice=min_gasprice
)
if tx is None:
break
try:
apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)
collation.transactions.append(tx)
except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas,
InvalidNonce, UnsignedTransaction) as e:
log.info(str(e))
pass
log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))
def update_collation_env_variables(state, collation):
"""Update collation variables into the state
(refer to ethereum.common.update_block_env_variables)
"""
state.block_coinbase = collation.header.coinbase
def set_execution_results(state, collation):
"""Set state root, receipt root, etc
(ethereum.pow.common.set_execution_results)
"""
collation.header.receipts_root = mk_receipt_sha(state.receipts)
collation.header.tx_list_root = mk_transaction_sha(collation.transactions)
# Notice: commit state before assigning
state.commit()
collation.header.post_state_root = state.trie.root_hash
# TODO: Don't handle in basic sharding currently
# block.header.gas_used = state.gas_used
# block.header.bloom = state.bloom
log.info('Collation pre-sealed, %d gas used' % state.gas_used)
def validate_transaction_tree(collation):
"""Validate that the transaction list root is correct
(refer to ethereum.common.validate_transaction_tree)
"""
if collation.header.tx_list_root != mk_transaction_sha(collation.transactions):
raise ValueError("Transaction root mismatch: header %s computed %s, %d transactions" %
(encode_hex(collation.header.tx_list_root), encode_hex(mk_transaction_sha(collation.transactions)),
len(collation.transactions)))
return True
def verify_execution_results(state, collation):
"""Verify the results by Merkle Proof
(refer to ethereum.common.verify_execution_results)
"""
state.commit()
validate_transaction_tree(collation)
if collation.header.post_state_root != state.trie.root_hash:
raise ValueError('State root mismatch: header %s computed %s' %
(encode_hex(collation.header.post_state_root), encode_hex(state.trie.root_hash)))
if collation.header.receipts_root != mk_receipt_sha(state.receipts):
raise ValueError('Receipt root mismatch: header %s computed %s, computed %d, %d receipts' %
(encode_hex(collation.header.receipts_root), encode_hex(mk_receipt_sha(state.receipts)),
state.gas_used, len(state.receipts)))
return True
def finalize(state, coinbase):
"""Apply rewards and commit
(refer to ethereum.pow.consensus.finalize)
"""
delta = int(state.config['COLLATOR_REWARD'])
state.delta_balance(coinbase, delta)
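
# Hedged usage sketch (not part of the original module): one plausible way the
# helpers above chain together when a collator proposes a collation. The
# `shard_chain`, `state`, `txqueue`, `coinbase` and `mainchain_state` objects
# are assumed to be supplied by the caller; this is illustrative, not the
# canonical proposal flow.
#
# collation = mk_collation_from_prevstate(shard_chain, state, coinbase)
# update_collation_env_variables(state, collation)
# add_transactions(state, collation, txqueue, shard_chain.shard_id,
#                  mainchain_state=mainchain_state)
# finalize(state, coinbase)
# set_execution_results(state, collation)   # commits state and seals the roots
# assert verify_execution_results(state, collation)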
|
normal
|
{
"blob_id": "e364ba45513167966fe50e31a01f552ccedec452",
"index": 6552,
"step-1": "<mask token>\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\n<mask token>\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n",
"step-2": "<mask token>\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n \"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.\n transactions):\n raise ValueError(\n 'Transaction root mismatch: header %s computed %s, %d transactions'\n % (encode_hex(collation.header.tx_list_root), encode_hex(\n mk_transaction_sha(collation.transactions)), len(collation.\n transactions)))\n return True\n\n\n<mask token>\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n",
"step-3": "<mask token>\n\n\ndef mk_collation_from_prevstate(shard_chain, state, coinbase):\n \"\"\"Make collation from previous state\n (refer to ethereum.common.mk_block_from_prevstate)\n \"\"\"\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n \"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.\n transactions):\n raise ValueError(\n 'Transaction root mismatch: header %s computed %s, %d transactions'\n % (encode_hex(collation.header.tx_list_root), encode_hex(\n mk_transaction_sha(collation.transactions)), len(collation.\n transactions)))\n return True\n\n\n<mask token>\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n",
"step-4": "<mask token>\n\n\ndef mk_collation_from_prevstate(shard_chain, state, coinbase):\n \"\"\"Make collation from previous state\n (refer to ethereum.common.mk_block_from_prevstate)\n \"\"\"\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n \"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.\n transactions):\n raise ValueError(\n 'Transaction root mismatch: header %s computed %s, %d transactions'\n % (encode_hex(collation.header.tx_list_root), encode_hex(\n mk_transaction_sha(collation.transactions)), len(collation.\n transactions)))\n return True\n\n\ndef verify_execution_results(state, collation):\n \"\"\"Verify the results by Merkle Proof\n (refer to ethereum.common.verify_execution_results)\n \"\"\"\n state.commit()\n validate_transaction_tree(collation)\n if collation.header.post_state_root != state.trie.root_hash:\n raise ValueError('State root mismatch: header %s computed %s' % (\n encode_hex(collation.header.post_state_root), encode_hex(state.\n trie.root_hash)))\n if collation.header.receipts_root != mk_receipt_sha(state.receipts):\n raise ValueError(\n 'Receipt root mismatch: header %s computed %s, computed %d, %d receipts'\n % (encode_hex(collation.header.receipts_root), encode_hex(\n mk_receipt_sha(state.receipts)), state.gas_used, len(state.\n receipts)))\n return True\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n",
"step-5": "from ethereum.common import mk_transaction_sha, mk_receipt_sha\nfrom ethereum.exceptions import InsufficientBalance, BlockGasLimitReached, \\\n InsufficientStartGas, InvalidNonce, UnsignedTransaction\nfrom ethereum.messages import apply_transaction\nfrom ethereum.slogging import get_logger\nfrom ethereum.utils import encode_hex\n\nfrom sharding.receipt_consuming_tx_utils import apply_shard_transaction\nfrom sharding.collation import Collation, CollationHeader\n\nlog = get_logger('sharding.shard_state_transition')\n\n\ndef mk_collation_from_prevstate(shard_chain, state, coinbase):\n \"\"\"Make collation from previous state\n (refer to ethereum.common.mk_block_from_prevstate)\n \"\"\"\n # state = state or shard_chain.state\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(\n max_gas=shard_state.gas_limit - shard_state.gas_used,\n min_gasprice=min_gasprice\n )\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas,\n InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n\n # Notice: commit state before assigning\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n\n # TODO: Don't handle in basic sharding currently\n # block.header.gas_used = state.gas_used\n # block.header.bloom = state.bloom\n\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n \"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.transactions):\n raise ValueError(\"Transaction root mismatch: header %s computed %s, %d transactions\" %\n (encode_hex(collation.header.tx_list_root), encode_hex(mk_transaction_sha(collation.transactions)),\n len(collation.transactions)))\n return True\n\n\ndef verify_execution_results(state, collation):\n \"\"\"Verify the results by Merkle Proof\n (refer to ethereum.common.verify_execution_results)\n \"\"\"\n state.commit()\n\n validate_transaction_tree(collation)\n\n if collation.header.post_state_root != state.trie.root_hash:\n raise ValueError('State root mismatch: header %s computed %s' %\n 
(encode_hex(collation.header.post_state_root), encode_hex(state.trie.root_hash)))\n if collation.header.receipts_root != mk_receipt_sha(state.receipts):\n raise ValueError('Receipt root mismatch: header %s computed %s, computed %d, %d receipts' %\n (encode_hex(collation.header.receipts_root), encode_hex(mk_receipt_sha(state.receipts)),\n state.gas_used, len(state.receipts)))\n\n return True\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n",
"step-ids": [
4,
5,
6,
7,
10
]
}
|
[
4,
5,
6,
7,
10
] |
import re
import sys
import zipfile
import pathlib
from typing import IO, Any
from collections.abc import Mapping
import numpy.typing as npt
import numpy as np
from numpy.lib._npyio_impl import BagObj
if sys.version_info >= (3, 11):
from typing import assert_type
else:
from typing_extensions import assert_type
str_path: str
pathlib_path: pathlib.Path
str_file: IO[str]
bytes_file: IO[bytes]
npz_file: np.lib.npyio.NpzFile
AR_i8: npt.NDArray[np.int64]
AR_LIKE_f8: list[float]
class BytesWriter:
def write(self, data: bytes) -> None: ...
class BytesReader:
def read(self, n: int = ...) -> bytes: ...
def seek(self, offset: int, whence: int = ...) -> int: ...
bytes_writer: BytesWriter
bytes_reader: BytesReader
assert_type(npz_file.zip, zipfile.ZipFile)
assert_type(npz_file.fid, None | IO[str])
assert_type(npz_file.files, list[str])
assert_type(npz_file.allow_pickle, bool)
assert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])
assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])
assert_type(npz_file["test"], npt.NDArray[Any])
assert_type(len(npz_file), int)
with npz_file as f:
assert_type(f, np.lib.npyio.NpzFile)
assert_type(np.load(bytes_file), Any)
assert_type(np.load(pathlib_path, allow_pickle=True), Any)
assert_type(np.load(str_path, encoding="bytes"), Any)
assert_type(np.load(bytes_reader), Any)
assert_type(np.save(bytes_file, AR_LIKE_f8), None)
assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)
assert_type(np.save(str_path, AR_LIKE_f8), None)
assert_type(np.save(bytes_writer, AR_LIKE_f8), None)
assert_type(np.savez(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])
assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])
assert_type(np.loadtxt(str_file, comments="test"), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, delimiter="\n"), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])
assert_type(np.loadtxt(["1", "2", "3"]), npt.NDArray[np.float64])
assert_type(np.fromregex(bytes_file, "test", np.float64), npt.NDArray[np.float64])
assert_type(np.fromregex(str_file, b"test", dtype=float), npt.NDArray[Any])
assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), npt.NDArray[np.str_])
assert_type(np.fromregex(pathlib_path, "test", np.float64), npt.NDArray[np.float64])
assert_type(np.fromregex(bytes_reader, "test", np.float64), npt.NDArray[np.float64])
assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])
assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any])
assert_type(np.genfromtxt(str_file, comments="test"), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, delimiter="\n"), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])
assert_type(np.genfromtxt(["1", "2", "3"], ndmin=2), npt.NDArray[Any])
|
normal
|
{
"blob_id": "e2f134f5ff00405396b8bbf4edc263b70ef5d972",
"index": 2435,
"step-1": "<mask token>\n\n\nclass BytesWriter:\n <mask token>\n\n\nclass BytesReader:\n\n def read(self, n: int=...) ->bytes:\n ...\n\n def seek(self, offset: int, whence: int=...) ->int:\n ...\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BytesWriter:\n\n def write(self, data: bytes) ->None:\n ...\n\n\nclass BytesReader:\n\n def read(self, n: int=...) ->bytes:\n ...\n\n def seek(self, offset: int, whence: int=...) ->int:\n ...\n\n\n<mask token>\n",
"step-3": "<mask token>\nif sys.version_info >= (3, 11):\n from typing import assert_type\nelse:\n from typing_extensions import assert_type\nstr_path: str\npathlib_path: pathlib.Path\nstr_file: IO[str]\nbytes_file: IO[bytes]\nnpz_file: np.lib.npyio.NpzFile\nAR_i8: npt.NDArray[np.int64]\nAR_LIKE_f8: list[float]\n\n\nclass BytesWriter:\n\n def write(self, data: bytes) ->None:\n ...\n\n\nclass BytesReader:\n\n def read(self, n: int=...) ->bytes:\n ...\n\n def seek(self, offset: int, whence: int=...) ->int:\n ...\n\n\nbytes_writer: BytesWriter\nbytes_reader: BytesReader\nassert_type(npz_file.zip, zipfile.ZipFile)\nassert_type(npz_file.fid, None | IO[str])\nassert_type(npz_file.files, list[str])\nassert_type(npz_file.allow_pickle, bool)\nassert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])\nassert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])\nassert_type(npz_file['test'], npt.NDArray[Any])\nassert_type(len(npz_file), int)\nwith npz_file as f:\n assert_type(f, np.lib.npyio.NpzFile)\nassert_type(np.load(bytes_file), Any)\nassert_type(np.load(pathlib_path, allow_pickle=True), Any)\nassert_type(np.load(str_path, encoding='bytes'), Any)\nassert_type(np.load(bytes_reader), Any)\nassert_type(np.save(bytes_file, AR_LIKE_f8), None)\nassert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)\nassert_type(np.save(str_path, AR_LIKE_f8), None)\nassert_type(np.save(bytes_writer, AR_LIKE_f8), None)\nassert_type(np.savez(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)\nassert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)\nassert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])\nassert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])\nassert_type(np.loadtxt(str_file, comments='test'), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, delimiter='\\n'), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])\nassert_type(np.loadtxt(['1', '2', '3']), npt.NDArray[np.float64])\nassert_type(np.fromregex(bytes_file, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.fromregex(str_file, b'test', dtype=float), npt.NDArray[Any])\nassert_type(np.fromregex(str_path, re.compile('test'), dtype=np.str_,\n encoding='utf8'), npt.NDArray[np.str_])\nassert_type(np.fromregex(pathlib_path, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.fromregex(bytes_reader, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])\nassert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]\n )\nassert_type(np.genfromtxt(str_file, comments='test'), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, delimiter='\\n'), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])\nassert_type(np.genfromtxt(['1', '2', '3'], ndmin=2), npt.NDArray[Any])\n",
"step-4": "import re\nimport sys\nimport zipfile\nimport pathlib\nfrom typing import IO, Any\nfrom collections.abc import Mapping\nimport numpy.typing as npt\nimport numpy as np\nfrom numpy.lib._npyio_impl import BagObj\nif sys.version_info >= (3, 11):\n from typing import assert_type\nelse:\n from typing_extensions import assert_type\nstr_path: str\npathlib_path: pathlib.Path\nstr_file: IO[str]\nbytes_file: IO[bytes]\nnpz_file: np.lib.npyio.NpzFile\nAR_i8: npt.NDArray[np.int64]\nAR_LIKE_f8: list[float]\n\n\nclass BytesWriter:\n\n def write(self, data: bytes) ->None:\n ...\n\n\nclass BytesReader:\n\n def read(self, n: int=...) ->bytes:\n ...\n\n def seek(self, offset: int, whence: int=...) ->int:\n ...\n\n\nbytes_writer: BytesWriter\nbytes_reader: BytesReader\nassert_type(npz_file.zip, zipfile.ZipFile)\nassert_type(npz_file.fid, None | IO[str])\nassert_type(npz_file.files, list[str])\nassert_type(npz_file.allow_pickle, bool)\nassert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])\nassert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])\nassert_type(npz_file['test'], npt.NDArray[Any])\nassert_type(len(npz_file), int)\nwith npz_file as f:\n assert_type(f, np.lib.npyio.NpzFile)\nassert_type(np.load(bytes_file), Any)\nassert_type(np.load(pathlib_path, allow_pickle=True), Any)\nassert_type(np.load(str_path, encoding='bytes'), Any)\nassert_type(np.load(bytes_reader), Any)\nassert_type(np.save(bytes_file, AR_LIKE_f8), None)\nassert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)\nassert_type(np.save(str_path, AR_LIKE_f8), None)\nassert_type(np.save(bytes_writer, AR_LIKE_f8), None)\nassert_type(np.savez(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)\nassert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)\nassert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])\nassert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])\nassert_type(np.loadtxt(str_file, comments='test'), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, delimiter='\\n'), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])\nassert_type(np.loadtxt(['1', '2', '3']), npt.NDArray[np.float64])\nassert_type(np.fromregex(bytes_file, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.fromregex(str_file, b'test', dtype=float), npt.NDArray[Any])\nassert_type(np.fromregex(str_path, re.compile('test'), dtype=np.str_,\n encoding='utf8'), npt.NDArray[np.str_])\nassert_type(np.fromregex(pathlib_path, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.fromregex(bytes_reader, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])\nassert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]\n )\nassert_type(np.genfromtxt(str_file, comments='test'), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, delimiter='\\n'), 
npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])\nassert_type(np.genfromtxt(['1', '2', '3'], ndmin=2), npt.NDArray[Any])\n",
"step-5": "import re\nimport sys\nimport zipfile\nimport pathlib\nfrom typing import IO, Any\nfrom collections.abc import Mapping\n\nimport numpy.typing as npt\nimport numpy as np\nfrom numpy.lib._npyio_impl import BagObj\n\nif sys.version_info >= (3, 11):\n from typing import assert_type\nelse:\n from typing_extensions import assert_type\n\nstr_path: str\npathlib_path: pathlib.Path\nstr_file: IO[str]\nbytes_file: IO[bytes]\n\nnpz_file: np.lib.npyio.NpzFile\n\nAR_i8: npt.NDArray[np.int64]\nAR_LIKE_f8: list[float]\n\nclass BytesWriter:\n def write(self, data: bytes) -> None: ...\n\nclass BytesReader:\n def read(self, n: int = ...) -> bytes: ...\n def seek(self, offset: int, whence: int = ...) -> int: ...\n\nbytes_writer: BytesWriter\nbytes_reader: BytesReader\n\nassert_type(npz_file.zip, zipfile.ZipFile)\nassert_type(npz_file.fid, None | IO[str])\nassert_type(npz_file.files, list[str])\nassert_type(npz_file.allow_pickle, bool)\nassert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])\nassert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])\nassert_type(npz_file[\"test\"], npt.NDArray[Any])\nassert_type(len(npz_file), int)\nwith npz_file as f:\n assert_type(f, np.lib.npyio.NpzFile)\n\nassert_type(np.load(bytes_file), Any)\nassert_type(np.load(pathlib_path, allow_pickle=True), Any)\nassert_type(np.load(str_path, encoding=\"bytes\"), Any)\nassert_type(np.load(bytes_reader), Any)\n\nassert_type(np.save(bytes_file, AR_LIKE_f8), None)\nassert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)\nassert_type(np.save(str_path, AR_LIKE_f8), None)\nassert_type(np.save(bytes_writer, AR_LIKE_f8), None)\n\nassert_type(np.savez(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)\nassert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\n\nassert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)\nassert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\n\nassert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])\nassert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])\nassert_type(np.loadtxt(str_file, comments=\"test\"), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, delimiter=\"\\n\"), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])\nassert_type(np.loadtxt([\"1\", \"2\", \"3\"]), npt.NDArray[np.float64])\n\nassert_type(np.fromregex(bytes_file, \"test\", np.float64), npt.NDArray[np.float64])\nassert_type(np.fromregex(str_file, b\"test\", dtype=float), npt.NDArray[Any])\nassert_type(np.fromregex(str_path, re.compile(\"test\"), dtype=np.str_, encoding=\"utf8\"), npt.NDArray[np.str_])\nassert_type(np.fromregex(pathlib_path, \"test\", np.float64), npt.NDArray[np.float64])\nassert_type(np.fromregex(bytes_reader, \"test\", np.float64), npt.NDArray[np.float64])\n\nassert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])\nassert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_file, comments=\"test\"), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, 
delimiter=\"\\n\"), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])\nassert_type(np.genfromtxt([\"1\", \"2\", \"3\"], ndmin=2), npt.NDArray[Any])\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django.utils.text import slugify
from django.db import models
# Create your models here.
from rest_framework_simplejwt.state import User

FREQUENCY = (
    ('daily', 'Diario'),
    ('weekly', 'Semanal'),
    ('monthly', 'Mensual')
)

class Tags(models.Model):
    name = models.CharField(max_length=100)
    slug = models.CharField(max_length=150)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)  # auto_now (not auto_now_add) so edits refresh the timestamp

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Generate the slug from the name on first save.
        if not self.slug:
            self.slug = slugify(self.name)
        super(Tags, self).save(*args, **kwargs)

    class Meta:
        ordering = ('-created_at',)

class Newsletter(models.Model):
    name = models.CharField(max_length=200)
    description = models.CharField(max_length=10000)
    image = models.ImageField()
    target = models.IntegerField()
    frequency = models.CharField(max_length=10, choices=FREQUENCY, default='monthly')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    tag = models.ManyToManyField(Tags)

    @property
    def subscribed(self):
        return 10

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('-created_at',)
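# Usage sketch (hypothetical interactive session, not part of the original file):
#   >>> t = Tags(name='Boletín Semanal')
#   >>> t.save()        # save() fills the slug on first save
#   >>> t.slug
#   'boletin-semanal'   # slugify() ASCII-folds accents and hyphenates spaces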
|
normal
|
{
"blob_id": "71503282e58f60e0936a5236edc094f1da937422",
"index": 6565,
"step-1": "<mask token>\n\n\nclass Tags(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = '-created_at',\n\n\nclass Newsletter(models.Model):\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=10000)\n image = models.ImageField()\n target = models.IntegerField()\n frequency = models.CharField(max_length=10, choices=FREQUENCY, default=\n 'monthly')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n tag = models.ManyToManyField(Tags)\n\n @property\n def subscribed(self):\n return 10\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = '-created_at',\n",
"step-2": "<mask token>\n\n\nclass Tags(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n def save(self, *arg, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name)\n super(Tags, self).save(*arg, **kwargs)\n\n\n class Meta:\n ordering = '-created_at',\n\n\nclass Newsletter(models.Model):\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=10000)\n image = models.ImageField()\n target = models.IntegerField()\n frequency = models.CharField(max_length=10, choices=FREQUENCY, default=\n 'monthly')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n tag = models.ManyToManyField(Tags)\n\n @property\n def subscribed(self):\n return 10\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = '-created_at',\n",
"step-3": "<mask token>\n\n\nclass Tags(models.Model):\n name = models.CharField(max_length=100)\n slug = models.CharField(max_length=150)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *arg, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name)\n super(Tags, self).save(*arg, **kwargs)\n\n\n class Meta:\n ordering = '-created_at',\n\n\nclass Newsletter(models.Model):\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=10000)\n image = models.ImageField()\n target = models.IntegerField()\n frequency = models.CharField(max_length=10, choices=FREQUENCY, default=\n 'monthly')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n tag = models.ManyToManyField(Tags)\n\n @property\n def subscribed(self):\n return 10\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = '-created_at',\n",
"step-4": "from django.utils.text import slugify\nfrom pyexpat import model\nfrom django.db import models\nfrom rest_framework_simplejwt.state import User\nFREQUENCY = ('daily', 'Diario'), ('weekly', 'Semanal'), ('monthly', 'Mensual')\n\n\nclass Tags(models.Model):\n name = models.CharField(max_length=100)\n slug = models.CharField(max_length=150)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *arg, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name)\n super(Tags, self).save(*arg, **kwargs)\n\n\n class Meta:\n ordering = '-created_at',\n\n\nclass Newsletter(models.Model):\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=10000)\n image = models.ImageField()\n target = models.IntegerField()\n frequency = models.CharField(max_length=10, choices=FREQUENCY, default=\n 'monthly')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n tag = models.ManyToManyField(Tags)\n\n @property\n def subscribed(self):\n return 10\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = '-created_at',\n",
"step-5": "from django.utils.text import slugify\nfrom pyexpat import model\nfrom django.db import models\n# Create your models here.\nfrom rest_framework_simplejwt.state import User\n\nFREQUENCY = (\n ('daily', 'Diario'),\n ('weekly', 'Semanal'),\n ('monthly', 'Mensual')\n)\n\nclass Tags(models.Model):\n name = models.CharField(max_length=100)\n slug = models.CharField(max_length=150)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *arg, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name)\n super(Tags, self).save(*arg, **kwargs)\n\n class Meta:\n ordering = ('-created_at',)\n\nclass Newsletter(models.Model):\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=10000)\n image = models.ImageField()\n target = models.IntegerField()\n frequency = models.CharField(max_length=10, choices=FREQUENCY, default='monthly')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n tag = models.ManyToManyField(Tags)\n @property\n def subscribed(self):\n return 10\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ('-created_at',)\n\n",
"step-ids": [
5,
7,
8,
10,
11
]
}
|
[
5,
7,
8,
10,
11
] |
from pyspark import SparkContext, SparkConf
conf = SparkConf().setAppName("same_host").setMaster("local")
sc = SparkContext(conf=conf)
julyFirstLogs = sc.textFile("/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv")
augFirstLogs = sc.textFile("/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv")
julyFirstLogs = julyFirstLogs.map(lambda line: line.split("\t")[0])
augFirstLogs = augFirstLogs.map(lambda line: line.split("\t")[0])
intersection = julyFirstLogs.intersection(augFirstLogs)
cleanedHostIntersection = intersection.filter(lambda host: host != "host")  # drop the TSV header value
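# saveAsTextFile (below) writes a directory of part files and raises if the
# output path already exists; clearing it first keeps the script re-runnable.
# A convenience sketch, not in the original:
import shutil, os
if os.path.exists("out/nasa_logs_same_hosts.csv"):
    shutil.rmtree("out/nasa_logs_same_hosts.csv")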
cleanedHostIntersection.saveAsTextFile("out/nasa_logs_same_hosts.csv")
|
normal
|
{
"blob_id": "36fce3837e0341d94ff6099a06be8cf757a1cfa9",
"index": 3596,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')\n",
"step-3": "<mask token>\nconf = SparkConf().setAppName('same_host').setMaster('local')\nsc = SparkContext(conf=conf)\njulyFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv')\naugFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv')\njulyFirstLogs = julyFirstLogs.map(lambda line: line.split('\\t')[0])\naugFirstLogs = augFirstLogs.map(lambda line: line.split('\\t')[0])\nintersection = julyFirstLogs.intersection(augFirstLogs)\ncleanedHostIntersection = intersection.filter(lambda host: host != 'host')\ncleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')\n",
"step-4": "from pyspark import SparkContext, SparkConf\nconf = SparkConf().setAppName('same_host').setMaster('local')\nsc = SparkContext(conf=conf)\njulyFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv')\naugFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv')\njulyFirstLogs = julyFirstLogs.map(lambda line: line.split('\\t')[0])\naugFirstLogs = augFirstLogs.map(lambda line: line.split('\\t')[0])\nintersection = julyFirstLogs.intersection(augFirstLogs)\ncleanedHostIntersection = intersection.filter(lambda host: host != 'host')\ncleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')\n",
"step-5": "from pyspark import SparkContext, SparkConf\n\nconf = SparkConf().setAppName(\"same_host\").setMaster(\"local\")\nsc = SparkContext(conf=conf)\n\njulyFirstLogs = sc.textFile(\"/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv\")\naugFirstLogs = sc.textFile(\"/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv\")\n\n\njulyFirstLogs = julyFirstLogs.map(lambda line: line.split(\"\\t\")[0])\naugFirstLogs = augFirstLogs.map(lambda line: line.split(\"\\t\")[0])\n\nintersection = julyFirstLogs.intersection(augFirstLogs)\ncleanedHostIntersection = intersection.filter(lambda host: host != \"host\")\ncleanedHostIntersection.saveAsTextFile(\"out/nasa_logs_same_hosts.csv\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright (c) 2021 Koichi Sakata
from pylib_sakata import init as init
# uncomment the follows when the file is executed in a Python console.
# init.close_all()
# init.clear_all()
import os
import shutil
import numpy as np
from control import matlab
from pylib_sakata import ctrl
from pylib_sakata import plot
print('Start simulation!')
# Common parameters
figurefolderName = 'figure_2mass_pl'
if os.path.exists(figurefolderName):
    shutil.rmtree(figurefolderName)
os.makedirs(figurefolderName)
Ts = 1/4000
dataNum = 10000
freqrange = [1, 1000]
freq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum, base=10)
s = ctrl.tf([1, 0], [1])
z = ctrl.tf([1, 0], [1], Ts)
print('Common parameters were set.')
# Plant model
M1 = 1.0
M2 = 1.0
M = M1 + M2
C = 10.0
K = 0.0
Creso = 10.0
Kreso = 50000.0
k1 = M2/(M1 * (M1 + M2))
k2 = -1.0/(M1 + M2)
omegaPreso = np.sqrt(Kreso * (M1 + M2)/(M1 * M2))
zetaPreso = 0.5 * Creso*np.sqrt((M1 + M2)/(Kreso * M1 * M2))
Pmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2*zetaPreso*omegaPreso, omegaPreso**2])
Pmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2*zetaPreso*omegaPreso, omegaPreso**2])
numDelay, denDelay = matlab.pade(Ts*4, n=4)
Ds = ctrl.tf(numDelay, denDelay)
Dz = z**-4
Pns1 = Pmechs1 * Ds
Pns2 = Pmechs2 * Ds
Pnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz
Pnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz
Pnz1_frd = ctrl.sys2frd(Pnz1, freq)
Pnz2_frd = ctrl.sys2frd(Pnz2, freq)
print('Plant model was set.')
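# Note: Pmechs1/Pmechs2 follow the standard modal decomposition of a two-mass
# resonant system: a shared rigid-body mode 1/(M*s^2 + C*s + K) plus a resonant
# mode whose residue differs per side (k1 = M2/(M1*(M1+M2)) on the motor side,
# k2 = -1/(M1+M2) on the load side).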
# Design PID controller
freq1 = 10.0
zeta1 = 1.0
freq2 = 10.0
zeta2 = 1.0
Cz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)
Cz_frd = ctrl.sys2frd(Cz, freq)
print('PID controller was designed.')
# Design phase lead filter
zeta1 = 0.7
freq1 = 40
zeta2 = 0.7
freq2 = 60
PLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)
PLz1_frd = ctrl.sys2frd(PLz1, freq)
PLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)
PLz2_frd = ctrl.sys2frd(PLz2, freq)
print('Phase lead filters were designed.')
print('Frequency response analysis is running...')
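# Assumption: ctrl.pl2nd(f_z, z_z, f_p, z_p, Ts) is read here as placing the
# second-order zero at (f_z, z_z) and the pole at (f_p, z_p), so reversing the
# arguments for PLz2 yields the complementary lag shape used on the load side.
# This is inferred from usage, not from the library docs.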
# Motor side
Gn1_frd = Pnz1_frd * Cz_frd
Sn1_frd = 1/(1 + Gn1_frd)
Tn1_frd = 1 - Sn1_frd
Gn1_pl_frd = Pnz1_frd * Cz_frd * PLz1_frd
Sn1_pl_frd = 1/(1 + Gn1_pl_frd)
Tn1_pl_frd = 1 - Sn1_pl_frd
# Load side
Gn2_frd = Pnz2_frd * Cz_frd
Sn2_frd = 1/(1 + Gn2_frd)
Tn2_frd = 1 - Sn2_frd
Gn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd
Sn2_pl_frd = 1/(1 + Gn2_pl_frd)
Tn2_pl_frd = 1 - Sn2_pl_frd
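# Each loop satisfies S + T = 1 by definition, so the complementary
# sensitivities are formed as 1 - S instead of re-evaluating P*C/(1 + P*C).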
print('Plotting figures...')
# Plant
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of plant')
plot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange, legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName+'/freq_P.png')
# PID controller
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange, title='Frequency response of PID controller')
plot.savefig(figurefolderName+'/freq_C.png')
# Phase lead filters
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of filters')
plot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange, [-10, 10], legend=['PL for motor side', 'PL for load side'])
plot.savefig(figurefolderName+'/freq_PL.png')
# Open loop function
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of open loop transfer function')
plot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange, legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName+'/freq_G.png')
# Sensitivity function
fig = plot.makefig()
ax_mag = fig.add_subplot(111)
ax_phase = None
plot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange, [-60, 20], legend=['Motor side', 'Load side', 'Motor side with PL', 'Load side with PL'])
plot.savefig(figurefolderName+'/freq_S.png')
# Complementary sensitivity function
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of complementary sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange, [-60, 20], legend=['Motor side', 'Load side', 'Motor side with PL', 'Load side with PL'])
plot.savefig(figurefolderName+'/freq_T.png')
# Nyquist
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side', 'Load side', 'Motor side with PL', 'Load side with PL'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName+'/nyquist.png')
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5], yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with PL', 'Load side with PL'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName+'/nyquist_.png')
print('Finished.')
|
normal
|
{
"blob_id": "ad1aa69f92f104ac8b82aca3c0a64ce3de48b36d",
"index": 3847,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Start simulation!')\n<mask token>\nif os.path.exists(figurefolderName):\n shutil.rmtree(figurefolderName)\nos.makedirs(figurefolderName)\n<mask token>\nprint('Common parameters were set.')\n<mask token>\nprint('Plant model was set.')\n<mask token>\nprint('PID controller was designed.')\n<mask token>\nprint('Phase lead filters were desinged.')\nprint('Frequency respose alanysis is running...')\n<mask token>\nprint('Plotting figures...')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of plant')\nplot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_P.png')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange,\n title='Frequency response of PID controller')\nplot.savefig(figurefolderName + '/freq_C.png')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of filters')\nplot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n [-10, 10], legend=['PL for motor side', 'PL for load side'])\nplot.savefig(figurefolderName + '/freq_PL.png')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of open loop transfer function')\nplot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_G.png')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.savefig(figurefolderName + '/freq_S.png')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of complementary sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.savefig(figurefolderName + '/freq_T.png')\n<mask token>\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side',\n 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist.png')\n<mask token>\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5],\n yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist_.png')\nprint('Finished.')\n",
"step-3": "<mask token>\nprint('Start simulation!')\nfigurefolderName = 'figure_2mass_pl'\nif os.path.exists(figurefolderName):\n shutil.rmtree(figurefolderName)\nos.makedirs(figurefolderName)\nTs = 1 / 4000\ndataNum = 10000\nfreqrange = [1, 1000]\nfreq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum,\n base=10)\ns = ctrl.tf([1, 0], [1])\nz = ctrl.tf([1, 0], [1], Ts)\nprint('Common parameters were set.')\nM1 = 1.0\nM2 = 1.0\nM = M1 + M2\nC = 10.0\nK = 0.0\nCreso = 10.0\nKreso = 50000.0\nk1 = M2 / (M1 * (M1 + M2))\nk2 = -1.0 / (M1 + M2)\nomegaPreso = np.sqrt(Kreso * (M1 + M2) / (M1 * M2))\nzetaPreso = 0.5 * Creso * np.sqrt((M1 + M2) / (Kreso * M1 * M2))\nPmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2 * zetaPreso *\n omegaPreso, omegaPreso ** 2])\nPmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2 * zetaPreso *\n omegaPreso, omegaPreso ** 2])\nnumDelay, denDelay = matlab.pade(Ts * 4, n=4)\nDs = ctrl.tf(numDelay, denDelay)\nDz = z ** -4\nPns1 = Pmechs1 * Ds\nPns2 = Pmechs2 * Ds\nPnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz\nPnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz\nPnz1_frd = ctrl.sys2frd(Pnz1, freq)\nPnz2_frd = ctrl.sys2frd(Pnz2, freq)\nprint('Plant model was set.')\nfreq1 = 10.0\nzeta1 = 1.0\nfreq2 = 10.0\nzeta2 = 1.0\nCz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)\nCz_frd = ctrl.sys2frd(Cz, freq)\nprint('PID controller was designed.')\nzeta1 = 0.7\nfreq1 = 40\nzeta2 = 0.7\nfreq2 = 60\nPLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)\nPLz1_frd = ctrl.sys2frd(PLz1, freq)\nPLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)\nPLz2_frd = ctrl.sys2frd(PLz2, freq)\nprint('Phase lead filters were desinged.')\nprint('Frequency respose alanysis is running...')\nGn1_frd = Pnz1_frd * Cz_frd\nSn1_frd = 1 / (1 + Gn1_frd)\nTn1_frd = 1 - Sn1_frd\nGn1_pl_frd = Pnz1_frd * Cz_frd * PLz1_frd\nSn1_pl_frd = 1 / (1 + Gn1_pl_frd)\nTn1_pl_frd = 1 - Sn1_pl_frd\nGn2_frd = Pnz2_frd * Cz_frd\nSn2_frd = 1 / (1 + Gn2_frd)\nTn2_frd = 1 - Sn2_frd\nGn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd\nSn2_pl_frd = 1 / (1 + Gn2_pl_frd)\nTn2_pl_frd = 1 - Sn2_pl_frd\nprint('Plotting figures...')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of plant')\nplot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_P.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange,\n title='Frequency response of PID controller')\nplot.savefig(figurefolderName + '/freq_C.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of filters')\nplot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n [-10, 10], legend=['PL for motor side', 'PL for load side'])\nplot.savefig(figurefolderName + '/freq_PL.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of open loop transfer function')\nplot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_G.png')\nfig = plot.makefig()\nax_mag = 
fig.add_subplot(111)\nax_phase = None\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.savefig(figurefolderName + '/freq_S.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of complementary sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.savefig(figurefolderName + '/freq_T.png')\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side',\n 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist.png')\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5],\n yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist_.png')\nprint('Finished.')\n",
"step-4": "from pylib_sakata import init as init\nimport os\nimport shutil\nimport numpy as np\nfrom control import matlab\nfrom pylib_sakata import ctrl\nfrom pylib_sakata import plot\nprint('Start simulation!')\nfigurefolderName = 'figure_2mass_pl'\nif os.path.exists(figurefolderName):\n shutil.rmtree(figurefolderName)\nos.makedirs(figurefolderName)\nTs = 1 / 4000\ndataNum = 10000\nfreqrange = [1, 1000]\nfreq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum,\n base=10)\ns = ctrl.tf([1, 0], [1])\nz = ctrl.tf([1, 0], [1], Ts)\nprint('Common parameters were set.')\nM1 = 1.0\nM2 = 1.0\nM = M1 + M2\nC = 10.0\nK = 0.0\nCreso = 10.0\nKreso = 50000.0\nk1 = M2 / (M1 * (M1 + M2))\nk2 = -1.0 / (M1 + M2)\nomegaPreso = np.sqrt(Kreso * (M1 + M2) / (M1 * M2))\nzetaPreso = 0.5 * Creso * np.sqrt((M1 + M2) / (Kreso * M1 * M2))\nPmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2 * zetaPreso *\n omegaPreso, omegaPreso ** 2])\nPmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2 * zetaPreso *\n omegaPreso, omegaPreso ** 2])\nnumDelay, denDelay = matlab.pade(Ts * 4, n=4)\nDs = ctrl.tf(numDelay, denDelay)\nDz = z ** -4\nPns1 = Pmechs1 * Ds\nPns2 = Pmechs2 * Ds\nPnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz\nPnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz\nPnz1_frd = ctrl.sys2frd(Pnz1, freq)\nPnz2_frd = ctrl.sys2frd(Pnz2, freq)\nprint('Plant model was set.')\nfreq1 = 10.0\nzeta1 = 1.0\nfreq2 = 10.0\nzeta2 = 1.0\nCz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)\nCz_frd = ctrl.sys2frd(Cz, freq)\nprint('PID controller was designed.')\nzeta1 = 0.7\nfreq1 = 40\nzeta2 = 0.7\nfreq2 = 60\nPLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)\nPLz1_frd = ctrl.sys2frd(PLz1, freq)\nPLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)\nPLz2_frd = ctrl.sys2frd(PLz2, freq)\nprint('Phase lead filters were desinged.')\nprint('Frequency respose alanysis is running...')\nGn1_frd = Pnz1_frd * Cz_frd\nSn1_frd = 1 / (1 + Gn1_frd)\nTn1_frd = 1 - Sn1_frd\nGn1_pl_frd = Pnz1_frd * Cz_frd * PLz1_frd\nSn1_pl_frd = 1 / (1 + Gn1_pl_frd)\nTn1_pl_frd = 1 - Sn1_pl_frd\nGn2_frd = Pnz2_frd * Cz_frd\nSn2_frd = 1 / (1 + Gn2_frd)\nTn2_frd = 1 - Sn2_frd\nGn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd\nSn2_pl_frd = 1 / (1 + Gn2_pl_frd)\nTn2_pl_frd = 1 - Sn2_pl_frd\nprint('Plotting figures...')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of plant')\nplot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_P.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange,\n title='Frequency response of PID controller')\nplot.savefig(figurefolderName + '/freq_C.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of filters')\nplot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n [-10, 10], legend=['PL for motor side', 'PL for load side'])\nplot.savefig(figurefolderName + '/freq_PL.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of open loop transfer function')\nplot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, 
'-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_G.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(111)\nax_phase = None\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.savefig(figurefolderName + '/freq_S.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of complementary sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.savefig(figurefolderName + '/freq_T.png')\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side',\n 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist.png')\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5],\n yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist_.png')\nprint('Finished.')\n",
"step-5": "# Copyright (c) 2021 Koichi Sakata\n\n\nfrom pylib_sakata import init as init\n# uncomment the follows when the file is executed in a Python console.\n# init.close_all()\n# init.clear_all()\n\nimport os\nimport shutil\nimport numpy as np\nfrom control import matlab\nfrom pylib_sakata import ctrl\nfrom pylib_sakata import plot\n\nprint('Start simulation!')\n\n# Common parameters\nfigurefolderName = 'figure_2mass_pl'\nif os.path.exists(figurefolderName):\n shutil.rmtree(figurefolderName)\nos.makedirs(figurefolderName)\nTs = 1/4000\ndataNum = 10000\nfreqrange = [1, 1000]\nfreq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum, base=10)\ns = ctrl.tf([1, 0], [1])\nz = ctrl.tf([1, 0], [1], Ts)\nprint('Common parameters were set.')\n\n# Plant model\nM1 = 1.0\nM2 = 1.0\nM = M1 + M2\nC = 10.0\nK = 0.0\nCreso = 10.0\nKreso = 50000.0\nk1 = M2/(M1 * (M1 + M2))\nk2 = -1.0/(M1 + M2)\nomegaPreso = np.sqrt(Kreso * (M1 + M2)/(M1 * M2))\nzetaPreso = 0.5 * Creso*np.sqrt((M1 + M2)/(Kreso * M1 * M2))\nPmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2*zetaPreso*omegaPreso, omegaPreso**2])\nPmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2*zetaPreso*omegaPreso, omegaPreso**2])\nnumDelay, denDelay = matlab.pade(Ts*4, n=4)\nDs = ctrl.tf(numDelay, denDelay)\nDz = z**-4\nPns1 = Pmechs1 * Ds\nPns2 = Pmechs2 * Ds\nPnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz\nPnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz\nPnz1_frd = ctrl.sys2frd(Pnz1, freq)\nPnz2_frd = ctrl.sys2frd(Pnz2, freq)\nprint('Plant model was set.')\n\n# Design PID controller\nfreq1 = 10.0\nzeta1 = 1.0\nfreq2 = 10.0\nzeta2 = 1.0\nCz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)\nCz_frd = ctrl.sys2frd(Cz, freq)\nprint('PID controller was designed.')\n\n# Design phase lead filter\nzeta1 = 0.7\nfreq1 = 40\nzeta2 = 0.7\nfreq2 = 60\nPLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)\nPLz1_frd = ctrl.sys2frd(PLz1, freq)\nPLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)\nPLz2_frd = ctrl.sys2frd(PLz2, freq)\nprint('Phase lead filters were desinged.')\n\nprint('Frequency respose alanysis is running...')\n# Motor side\nGn1_frd = Pnz1_frd * Cz_frd\nSn1_frd = 1/(1 + Gn1_frd)\nTn1_frd = 1 - Sn1_frd\n\nGn1_pl_frd = Pnz1_frd * Cz_frd * PLz1_frd\nSn1_pl_frd = 1/(1 + Gn1_pl_frd)\nTn1_pl_frd = 1 - Sn1_pl_frd\n\n# Load side\nGn2_frd = Pnz2_frd * Cz_frd\nSn2_frd = 1/(1 + Gn2_frd)\nTn2_frd = 1 - Sn2_frd\n\nGn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd\nSn2_pl_frd = 1/(1 + Gn2_pl_frd)\nTn2_pl_frd = 1 - Sn2_pl_frd\n\nprint('Plotting figures...')\n# Plant\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of plant')\nplot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange, legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName+'/freq_P.png')\n\n# PID controller\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange, title='Frequency response of PID controller')\nplot.savefig(figurefolderName+'/freq_C.png')\n\n# Phase lead filters\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of filters')\nplot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange, [-10, 10], legend=['PL for motor side', 'PL for load 
side'])\nplot.savefig(figurefolderName+'/freq_PL.png')\n\n# Open loop function\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of open loop transfer function')\nplot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange, legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName+'/freq_G.png')\n\n# Sensitivity function\nfig = plot.makefig()\nax_mag = fig.add_subplot(111)\nax_phase = None\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange, [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.savefig(figurefolderName+'/freq_S.png')\n\n# Complementary sensitivity function\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of complementary sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange, [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.savefig(figurefolderName+'/freq_T.png')\n\n# Nyquist\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName+'/nyquist.png')\n\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5], yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName+'/nyquist_.png')\n\nprint('Finished.')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import tensorflow as tf
def mfcc(data):
    # TODO: compute Mel-frequency cepstral coefficients for `data`.
    pass


def cut_frames(data):
    # TODO: split `data` into fixed-length, possibly overlapping frames.
    pass
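# A minimal framing sketch with NumPy (frame_length and hop_length are
# illustrative parameters, not part of the original signature; assumes
# len(data) >= frame_length):
def cut_frames_sketch(data, frame_length=400, hop_length=160):
    data = np.asarray(data)
    num_frames = 1 + (len(data) - frame_length) // hop_length
    # Index matrix of shape (num_frames, frame_length): row i starts at i * hop_length.
    idx = np.arange(frame_length)[None, :] + hop_length * np.arange(num_frames)[:, None]
    return data[idx]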
|
normal
|
{
"blob_id": "8411acf6b27425357d212f5e220314daa019e023",
"index": 9669,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef cut_frames(data):\n pass\n",
"step-3": "<mask token>\n\n\ndef mfcc(data):\n pass\n\n\ndef cut_frames(data):\n pass\n",
"step-4": "import numpy as np\nimport tensorflow as tf\n\n\ndef mfcc(data):\n pass\n\n\ndef cut_frames(data):\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
import os
import traceback
from src.properties import *
from src.utils import *
from subprocess import call
from src.entity.cursor import Cursor
from curses import *
def main(screen, file_path):
    setUpEnv()
    text = readFileIfExist(file_path)
    while 1:
        try:
            text = startEditing(screen, text)
            printQuitOptions(screen)
            char = screen.getch()
            if char == KEY_ENTER_CODE:
                writeToFile(file_path, text)
                return 3, None
            elif char == KEY_F9:
                return 2, None
            else:
                pass
        except KeyboardInterrupt:  # quit properly when the user presses Ctrl + C
            return 1, None
        except:
            error_msg = traceback.format_exc()
            return -1, error_msg
def setUpEnv():
    use_default_colors()
    init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)
def startEditing(screen, text):
    cursor = Cursor(screen, BORDER_COLOR, text)
    while 1:
        char = screen.getch()
        if char == KEY_F1:
            break
        elif char == TERMINAL_RESIZE_CODE:
            cursor.resizeTextBox()
        elif char == KEY_RIGHT:
            cursor.moveRight()
        elif char == KEY_LEFT:
            cursor.moveLeft()
        elif char == KEY_UP:
            cursor.moveUp()
        elif char == KEY_DOWN:
            cursor.moveDown()
        elif 31 < char < 127:
            cursor.writeChar(char)
        elif char == KEY_DELETE_CODE:
            cursor.delete()
        elif char == 10 or char == 13 or char == KEY_ENTER:
            cursor.newLine()
        elif char == KEY_TAB_CODE:
            cursor.tab()
        elif char == KEY_ESCAPE_CODE:
            char = screen.getch()  # get the key pressed after CMD or ALT
            if char == KEY_LEFT or char == 98:  # 98 and 102 are the left/right codes produced with ALT on the macOS terminal
                cursor.moveToLeftMost()
            elif char == KEY_RIGHT or char == 102:  # CMD + RIGHT
                cursor.moveToRightMost()
            elif char == KEY_DELETE_CODE:  # CMD + DELETE
                cursor.deleteWholeLine()
            elif char == KEY_DOWN:  # CMD + DOWN
                cursor.moveToRightBottomMost()
            elif char == KEY_UP:  # CMD + UP
                cursor.moveToRightUpMost()
            else:  # a bare ESC press produces the same escape prefix as CMD or ALT, so push the key back
                ungetch(char)
        else:
            cursor._writeString(str(char))
    return cursor.getText()
def printQuitOptions(screen):
    height, width = screen.getmaxyx()
    screen.clear()
    y = int(height / 2.5)
    x = int(width / 2.5)
    screen.addstr(y, x, "Quit and Save (ENTER)")
    screen.addstr(y + 1, x, "Quit (F9)")
    screen.addstr(y + 2, x, "Go Back (Any Key)")
    screen.refresh()
def printExitMessage(exit_code, error_msg):
    if exit_code == -1:
        printToTerminal("Shit just happened, sorry.")
        if error_msg:
            printToTerminal(error_msg)
    elif exit_code == 1:
        printToTerminal("Quit, safe and sound.")
    elif exit_code == 2:
        printToTerminal("Quit without saving.")
    elif exit_code == 3:
        printToTerminal("Saved!")
    elif exit_code == 4:  # -version
        printToTerminal(VERSION)
    elif exit_code == 5:  # -help
        printToTerminal("======================== Welcome to Simple Editor X ========================", "GREEN")
        printToTerminal("")
        printToTerminal("Arguments:")
        printToTerminal("    -version")
        printToTerminal("    -help")
        printToTerminal("    {file_name}, to start editing an existing file or create a new one")
        printToTerminal("")
        printToTerminal("While using:")
        printToTerminal("    Press F1, then ENTER to save")
        printToTerminal("")
if __name__ == "__main__":
    if len(sys.argv) != 2:
        printToTerminal("This application takes exactly 1 argument")
        printToTerminal("type: 'sex -help' for more details")
        exit(69)
    error_msg = ""
    exit_code = -1
    arg = sys.argv[1].lower()
    file_path = sys.argv[1]
    if arg == "-v" or arg == "-version":
        exit_code = 4
    elif arg == "-h" or arg == "-help":
        exit_code = 5
    else:
        exit_code, error_msg = wrapper(main, file_path)
    printExitMessage(exit_code, error_msg)
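# Usage sketch (file name assumed, not part of the original source):
#   python editor.py notes.txt
# curses.wrapper() handles terminal setup/teardown around main(), so the
# screen is restored even when main() raises.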
|
normal
|
{
"blob_id": "7a6d45ef87d93af9a15bd352b893164d3a36c399",
"index": 7545,
"step-1": "<mask token>\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal('Shit just happen, sorry.')\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal('Quit, safe and sound.')\n elif exit_code == 2:\n printToTerminal('Quit without save.')\n elif exit_code == 3:\n printToTerminal('saved !')\n elif exit_code == 4:\n printToTerminal(VERSION)\n elif exit_code == 5:\n printToTerminal(\n '======================== Welcome to Simple Editor X ========================'\n , 'GREEN')\n printToTerminal('')\n printToTerminal('Arguments:')\n printToTerminal(' -version')\n printToTerminal(' -help')\n printToTerminal(\n ' {file_name}, to start editing an existing or create a new file'\n )\n printToTerminal('')\n printToTerminal('While using:')\n printToTerminal(' Press F1, then ENTER to save')\n printToTerminal('')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal('Shit just happen, sorry.')\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal('Quit, safe and sound.')\n elif exit_code == 2:\n printToTerminal('Quit without save.')\n elif exit_code == 3:\n printToTerminal('saved !')\n elif exit_code == 4:\n printToTerminal(VERSION)\n elif exit_code == 5:\n printToTerminal(\n '======================== Welcome to Simple Editor X ========================'\n , 'GREEN')\n printToTerminal('')\n printToTerminal('Arguments:')\n printToTerminal(' -version')\n printToTerminal(' -help')\n printToTerminal(\n ' {file_name}, to start editing an existing or create a new file'\n )\n printToTerminal('')\n printToTerminal('While using:')\n printToTerminal(' Press F1, then ENTER to save')\n printToTerminal('')\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n printToTerminal('This application take exactly 1 argument')\n printToTerminal(\"type: 'sex -help' for more details\")\n exit(69)\n error_msg = ''\n exit_code = -1\n arg = sys.argv[1].lower()\n file_path = sys.argv[1]\n if arg == '-v' or arg == '-version':\n exit_code = 4\n elif arg == '-h' or arg == '-help':\n exit_code = 5\n else:\n exit_code, error_msg = wrapper(main, file_path)\n printExitMessage(exit_code, error_msg)\n",
"step-4": "import sys\nimport os\nimport traceback\nfrom src.properties import *\nfrom src.utils import *\nfrom subprocess import call\nfrom src.entity.cursor import Cursor\nfrom curses import *\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal('Shit just happen, sorry.')\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal('Quit, safe and sound.')\n elif exit_code == 2:\n printToTerminal('Quit without save.')\n elif exit_code == 3:\n printToTerminal('saved !')\n elif exit_code == 4:\n printToTerminal(VERSION)\n elif exit_code == 5:\n printToTerminal(\n '======================== Welcome to Simple Editor X ========================'\n , 'GREEN')\n printToTerminal('')\n printToTerminal('Arguments:')\n printToTerminal(' -version')\n printToTerminal(' -help')\n printToTerminal(\n ' {file_name}, to start editing an existing or create a new file'\n )\n printToTerminal('')\n printToTerminal('While using:')\n printToTerminal(' Press F1, then ENTER to save')\n printToTerminal('')\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n printToTerminal('This application take exactly 1 argument')\n printToTerminal(\"type: 'sex -help' for more details\")\n exit(69)\n error_msg = ''\n exit_code = -1\n arg = sys.argv[1].lower()\n file_path = sys.argv[1]\n if arg == '-v' or arg == '-version':\n exit_code = 4\n elif arg == '-h' or arg == '-help':\n exit_code = 5\n else:\n exit_code, error_msg = wrapper(main, file_path)\n printExitMessage(exit_code, error_msg)\n",
"step-5": "import sys\nimport os\nimport traceback\nfrom src.properties import *\nfrom src.utils import *\nfrom subprocess import call\nfrom src.entity.cursor import Cursor\nfrom curses import *\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt: # quit properly, when user press Ctrl + C\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch() # get the key pressed after cmd or alt\n if char == KEY_LEFT or char == 98: # 98 and 102 are left and right keys produced while pressing alt, on mac terminal\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102: # CMD + RIGHT\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE: # CMD + DELETE\n cursor.deleteWholeLine()\n elif char == KEY_DOWN: # CMD + DOWN\n cursor.moveToRightBottomMost()\n elif char == KEY_UP: # CMD + UP\n cursor.moveToRightUpMost()\n else: # in case char user press ESC, it produce the same effec as CMD or ALT, but that's not what we want\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, \"Quit and Save (ENTER)\")\n screen.addstr(y + 1, x, \"Quit (F9)\")\n screen.addstr(y + 2, x, \"Go Back (Any Key)\")\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal(\"Shit just happen, sorry.\")\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal(\"Quit, safe and sound.\")\n elif exit_code == 2:\n printToTerminal(\"Quit without save.\")\n elif exit_code == 3:\n printToTerminal(\"saved !\")\n elif exit_code == 4: # -version\n printToTerminal(VERSION)\n elif exit_code == 5: # -help\n printToTerminal(\"======================== Welcome to Simple Editor X ========================\", \"GREEN\")\n printToTerminal(\"\")\n printToTerminal(\"Arguments:\")\n printToTerminal(\" -version\")\n printToTerminal(\" -help\")\n printToTerminal(\" {file_name}, to start editing an existing or create a new file\")\n printToTerminal(\"\")\n printToTerminal(\"While using:\")\n printToTerminal(\" Press F1, then ENTER to save\")\n printToTerminal(\"\")\n\n\nif __name__== \"__main__\":\n if len(sys.argv) != 2:\n printToTerminal(\"This application take exactly 1 argument\")\n printToTerminal(\"type: 'sex -help' for more details\")\n exit(69)\n error_msg = \"\"\n exit_code = -1\n arg = sys.argv[1].lower()\n file_path = 
sys.argv[1]\n if arg == \"-v\" or arg == \"-version\":\n exit_code = 4\n elif arg == \"-h\" or arg == \"-help\":\n exit_code = 5\n else:\n exit_code, error_msg = wrapper(main, file_path)\n\n printExitMessage(exit_code, error_msg)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import os
import struct
import sys
import wave
sys.path.insert(0, os.path.dirname(__file__))
C5 = 523
B4b = 466
G4 = 392
E5 = 659
F5 = 698
VOLUME = 12000
notes = [
    [VOLUME, C5],
    [VOLUME, C5],
    [VOLUME, B4b],
    [VOLUME, C5],
    [0, C5],
    [VOLUME, G4],
    [0, C5],
    [VOLUME, G4],
    [VOLUME, C5],
    [VOLUME, F5],
    [VOLUME, E5],
    [VOLUME, C5],
]
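# Entries with volume 0 act as quarter-note rests; gain() presumably scales
# the square wave to silence there, so the frequency value is irrelevant.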
from fade import fade
from gain import gain
from repeat import repeat
from square import square_wave
all_samples = []
quarter_second = 44100 // 4
for volume, frequency in notes:
    samples = square_wave(int(44100 / frequency // 2))
    samples = gain(samples, volume)
    samples = repeat(samples, quarter_second)
    samples = fade(samples, quarter_second)
    all_samples.extend(samples)
all_samples = [int(sample) for sample in all_samples]
w = wave.open('music.wav', 'wb')
w.setnchannels(1)
w.setsampwidth(2)
w.setframerate(44100)
w.writeframes(struct.pack('<' + 'h' * len(all_samples), *all_samples))
w.close()  # finalize the WAV header
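# A minimal sketch of what the imported square_wave might look like, inferred
# only from how it is called above (an assumption, not the original square.py):
# it takes the half-period in samples and returns one full unit-amplitude period.
#
#   def square_wave(half_period):
#       return [1.0] * half_period + [-1.0] * half_period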
|
normal
|
{
"blob_id": "4fb563985bd99599e88676e167ee84a95b018aba",
"index": 5414,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, os.path.dirname(__file__))\n<mask token>\nfor volume, frequency in notes:\n samples = square_wave(int(44100 / frequency // 2))\n samples = gain(samples, volume)\n samples = repeat(samples, quarter_second)\n samples = fade(samples, quarter_second)\n all_samples.extend(samples)\n<mask token>\nw.setnchannels(1)\nw.setsampwidth(2)\nw.setframerate(44100)\nw.writeframes(struct.pack('<' + 'h' * len(all_samples), *all_samples))\n",
"step-3": "<mask token>\nsys.path.insert(0, os.path.dirname(__file__))\nC5 = 523\nB4b = 466\nG4 = 392\nE5 = 659\nF5 = 698\nVOLUME = 12000\nnotes = [[VOLUME, C5], [VOLUME, C5], [VOLUME, B4b], [VOLUME, C5], [0, C5],\n [VOLUME, G4], [0, C5], [VOLUME, G4], [VOLUME, C5], [VOLUME, F5], [\n VOLUME, E5], [VOLUME, C5]]\n<mask token>\nall_samples = []\nquarter_second = 44100 // 4\nfor volume, frequency in notes:\n samples = square_wave(int(44100 / frequency // 2))\n samples = gain(samples, volume)\n samples = repeat(samples, quarter_second)\n samples = fade(samples, quarter_second)\n all_samples.extend(samples)\nall_samples = [int(sample) for sample in all_samples]\nw = wave.open('music.wav', 'wb')\nw.setnchannels(1)\nw.setsampwidth(2)\nw.setframerate(44100)\nw.writeframes(struct.pack('<' + 'h' * len(all_samples), *all_samples))\n",
"step-4": "import os\nimport struct\nimport sys\nimport wave\nsys.path.insert(0, os.path.dirname(__file__))\nC5 = 523\nB4b = 466\nG4 = 392\nE5 = 659\nF5 = 698\nVOLUME = 12000\nnotes = [[VOLUME, C5], [VOLUME, C5], [VOLUME, B4b], [VOLUME, C5], [0, C5],\n [VOLUME, G4], [0, C5], [VOLUME, G4], [VOLUME, C5], [VOLUME, F5], [\n VOLUME, E5], [VOLUME, C5]]\nfrom fade import fade\nfrom gain import gain\nfrom repeat import repeat\nfrom square import square_wave\nall_samples = []\nquarter_second = 44100 // 4\nfor volume, frequency in notes:\n samples = square_wave(int(44100 / frequency // 2))\n samples = gain(samples, volume)\n samples = repeat(samples, quarter_second)\n samples = fade(samples, quarter_second)\n all_samples.extend(samples)\nall_samples = [int(sample) for sample in all_samples]\nw = wave.open('music.wav', 'wb')\nw.setnchannels(1)\nw.setsampwidth(2)\nw.setframerate(44100)\nw.writeframes(struct.pack('<' + 'h' * len(all_samples), *all_samples))\n",
"step-5": "import os\nimport struct\nimport sys\nimport wave\n\nsys.path.insert(0, os.path.dirname(__file__))\n\nC5 = 523\nB4b = 466\nG4 = 392\nE5 = 659\nF5 = 698\nVOLUME = 12000\n\nnotes = [\n [VOLUME, C5],\n [VOLUME, C5],\n [VOLUME, B4b],\n [VOLUME, C5],\n [0, C5],\n [VOLUME, G4],\n [0, C5],\n [VOLUME, G4],\n [VOLUME, C5],\n [VOLUME, F5],\n [VOLUME, E5],\n [VOLUME, C5],\n]\n\nfrom fade import fade\nfrom gain import gain\nfrom repeat import repeat\nfrom square import square_wave\n\nall_samples = []\nquarter_second = 44100 // 4\nfor volume, frequency in notes:\n samples = square_wave(int(44100 / frequency // 2))\n samples = gain(samples, volume)\n samples = repeat(samples, quarter_second)\n samples = fade(samples, quarter_second)\n all_samples.extend(samples)\n\nall_samples = [int(sample) for sample in all_samples]\n\nw = wave.open('music.wav', 'wb')\nw.setnchannels(1)\nw.setsampwidth(2)\nw.setframerate(44100)\nw.writeframes(struct.pack('<' + 'h' * len(all_samples), *all_samples))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import random
import numpy as np
class Board:
def __init__(self, nrows, ncols, random_seed=42):
self.nrows = nrows
self.ncols = ncols
self.random = random.Random()
self.random.seed(random_seed)
self.board = np.zeros((nrows, ncols))
self.score = 0
self.__add_new_numbers()
# Initialize with 1/8 of the board filled, with 90% chance of filling
# with 2, and 10% chance of filling with 4
def __add_new_numbers(self):
num_zeros = (self.board == 0).sum()
for i in range(min((self.nrows*self.ncols)//8, num_zeros)):
random_row = self.random.randint(0,self.nrows-1)
random_col = self.random.randint(0,self.ncols-1)
while self.board[random_row, random_col] != 0:
random_row = self.random.randint(0,self.nrows-1)
random_col = self.random.randint(0,self.ncols-1)
if self.random.random() < 0.9:
self.board[random_row, random_col] = 2
else:
self.board[random_row, random_col] = 4
def __swap_on_board(self, pos1, pos2):
val = self.board[pos1]
self.board[pos1] = self.board[pos2]
self.board[pos2] = val
def __left_swipe_on_row(self, row_index):
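        # Three passes: compact non-zero tiles to the left, merge equal
        # neighbours (adding the merged value to the score), then compact
        # again to close the gaps opened by merging.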
left_index = 0
collapsed = False
for i in range(self.ncols):
if self.board[row_index, i] != 0:
if left_index != i:
collapsed = True
self.__swap_on_board((row_index, left_index), (row_index, i))
left_index += 1
for i in range(1, self.ncols):
if self.board[row_index, i] == self.board[row_index, i-1]:
self.board[row_index, i-1] *= 2
self.board[row_index, i] = 0
collapsed = True
self.score += self.board[row_index, i-1]
left_index = 0
for i in range(self.ncols):
if self.board[row_index, i] != 0:
self.__swap_on_board((row_index, left_index), (row_index, i))
left_index += 1
return collapsed
def __up_swipe_on_col(self, col_index):
top_index = 0
collapsed = False
for i in range(self.nrows):
if self.board[i, col_index] != 0:
if top_index != i:
collapsed = True
self.__swap_on_board((top_index, col_index), (i, col_index))
top_index += 1
for i in range(1, self.nrows):
if self.board[i, col_index] == self.board[i-1, col_index]:
self.board[i-1, col_index] *= 2
self.board[i, col_index] = 0
collapsed = True
self.score += self.board[i-1, col_index]
top_index = 0
for i in range(self.nrows):
if self.board[i, col_index] != 0:
self.__swap_on_board((top_index, col_index), (i, col_index))
top_index += 1
return collapsed
def __left_swipe(self):
collapsed = False
for i in range(self.nrows):
if self.__left_swipe_on_row(i):
collapsed = True
return collapsed
def __right_swipe(self):
collapsed = False
for i in range(self.nrows):
for j in range(self.ncols//2):
self.__swap_on_board((i, j), (i, -j-1))
if self.__left_swipe_on_row(i):
collapsed = True
for j in range(self.ncols//2):
self.__swap_on_board((i, j), (i, -j-1))
return collapsed
def __up_swipe(self):
collapsed = False
for i in range(self.ncols):
if self.__up_swipe_on_col(i):
collapsed = True
return collapsed
def __down_swipe(self):
collapsed = False
for i in range(self.ncols):
for j in range(self.nrows//2):
self.__swap_on_board((j, i), (-j-1, i))
if self.__up_swipe_on_col(i):
collapsed = True
for j in range(self.nrows//2):
self.__swap_on_board((j, i), (-j-1, i))
return collapsed
def __display(self):
print(self.board)
print(f"Current score: {self.score}")
def reset(self):
self.score = 0
for i in range(self.nrows):
for j in range(self.ncols):
self.board[i, j] = 0
def play(self):
moves = [self.__up_swipe, self.__left_swipe, self.__down_swipe, self.__right_swipe]
movement_mapping = {char: moves[pos] for pos, char in enumerate('WASD')}
board_copy = Board(self.nrows, self.ncols)
lost = False
while self.board.max() < 2048:
self.__display()
raw_input_value = input("Play with WASD: ").upper()
while len(raw_input_value) == 0:
raw_input_value = input("Play with WASD: ").upper()
direction = raw_input_value[0]
while direction not in movement_mapping:
raw_input_value = input("Play with WASD: ").upper()
while len(raw_input_value) == 0:
raw_input_value = input("Play with WASD: ").upper()
direction = raw_input_value[0]
did_move = movement_mapping[direction]()
if did_move:
self.__add_new_numbers()
# TODO: Make this more efficient
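                # The game is stuck only if neither an up- nor a left-swipe
                # changes a copy of the board; down/right are mirror images
                # of these, so they need not be tested separately.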
board_copy.board = self.board.copy()
can_swipe_up = board_copy.__up_swipe()
can_swipe_left = board_copy.__left_swipe()
if not (can_swipe_left or can_swipe_up):
lost = True
break
else:
print(f"'{direction}'" + ' is an INVALID MOVE')
if not lost:
print('GAME WON')
else:
print('GAME LOST')
self.__display()
output = self.score
self.reset()
return output
if __name__ == "__main__":
b = Board(4, 4)
b.play()
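# Note: Board seeds its RNG with random_seed=42 by default, so every run deals
# the same sequence of tiles; pass random_seed=None to seed from OS entropy.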
|
normal
|
{
"blob_id": "cab45a823e319bd504b3db68cf70bff315f44fc6",
"index": 7462,
"step-1": "<mask token>\n\n\nclass Board:\n\n def __init__(self, nrows, ncols, random_seed=42):\n self.nrows = nrows\n self.ncols = ncols\n self.random = random.Random()\n self.random.seed(random_seed)\n self.board = np.zeros((nrows, ncols))\n self.score = 0\n self.__add_new_numbers()\n <mask token>\n\n def __swap_on_board(self, pos1, pos2):\n val = self.board[pos1]\n self.board[pos1] = self.board[pos2]\n self.board[pos2] = val\n <mask token>\n <mask token>\n\n def __left_swipe(self):\n collapsed = False\n for i in range(self.nrows):\n if self.__left_swipe_on_row(i):\n collapsed = True\n return collapsed\n\n def __right_swipe(self):\n collapsed = False\n for i in range(self.nrows):\n for j in range(self.ncols // 2):\n self.__swap_on_board((i, j), (i, -j - 1))\n if self.__left_swipe_on_row(i):\n collapsed = True\n for j in range(self.ncols // 2):\n self.__swap_on_board((i, j), (i, -j - 1))\n return collapsed\n\n def __up_swipe(self):\n collapsed = False\n for i in range(self.ncols):\n if self.__up_swipe_on_col(i):\n collapsed = True\n return collapsed\n\n def __down_swipe(self):\n collapsed = False\n for i in range(self.ncols):\n for j in range(self.nrows // 2):\n self.__swap_on_board((j, i), (-j - 1, i))\n if self.__up_swipe_on_col(i):\n collapsed = True\n for j in range(self.nrows // 2):\n self.__swap_on_board((j, i), (-j - 1, i))\n return collapsed\n\n def __display(self):\n print(self.board)\n print(f'Current score: {self.score}')\n\n def reset(self):\n self.score = 0\n for i in range(self.nrows):\n for j in range(self.ncols):\n self.board[i, j] = 0\n\n def play(self):\n moves = [self.__up_swipe, self.__left_swipe, self.__down_swipe,\n self.__right_swipe]\n movement_mapping = {char: moves[pos] for pos, char in enumerate('WASD')\n }\n board_copy = Board(self.nrows, self.ncols)\n lost = False\n while self.board.max() < 2048:\n self.__display()\n raw_input_value = input('Play with WASD: ').upper()\n while len(raw_input_value) == 0:\n raw_input_value = input('Play with WASD: ').upper()\n direction = raw_input_value[0]\n while direction not in movement_mapping:\n raw_input_value = input('Play with WASD: ').upper()\n while len(raw_input_value) == 0:\n raw_input_value = input('Play with WASD: ').upper()\n direction = raw_input_value[0]\n did_move = movement_mapping[direction]()\n if did_move:\n self.__add_new_numbers()\n board_copy.board = self.board.copy()\n can_swipe_up = board_copy.__up_swipe()\n can_swipe_left = board_copy.__left_swipe()\n if not (can_swipe_left or can_swipe_up):\n lost = True\n break\n else:\n print(f\"'{direction}'\" + ' is an INVALID MOVE')\n if not lost:\n print('GAME WON')\n else:\n print('GAME LOST')\n self.__display()\n output = self.score\n self.reset()\n return output\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Board:\n\n def __init__(self, nrows, ncols, random_seed=42):\n self.nrows = nrows\n self.ncols = ncols\n self.random = random.Random()\n self.random.seed(random_seed)\n self.board = np.zeros((nrows, ncols))\n self.score = 0\n self.__add_new_numbers()\n\n def __add_new_numbers(self):\n num_zeros = (self.board == 0).sum()\n for i in range(min(self.nrows * self.ncols // 8, num_zeros)):\n random_row = self.random.randint(0, self.nrows - 1)\n random_col = self.random.randint(0, self.ncols - 1)\n while self.board[random_row, random_col] != 0:\n random_row = self.random.randint(0, self.nrows - 1)\n random_col = self.random.randint(0, self.ncols - 1)\n if self.random.random() < 0.9:\n self.board[random_row, random_col] = 2\n else:\n self.board[random_row, random_col] = 4\n\n def __swap_on_board(self, pos1, pos2):\n val = self.board[pos1]\n self.board[pos1] = self.board[pos2]\n self.board[pos2] = val\n\n def __left_swipe_on_row(self, row_index):\n left_index = 0\n collapsed = False\n for i in range(self.ncols):\n if self.board[row_index, i] != 0:\n if left_index != i:\n collapsed = True\n self.__swap_on_board((row_index, left_index), (row_index, i))\n left_index += 1\n for i in range(1, self.ncols):\n if self.board[row_index, i] == self.board[row_index, i - 1]:\n self.board[row_index, i - 1] *= 2\n self.board[row_index, i] = 0\n collapsed = True\n self.score += self.board[row_index, i - 1]\n left_index = 0\n for i in range(self.ncols):\n if self.board[row_index, i] != 0:\n self.__swap_on_board((row_index, left_index), (row_index, i))\n left_index += 1\n return collapsed\n <mask token>\n\n def __left_swipe(self):\n collapsed = False\n for i in range(self.nrows):\n if self.__left_swipe_on_row(i):\n collapsed = True\n return collapsed\n\n def __right_swipe(self):\n collapsed = False\n for i in range(self.nrows):\n for j in range(self.ncols // 2):\n self.__swap_on_board((i, j), (i, -j - 1))\n if self.__left_swipe_on_row(i):\n collapsed = True\n for j in range(self.ncols // 2):\n self.__swap_on_board((i, j), (i, -j - 1))\n return collapsed\n\n def __up_swipe(self):\n collapsed = False\n for i in range(self.ncols):\n if self.__up_swipe_on_col(i):\n collapsed = True\n return collapsed\n\n def __down_swipe(self):\n collapsed = False\n for i in range(self.ncols):\n for j in range(self.nrows // 2):\n self.__swap_on_board((j, i), (-j - 1, i))\n if self.__up_swipe_on_col(i):\n collapsed = True\n for j in range(self.nrows // 2):\n self.__swap_on_board((j, i), (-j - 1, i))\n return collapsed\n\n def __display(self):\n print(self.board)\n print(f'Current score: {self.score}')\n\n def reset(self):\n self.score = 0\n for i in range(self.nrows):\n for j in range(self.ncols):\n self.board[i, j] = 0\n\n def play(self):\n moves = [self.__up_swipe, self.__left_swipe, self.__down_swipe,\n self.__right_swipe]\n movement_mapping = {char: moves[pos] for pos, char in enumerate('WASD')\n }\n board_copy = Board(self.nrows, self.ncols)\n lost = False\n while self.board.max() < 2048:\n self.__display()\n raw_input_value = input('Play with WASD: ').upper()\n while len(raw_input_value) == 0:\n raw_input_value = input('Play with WASD: ').upper()\n direction = raw_input_value[0]\n while direction not in movement_mapping:\n raw_input_value = input('Play with WASD: ').upper()\n while len(raw_input_value) == 0:\n raw_input_value = input('Play with WASD: ').upper()\n direction = raw_input_value[0]\n did_move = movement_mapping[direction]()\n if did_move:\n self.__add_new_numbers()\n board_copy.board = 
self.board.copy()\n can_swipe_up = board_copy.__up_swipe()\n can_swipe_left = board_copy.__left_swipe()\n if not (can_swipe_left or can_swipe_up):\n lost = True\n break\n else:\n print(f\"'{direction}'\" + ' is an INVALID MOVE')\n if not lost:\n print('GAME WON')\n else:\n print('GAME LOST')\n self.__display()\n output = self.score\n self.reset()\n return output\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Board:\n\n def __init__(self, nrows, ncols, random_seed=42):\n self.nrows = nrows\n self.ncols = ncols\n self.random = random.Random()\n self.random.seed(random_seed)\n self.board = np.zeros((nrows, ncols))\n self.score = 0\n self.__add_new_numbers()\n\n def __add_new_numbers(self):\n num_zeros = (self.board == 0).sum()\n for i in range(min(self.nrows * self.ncols // 8, num_zeros)):\n random_row = self.random.randint(0, self.nrows - 1)\n random_col = self.random.randint(0, self.ncols - 1)\n while self.board[random_row, random_col] != 0:\n random_row = self.random.randint(0, self.nrows - 1)\n random_col = self.random.randint(0, self.ncols - 1)\n if self.random.random() < 0.9:\n self.board[random_row, random_col] = 2\n else:\n self.board[random_row, random_col] = 4\n\n def __swap_on_board(self, pos1, pos2):\n val = self.board[pos1]\n self.board[pos1] = self.board[pos2]\n self.board[pos2] = val\n\n def __left_swipe_on_row(self, row_index):\n left_index = 0\n collapsed = False\n for i in range(self.ncols):\n if self.board[row_index, i] != 0:\n if left_index != i:\n collapsed = True\n self.__swap_on_board((row_index, left_index), (row_index, i))\n left_index += 1\n for i in range(1, self.ncols):\n if self.board[row_index, i] == self.board[row_index, i - 1]:\n self.board[row_index, i - 1] *= 2\n self.board[row_index, i] = 0\n collapsed = True\n self.score += self.board[row_index, i - 1]\n left_index = 0\n for i in range(self.ncols):\n if self.board[row_index, i] != 0:\n self.__swap_on_board((row_index, left_index), (row_index, i))\n left_index += 1\n return collapsed\n\n def __up_swipe_on_col(self, col_index):\n top_index = 0\n collapsed = False\n for i in range(self.nrows):\n if self.board[i, col_index] != 0:\n if top_index != i:\n collapsed = True\n self.__swap_on_board((top_index, col_index), (i, col_index))\n top_index += 1\n for i in range(1, self.nrows):\n if self.board[i, col_index] == self.board[i - 1, col_index]:\n self.board[i - 1, col_index] *= 2\n self.board[i, col_index] = 0\n collapsed = True\n self.score += self.board[i - 1, col_index]\n top_index = 0\n for i in range(self.nrows):\n if self.board[i, col_index] != 0:\n self.__swap_on_board((top_index, col_index), (i, col_index))\n top_index += 1\n return collapsed\n\n def __left_swipe(self):\n collapsed = False\n for i in range(self.nrows):\n if self.__left_swipe_on_row(i):\n collapsed = True\n return collapsed\n\n def __right_swipe(self):\n collapsed = False\n for i in range(self.nrows):\n for j in range(self.ncols // 2):\n self.__swap_on_board((i, j), (i, -j - 1))\n if self.__left_swipe_on_row(i):\n collapsed = True\n for j in range(self.ncols // 2):\n self.__swap_on_board((i, j), (i, -j - 1))\n return collapsed\n\n def __up_swipe(self):\n collapsed = False\n for i in range(self.ncols):\n if self.__up_swipe_on_col(i):\n collapsed = True\n return collapsed\n\n def __down_swipe(self):\n collapsed = False\n for i in range(self.ncols):\n for j in range(self.nrows // 2):\n self.__swap_on_board((j, i), (-j - 1, i))\n if self.__up_swipe_on_col(i):\n collapsed = True\n for j in range(self.nrows // 2):\n self.__swap_on_board((j, i), (-j - 1, i))\n return collapsed\n\n def __display(self):\n print(self.board)\n print(f'Current score: {self.score}')\n\n def reset(self):\n self.score = 0\n for i in range(self.nrows):\n for j in range(self.ncols):\n self.board[i, j] = 0\n\n def play(self):\n moves = [self.__up_swipe, self.__left_swipe, self.__down_swipe,\n self.__right_swipe]\n movement_mapping = {char: 
moves[pos] for pos, char in enumerate('WASD')\n }\n board_copy = Board(self.nrows, self.ncols)\n lost = False\n while self.board.max() < 2048:\n self.__display()\n raw_input_value = input('Play with WASD: ').upper()\n while len(raw_input_value) == 0:\n raw_input_value = input('Play with WASD: ').upper()\n direction = raw_input_value[0]\n while direction not in movement_mapping:\n raw_input_value = input('Play with WASD: ').upper()\n while len(raw_input_value) == 0:\n raw_input_value = input('Play with WASD: ').upper()\n direction = raw_input_value[0]\n did_move = movement_mapping[direction]()\n if did_move:\n self.__add_new_numbers()\n board_copy.board = self.board.copy()\n can_swipe_up = board_copy.__up_swipe()\n can_swipe_left = board_copy.__left_swipe()\n if not (can_swipe_left or can_swipe_up):\n lost = True\n break\n else:\n print(f\"'{direction}'\" + ' is an INVALID MOVE')\n if not lost:\n print('GAME WON')\n else:\n print('GAME LOST')\n self.__display()\n output = self.score\n self.reset()\n return output\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Board:\n\n def __init__(self, nrows, ncols, random_seed=42):\n self.nrows = nrows\n self.ncols = ncols\n self.random = random.Random()\n self.random.seed(random_seed)\n self.board = np.zeros((nrows, ncols))\n self.score = 0\n self.__add_new_numbers()\n\n def __add_new_numbers(self):\n num_zeros = (self.board == 0).sum()\n for i in range(min(self.nrows * self.ncols // 8, num_zeros)):\n random_row = self.random.randint(0, self.nrows - 1)\n random_col = self.random.randint(0, self.ncols - 1)\n while self.board[random_row, random_col] != 0:\n random_row = self.random.randint(0, self.nrows - 1)\n random_col = self.random.randint(0, self.ncols - 1)\n if self.random.random() < 0.9:\n self.board[random_row, random_col] = 2\n else:\n self.board[random_row, random_col] = 4\n\n def __swap_on_board(self, pos1, pos2):\n val = self.board[pos1]\n self.board[pos1] = self.board[pos2]\n self.board[pos2] = val\n\n def __left_swipe_on_row(self, row_index):\n left_index = 0\n collapsed = False\n for i in range(self.ncols):\n if self.board[row_index, i] != 0:\n if left_index != i:\n collapsed = True\n self.__swap_on_board((row_index, left_index), (row_index, i))\n left_index += 1\n for i in range(1, self.ncols):\n if self.board[row_index, i] == self.board[row_index, i - 1]:\n self.board[row_index, i - 1] *= 2\n self.board[row_index, i] = 0\n collapsed = True\n self.score += self.board[row_index, i - 1]\n left_index = 0\n for i in range(self.ncols):\n if self.board[row_index, i] != 0:\n self.__swap_on_board((row_index, left_index), (row_index, i))\n left_index += 1\n return collapsed\n\n def __up_swipe_on_col(self, col_index):\n top_index = 0\n collapsed = False\n for i in range(self.nrows):\n if self.board[i, col_index] != 0:\n if top_index != i:\n collapsed = True\n self.__swap_on_board((top_index, col_index), (i, col_index))\n top_index += 1\n for i in range(1, self.nrows):\n if self.board[i, col_index] == self.board[i - 1, col_index]:\n self.board[i - 1, col_index] *= 2\n self.board[i, col_index] = 0\n collapsed = True\n self.score += self.board[i - 1, col_index]\n top_index = 0\n for i in range(self.nrows):\n if self.board[i, col_index] != 0:\n self.__swap_on_board((top_index, col_index), (i, col_index))\n top_index += 1\n return collapsed\n\n def __left_swipe(self):\n collapsed = False\n for i in range(self.nrows):\n if self.__left_swipe_on_row(i):\n collapsed = True\n return collapsed\n\n def __right_swipe(self):\n collapsed = False\n for i in range(self.nrows):\n for j in range(self.ncols // 2):\n self.__swap_on_board((i, j), (i, -j - 1))\n if self.__left_swipe_on_row(i):\n collapsed = True\n for j in range(self.ncols // 2):\n self.__swap_on_board((i, j), (i, -j - 1))\n return collapsed\n\n def __up_swipe(self):\n collapsed = False\n for i in range(self.ncols):\n if self.__up_swipe_on_col(i):\n collapsed = True\n return collapsed\n\n def __down_swipe(self):\n collapsed = False\n for i in range(self.ncols):\n for j in range(self.nrows // 2):\n self.__swap_on_board((j, i), (-j - 1, i))\n if self.__up_swipe_on_col(i):\n collapsed = True\n for j in range(self.nrows // 2):\n self.__swap_on_board((j, i), (-j - 1, i))\n return collapsed\n\n def __display(self):\n print(self.board)\n print(f'Current score: {self.score}')\n\n def reset(self):\n self.score = 0\n for i in range(self.nrows):\n for j in range(self.ncols):\n self.board[i, j] = 0\n\n def play(self):\n moves = [self.__up_swipe, self.__left_swipe, self.__down_swipe,\n self.__right_swipe]\n movement_mapping = {char: 
moves[pos] for pos, char in enumerate('WASD')\n }\n board_copy = Board(self.nrows, self.ncols)\n lost = False\n while self.board.max() < 2048:\n self.__display()\n raw_input_value = input('Play with WASD: ').upper()\n while len(raw_input_value) == 0:\n raw_input_value = input('Play with WASD: ').upper()\n direction = raw_input_value[0]\n while direction not in movement_mapping:\n raw_input_value = input('Play with WASD: ').upper()\n while len(raw_input_value) == 0:\n raw_input_value = input('Play with WASD: ').upper()\n direction = raw_input_value[0]\n did_move = movement_mapping[direction]()\n if did_move:\n self.__add_new_numbers()\n board_copy.board = self.board.copy()\n can_swipe_up = board_copy.__up_swipe()\n can_swipe_left = board_copy.__left_swipe()\n if not (can_swipe_left or can_swipe_up):\n lost = True\n break\n else:\n print(f\"'{direction}'\" + ' is an INVALID MOVE')\n if not lost:\n print('GAME WON')\n else:\n print('GAME LOST')\n self.__display()\n output = self.score\n self.reset()\n return output\n\n\nif __name__ == '__main__':\n b = Board(4, 4)\n b.play()\n",
"step-5": "import random\nimport numpy as np\n\nclass Board:\n\tdef __init__(self, nrows, ncols, random_seed=42):\n\t\tself.nrows = nrows\n\t\tself.ncols = ncols\n\t\tself.random = random.Random()\n\t\tself.random.seed(random_seed)\n\t\tself.board = np.zeros((nrows, ncols))\n\t\tself.score = 0\n\n\t\tself.__add_new_numbers()\n\n\t# Initialize with 1/8 of the board filled, with 90% chance of filling\n\t# with 2, and 10% chance of filling with 4\n\tdef __add_new_numbers(self):\n\n\t\tnum_zeros = (self.board == 0).sum()\n\n\t\tfor i in range(min((self.nrows*self.ncols)//8, num_zeros)):\n\t\t\trandom_row = self.random.randint(0,self.nrows-1)\n\t\t\trandom_col = self.random.randint(0,self.ncols-1)\n\t\t\twhile self.board[random_row, random_col] != 0:\n\t\t\t\trandom_row = self.random.randint(0,self.nrows-1)\n\t\t\t\trandom_col = self.random.randint(0,self.ncols-1)\n\t\t\tif self.random.random() < 0.9:\n\t\t\t\tself.board[random_row, random_col] = 2\n\t\t\telse:\n\t\t\t\tself.board[random_row, random_col] = 4\n\n\tdef __swap_on_board(self, pos1, pos2):\n\t\tval = self.board[pos1]\n\t\tself.board[pos1] = self.board[pos2]\n\t\tself.board[pos2] = val\n\n\tdef __left_swipe_on_row(self, row_index):\n\t\tleft_index = 0\n\t\tcollapsed = False\n\t\tfor i in range(self.ncols):\n\t\t\tif self.board[row_index, i] != 0:\n\t\t\t\tif left_index != i:\n\t\t\t\t\tcollapsed = True\n\t\t\t\tself.__swap_on_board((row_index, left_index), (row_index, i))\n\t\t\t\tleft_index += 1\n\n\t\tfor i in range(1, self.ncols):\n\t\t\tif self.board[row_index, i] == self.board[row_index, i-1]:\n\t\t\t\tself.board[row_index, i-1] *= 2\n\t\t\t\tself.board[row_index, i] = 0\n\t\t\t\tcollapsed = True\n\t\t\t\tself.score += self.board[row_index, i-1]\n\n\t\tleft_index = 0\n\t\tfor i in range(self.ncols):\n\t\t\tif self.board[row_index, i] != 0:\n\t\t\t\tself.__swap_on_board((row_index, left_index), (row_index, i))\n\t\t\t\tleft_index += 1\n\n\t\treturn collapsed\n\n\tdef __up_swipe_on_col(self, col_index):\n\t\ttop_index = 0\n\t\tcollapsed = False\n\t\tfor i in range(self.nrows):\n\t\t\tif self.board[i, col_index] != 0:\n\t\t\t\tif top_index != i:\n\t\t\t\t\tcollapsed = True\n\t\t\t\tself.__swap_on_board((top_index, col_index), (i, col_index))\n\t\t\t\ttop_index += 1\n\n\t\tfor i in range(1, self.nrows):\n\t\t\tif self.board[i, col_index] == self.board[i-1, col_index]:\n\t\t\t\tself.board[i-1, col_index] *= 2\n\t\t\t\tself.board[i, col_index] = 0\n\t\t\t\tcollapsed = True\n\t\t\t\tself.score += self.board[i-1, col_index]\n\n\t\ttop_index = 0\n\t\tfor i in range(self.nrows):\n\t\t\tif self.board[i, col_index] != 0:\n\t\t\t\tself.__swap_on_board((top_index, col_index), (i, col_index))\n\t\t\t\ttop_index += 1\n\n\t\treturn collapsed\n\n\tdef __left_swipe(self):\n\t\tcollapsed = False\n\t\tfor i in range(self.nrows):\n\t\t\tif self.__left_swipe_on_row(i):\n\t\t\t\tcollapsed = True\n\t\treturn collapsed\n\n\tdef __right_swipe(self):\n\t\tcollapsed = False\n\t\tfor i in range(self.nrows):\n\t\t\tfor j in range(self.ncols//2):\n\t\t\t\tself.__swap_on_board((i, j), (i, -j-1))\n\t\t\tif self.__left_swipe_on_row(i):\n\t\t\t\tcollapsed = True\n\t\t\tfor j in range(self.ncols//2):\n\t\t\t\tself.__swap_on_board((i, j), (i, -j-1))\n\t\treturn collapsed\n\n\tdef __up_swipe(self):\n\t\tcollapsed = False\n\t\tfor i in range(self.ncols):\n\t\t\tif self.__up_swipe_on_col(i):\n\t\t\t\tcollapsed = True\n\t\treturn collapsed\n\n\tdef __down_swipe(self):\n\t\tcollapsed = False\n\t\tfor i in range(self.ncols):\n\t\t\tfor j in 
range(self.nrows//2):\n\t\t\t\tself.__swap_on_board((j, i), (-j-1, i))\n\t\t\tif self.__up_swipe_on_col(i):\n\t\t\t\tcollapsed = True\n\t\t\tfor j in range(self.nrows//2):\n\t\t\t\tself.__swap_on_board((j, i), (-j-1, i))\n\t\treturn collapsed\n\n\tdef __display(self):\n\t\tprint(self.board)\n\t\tprint(f\"Current score: {self.score}\")\n\n\tdef reset(self):\n\t\tself.score = 0\n\t\tfor i in range(self.nrows):\n\t\t\tfor j in range(self.ncols):\n\t\t\t\tself.board[i, j] = 0\n\n\tdef play(self):\n\t\tmoves = [self.__up_swipe, self.__left_swipe, self.__down_swipe, self.__right_swipe]\n\t\tmovement_mapping = {char: moves[pos] for pos, char in enumerate('WASD')}\n\t\tboard_copy = Board(self.nrows, self.ncols)\n\t\tlost = False\n\t\twhile self.board.max() < 2048:\n\t\t\tself.__display()\n\t\t\traw_input_value = input(\"Play with WASD: \").upper()\n\t\t\twhile len(raw_input_value) == 0:\n\t\t\t\traw_input_value = input(\"Play with WASD: \").upper()\n\t\t\tdirection = raw_input_value[0]\n\t\t\twhile direction not in movement_mapping:\n\t\t\t\traw_input_value = input(\"Play with WASD: \").upper()\n\t\t\t\twhile len(raw_input_value) == 0:\n\t\t\t\t\traw_input_value = input(\"Play with WASD: \").upper()\n\t\t\t\tdirection = raw_input_value[0]\n\t\t\tdid_move = movement_mapping[direction]()\n\t\t\tif did_move:\n\t\t\t\tself.__add_new_numbers()\n\n\t\t\t\t# TODO: Make this more efficient\n\t\t\t\tboard_copy.board = self.board.copy()\n\t\t\t\tcan_swipe_up = board_copy.__up_swipe()\n\t\t\t\tcan_swipe_left = board_copy.__left_swipe()\n\t\t\t\tif not (can_swipe_left or can_swipe_up):\n\t\t\t\t\tlost = True\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(f\"'{direction}'\" + ' is an INVALID MOVE')\n\n\t\tif not lost:\n\t\t\tprint('GAME WON')\n\t\telse:\n\t\t\tprint('GAME LOST')\n\t\tself.__display()\n\t\toutput = self.score\n\t\tself.reset()\n\t\treturn output\n\n\n\nif __name__ == \"__main__\":\n\tb = Board(4, 4)\n\tb.play()",
"step-ids": [
10,
12,
13,
14,
16
]
}
|
[
10,
12,
13,
14,
16
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 31 05:48:57 2019
@author: emama
"""
import datetime as dt
t = dt.datetime.today()
print(t)
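# dt.datetime.today() returns a naive local datetime (no tzinfo); use
# dt.datetime.now(tz=...) when an aware timestamp is needed.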
|
normal
|
{
"blob_id": "b1fbc8f3616b70e5d35898fd895c37e838c87dc9",
"index": 9293,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(t)\n",
"step-3": "<mask token>\nt = dt.datetime.today()\nprint(t)\n",
"step-4": "<mask token>\nimport datetime as dt\nt = dt.datetime.today()\nprint(t)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 31 05:48:57 2019\r\n\r\n@author: emama\r\n\"\"\"\r\n\r\nimport datetime as dt\r\n\r\nt = dt.datetime.today()\r\nprint(t)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from rest_framework import serializers, viewsets, routers
from lamp_control.models import Lamp
class LampSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Lamp
fields = '__all__'
class LampViewSet(viewsets.ModelViewSet):
serializer_class = LampSerializer
queryset = Lamp.objects.all()
router = routers.DefaultRouter()
router.register(r'lamps', LampViewSet)
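# A hedged usage sketch (not part of the original module): the router would
# typically be mounted in the project URLconf; the module path lamp_control.api
# is an assumption.
#
#   from django.conf.urls import include, url
#   from lamp_control import api
#
#   urlpatterns = [url(r'^api/', include(api.router.urls))]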
|
normal
|
{
"blob_id": "aff1d702e591efcfc0fc93150a3fbec532408137",
"index": 55,
"step-1": "<mask token>\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register('lamps', LampViewSet)\n",
"step-4": "from rest_framework import serializers, viewsets, routers\nfrom lamp_control.models import Lamp\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register('lamps', LampViewSet)\n",
"step-5": "from rest_framework import serializers, viewsets, routers\n\nfrom lamp_control.models import Lamp\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'lamps', LampViewSet)\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
from GRAFICA_BRESENHAMS import Bresenhams
def main():
x = int(input('INGRESA VALOR PARA X: \n'))
y = int(input('INGRESA VALOR PARA Y: \n'))
x1 = int(input('INGRESA VALOR PARA X1: \n'))
y1 = int(input('INGRESA VALOR PARA Y1: \n'))
Bresenhams(x, y, x1, y1)
if __name__ == '__main__':
main()
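# Bresenhams(x, y, x1, y1) is assumed to rasterize and plot the segment from
# (x, y) to (x1, y1) with Bresenham's integer line algorithm; its
# implementation lives in the (unshown) GRAFICA_BRESENHAMS module.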
|
normal
|
{
"blob_id": "e75bee4e014aa369131c3e200ce874a8840b5690",
"index": 3573,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n x = int(input('INGRESA VALOR PARA X: \\n'))\n y = int(input('INGRESA VALOR PARA Y: \\n'))\n x1 = int(input('INGRESA VALOR PARA X1: \\n'))\n y1 = int(input('INGRESA VALOR PARA Y1: \\n'))\n Bresenhams(x, y, x1, y1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n x = int(input('INGRESA VALOR PARA X: \\n'))\n y = int(input('INGRESA VALOR PARA Y: \\n'))\n x1 = int(input('INGRESA VALOR PARA X1: \\n'))\n y1 = int(input('INGRESA VALOR PARA Y1: \\n'))\n Bresenhams(x, y, x1, y1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from GRAFICA_BRESENHAMS import Bresenhams\n\n\ndef main():\n x = int(input('INGRESA VALOR PARA X: \\n'))\n y = int(input('INGRESA VALOR PARA Y: \\n'))\n x1 = int(input('INGRESA VALOR PARA X1: \\n'))\n y1 = int(input('INGRESA VALOR PARA Y1: \\n'))\n Bresenhams(x, y, x1, y1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from jox_api import label_image,Mysql,Utils
from jox_config import api_base_url
import json
class Menu():
def __init__(self):
self.mysqlClass = Mysql.MySQL()
self.timeClass = Utils.Time()
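    # NOTE: the SQL below is assembled with %-interpolation; with untrusted
    # input, parameterized queries (e.g. cursor.execute(sql, params)) would
    # be the safer pattern.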
def get_menu(self,type,openid):
try:
if type == 'mine':
self.sql = "SELECT * FROM get_menu WHERE openid=\'%s\' order by watch DESC " % (openid)
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'menu_list': self.resql['alldata'] }
else:
return {'code': -1}
elif type == 'main':
self.sql = "SELECT * FROM get_menu order by watch DESC"
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
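                    # NOTE: the [3:-1] slice below drops the final row as well
                    # as the top three; [3:] may be what was intended.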
return {'code': 1007, 'top_list': self.resql['alldata'][0:3],'menu_list': self.resql['alldata'][3:-1]}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
def get_menu_info(self,menu_id):
try:
self.sql = "SELECT * FROM menu WHERE id=%s" % (menu_id)
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
self.menu= self.resql['alldata'][0]
self.sql3 = "UPDATE menu SET watch=%s where id=%s" %(self.menu['watch']+1,menu_id)
self.resql3 = self.mysqlClass.insert_data(self.sql3)
print(self.resql3)
self.sql2 = "SELECT * FROM menu_step WHERE menu_id=%s order by num ASC " % (menu_id)
self.resql2 = self.mysqlClass.select_data(self.sql2)
self.step_list = []
if self.resql2['state'] != 'E':
for ai_menu_log in self.resql2['alldata']:
self.step_list.append(ai_menu_log)
self.menu['menu_step'] = self.step_list
return {'code': 1008, 'menu_info': self.menu}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
def add_menu(self,data,openid):
try:
self.create_time = self.timeClass.get_time()
self.sql ='''
INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date)
VALUES (\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\')
'''% (openid, data.title,data.photo,data.material,data.accessories,data.ingredient, self.create_time)
self.resql = self.mysqlClass.add_insert(self.sql,"")
self.conn = self.resql['conn']
self.cur = self.resql['cur']
self.menu_id = self.cur.lastrowid
steps = json.loads(data.steps)
for step in steps:
print(step['num'])
self.sql2 = '''
INSERT INTO menu_step (menu_id,num,content,image,create_date)
VALUES (%s,%d,\'%s\',\'%s\',\'%s\')
'''% (self.menu_id, step['num'],step['content'],step['image'], self.create_time)
self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)
self.conn = self.resql2['conn']
self.resql = self.mysqlClass.commit_inserst(self.conn)
if self.resql['state'] !='E':
return {'code': 1010}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
|
normal
|
{
"blob_id": "4fa9d16f979acf3edce05a209e1c6636e50fc315",
"index": 222,
"step-1": "<mask token>\n\n\nclass Menu:\n <mask token>\n\n def get_menu(self, type, openid):\n try:\n if type == 'mine':\n self.sql = (\n \"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC \"\n % openid)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'menu_list': self.resql['alldata']}\n else:\n return {'code': -1}\n elif type == 'main':\n self.sql = 'SELECT * FROM get_menu order by watch DESC'\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'top_list': self.resql['alldata']\n [0:3], 'menu_list': self.resql['alldata'][3:-1]}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n <mask token>\n\n def add_menu(self, data, openid):\n try:\n self.create_time = self.timeClass.get_time()\n self.sql = (\n \"\"\"\n INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date) \n VALUES ('%s','%s','%s','%s','%s','%s','%s')\n \"\"\"\n % (openid, data.title, data.photo, data.material, data.\n accessories, data.ingredient, self.create_time))\n self.resql = self.mysqlClass.add_insert(self.sql, '')\n self.conn = self.resql['conn']\n self.cur = self.resql['cur']\n self.menu_id = self.cur.lastrowid\n steps = json.loads(data.steps)\n for step in steps:\n print(step['num'])\n self.sql2 = (\n \"\"\"\n INSERT INTO menu_step (menu_id,num,content,image,create_date)\n VALUES (%s,%d,'%s','%s','%s')\n \"\"\"\n % (self.menu_id, step['num'], step['content'], step[\n 'image'], self.create_time))\n self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)\n self.conn = self.resql2['conn']\n self.resql = self.mysqlClass.commit_inserst(self.conn)\n if self.resql['state'] != 'E':\n return {'code': 1010}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n",
"step-2": "<mask token>\n\n\nclass Menu:\n\n def __init__(self):\n self.mysqlClass = Mysql.MySQL()\n self.timeClass = Utils.Time()\n\n def get_menu(self, type, openid):\n try:\n if type == 'mine':\n self.sql = (\n \"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC \"\n % openid)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'menu_list': self.resql['alldata']}\n else:\n return {'code': -1}\n elif type == 'main':\n self.sql = 'SELECT * FROM get_menu order by watch DESC'\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'top_list': self.resql['alldata']\n [0:3], 'menu_list': self.resql['alldata'][3:-1]}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n <mask token>\n\n def add_menu(self, data, openid):\n try:\n self.create_time = self.timeClass.get_time()\n self.sql = (\n \"\"\"\n INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date) \n VALUES ('%s','%s','%s','%s','%s','%s','%s')\n \"\"\"\n % (openid, data.title, data.photo, data.material, data.\n accessories, data.ingredient, self.create_time))\n self.resql = self.mysqlClass.add_insert(self.sql, '')\n self.conn = self.resql['conn']\n self.cur = self.resql['cur']\n self.menu_id = self.cur.lastrowid\n steps = json.loads(data.steps)\n for step in steps:\n print(step['num'])\n self.sql2 = (\n \"\"\"\n INSERT INTO menu_step (menu_id,num,content,image,create_date)\n VALUES (%s,%d,'%s','%s','%s')\n \"\"\"\n % (self.menu_id, step['num'], step['content'], step[\n 'image'], self.create_time))\n self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)\n self.conn = self.resql2['conn']\n self.resql = self.mysqlClass.commit_inserst(self.conn)\n if self.resql['state'] != 'E':\n return {'code': 1010}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n",
"step-3": "<mask token>\n\n\nclass Menu:\n\n def __init__(self):\n self.mysqlClass = Mysql.MySQL()\n self.timeClass = Utils.Time()\n\n def get_menu(self, type, openid):\n try:\n if type == 'mine':\n self.sql = (\n \"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC \"\n % openid)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'menu_list': self.resql['alldata']}\n else:\n return {'code': -1}\n elif type == 'main':\n self.sql = 'SELECT * FROM get_menu order by watch DESC'\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'top_list': self.resql['alldata']\n [0:3], 'menu_list': self.resql['alldata'][3:-1]}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def get_menu_info(self, menu_id):\n try:\n self.sql = 'SELECT * FROM menu WHERE id=%s' % menu_id\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n self.menu = self.resql['alldata'][0]\n self.sql3 = 'UPDATE menu SET watch=%s where id=%s' % (self.\n menu['watch'] + 1, menu_id)\n self.resql3 = self.mysqlClass.insert_data(self.sql3)\n print(self.resql3)\n self.sql2 = (\n 'SELECT * FROM menu_step WHERE menu_id=%s order by num ASC '\n % menu_id)\n self.resql2 = self.mysqlClass.select_data(self.sql2)\n self.step_list = []\n if self.resql2['state'] != 'E':\n for ai_menu_log in self.resql2['alldata']:\n self.step_list.append(ai_menu_log)\n self.menu['menu_step'] = self.step_list\n return {'code': 1008, 'menu_info': self.menu}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def add_menu(self, data, openid):\n try:\n self.create_time = self.timeClass.get_time()\n self.sql = (\n \"\"\"\n INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date) \n VALUES ('%s','%s','%s','%s','%s','%s','%s')\n \"\"\"\n % (openid, data.title, data.photo, data.material, data.\n accessories, data.ingredient, self.create_time))\n self.resql = self.mysqlClass.add_insert(self.sql, '')\n self.conn = self.resql['conn']\n self.cur = self.resql['cur']\n self.menu_id = self.cur.lastrowid\n steps = json.loads(data.steps)\n for step in steps:\n print(step['num'])\n self.sql2 = (\n \"\"\"\n INSERT INTO menu_step (menu_id,num,content,image,create_date)\n VALUES (%s,%d,'%s','%s','%s')\n \"\"\"\n % (self.menu_id, step['num'], step['content'], step[\n 'image'], self.create_time))\n self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)\n self.conn = self.resql2['conn']\n self.resql = self.mysqlClass.commit_inserst(self.conn)\n if self.resql['state'] != 'E':\n return {'code': 1010}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n",
"step-4": "from jox_api import label_image, Mysql, Utils\nfrom jox_config import api_base_url\nimport json\n\n\nclass Menu:\n\n def __init__(self):\n self.mysqlClass = Mysql.MySQL()\n self.timeClass = Utils.Time()\n\n def get_menu(self, type, openid):\n try:\n if type == 'mine':\n self.sql = (\n \"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC \"\n % openid)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'menu_list': self.resql['alldata']}\n else:\n return {'code': -1}\n elif type == 'main':\n self.sql = 'SELECT * FROM get_menu order by watch DESC'\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'top_list': self.resql['alldata']\n [0:3], 'menu_list': self.resql['alldata'][3:-1]}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def get_menu_info(self, menu_id):\n try:\n self.sql = 'SELECT * FROM menu WHERE id=%s' % menu_id\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n self.menu = self.resql['alldata'][0]\n self.sql3 = 'UPDATE menu SET watch=%s where id=%s' % (self.\n menu['watch'] + 1, menu_id)\n self.resql3 = self.mysqlClass.insert_data(self.sql3)\n print(self.resql3)\n self.sql2 = (\n 'SELECT * FROM menu_step WHERE menu_id=%s order by num ASC '\n % menu_id)\n self.resql2 = self.mysqlClass.select_data(self.sql2)\n self.step_list = []\n if self.resql2['state'] != 'E':\n for ai_menu_log in self.resql2['alldata']:\n self.step_list.append(ai_menu_log)\n self.menu['menu_step'] = self.step_list\n return {'code': 1008, 'menu_info': self.menu}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def add_menu(self, data, openid):\n try:\n self.create_time = self.timeClass.get_time()\n self.sql = (\n \"\"\"\n INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date) \n VALUES ('%s','%s','%s','%s','%s','%s','%s')\n \"\"\"\n % (openid, data.title, data.photo, data.material, data.\n accessories, data.ingredient, self.create_time))\n self.resql = self.mysqlClass.add_insert(self.sql, '')\n self.conn = self.resql['conn']\n self.cur = self.resql['cur']\n self.menu_id = self.cur.lastrowid\n steps = json.loads(data.steps)\n for step in steps:\n print(step['num'])\n self.sql2 = (\n \"\"\"\n INSERT INTO menu_step (menu_id,num,content,image,create_date)\n VALUES (%s,%d,'%s','%s','%s')\n \"\"\"\n % (self.menu_id, step['num'], step['content'], step[\n 'image'], self.create_time))\n self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)\n self.conn = self.resql2['conn']\n self.resql = self.mysqlClass.commit_inserst(self.conn)\n if self.resql['state'] != 'E':\n return {'code': 1010}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n",
"step-5": "from jox_api import label_image,Mysql,Utils\nfrom jox_config import api_base_url\nimport json\nclass Menu():\n def __init__(self):\n self.mysqlClass = Mysql.MySQL()\n self.timeClass = Utils.Time()\n\n def get_menu(self,type,openid):\n try:\n if type == 'mine':\n self.sql = \"SELECT * FROM get_menu WHERE openid=\\'%s\\' order by watch DESC \" % (openid)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n\n return {'code': 1007, 'menu_list': self.resql['alldata'] }\n else:\n return {'code': -1}\n elif type == 'main':\n self.sql = \"SELECT * FROM get_menu order by watch DESC\"\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'top_list': self.resql['alldata'][0:3],'menu_list': self.resql['alldata'][3:-1]}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def get_menu_info(self,menu_id):\n try:\n self.sql = \"SELECT * FROM menu WHERE id=%s\" % (menu_id)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n self.menu= self.resql['alldata'][0]\n self.sql3 = \"UPDATE menu SET watch=%s where id=%s\" %(self.menu['watch']+1,menu_id)\n self.resql3 = self.mysqlClass.insert_data(self.sql3)\n print(self.resql3)\n self.sql2 = \"SELECT * FROM menu_step WHERE menu_id=%s order by num ASC \" % (menu_id)\n self.resql2 = self.mysqlClass.select_data(self.sql2)\n self.step_list = []\n if self.resql2['state'] != 'E':\n for ai_menu_log in self.resql2['alldata']:\n self.step_list.append(ai_menu_log)\n self.menu['menu_step'] = self.step_list\n return {'code': 1008, 'menu_info': self.menu}\n else:\n return {'code': -1}\n\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def add_menu(self,data,openid):\n try:\n self.create_time = self.timeClass.get_time()\n\n self.sql ='''\n INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date) \n VALUES (\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\')\n '''% (openid, data.title,data.photo,data.material,data.accessories,data.ingredient, self.create_time)\n\n self.resql = self.mysqlClass.add_insert(self.sql,\"\")\n self.conn = self.resql['conn']\n self.cur = self.resql['cur']\n self.menu_id = self.cur.lastrowid\n steps = json.loads(data.steps)\n\n for step in steps:\n print(step['num'])\n self.sql2 = '''\n INSERT INTO menu_step (menu_id,num,content,image,create_date)\n VALUES (%s,%d,\\'%s\\',\\'%s\\',\\'%s\\')\n '''% (self.menu_id, step['num'],step['content'],step['image'], self.create_time)\n self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)\n self.conn = self.resql2['conn']\n self.resql = self.mysqlClass.commit_inserst(self.conn)\n if self.resql['state'] !='E':\n return {'code': 1010}\n else:\n return {'code': -1}\n\n except Exception as e:\n print(str(e))\n return {'code': -1}",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
gp = pd.read_csv('graph6.csv')
N=gp['Starting-node'].max()
M=gp['Ending-node'].max()
N=max(N,M)
gp=gp.sort_values(by='Cost')
gp=gp.reset_index()
gp=gp.reset_index()
gp['tree label']=gp['level_0']
index=gp['index'].max()
gp.drop('index',axis=1,inplace=True)
gp.drop('level_0',axis=1,inplace=True)
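# Kruskal-style pass over the edges in ascending cost order. For edge n, Count
# tallies how many already-kept edges of each component ('tree label') touch
# n's endpoints: max(Count) == 2 means both endpoints already sit in the same
# component, so keeping n would close a cycle and the row is zeroed out;
# otherwise every touched component is relabelled to n (a crude union step).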
for n in range(index+1):
Count=[]
Visit=[]
Visit2=[]
for i in range(11):
Count.append(0)
for m in range(n):
if gp.iloc[n]['Starting-node']==gp.iloc[m]['Starting-node']:
if gp.iloc[n]['Starting-node'] not in Visit2:
Count[gp.iloc[m,3]]+=1
Visit2.append(gp.iloc[n]['Starting-node'])
Visit.append(gp.iloc[m,3])
if gp.iloc[n]['Starting-node']==gp.iloc[m]['Ending-node']:
if gp.iloc[n]['Starting-node'] not in Visit2:
Count[gp.iloc[m,3]]+=1
Visit2.append(gp.iloc[n]['Starting-node'])
Visit.append(gp.iloc[m,3])
if gp.iloc[n]['Ending-node']==gp.iloc[m]['Starting-node']:
if gp.iloc[n]['Ending-node'] not in Visit2:
Count[gp.iloc[m,3]]+=1
Visit2.append(gp.iloc[n]['Ending-node'])
Visit.append(gp.iloc[m,3])
if gp.iloc[n]['Ending-node']==gp.iloc[m]['Ending-node']:
if gp.iloc[n]['Ending-node'] not in Visit2:
Count[gp.iloc[m,3]]+=1
Visit2.append(gp.iloc[n]['Ending-node'])
Visit.append(gp.iloc[m,3])
if max(Count)==2:
gp.iloc[n,0]=0
gp.iloc[n,1]=0
gp.iloc[n,2]=0
gp.iloc[n,3]=0
# gp.drop(n,axis=0,inplace=True)
if max(Count)!=2:
for k in range(n):
if gp.iloc[k,3] in Visit:
gp.iloc[k,3]=n
gp.head(11)
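# Assumes graph6.csv provides integer 'Starting-node', 'Ending-node' and
# 'Cost' columns, and (because Count is hard-sized to 11 slots) at most 11
# edges; the rows left non-zero above form the minimum spanning tree/forest.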
# In[ ]:
|
normal
|
{
"blob_id": "719f7b7b2d8df037583263588e93d884ab3820fe",
"index": 5963,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngp.drop('index', axis=1, inplace=True)\ngp.drop('level_0', axis=1, inplace=True)\nfor n in range(index + 1):\n Count = []\n Visit = []\n Visit2 = []\n for i in range(11):\n Count.append(0)\n for m in range(n):\n if gp.iloc[n]['Starting-node'] == gp.iloc[m]['Starting-node']:\n if gp.iloc[n]['Starting-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Starting-node'])\n Visit.append(gp.iloc[m, 3])\n if gp.iloc[n]['Starting-node'] == gp.iloc[m]['Ending-node']:\n if gp.iloc[n]['Starting-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Starting-node'])\n Visit.append(gp.iloc[m, 3])\n if gp.iloc[n]['Ending-node'] == gp.iloc[m]['Starting-node']:\n if gp.iloc[n]['Ending-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Ending-node'])\n Visit.append(gp.iloc[m, 3])\n if gp.iloc[n]['Ending-node'] == gp.iloc[m]['Ending-node']:\n if gp.iloc[n]['Ending-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Ending-node'])\n Visit.append(gp.iloc[m, 3])\n if max(Count) == 2:\n gp.iloc[n, 0] = 0\n gp.iloc[n, 1] = 0\n gp.iloc[n, 2] = 0\n gp.iloc[n, 3] = 0\n if max(Count) != 2:\n for k in range(n):\n if gp.iloc[k, 3] in Visit:\n gp.iloc[k, 3] = n\ngp.head(11)\n",
"step-3": "<mask token>\ngp = pd.read_csv('graph6.csv')\nN = gp['Starting-node'].max()\nM = gp['Ending-node'].max()\nN = max(N, M)\ngp = gp.sort_values(by='Cost')\ngp = gp.reset_index()\ngp = gp.reset_index()\ngp['tree label'] = gp['level_0']\nindex = gp['index'].max()\ngp.drop('index', axis=1, inplace=True)\ngp.drop('level_0', axis=1, inplace=True)\nfor n in range(index + 1):\n Count = []\n Visit = []\n Visit2 = []\n for i in range(11):\n Count.append(0)\n for m in range(n):\n if gp.iloc[n]['Starting-node'] == gp.iloc[m]['Starting-node']:\n if gp.iloc[n]['Starting-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Starting-node'])\n Visit.append(gp.iloc[m, 3])\n if gp.iloc[n]['Starting-node'] == gp.iloc[m]['Ending-node']:\n if gp.iloc[n]['Starting-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Starting-node'])\n Visit.append(gp.iloc[m, 3])\n if gp.iloc[n]['Ending-node'] == gp.iloc[m]['Starting-node']:\n if gp.iloc[n]['Ending-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Ending-node'])\n Visit.append(gp.iloc[m, 3])\n if gp.iloc[n]['Ending-node'] == gp.iloc[m]['Ending-node']:\n if gp.iloc[n]['Ending-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Ending-node'])\n Visit.append(gp.iloc[m, 3])\n if max(Count) == 2:\n gp.iloc[n, 0] = 0\n gp.iloc[n, 1] = 0\n gp.iloc[n, 2] = 0\n gp.iloc[n, 3] = 0\n if max(Count) != 2:\n for k in range(n):\n if gp.iloc[k, 3] in Visit:\n gp.iloc[k, 3] = n\ngp.head(11)\n",
"step-4": "import pandas as pd\ngp = pd.read_csv('graph6.csv')\nN = gp['Starting-node'].max()\nM = gp['Ending-node'].max()\nN = max(N, M)\ngp = gp.sort_values(by='Cost')\ngp = gp.reset_index()\ngp = gp.reset_index()\ngp['tree label'] = gp['level_0']\nindex = gp['index'].max()\ngp.drop('index', axis=1, inplace=True)\ngp.drop('level_0', axis=1, inplace=True)\nfor n in range(index + 1):\n Count = []\n Visit = []\n Visit2 = []\n for i in range(11):\n Count.append(0)\n for m in range(n):\n if gp.iloc[n]['Starting-node'] == gp.iloc[m]['Starting-node']:\n if gp.iloc[n]['Starting-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Starting-node'])\n Visit.append(gp.iloc[m, 3])\n if gp.iloc[n]['Starting-node'] == gp.iloc[m]['Ending-node']:\n if gp.iloc[n]['Starting-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Starting-node'])\n Visit.append(gp.iloc[m, 3])\n if gp.iloc[n]['Ending-node'] == gp.iloc[m]['Starting-node']:\n if gp.iloc[n]['Ending-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Ending-node'])\n Visit.append(gp.iloc[m, 3])\n if gp.iloc[n]['Ending-node'] == gp.iloc[m]['Ending-node']:\n if gp.iloc[n]['Ending-node'] not in Visit2:\n Count[gp.iloc[m, 3]] += 1\n Visit2.append(gp.iloc[n]['Ending-node'])\n Visit.append(gp.iloc[m, 3])\n if max(Count) == 2:\n gp.iloc[n, 0] = 0\n gp.iloc[n, 1] = 0\n gp.iloc[n, 2] = 0\n gp.iloc[n, 3] = 0\n if max(Count) != 2:\n for k in range(n):\n if gp.iloc[k, 3] in Visit:\n gp.iloc[k, 3] = n\ngp.head(11)\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\ngp = pd.read_csv('graph6.csv')\n\n\nN=gp['Starting-node'].max()\nM=gp['Ending-node'].max()\nN=max(N,M)\n\n \n\ngp=gp.sort_values(by='Cost')\ngp=gp.reset_index()\ngp=gp.reset_index()\ngp['tree label']=gp['level_0']\nindex=gp['index'].max()\ngp.drop('index',axis=1,inplace=True)\ngp.drop('level_0',axis=1,inplace=True)\n\n\nfor n in range(index+1):\n Count=[]\n Visit=[]\n Visit2=[]\n for i in range(11):\n Count.append(0)\n \n for m in range(n):\n\n if gp.iloc[n]['Starting-node']==gp.iloc[m]['Starting-node']:\n if gp.iloc[n]['Starting-node'] not in Visit2:\n Count[gp.iloc[m,3]]+=1\n Visit2.append(gp.iloc[n]['Starting-node'])\n Visit.append(gp.iloc[m,3])\n\n if gp.iloc[n]['Starting-node']==gp.iloc[m]['Ending-node']:\n if gp.iloc[n]['Starting-node'] not in Visit2:\n Count[gp.iloc[m,3]]+=1\n Visit2.append(gp.iloc[n]['Starting-node'])\n Visit.append(gp.iloc[m,3])\n\n if gp.iloc[n]['Ending-node']==gp.iloc[m]['Starting-node']:\n if gp.iloc[n]['Ending-node'] not in Visit2:\n Count[gp.iloc[m,3]]+=1\n Visit2.append(gp.iloc[n]['Ending-node'])\n Visit.append(gp.iloc[m,3])\n\n if gp.iloc[n]['Ending-node']==gp.iloc[m]['Ending-node']:\n if gp.iloc[n]['Ending-node'] not in Visit2:\n Count[gp.iloc[m,3]]+=1\n Visit2.append(gp.iloc[n]['Ending-node'])\n Visit.append(gp.iloc[m,3])\n if max(Count)==2:\n\n gp.iloc[n,0]=0\n gp.iloc[n,1]=0\n gp.iloc[n,2]=0\n gp.iloc[n,3]=0\n# gp.drop(n,axis=0,inplace=True) \n if max(Count)!=2:\n for k in range(n):\n if gp.iloc[k,3] in Visit:\n gp.iloc[k,3]=n\ngp.head(11)\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# models.py- Team
from django.db import models
class Team(models.Model):
teamName = models.TextField()
#Seasons associated
#Registrants unique
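
# A possible shape for the relations hinted at in the comments above
# (hypothetical sketch only; the original file never defines these models):
#
# class Season(models.Model):
#     team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name="seasons")
#
# class Registrant(models.Model):
#     team = models.ForeignKey(Team, on_delete=models.CASCADE)
#     name = models.TextField()
#
#     class Meta:
#         constraints = [models.UniqueConstraint(fields=["team", "name"],
#                                                name="unique_registrant_per_team")]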
|
normal
|
{
"blob_id": "331b5f0a34db4d12d713439db3d2818e8c922310",
"index": 4236,
"step-1": "<mask token>\n\n\nclass Team(models.Model):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Team(models.Model):\n teamName = models.TextField()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Team(models.Model):\n teamName = models.TextField()\n\n\nreturn\n",
"step-4": "from django.db import models\n\n\nclass Team(models.Model):\n teamName = models.TextField()\n\n\nreturn\n",
"step-5": "# models.py- Team\nfrom django.db import models\n\n\nclass Team(models.Model):\n \n teamName = models.TextField()\n\n #Seasons associated\n #Registrants unique\n\nreturn \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from apps.virt.views import node, domain,device,cluster,home
urlpatterns = patterns('',
# Home
url(r'^$', home.HomeView.as_view(), name='home'),
# Cluster
url(r'^cluster/status/$', cluster.ClusterStatusView.as_view(), name='cluster_status'),
# Node
url(r'^node/list/$', node.NodeListView.as_view(), name='node_list'),
url(r'^node/add/$', node.NodeCreateView.as_view(), name='node_add'),
url(r'^node/(?P<pk>\d+)/libvirt/$', node.NodeLibvirtView.as_view(), name='node_libvirt'),
url(r'^node/(?P<pk>\d+)/libvirt/update/$', node.UpdateCapabilitiesView.as_view(), name='node_libvirt_update'),
url(r'^node/(?P<pk>\d+)/libvirt/update/domains/$', node.UpdateDomainsView.as_view(), name='node_libvirt_updatedomains'),
url(r'^node/(?P<pk>\d+)/libvirt/create/domains/$', node.CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'),
url(r'^node/(?P<pk>\d+)/edit/$', node.NodeUpdateView.as_view(), name='node_edit'),
url(r'^node/(?P<pk>\d+)/delete/$', node.NodeDeleteView.as_view(), name='node_delete'),
# Domain
url(r'^domain/list/$', domain.DomainListView.as_view(), name='domain_list'),
url(r'^domain/add/$', domain.DomainCreateView.as_view(), name='domain_add'),
url(r'^domain/(?P<pk>\d+)/libvirt/$', domain.DomainLibvirtView.as_view(), name='domain_libvirt'),
url(r'^domain/(?P<pk>\d+)/edit/$', domain.DomainUpdateView.as_view(), name='domain_edit'),
url(r'^domain/(?P<pk>\d+)/delete/$', domain.DomainDeleteView.as_view(), name='domain_delete'),
url(r'^domain/(?P<pk>\d+)/libvirt/create/$', domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'),
url(r'^domain/(?P<pk>\d+)/libvirt/reboot/$', domain.LibvirtRebootView.as_view(), name='domain_libvirt_reboot'),
url(r'^domain/(?P<pk>\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.as_view(), name='domain_libvirt_shutdown'),
url(r'^domain/(?P<pk>\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.as_view(), name='domain_libvirt_destroy'),
url(r'^domain/(?P<pk>\d+)/libvirt/migrate/(?P<node_pk>\d+)/$', domain.LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'),
url(r'^domain/(?P<pk>\d+)/libvirt/resume/$', domain.LibvirtResumeView.as_view(), name='domain_libvirt_resume'),
url(r'^domain/(?P<pk>\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.as_view(), name='domain_libvirt_suspend'),
# Device
url(r'^domain/(?P<pk>\d+)/device/(?P<type>\w+)/add/$', device.DeviceCreateView.as_view(), name="device_add"),
url(r'^device/(?P<pk>\d+)/$', device.DeviceUpdateView.as_view(), name="device_edit"),
url(r'^device/(?P<pk>\d+)/attach/$', device.DeviceAttachView.as_view(), name="device_attach"),
url(r'^device/(?P<pk>\d+)/detach/$', device.DeviceDetachView.as_view(), name="device_detach"),
url(r'^device/(?P<pk>\d+)/delete/$', device.DeviceDeleteView.as_view(), name="device_delete")
)
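
# Note: patterns() was deprecated in Django 1.8 and removed in 1.10. On a
# modern Django the equivalent is a plain list (sketch, same views assumed):
#
# from django.urls import re_path
#
# urlpatterns = [
#     re_path(r'^$', home.HomeView.as_view(), name='home'),
#     # ... remaining routes converted the same way ...
# ]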
|
normal
|
{
"blob_id": "484d104a8481a707a187d0bcb30898c3459a88be",
"index": 389,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', url('^$', home.HomeView.as_view(), name='home'),\n url('^cluster/status/$', cluster.ClusterStatusView.as_view(), name=\n 'cluster_status'), url('^node/list/$', node.NodeListView.as_view(),\n name='node_list'), url('^node/add/$', node.NodeCreateView.as_view(),\n name='node_add'), url('^node/(?P<pk>\\\\d+)/libvirt/$', node.\n NodeLibvirtView.as_view(), name='node_libvirt'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/update/$', node.UpdateCapabilitiesView.\n as_view(), name='node_libvirt_update'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/update/domains/$', node.UpdateDomainsView.\n as_view(), name='node_libvirt_updatedomains'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/create/domains/$', node.\n CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'), url\n ('^node/(?P<pk>\\\\d+)/edit/$', node.NodeUpdateView.as_view(), name=\n 'node_edit'), url('^node/(?P<pk>\\\\d+)/delete/$', node.NodeDeleteView.\n as_view(), name='node_delete'), url('^domain/list/$', domain.\n DomainListView.as_view(), name='domain_list'), url('^domain/add/$',\n domain.DomainCreateView.as_view(), name='domain_add'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/$', domain.DomainLibvirtView.as_view(),\n name='domain_libvirt'), url('^domain/(?P<pk>\\\\d+)/edit/$', domain.\n DomainUpdateView.as_view(), name='domain_edit'), url(\n '^domain/(?P<pk>\\\\d+)/delete/$', domain.DomainDeleteView.as_view(),\n name='domain_delete'), url('^domain/(?P<pk>\\\\d+)/libvirt/create/$',\n domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/reboot/$', domain.LibvirtRebootView.\n as_view(), name='domain_libvirt_reboot'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.\n as_view(), name='domain_libvirt_shutdown'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.\n as_view(), name='domain_libvirt_destroy'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/migrate/(?P<node_pk>\\\\d+)/$', domain.\n LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/resume/$', domain.LibvirtResumeView.\n as_view(), name='domain_libvirt_resume'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.\n as_view(), name='domain_libvirt_suspend'), url(\n '^domain/(?P<pk>\\\\d+)/device/(?P<type>\\\\w+)/add/$', device.\n DeviceCreateView.as_view(), name='device_add'), url(\n '^device/(?P<pk>\\\\d+)/$', device.DeviceUpdateView.as_view(), name=\n 'device_edit'), url('^device/(?P<pk>\\\\d+)/attach/$', device.\n DeviceAttachView.as_view(), name='device_attach'), url(\n '^device/(?P<pk>\\\\d+)/detach/$', device.DeviceDetachView.as_view(),\n name='device_detach'), url('^device/(?P<pk>\\\\d+)/delete/$', device.\n DeviceDeleteView.as_view(), name='device_delete'))\n",
"step-3": "from django.conf.urls import patterns, include, url\nfrom apps.virt.views import node, domain, device, cluster, home\nurlpatterns = patterns('', url('^$', home.HomeView.as_view(), name='home'),\n url('^cluster/status/$', cluster.ClusterStatusView.as_view(), name=\n 'cluster_status'), url('^node/list/$', node.NodeListView.as_view(),\n name='node_list'), url('^node/add/$', node.NodeCreateView.as_view(),\n name='node_add'), url('^node/(?P<pk>\\\\d+)/libvirt/$', node.\n NodeLibvirtView.as_view(), name='node_libvirt'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/update/$', node.UpdateCapabilitiesView.\n as_view(), name='node_libvirt_update'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/update/domains/$', node.UpdateDomainsView.\n as_view(), name='node_libvirt_updatedomains'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/create/domains/$', node.\n CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'), url\n ('^node/(?P<pk>\\\\d+)/edit/$', node.NodeUpdateView.as_view(), name=\n 'node_edit'), url('^node/(?P<pk>\\\\d+)/delete/$', node.NodeDeleteView.\n as_view(), name='node_delete'), url('^domain/list/$', domain.\n DomainListView.as_view(), name='domain_list'), url('^domain/add/$',\n domain.DomainCreateView.as_view(), name='domain_add'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/$', domain.DomainLibvirtView.as_view(),\n name='domain_libvirt'), url('^domain/(?P<pk>\\\\d+)/edit/$', domain.\n DomainUpdateView.as_view(), name='domain_edit'), url(\n '^domain/(?P<pk>\\\\d+)/delete/$', domain.DomainDeleteView.as_view(),\n name='domain_delete'), url('^domain/(?P<pk>\\\\d+)/libvirt/create/$',\n domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/reboot/$', domain.LibvirtRebootView.\n as_view(), name='domain_libvirt_reboot'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.\n as_view(), name='domain_libvirt_shutdown'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.\n as_view(), name='domain_libvirt_destroy'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/migrate/(?P<node_pk>\\\\d+)/$', domain.\n LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/resume/$', domain.LibvirtResumeView.\n as_view(), name='domain_libvirt_resume'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.\n as_view(), name='domain_libvirt_suspend'), url(\n '^domain/(?P<pk>\\\\d+)/device/(?P<type>\\\\w+)/add/$', device.\n DeviceCreateView.as_view(), name='device_add'), url(\n '^device/(?P<pk>\\\\d+)/$', device.DeviceUpdateView.as_view(), name=\n 'device_edit'), url('^device/(?P<pk>\\\\d+)/attach/$', device.\n DeviceAttachView.as_view(), name='device_attach'), url(\n '^device/(?P<pk>\\\\d+)/detach/$', device.DeviceDetachView.as_view(),\n name='device_detach'), url('^device/(?P<pk>\\\\d+)/delete/$', device.\n DeviceDeleteView.as_view(), name='device_delete'))\n",
"step-4": "# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, include, url\nfrom apps.virt.views import node, domain,device,cluster,home\n\nurlpatterns = patterns('',\n\n # Home \n url(r'^$', home.HomeView.as_view(), name='home'),\n\n # Cluster \n url(r'^cluster/status/$', cluster.ClusterStatusView.as_view(), name='cluster_status'),\n\n # Node\n url(r'^node/list/$', node.NodeListView.as_view(), name='node_list'),\n url(r'^node/add/$', node.NodeCreateView.as_view(), name='node_add'),\n url(r'^node/(?P<pk>\\d+)/libvirt/$', node.NodeLibvirtView.as_view(), name='node_libvirt'),\n url(r'^node/(?P<pk>\\d+)/libvirt/update/$', node.UpdateCapabilitiesView.as_view(), name='node_libvirt_update'),\n url(r'^node/(?P<pk>\\d+)/libvirt/update/domains/$', node.UpdateDomainsView.as_view(), name='node_libvirt_updatedomains'),\n url(r'^node/(?P<pk>\\d+)/libvirt/create/domains/$', node.CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'),\n url(r'^node/(?P<pk>\\d+)/edit/$', node.NodeUpdateView.as_view(), name='node_edit'),\n url(r'^node/(?P<pk>\\d+)/delete/$', node.NodeDeleteView.as_view(), name='node_delete'),\n\n\n # Domain\n url(r'^domain/list/$', domain.DomainListView.as_view(), name='domain_list'),\n url(r'^domain/add/$', domain.DomainCreateView.as_view(), name='domain_add'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/$', domain.DomainLibvirtView.as_view(), name='domain_libvirt'),\n url(r'^domain/(?P<pk>\\d+)/edit/$', domain.DomainUpdateView.as_view(), name='domain_edit'),\n url(r'^domain/(?P<pk>\\d+)/delete/$', domain.DomainDeleteView.as_view(), name='domain_delete'),\n \n url(r'^domain/(?P<pk>\\d+)/libvirt/create/$', domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/reboot/$', domain.LibvirtRebootView.as_view(), name='domain_libvirt_reboot'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.as_view(), name='domain_libvirt_shutdown'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.as_view(), name='domain_libvirt_destroy'),\n\n url(r'^domain/(?P<pk>\\d+)/libvirt/migrate/(?P<node_pk>\\d+)/$', domain.LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/resume/$', domain.LibvirtResumeView.as_view(), name='domain_libvirt_resume'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.as_view(), name='domain_libvirt_suspend'),\n\n\n # Device\n url(r'^domain/(?P<pk>\\d+)/device/(?P<type>\\w+)/add/$', device.DeviceCreateView.as_view(), name=\"device_add\"),\n url(r'^device/(?P<pk>\\d+)/$', device.DeviceUpdateView.as_view(), name=\"device_edit\"),\n url(r'^device/(?P<pk>\\d+)/attach/$', device.DeviceAttachView.as_view(), name=\"device_attach\"),\n url(r'^device/(?P<pk>\\d+)/detach/$', device.DeviceDetachView.as_view(), name=\"device_detach\"),\n url(r'^device/(?P<pk>\\d+)/delete/$', device.DeviceDeleteView.as_view(), name=\"device_delete\")\n\n\n)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
file_open = open("C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM(Up,Down).csv", 'r', encoding='UTF8')
save_file = open("C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM사후확률.csv", 'w', encoding='UTF8',newline='')
write = csv.writer(save_file)
hidden_states = ['up', 'down']
pi = [0.5044, 0.4956]
state_space = pd.Series(pi, index=hidden_states, name='states')
print(state_space)
print('\n', state_space.sum())
stack = 0
x_a = ""
x_b = ""
y_a = ""
y_b = ""
before_application = ""
add = []
def count(a,b):
    # encode the pair of binary flags (a, b) as one of four states:
    # (0,0)->0, (0,1)->1, (1,0)->2, (1,1)->3
a = int(a)
b = int(b)
if a == 0 and b == 0:
return 0
elif a == 0 and b == 1:
return 1
elif a == 1 and b == 0:
return 2
elif a == 1 and b == 1:
return 3
while True:
line = file_open.readline()
if not line: break
result_x = []
result_y = []
add = []
    if stack == 0:  # first line: pass the CSV header row straight through
a = line.split(',')[0]
a = a.strip()
add.append(a)
a = line.split(',')[1]
a = a.strip()
add.append(a)
a = line.split(',')[2]
a = a.strip()
add.append(a)
write.writerow(add)
stack = 1
    elif stack == 1:  # remember the first data row of a pair
before_application = line.split(',')[0]
x_a = line.split(',')[1]
x_a = x_a.strip()
y_a = line.split(',')[2]
y_a = y_a.strip()
stack = 2
    elif stack == 2:  # pair the remembered row with this one when the application ids match
if before_application == line.split(',')[0]:
x_b = line.split(',')[1]
x_b = x_b.strip()
y_b = line.split(',')[2]
y_b = y_b.strip()
result_x.append(x_a)
result_x.append(x_b)
result_y.append(y_a)
result_y.append(y_b)
tol = count(result_x[0],result_x[1])
add.append(tol)
tol = count(result_y[0], result_y[1])
add.append(tol)
write.writerow(add)
stack = 3
else:
pass
before_application = line.split(',')[0]
    elif stack == 3:  # same pairing, with the stored/current roles swapped
if before_application == line.split(',')[0]:
x_a = line.split(',')[1]
x_a = x_a.strip()
y_a = line.split(',')[2]
y_a = y_a.strip()
result_x.append(x_b)
result_x.append(x_a)
result_y.append(y_b)
result_y.append(y_a)
tol = count(result_x[0],result_x[1])
add.append(tol)
tol = count(result_y[0], result_y[1])
add.append(tol)
write.writerow(add)
stack = 2
else:
pass
        before_application = line.split(',')[0]

# flush buffered rows to disk and release the file handles
file_open.close()
save_file.close()
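
# A sturdier structure would use context managers, so both handles are closed
# even if an exception interrupts the loop (sketch; paths abbreviated):
#
# with open(in_path, 'r', encoding='UTF8') as file_open, \
#      open(out_path, 'w', encoding='UTF8', newline='') as save_file:
#     write = csv.writer(save_file)
#     ...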
|
normal
|
{
"blob_id": "55977a673bb36900e1d797cb9ec330ce6d9aa717",
"index": 8232,
"step-1": "<mask token>\n\n\ndef count(a, b):\n a = int(a)\n b = int(b)\n if a == 0 and b == 0:\n return 0\n elif a == 0 and b == 1:\n return 1\n elif a == 1 and b == 0:\n return 2\n elif a == 1 and b == 1:\n return 3\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(state_space)\nprint('\\n', state_space.sum())\n<mask token>\n\n\ndef count(a, b):\n a = int(a)\n b = int(b)\n if a == 0 and b == 0:\n return 0\n elif a == 0 and b == 1:\n return 1\n elif a == 1 and b == 0:\n return 2\n elif a == 1 and b == 1:\n return 3\n\n\nwhile True:\n line = file_open.readline()\n if not line:\n break\n result_x = []\n result_y = []\n add = []\n if stack == 0:\n a = line.split(',')[0]\n a = a.strip()\n add.append(a)\n a = line.split(',')[1]\n a = a.strip()\n add.append(a)\n a = line.split(',')[2]\n a = a.strip()\n add.append(a)\n write.writerow(add)\n stack = 1\n elif stack == 1:\n before_application = line.split(',')[0]\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n stack = 2\n elif stack == 2:\n if before_application == line.split(',')[0]:\n x_b = line.split(',')[1]\n x_b = x_b.strip()\n y_b = line.split(',')[2]\n y_b = y_b.strip()\n result_x.append(x_a)\n result_x.append(x_b)\n result_y.append(y_a)\n result_y.append(y_b)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 3\n else:\n pass\n before_application = line.split(',')[0]\n elif stack == 3:\n if before_application == line.split(',')[0]:\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n result_x.append(x_b)\n result_x.append(x_a)\n result_y.append(y_b)\n result_y.append(y_a)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 2\n else:\n pass\n before_application = line.split(',')[0]\n",
"step-3": "<mask token>\nfile_open = open('C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM(Up,Down).csv',\n 'r', encoding='UTF8')\nsave_file = open('C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM사후확률.csv',\n 'w', encoding='UTF8', newline='')\nwrite = csv.writer(save_file)\nhidden_states = ['up', 'down']\npi = [0.5044, 0.4956]\nstate_space = pd.Series(pi, index=hidden_states, name='states')\nprint(state_space)\nprint('\\n', state_space.sum())\nstack = 0\nx_a = ''\nx_b = ''\ny_a = ''\ny_b = ''\nbefore_application = ''\nadd = []\n\n\ndef count(a, b):\n a = int(a)\n b = int(b)\n if a == 0 and b == 0:\n return 0\n elif a == 0 and b == 1:\n return 1\n elif a == 1 and b == 0:\n return 2\n elif a == 1 and b == 1:\n return 3\n\n\nwhile True:\n line = file_open.readline()\n if not line:\n break\n result_x = []\n result_y = []\n add = []\n if stack == 0:\n a = line.split(',')[0]\n a = a.strip()\n add.append(a)\n a = line.split(',')[1]\n a = a.strip()\n add.append(a)\n a = line.split(',')[2]\n a = a.strip()\n add.append(a)\n write.writerow(add)\n stack = 1\n elif stack == 1:\n before_application = line.split(',')[0]\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n stack = 2\n elif stack == 2:\n if before_application == line.split(',')[0]:\n x_b = line.split(',')[1]\n x_b = x_b.strip()\n y_b = line.split(',')[2]\n y_b = y_b.strip()\n result_x.append(x_a)\n result_x.append(x_b)\n result_y.append(y_a)\n result_y.append(y_b)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 3\n else:\n pass\n before_application = line.split(',')[0]\n elif stack == 3:\n if before_application == line.split(',')[0]:\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n result_x.append(x_b)\n result_x.append(x_a)\n result_y.append(y_b)\n result_y.append(y_a)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 2\n else:\n pass\n before_application = line.split(',')[0]\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\nfile_open = open('C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM(Up,Down).csv',\n 'r', encoding='UTF8')\nsave_file = open('C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM사후확률.csv',\n 'w', encoding='UTF8', newline='')\nwrite = csv.writer(save_file)\nhidden_states = ['up', 'down']\npi = [0.5044, 0.4956]\nstate_space = pd.Series(pi, index=hidden_states, name='states')\nprint(state_space)\nprint('\\n', state_space.sum())\nstack = 0\nx_a = ''\nx_b = ''\ny_a = ''\ny_b = ''\nbefore_application = ''\nadd = []\n\n\ndef count(a, b):\n a = int(a)\n b = int(b)\n if a == 0 and b == 0:\n return 0\n elif a == 0 and b == 1:\n return 1\n elif a == 1 and b == 0:\n return 2\n elif a == 1 and b == 1:\n return 3\n\n\nwhile True:\n line = file_open.readline()\n if not line:\n break\n result_x = []\n result_y = []\n add = []\n if stack == 0:\n a = line.split(',')[0]\n a = a.strip()\n add.append(a)\n a = line.split(',')[1]\n a = a.strip()\n add.append(a)\n a = line.split(',')[2]\n a = a.strip()\n add.append(a)\n write.writerow(add)\n stack = 1\n elif stack == 1:\n before_application = line.split(',')[0]\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n stack = 2\n elif stack == 2:\n if before_application == line.split(',')[0]:\n x_b = line.split(',')[1]\n x_b = x_b.strip()\n y_b = line.split(',')[2]\n y_b = y_b.strip()\n result_x.append(x_a)\n result_x.append(x_b)\n result_y.append(y_a)\n result_y.append(y_b)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 3\n else:\n pass\n before_application = line.split(',')[0]\n elif stack == 3:\n if before_application == line.split(',')[0]:\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n result_x.append(x_b)\n result_x.append(x_a)\n result_y.append(y_b)\n result_y.append(y_a)\n tol = count(result_x[0], result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 2\n else:\n pass\n before_application = line.split(',')[0]\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\n\nfile_open = open(\"C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM(Up,Down).csv\", 'r', encoding='UTF8')\nsave_file = open(\"C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM사후확률.csv\", 'w', encoding='UTF8',newline='')\nwrite = csv.writer(save_file)\n\nhidden_states = ['up', 'down']\npi = [0.5044, 0.4956]\nstate_space = pd.Series(pi, index=hidden_states, name='states')\nprint(state_space)\nprint('\\n', state_space.sum())\n\nstack = 0\nx_a = \"\"\nx_b = \"\"\n\ny_a = \"\"\ny_b = \"\"\nbefore_application = \"\"\nadd = []\ndef count(a,b):\n a = int(a)\n b = int(b)\n if a == 0 and b == 0:\n return 0\n elif a == 0 and b == 1:\n return 1\n elif a == 1 and b == 0:\n return 2\n elif a == 1 and b == 1:\n return 3\n\nwhile True:\n line = file_open.readline()\n if not line: break\n result_x = []\n result_y = []\n add = []\n if stack == 0:\n a = line.split(',')[0]\n a = a.strip()\n add.append(a)\n a = line.split(',')[1]\n a = a.strip()\n add.append(a)\n a = line.split(',')[2]\n a = a.strip()\n add.append(a)\n write.writerow(add)\n\n stack = 1\n elif stack == 1:\n before_application = line.split(',')[0]\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n stack = 2\n\n elif stack == 2:\n if before_application == line.split(',')[0]:\n x_b = line.split(',')[1]\n x_b = x_b.strip()\n y_b = line.split(',')[2]\n y_b = y_b.strip()\n result_x.append(x_a)\n result_x.append(x_b)\n result_y.append(y_a)\n result_y.append(y_b)\n tol = count(result_x[0],result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 3\n else:\n pass\n before_application = line.split(',')[0]\n\n elif stack == 3:\n if before_application == line.split(',')[0]:\n x_a = line.split(',')[1]\n x_a = x_a.strip()\n y_a = line.split(',')[2]\n y_a = y_a.strip()\n result_x.append(x_b)\n result_x.append(x_a)\n result_y.append(y_b)\n result_y.append(y_a)\n\n tol = count(result_x[0],result_x[1])\n add.append(tol)\n tol = count(result_y[0], result_y[1])\n add.append(tol)\n write.writerow(add)\n stack = 2\n else:\n pass\n before_application = line.split(',')[0]\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: WuTian
# @Date : 2018/5/3
# @Contact : [email protected]
# @Desc : find a mango seller with breadth-first search
from collections import deque
graph = {}
graph["you"] = ["alice", "bob", "claire"]
graph["bob"] = ["anuj", "peggy"]
graph["alice"] = ["peggy"]
graph["claire"] = ["thom", "jonny"]
graph["anuj"] = []
graph["peggy"] = []
graph["thom"] = []
graph["jonny"] = []
def is_mango_seller(name):
return name[-1] == "m"
def search_mango_seller(name):
search_queue = deque()
searched = []
global graph
search_queue += graph[name]
while search_queue:
person = search_queue.popleft()
        if person not in searched:
if is_mango_seller(person):
print("%s is a mango seller" % person)
return True
else:
search_queue += graph[person]
searched.append(person)
return False
search_mango_seller("you")
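
# deque.popleft() is O(1) and keeps FIFO order, so nearer contacts are always
# checked before farther ones -- that ordering is what makes the search
# breadth-first; the `searched` list prevents revisiting shared contacts.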
|
normal
|
{
"blob_id": "e881fcfce933d8f3bafcbaab039ddcf98827bf5e",
"index": 4244,
"step-1": "<mask token>\n\n\ndef is_mango_seller(name):\n return name[-1] == 'm'\n\n\ndef search_mango_seller(name):\n search_queue = deque()\n searched = []\n global graph\n search_queue += graph[name]\n while search_queue:\n person = search_queue.popleft()\n if not person in searched:\n if is_mango_seller(person):\n print('%s is a mango seller' % person)\n return True\n else:\n search_queue += graph[person]\n searched.append(person)\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_mango_seller(name):\n return name[-1] == 'm'\n\n\ndef search_mango_seller(name):\n search_queue = deque()\n searched = []\n global graph\n search_queue += graph[name]\n while search_queue:\n person = search_queue.popleft()\n if not person in searched:\n if is_mango_seller(person):\n print('%s is a mango seller' % person)\n return True\n else:\n search_queue += graph[person]\n searched.append(person)\n return False\n\n\nsearch_mango_seller('you')\n",
"step-3": "<mask token>\ngraph = {}\ngraph['you'] = ['alice', 'bob', 'claire']\ngraph['bob'] = ['anuj', 'peggy']\ngraph['alice'] = ['peggy']\ngraph['claire'] = ['thom', 'jonny']\ngraph['anuj'] = []\ngraph['peggy'] = []\ngraph['thom'] = []\ngraph['jonny'] = []\n\n\ndef is_mango_seller(name):\n return name[-1] == 'm'\n\n\ndef search_mango_seller(name):\n search_queue = deque()\n searched = []\n global graph\n search_queue += graph[name]\n while search_queue:\n person = search_queue.popleft()\n if not person in searched:\n if is_mango_seller(person):\n print('%s is a mango seller' % person)\n return True\n else:\n search_queue += graph[person]\n searched.append(person)\n return False\n\n\nsearch_mango_seller('you')\n",
"step-4": "from collections import deque\ngraph = {}\ngraph['you'] = ['alice', 'bob', 'claire']\ngraph['bob'] = ['anuj', 'peggy']\ngraph['alice'] = ['peggy']\ngraph['claire'] = ['thom', 'jonny']\ngraph['anuj'] = []\ngraph['peggy'] = []\ngraph['thom'] = []\ngraph['jonny'] = []\n\n\ndef is_mango_seller(name):\n return name[-1] == 'm'\n\n\ndef search_mango_seller(name):\n search_queue = deque()\n searched = []\n global graph\n search_queue += graph[name]\n while search_queue:\n person = search_queue.popleft()\n if not person in searched:\n if is_mango_seller(person):\n print('%s is a mango seller' % person)\n return True\n else:\n search_queue += graph[person]\n searched.append(person)\n return False\n\n\nsearch_mango_seller('you')\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: WuTian\n# @Date : 2018/5/3\n# @Contact : [email protected]\n# @Desc :使用广度优先搜索查找芒果商\nfrom collections import deque\n\ngraph = {}\ngraph[\"you\"] = [\"alice\", \"bob\", \"claire\"]\ngraph[\"bob\"] = [\"anuj\", \"peggy\"]\ngraph[\"alice\"] = [\"peggy\"]\ngraph[\"claire\"] = [\"thom\", \"jonny\"]\ngraph[\"anuj\"] = []\ngraph[\"peggy\"] = []\ngraph[\"thom\"] = []\ngraph[\"jonny\"] = []\n\n\ndef is_mango_seller(name):\n return name[-1] == \"m\"\n\n\ndef search_mango_seller(name):\n search_queue = deque()\n searched = []\n global graph\n search_queue += graph[name]\n while search_queue:\n person = search_queue.popleft()\n if not person in searched:\n if is_mango_seller(person):\n print(\"%s is a mango seller\" % person)\n return True\n else:\n search_queue += graph[person]\n searched.append(person)\n return False\n\nsearch_mango_seller(\"you\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import mlcd,pygame,time,random
PLAYER_CHAR=">"
OBSTACLE_CHAR="|"
screenbuff=[[" "," "," "," "," "," "," "," "," "," "," "," "],
[" "," "," "," "," "," "," "," "," "," "," "," "]]
player={"position":0,"line":0,"score":000}
game={"speed":4.05,"level":2.5,"obstacle":0}
keys={"space":False,"quit":False,"next":False}
def keypress(): #get keypresses
global keys
keys["space"]=keys["quit"]=keys["next"]=False #reset all keys
#check keys
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
keys["space"] = True
elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
keys["quit"] = True
done=False
#initialize mlcd as 16x2 character lcd
mlcd.init(16,2)
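# pygame's event queue depends on the display module being initialized;
# presumably mlcd.init() (a project-local module) takes care of that.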
lasttime=time.time()
curtime=0.0
while not done:
curtime=time.time()
if (curtime-lasttime>1/game["speed"]):
lasttime=curtime
#increment score and count obstacle
#up the level and increase the speed
if screenbuff[0][player["position"]]==OBSTACLE_CHAR or screenbuff[1][player["position"]]==OBSTACLE_CHAR:
player["score"]+=1
game["obstacle"]-=1
game["level"]+=0.5
game["speed"]+=0.05
#if((game["level"]+2)%game["posmovthres"]==0 and player["position"]<12 and screenbuff[player["line"]][player["position"]+1]!=OBSTACLE_CHAR and screenbuff[player["line"]][player["position"]+2]!=OBSTACLE_CHAR):
# player["position"]+=1
#move everything one place to the left
for lindex,lin in enumerate(screenbuff,start=0):
for index,pos in enumerate(lin, start=0):
if index>0:
screenbuff[lindex][index-1]=pos
#add new chars at end of buff , obstacles if there is a gap
screenbuff[0][-1]=" "
screenbuff[1][-1]=" "
if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2]!=OBSTACLE_CHAR:
if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
lin_temp=random.choice([0,1])
screenbuff[lin_temp][-1]=OBSTACLE_CHAR
game["obstacle"]+=1
elif screenbuff[0][-2] != OBSTACLE_CHAR:
if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
lin_temp=random.choice([0,1])
if(lin_temp==1):
screenbuff[lin_temp][-1]=OBSTACLE_CHAR
game["obstacle"]+=1
elif screenbuff[1][-2] != OBSTACLE_CHAR:
if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
lin_temp=random.choice([0,1])
if(lin_temp==0):
screenbuff[lin_temp][-1]=OBSTACLE_CHAR
game["obstacle"]+=1
#check for collision
if screenbuff[player["line"]][player["position"]]==OBSTACLE_CHAR:
done=True #player lost
#add player to the buffer
screenbuff[player["line"]][player["position"]]=PLAYER_CHAR
#ready the lines for drawing on lcd
lines=[''.join(screenbuff[0]) + "|scr",
''.join(screenbuff[1]) + "|"+str(player["score"])]
mlcd.draw(lines)
#remove player from buffer
screenbuff[player["line"]][player["position"]]=" "
#get keypresses
keypress()
#modify player line (move the player) if space is pressed
if keys["space"]:
if player["line"]==0:
player["line"]=1
else:
player["line"]=0
#quit
if keys["quit"]:
print("game quit")
done=True
pygame.quit()
|
normal
|
{
"blob_id": "aeaab602cbb9fa73992eb5259e8603ecb11ba333",
"index": 4863,
"step-1": "<mask token>\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\n<mask token>\nmlcd.init(16, 2)\n<mask token>\nwhile not done:\n curtime = time.time()\n if curtime - lasttime > 1 / game['speed']:\n lasttime = curtime\n if screenbuff[0][player['position']] == OBSTACLE_CHAR or screenbuff[1][\n player['position']] == OBSTACLE_CHAR:\n player['score'] += 1\n game['obstacle'] -= 1\n game['level'] += 0.5\n game['speed'] += 0.05\n for lindex, lin in enumerate(screenbuff, start=0):\n for index, pos in enumerate(lin, start=0):\n if index > 0:\n screenbuff[lindex][index - 1] = pos\n screenbuff[0][-1] = ' '\n screenbuff[1][-1] = ' '\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2\n ] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 1:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 0:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n if screenbuff[player['line']][player['position']] == OBSTACLE_CHAR:\n done = True\n screenbuff[player['line']][player['position']] = PLAYER_CHAR\n lines = [''.join(screenbuff[0]) + '|scr', ''.join(screenbuff[1]) + '|' +\n str(player['score'])]\n mlcd.draw(lines)\n screenbuff[player['line']][player['position']] = ' '\n keypress()\n if keys['space']:\n if player['line'] == 0:\n player['line'] = 1\n else:\n player['line'] = 0\n if keys['quit']:\n print('game quit')\n done = True\npygame.quit()\n",
"step-3": "<mask token>\nPLAYER_CHAR = '>'\nOBSTACLE_CHAR = '|'\nscreenbuff = [[' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']]\nplayer = {'position': 0, 'line': 0, 'score': 0}\ngame = {'speed': 4.05, 'level': 2.5, 'obstacle': 0}\nkeys = {'space': False, 'quit': False, 'next': False}\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\ndone = False\nmlcd.init(16, 2)\nlasttime = time.time()\ncurtime = 0.0\nwhile not done:\n curtime = time.time()\n if curtime - lasttime > 1 / game['speed']:\n lasttime = curtime\n if screenbuff[0][player['position']] == OBSTACLE_CHAR or screenbuff[1][\n player['position']] == OBSTACLE_CHAR:\n player['score'] += 1\n game['obstacle'] -= 1\n game['level'] += 0.5\n game['speed'] += 0.05\n for lindex, lin in enumerate(screenbuff, start=0):\n for index, pos in enumerate(lin, start=0):\n if index > 0:\n screenbuff[lindex][index - 1] = pos\n screenbuff[0][-1] = ' '\n screenbuff[1][-1] = ' '\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2\n ] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 1:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 0:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n if screenbuff[player['line']][player['position']] == OBSTACLE_CHAR:\n done = True\n screenbuff[player['line']][player['position']] = PLAYER_CHAR\n lines = [''.join(screenbuff[0]) + '|scr', ''.join(screenbuff[1]) + '|' +\n str(player['score'])]\n mlcd.draw(lines)\n screenbuff[player['line']][player['position']] = ' '\n keypress()\n if keys['space']:\n if player['line'] == 0:\n player['line'] = 1\n else:\n player['line'] = 0\n if keys['quit']:\n print('game quit')\n done = True\npygame.quit()\n",
"step-4": "import mlcd, pygame, time, random\nPLAYER_CHAR = '>'\nOBSTACLE_CHAR = '|'\nscreenbuff = [[' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']]\nplayer = {'position': 0, 'line': 0, 'score': 0}\ngame = {'speed': 4.05, 'level': 2.5, 'obstacle': 0}\nkeys = {'space': False, 'quit': False, 'next': False}\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\ndone = False\nmlcd.init(16, 2)\nlasttime = time.time()\ncurtime = 0.0\nwhile not done:\n curtime = time.time()\n if curtime - lasttime > 1 / game['speed']:\n lasttime = curtime\n if screenbuff[0][player['position']] == OBSTACLE_CHAR or screenbuff[1][\n player['position']] == OBSTACLE_CHAR:\n player['score'] += 1\n game['obstacle'] -= 1\n game['level'] += 0.5\n game['speed'] += 0.05\n for lindex, lin in enumerate(screenbuff, start=0):\n for index, pos in enumerate(lin, start=0):\n if index > 0:\n screenbuff[lindex][index - 1] = pos\n screenbuff[0][-1] = ' '\n screenbuff[1][-1] = ' '\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2\n ] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 1:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 0:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n if screenbuff[player['line']][player['position']] == OBSTACLE_CHAR:\n done = True\n screenbuff[player['line']][player['position']] = PLAYER_CHAR\n lines = [''.join(screenbuff[0]) + '|scr', ''.join(screenbuff[1]) + '|' +\n str(player['score'])]\n mlcd.draw(lines)\n screenbuff[player['line']][player['position']] = ' '\n keypress()\n if keys['space']:\n if player['line'] == 0:\n player['line'] = 1\n else:\n player['line'] = 0\n if keys['quit']:\n print('game quit')\n done = True\npygame.quit()\n",
"step-5": "import mlcd,pygame,time,random\n\nPLAYER_CHAR=\">\"\nOBSTACLE_CHAR=\"|\"\n\nscreenbuff=[[\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"],\n [\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"]]\n\nplayer={\"position\":0,\"line\":0,\"score\":000}\ngame={\"speed\":4.05,\"level\":2.5,\"obstacle\":0} \nkeys={\"space\":False,\"quit\":False,\"next\":False}\n\ndef keypress(): #get keypresses\n global keys\n keys[\"space\"]=keys[\"quit\"]=keys[\"next\"]=False #reset all keys\n #check keys\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys[\"space\"] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys[\"quit\"] = True\n\n \n \n\ndone=False\n#initialize mlcd as 16x2 character lcd\nmlcd.init(16,2)\nlasttime=time.time()\ncurtime=0.0\n\nwhile not done:\n curtime=time.time()\n if (curtime-lasttime>1/game[\"speed\"]):\n lasttime=curtime\n\n\n #increment score and count obstacle\n #up the level and increase the speed\n if screenbuff[0][player[\"position\"]]==OBSTACLE_CHAR or screenbuff[1][player[\"position\"]]==OBSTACLE_CHAR:\n player[\"score\"]+=1\n game[\"obstacle\"]-=1\n game[\"level\"]+=0.5\n game[\"speed\"]+=0.05\n #if((game[\"level\"]+2)%game[\"posmovthres\"]==0 and player[\"position\"]<12 and screenbuff[player[\"line\"]][player[\"position\"]+1]!=OBSTACLE_CHAR and screenbuff[player[\"line\"]][player[\"position\"]+2]!=OBSTACLE_CHAR):\n # player[\"position\"]+=1\n\n #move everything one place to the left\n for lindex,lin in enumerate(screenbuff,start=0):\n for index,pos in enumerate(lin, start=0):\n if index>0:\n screenbuff[lindex][index-1]=pos\n \n #add new chars at end of buff , obstacles if there is a gap\n screenbuff[0][-1]=\" \"\n screenbuff[1][-1]=\" \"\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2]!=OBSTACLE_CHAR:\n if game[\"obstacle\"]<int(game[\"level\"]) and random.choice([0,1]):\n lin_temp=random.choice([0,1])\n screenbuff[lin_temp][-1]=OBSTACLE_CHAR\n game[\"obstacle\"]+=1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game[\"obstacle\"]<int(game[\"level\"]) and random.choice([0,1]):\n lin_temp=random.choice([0,1])\n if(lin_temp==1):\n screenbuff[lin_temp][-1]=OBSTACLE_CHAR\n game[\"obstacle\"]+=1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game[\"obstacle\"]<int(game[\"level\"]) and random.choice([0,1]):\n lin_temp=random.choice([0,1])\n if(lin_temp==0):\n screenbuff[lin_temp][-1]=OBSTACLE_CHAR\n game[\"obstacle\"]+=1\n \n\n #check for collision\n if screenbuff[player[\"line\"]][player[\"position\"]]==OBSTACLE_CHAR:\n done=True #player lost\n #add player to the buffer\n screenbuff[player[\"line\"]][player[\"position\"]]=PLAYER_CHAR\n #ready the lines for drawing on lcd\n lines=[''.join(screenbuff[0]) + \"|scr\",\n ''.join(screenbuff[1]) + \"|\"+str(player[\"score\"])]\n mlcd.draw(lines)\n \n #remove player from buffer\n screenbuff[player[\"line\"]][player[\"position\"]]=\" \"\n #get keypresses\n keypress()\n #modify player line (move the player) if space is pressed\n if keys[\"space\"]:\n if player[\"line\"]==0:\n player[\"line\"]=1\n else:\n player[\"line\"]=0\n #quit\n if keys[\"quit\"]:\n print(\"game quit\")\n done=True\npygame.quit()\n \n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
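# true_labels and guesses are not defined in this snippet; the placeholder
# values below are illustrative only, so the example runs end to end
# (here: accuracy 5/6, recall 0.75, precision 1.0, f1 ~0.857).
true_labels = [1, 0, 1, 1, 0, 1]
guesses = [1, 0, 0, 1, 0, 1]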
print(accuracy_score(true_labels, guesses))
print(recall_score(true_labels, guesses))
print(precision_score(true_labels, guesses))
print(f1_score(true_labels, guesses))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(true_labels, guesses))
|
normal
|
{
"blob_id": "faa53db9dd581b6508fb9e4042ec86ebaf850e60",
"index": 5320,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(accuracy_score(true_labels, guesses))\nprint(recall_score(true_labels, guesses))\nprint(precision_score(true_labels, guesses))\nprint(f1_score(true_labels, guesses))\n<mask token>\nprint(confusion_matrix(true_labels, guesses))\n",
"step-3": "from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\nprint(accuracy_score(true_labels, guesses))\nprint(recall_score(true_labels, guesses))\nprint(precision_score(true_labels, guesses))\nprint(f1_score(true_labels, guesses))\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(true_labels, guesses))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# SymBeam examples suit
# ==========================================================================================
# António Carneiro <[email protected]> 2020
# Features: 1. Numeric length
# 2. Pin
# 3. Two rollers
# 4. Numeric distributed constant load
#            5. Numeric distributed quadratic load
#            6. Hinge
import matplotlib.pyplot as plt
from symbeam import beam
test_beam = beam(6, x0=0)
test_beam.add_support(0, "roller")
test_beam.add_support(2, "roller")
test_beam.add_support(6, "pin")
test_beam.add_support(4, "hinge")
test_beam.add_distributed_load(0, 4, -5)
test_beam.add_distributed_load(4, 6, "-(-3*(x-5)**2 + 8)")
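# the quadratic load is given as a symbolic expression in the beam coordinate x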
test_beam.solve()
fig, ax = test_beam.plot()
plt.savefig(__file__.split(".py")[0] + ".svg")
|
normal
|
{
"blob_id": "bdbeebab70a6d69e7553807d48e3539b78b48add",
"index": 2946,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntest_beam.add_support(0, 'roller')\ntest_beam.add_support(2, 'roller')\ntest_beam.add_support(6, 'pin')\ntest_beam.add_support(4, 'hinge')\ntest_beam.add_distributed_load(0, 4, -5)\ntest_beam.add_distributed_load(4, 6, '-(-3*(x-5)**2 + 8)')\ntest_beam.solve()\n<mask token>\nplt.savefig(__file__.split('.py')[0] + '.svg')\n",
"step-3": "<mask token>\ntest_beam = beam(6, x0=0)\ntest_beam.add_support(0, 'roller')\ntest_beam.add_support(2, 'roller')\ntest_beam.add_support(6, 'pin')\ntest_beam.add_support(4, 'hinge')\ntest_beam.add_distributed_load(0, 4, -5)\ntest_beam.add_distributed_load(4, 6, '-(-3*(x-5)**2 + 8)')\ntest_beam.solve()\nfig, ax = test_beam.plot()\nplt.savefig(__file__.split('.py')[0] + '.svg')\n",
"step-4": "import matplotlib.pyplot as plt\nfrom symbeam import beam\ntest_beam = beam(6, x0=0)\ntest_beam.add_support(0, 'roller')\ntest_beam.add_support(2, 'roller')\ntest_beam.add_support(6, 'pin')\ntest_beam.add_support(4, 'hinge')\ntest_beam.add_distributed_load(0, 4, -5)\ntest_beam.add_distributed_load(4, 6, '-(-3*(x-5)**2 + 8)')\ntest_beam.solve()\nfig, ax = test_beam.plot()\nplt.savefig(__file__.split('.py')[0] + '.svg')\n",
"step-5": "# SymBeam examples suit\n# ==========================================================================================\n# António Carneiro <[email protected]> 2020\n# Features: 1. Numeric length\n# 2. Pin\n# 3. Two rollers\n# 4. Numeric distributed constant load\n# 5. Numeric distributed quadratic load\n\nimport matplotlib.pyplot as plt\n\nfrom symbeam import beam\n\n\ntest_beam = beam(6, x0=0)\ntest_beam.add_support(0, \"roller\")\ntest_beam.add_support(2, \"roller\")\ntest_beam.add_support(6, \"pin\")\ntest_beam.add_support(4, \"hinge\")\ntest_beam.add_distributed_load(0, 4, -5)\ntest_beam.add_distributed_load(4, 6, \"-(-3*(x-5)**2 + 8)\")\ntest_beam.solve()\nfig, ax = test_beam.plot()\n\nplt.savefig(__file__.split(\".py\")[0] + \".svg\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, Collection, Container, Iterable, Sequence
from flask import g
from sqlalchemy import or_, select
from sqlalchemy.orm import joinedload
from airflow.auth.managers.fab.models import Permission, Resource, Role, User
from airflow.auth.managers.fab.views.permissions import (
ActionModelView,
PermissionPairModelView,
ResourceModelView,
)
from airflow.auth.managers.fab.views.roles_list import CustomRoleModelView
from airflow.auth.managers.fab.views.user import (
CustomUserDBModelView,
CustomUserLDAPModelView,
CustomUserOAuthModelView,
CustomUserOIDModelView,
CustomUserRemoteUserModelView,
)
from airflow.auth.managers.fab.views.user_edit import (
CustomResetMyPasswordView,
CustomResetPasswordView,
CustomUserInfoEditView,
)
from airflow.auth.managers.fab.views.user_stats import CustomUserStatsChartView
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.models import DagBag, DagModel
from airflow.security import permissions
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.www.extensions.init_auth_manager import get_auth_manager
from airflow.www.fab_security.sqla.manager import SecurityManager
from airflow.www.utils import CustomSQLAInterface
EXISTING_ROLES = {
"Admin",
"Viewer",
"User",
"Op",
"Public",
}
if TYPE_CHECKING:
from sqlalchemy.orm import Session
SecurityManagerOverride: type = object
else:
# Fetch the security manager override from the auth manager
SecurityManagerOverride = get_auth_manager().get_security_manager_override_class()
class AirflowSecurityManager(SecurityManagerOverride, SecurityManager, LoggingMixin):
"""Custom security manager, which introduces a permission model adapted to Airflow."""
###########################################################################
# PERMISSIONS
###########################################################################
# [START security_viewer_perms]
VIEWER_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CLUSTER_ACTIVITY),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_DEPENDENCIES),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DATASET),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CLUSTER_ACTIVITY),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
]
# [END security_viewer_perms]
# [START security_user_perms]
USER_PERMISSIONS = [
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
]
# [END security_user_perms]
# [START security_op_perms]
OP_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),
]
# [END security_op_perms]
ADMIN_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TRIGGER),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TRIGGER),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),
]
# global resource for dag-level access
DAG_RESOURCES = {permissions.RESOURCE_DAG}
DAG_ACTIONS = permissions.DAG_ACTIONS
###########################################################################
# DEFAULT ROLE CONFIGURATIONS
###########################################################################
ROLE_CONFIGS: list[dict[str, Any]] = [
{"role": "Public", "perms": []},
{"role": "Viewer", "perms": VIEWER_PERMISSIONS},
{
"role": "User",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS,
},
{
"role": "Op",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,
},
{
"role": "Admin",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,
},
]
actionmodelview = ActionModelView
permissionmodelview = PermissionPairModelView
rolemodelview = CustomRoleModelView
resourcemodelview = ResourceModelView
userdbmodelview = CustomUserDBModelView
resetmypasswordview = CustomResetMyPasswordView
resetpasswordview = CustomResetPasswordView
userinfoeditview = CustomUserInfoEditView
userldapmodelview = CustomUserLDAPModelView
useroauthmodelview = CustomUserOAuthModelView
userremoteusermodelview = CustomUserRemoteUserModelView
useroidmodelview = CustomUserOIDModelView
userstatschartview = CustomUserStatsChartView
def __init__(self, appbuilder) -> None:
super().__init__(
appbuilder=appbuilder,
actionmodelview=self.actionmodelview,
authdbview=self.authdbview,
authldapview=self.authldapview,
authoauthview=self.authoauthview,
authoidview=self.authoidview,
authremoteuserview=self.authremoteuserview,
permissionmodelview=self.permissionmodelview,
registeruser_view=self.registeruser_view,
registeruserdbview=self.registeruserdbview,
registeruseroauthview=self.registeruseroauthview,
registerusermodelview=self.registerusermodelview,
registeruseroidview=self.registeruseroidview,
resetmypasswordview=self.resetmypasswordview,
resetpasswordview=self.resetpasswordview,
rolemodelview=self.rolemodelview,
user_model=self.user_model,
userinfoeditview=self.userinfoeditview,
userdbmodelview=self.userdbmodelview,
userldapmodelview=self.userldapmodelview,
useroauthmodelview=self.useroauthmodelview,
useroidmodelview=self.useroidmodelview,
userremoteusermodelview=self.userremoteusermodelview,
userstatschartview=self.userstatschartview,
)
# Go and fix up the SQLAInterface used from the stock one to our subclass.
# This is needed to support the "hack" where we had to edit
# FieldConverter.conversion_table in place in airflow.www.utils
for attr in dir(self):
if not attr.endswith("view"):
continue
view = getattr(self, attr, None)
if not view or not getattr(view, "datamodel", None):
continue
view.datamodel = CustomSQLAInterface(view.datamodel.obj)
self.perms = None
def _get_root_dag_id(self, dag_id: str) -> str:
if "." in dag_id:
dm = self.appbuilder.get_session.execute(
select(DagModel.dag_id, DagModel.root_dag_id).where(DagModel.dag_id == dag_id)
).one()
return dm.root_dag_id or dm.dag_id
return dag_id
def init_role(self, role_name, perms) -> None:
"""
Initialize the role with actions and related resources.
        :param role_name: name of the role to create or update
        :param perms: iterable of (action_name, resource_name) pairs to attach
        :return: None
"""
warnings.warn(
"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
self.bulk_sync_roles([{"role": role_name, "perms": perms}])
def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None:
"""Sync the provided roles and permissions."""
existing_roles = self._get_all_roles_with_permissions()
non_dag_perms = self._get_all_non_dag_permissions()
for config in roles:
role_name = config["role"]
perms = config["perms"]
role = existing_roles.get(role_name) or self.add_role(role_name)
for action_name, resource_name in perms:
perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(
action_name, resource_name
)
if perm not in role.permissions:
self.add_permission_to_role(role, perm)
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
return user.roles
def get_readable_dags(self, user) -> Iterable[DagModel]:
"""Gets the DAGs readable by authenticated user."""
warnings.warn(
"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)
def get_editable_dags(self, user) -> Iterable[DagModel]:
"""Gets the DAGs editable by authenticated user."""
warnings.warn(
"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)
@provide_session
def get_accessible_dags(
self,
user_actions: Container[str] | None,
user,
session: Session = NEW_SESSION,
) -> Iterable[DagModel]:
warnings.warn(
"`get_accessible_dags` has been deprecated. Please use `get_accessible_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=3,
)
dag_ids = self.get_accessible_dag_ids(user, user_actions, session)
return session.scalars(select(DagModel).where(DagModel.dag_id.in_(dag_ids)))
def get_readable_dag_ids(self, user) -> set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
def get_editable_dag_ids(self, user) -> set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
@provide_session
def get_accessible_dag_ids(
self,
user,
user_actions: Container[str] | None = None,
session: Session = NEW_SESSION,
) -> set[str]:
"""Generic function to get readable or writable DAGs for user."""
if not user_actions:
user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
if not get_auth_manager().is_logged_in():
roles = user.roles
else:
if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (
permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)
):
return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}
user_query = session.scalar(
select(User)
.options(
joinedload(User.roles)
.subqueryload(Role.permissions)
.options(joinedload(Permission.action), joinedload(Permission.resource))
)
.where(User.id == user.id)
)
roles = user_query.roles
resources = set()
for role in roles:
for permission in role.permissions:
action = permission.action.name
if action not in user_actions:
continue
resource = permission.resource.name
if resource == permissions.RESOURCE_DAG:
return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}
if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])
else:
resources.add(resource)
return {
dag.dag_id
for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))
}
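    # Usage sketch (`sm` and `user` are placeholders for a security manager
    # and a FAB user object; the action constant is the real one from
    # airflow.security.permissions):
    #
    #     readable = sm.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
    #
    # Passing no actions defaults to the union of readable and editable DAGs.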
def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != "~":
root_dag_id = self._get_root_dag_id(dag_id)
return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dag_ids(user))
return any(self.get_editable_dag_ids(user))
def can_read_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG read access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)
def can_edit_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG edit access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)
def can_delete_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG delete access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user)
def prefixed_dag_id(self, dag_id: str) -> str:
"""Returns the permission name for a DAG id."""
warnings.warn(
"`prefixed_dag_id` has been deprecated. "
"Please use `airflow.security.permissions.resource_name_for_dag` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
root_dag_id = self._get_root_dag_id(dag_id)
return permissions.resource_name_for_dag(root_dag_id)
def is_dag_resource(self, resource_name: str) -> bool:
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def has_access(self, action_name: str, resource_name: str, user=None) -> bool:
"""
Verify whether a given user could perform a certain action on the given resource.
Example actions might include can_read, can_write, can_delete, etc.
:param action_name: action_name on resource (e.g can_read, can_edit).
:param resource_name: name of view-menu or resource.
:param user: user name
:return: Whether user could perform certain action on the resource.
:rtype bool
"""
if not user:
user = g.user
if (action_name, resource_name) in user.perms:
return True
if self.is_dag_resource(resource_name):
if (action_name, permissions.RESOURCE_DAG) in user.perms:
return True
return (action_name, resource_name) in user.perms
return False
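    # Usage sketch (the per-DAG resource name is illustrative): a DAG-level
    # check falls back to the global DAG permission, so both calls below can
    # return True for a user holding only (can_read, RESOURCE_DAG):
    #
    #     sm.has_access("can_read", "DAG:example_dag")
    #     sm.has_access("can_read", permissions.RESOURCE_DAG)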
def _has_role(self, role_name_or_list: Container, user) -> bool:
"""Whether the user has this role name."""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in user.roles)
def has_all_dags_access(self, user) -> bool:
"""
Has all the dag access in any of the 3 cases.
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
if not user:
user = g.user
return (
self._has_role(["Admin", "Viewer", "Op", "User"], user)
or self.can_read_all_dags(user)
or self.can_edit_all_dags(user)
)
def can_edit_all_dags(self, user=None) -> bool:
"""Has can_edit action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)
def can_read_all_dags(self, user=None) -> bool:
"""Has can_read action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)
def clean_perms(self) -> None:
"""FAB leaves faulty permissions that need to be cleaned up."""
self.log.debug("Cleaning faulty perms")
sesh = self.appbuilder.get_session
perms = sesh.query(Permission).filter(
or_(
Permission.action == None, # noqa
Permission.resource == None, # noqa
)
)
# Since FAB doesn't define ON DELETE CASCADE on these tables, we need
# to delete the _object_ so that SQLA knows to delete the many-to-many
# relationship object too. :(
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info("Deleted %s faulty permissions", deleted_count)
def _merge_perm(self, action_name: str, resource_name: str) -> None:
"""
Add the new (action, resource) to assoc_permission_role if it doesn't exist.
It will add the related entry to ab_permission and ab_resource two meta tables as well.
:param action_name: Name of the action
:param resource_name: Name of the resource
:return:
"""
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
perm = None
if action and resource:
perm = self.appbuilder.get_session.scalar(
select(self.permission_model).filter_by(action=action, resource=resource).limit(1)
)
if not perm and action_name and resource_name:
self.create_permission(action_name, resource_name)
def add_homepage_access_to_custom_roles(self) -> None:
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.appbuilder.get_session.commit()
def get_all_permissions(self) -> set[tuple[str, str]]:
"""Returns all permissions as a set of tuples with the action and resource names."""
return set(
self.appbuilder.get_session.execute(
select(self.action_model.name, self.resource_model.name)
.join(self.permission_model.action)
.join(self.permission_model.resource)
)
)
def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]:
"""
Get permissions except those that are for specific DAGs.
Returns a dict with a key of (action_name, resource_name) and value of permission
with all permissions except those that are for specific DAGs.
"""
return {
(action_name, resource_name): viewmodel
for action_name, resource_name, viewmodel in (
self.appbuilder.get_session.execute(
select(self.action_model.name, self.resource_model.name, self.permission_model)
.join(self.permission_model.action)
.join(self.permission_model.resource)
.where(~self.resource_model.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
)
)
}
def _get_all_roles_with_permissions(self) -> dict[str, Role]:
"""Returns a dict with a key of role name and value of role with early loaded permissions."""
return {
r.name: r
for r in self.appbuilder.get_session.scalars(
select(self.role_model).options(joinedload(self.role_model.permissions))
).unique()
}
def create_dag_specific_permissions(self) -> None:
"""
Add permissions to all DAGs.
Creates 'can_read', 'can_edit', and 'can_delete' permissions for all
DAGs, along with any `access_control` permissions provided in them.
This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
if you only need to sync a single DAG.
:return: None.
"""
perms = self.get_all_permissions()
dagbag = DagBag(read_dags_from_db=True)
dagbag.collect_dags_from_db()
dags = dagbag.dags.values()
for dag in dags:
root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
for action_name in self.DAG_ACTIONS:
if (action_name, dag_resource_name) not in perms:
self._merge_perm(action_name, dag_resource_name)
if dag.access_control:
self.sync_perm_for_dag(dag_resource_name, dag.access_control)
def update_admin_permission(self) -> None:
"""
Add missing permissions to the table for admin.
Admin should get all the permissions, except the dag permissions
because Admin already has Dags permission.
Add the missing ones to the table for admin.
:return: None.
"""
session = self.appbuilder.get_session
dag_resources = session.scalars(
select(Resource).where(Resource.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
)
resource_ids = [resource.id for resource in dag_resources]
perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))
perms = [p for p in perms if p.action and p.resource]
admin = self.find_role("Admin")
admin.permissions = list(set(admin.permissions) | set(perms))
session.commit()
def sync_roles(self) -> None:
"""
Initialize default and custom roles with related permissions.
1. Init the default role(Admin, Viewer, User, Op, public)
with related permissions.
2. Init the custom role(dag-user) with related permissions.
:return: None.
"""
# Create global all-dag permissions
self.create_perm_vm_for_all_dag()
# Sync the default roles (Admin, Viewer, User, Op, public) with related permissions
self.bulk_sync_roles(self.ROLE_CONFIGS)
self.add_homepage_access_to_custom_roles()
        # Init existing roles; the remaining roles can be created through the UI.
self.update_admin_permission()
self.clean_perms()
def sync_resource_permissions(self, perms: Iterable[tuple[str, str]] | None = None) -> None:
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
def sync_perm_for_dag(
self,
dag_id: str,
access_control: dict[str, Collection[str]] | None = None,
) -> None:
"""
Sync permissions for given dag id.
The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g.,
{'can_read'}
:return:
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
for dag_action_name in self.DAG_ACTIONS:
self.create_permission(dag_action_name, dag_resource_name)
if access_control is not None:
self.log.info("Syncing DAG-level permissions for DAG '%s'", dag_resource_name)
self._sync_dag_view_permissions(dag_resource_name, access_control)
else:
self.log.info(
"Not syncing DAG-level permissions for DAG '%s' as access control is unset.",
dag_resource_name,
)
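    # Usage sketch (dag id and role name are hypothetical):
    #
    #     sm.sync_perm_for_dag(
    #         "example_dag",
    #         access_control={"DagAuditor": {"can_read"}},
    #     )
    #
    # Omitting access_control only (re)creates the standard per-DAG
    # permissions and leaves existing role grants untouched.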
def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:
"""
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g. {'can_read'})
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
def _get_or_create_dag_permission(action_name: str) -> Permission | None:
perm = self.get_permission(action_name, dag_resource_name)
if not perm:
self.log.info("Creating new action '%s' on resource '%s'", action_name, dag_resource_name)
perm = self.create_permission(action_name, dag_resource_name)
return perm
def _revoke_stale_permissions(resource: Resource):
existing_dag_perms = self.get_resource_permissions(resource)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name != "Admin"]
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, ())
if perm.action.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.action,
dag_resource_name,
role.name,
)
self.remove_permission_from_role(role, perm)
resource = self.get_resource(dag_resource_name)
if resource:
_revoke_stale_permissions(resource)
for rolename, action_names in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
f"The access_control mapping for DAG '{dag_id}' includes a role named "
f"'{rolename}', but that role does not exist"
)
action_names = set(action_names)
invalid_action_names = action_names - self.DAG_ACTIONS
if invalid_action_names:
raise AirflowException(
f"The access_control map for DAG '{dag_resource_name}' includes "
f"the following invalid permissions: {invalid_action_names}; "
f"The set of valid permissions is: {self.DAG_ACTIONS}"
)
for action_name in action_names:
dag_perm = _get_or_create_dag_permission(action_name)
if dag_perm:
self.add_permission_to_role(role, dag_perm)
def create_perm_vm_for_all_dag(self) -> None:
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
# create perm for global logical dag
for resource_name in self.DAG_RESOURCES:
for action_name in self.DAG_ACTIONS:
self._merge_perm(action_name, resource_name)
def check_authorization(
self,
perms: Sequence[tuple[str, str]] | None = None,
dag_id: str | None = None,
) -> bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in (
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
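    # Usage sketch ("example_dag" is a placeholder): a global DAG permission
    # check degrades to a per-DAG check when the user lacks all-DAGs access:
    #
    #     sm.check_authorization(
    #         perms=[(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)],
    #         dag_id="example_dag",
    #     )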
class FakeAppBuilder:
"""Stand-in class to replace a Flask App Builder.
The only purpose is to provide the ``self.appbuilder.get_session`` interface
for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask
app, which is slow to create.
"""
def __init__(self, session: Session | None = None) -> None:
self.get_session = session
class ApplessAirflowSecurityManager(AirflowSecurityManager):
"""Security Manager that doesn't need the whole flask app."""
def __init__(self, session: Session | None = None):
self.appbuilder = FakeAppBuilder(session)
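# A minimal sketch of using the appless manager outside a Flask app (e.g. in
# tests or maintenance scripts). It assumes a configured Airflow installation;
# `create_session` is the real helper from airflow.utils.session, and the role
# looked up below is one of the default roles synced by sync_roles().
if __name__ == "__main__":
    from airflow.utils.session import create_session

    with create_session() as session:
        sm = ApplessAirflowSecurityManager(session=session)
        # Query the security model without spinning up the Flask app.
        print(sm.find_role("Viewer"))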
Init the custom role(dag-user) with related permissions.\n\n        :return: None.\n        \"\"\"\n        self.create_perm_vm_for_all_dag()\n        self.bulk_sync_roles(self.ROLE_CONFIGS)\n        self.add_homepage_access_to_custom_roles()\n        self.update_admin_permission()\n        self.clean_perms()\n\n    def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |\n        None)=None) ->None:\n        \"\"\"Populates resource-based permissions.\"\"\"\n        if not perms:\n            return\n        for action_name, resource_name in perms:\n            self.create_resource(resource_name)\n            self.create_permission(action_name, resource_name)\n\n    def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,\n        Collection[str]] | None)=None) ->None:\n        \"\"\"\n        Sync permissions for given dag id.\n\n        The dag id is assumed to exist in the dag bag, since only the home-page refresh button or the DagBag itself calls this function.\n\n        :param dag_id: the ID of the DAG whose permissions should be updated\n        :param access_control: a dict where each key is a rolename and\n            each value is a set() of action names (e.g.,\n            {'can_read'})\n        :return:\n        \"\"\"\n        dag_resource_name = permissions.resource_name_for_dag(dag_id)\n        for dag_action_name in self.DAG_ACTIONS:\n            self.create_permission(dag_action_name, dag_resource_name)\n        if access_control is not None:\n            self.log.info(\"Syncing DAG-level permissions for DAG '%s'\",\n                dag_resource_name)\n            self._sync_dag_view_permissions(dag_resource_name, access_control)\n        else:\n            self.log.info(\n                \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\"\n                , dag_resource_name)\n\n    def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[\n        str, Collection[str]]) ->None:\n        \"\"\"\n        Set the access policy on the given DAG's ViewModel.\n\n        :param dag_id: the ID of the DAG whose permissions should be updated\n        :param access_control: a dict where each key is a rolename and\n            each value is a set() of action names (e.g. 
{'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) ->(Permission |\n None):\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\",\n action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name !=\n 'Admin']\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\"Revoking '%s' on DAG '%s' for role '%s'\"\n , perm.action, dag_resource_name, role.name)\n self.remove_permission_from_role(role, perm)\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist\"\n )\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) ->None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)\n =None, dag_id: (str | None)=None) ->bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n for perm in perms:\n if perm in ((permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG)):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n elif not self.has_access(*perm):\n return False\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: (Session | None)=None) ->None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: (Session | None)=None):\n self.appbuilder = FakeAppBuilder(session)\n",
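Between the masked step-3 string above and the fuller step-4/step-5 variants below, the logic most worth isolating is how `get_accessible_dag_ids` turns a role's (action, resource) permission pairs into concrete DAG ids: a grant on the blanket DAG resource short-circuits to every DAG, while per-DAG resources carry a prefix that is stripped before matching. The standalone sketch below reproduces that resolution outside Airflow; the constant values ("DAGs", "DAG:") and the sample permissions are illustrative assumptions, not values confirmed by this record.

# Standalone sketch of the resource-to-DAG-id resolution performed by
# get_accessible_dag_ids. The two constants mirror, but are assumed rather
# than imported from, airflow.security.permissions.
RESOURCE_DAG = "DAGs"          # blanket resource: access to every DAG
RESOURCE_DAG_PREFIX = "DAG:"   # per-DAG resources look like "DAG:my_dag"

def accessible_dag_ids(role_perms, user_actions, all_dag_ids):
    """role_perms: iterable of (action_name, resource_name) pairs."""
    resources = set()
    for action, resource in role_perms:
        if action not in user_actions:
            continue
        if resource == RESOURCE_DAG:
            return set(all_dag_ids)  # blanket grant short-circuits
        if resource.startswith(RESOURCE_DAG_PREFIX):
            resources.add(resource[len(RESOURCE_DAG_PREFIX):])
        else:
            resources.add(resource)
    return resources & set(all_dag_ids)

# Hypothetical role: may read one DAG and edit another.
perms = [("can_read", "DAG:etl_daily"), ("can_edit", "DAG:cleanup")]
print(accessible_dag_ids(perms, {"can_read"}, {"etl_daily", "cleanup"}))
# -> {'etl_daily'}

The early return mirrors the short-circuit in `get_accessible_dag_ids` when a role holds the blanket DAG resource directly; everything else reduces to set operations on the stripped resource names.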
"step-4": "<mask token>\nEXISTING_ROLES = {'Admin', 'Viewer', 'User', 'Op', 'Public'}\nif TYPE_CHECKING:\n from sqlalchemy.orm import Session\n SecurityManagerOverride: type = object\nelse:\n SecurityManagerOverride = get_auth_manager(\n ).get_security_manager_override_class()\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager,\n LoggingMixin):\n \"\"\"Custom security manager, which introduces a permission model adapted to Airflow.\"\"\"\n VIEWER_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_AUDIT_LOG), (permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG_DEPENDENCIES), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_DAG_CODE), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_DATASET), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_CLUSTER_ACTIVITY), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_JOB), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD), (permissions.\n ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE), (permissions.\n ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_PLUGIN), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_XCOM), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.\n RESOURCE_DAG_DEPENDENCIES), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_DATASET), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_CLUSTER_ACTIVITY), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.\n RESOURCE_TASK_INSTANCE)]\n USER_PERMISSIONS = [(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_CREATE, permissions.\n RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_DELETE,\n permissions.RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_CREATE,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_EDIT,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_DELETE,\n permissions.RESOURCE_DAG_RUN)]\n OP_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_CONFIG), (permissions.ACTION_CAN_ACCESS_MENU, permissions.\n RESOURCE_ADMIN_MENU), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_CONFIG), 
(permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_CONNECTION), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM), (\n permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER), (\n permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM)]\n ADMIN_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_TASK_RESCHEDULE), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_TASK_RESCHEDULE), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_TRIGGER), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_TRIGGER), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_PASSWORD), (permissions.ACTION_CAN_EDIT,\n permissions.RESOURCE_PASSWORD), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_ROLE), (permissions.ACTION_CAN_EDIT,\n permissions.RESOURCE_ROLE)]\n DAG_RESOURCES = {permissions.RESOURCE_DAG}\n DAG_ACTIONS = permissions.DAG_ACTIONS\n ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},\n {'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {\n 'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +\n OP_PERMISSIONS + ADMIN_PERMISSIONS}]\n actionmodelview = ActionModelView\n permissionmodelview = PermissionPairModelView\n rolemodelview = CustomRoleModelView\n resourcemodelview = ResourceModelView\n userdbmodelview = CustomUserDBModelView\n resetmypasswordview = CustomResetMyPasswordView\n resetpasswordview = CustomResetPasswordView\n userinfoeditview = CustomUserInfoEditView\n userldapmodelview = CustomUserLDAPModelView\n useroauthmodelview = CustomUserOAuthModelView\n userremoteusermodelview = CustomUserRemoteUserModelView\n useroidmodelview = CustomUserOIDModelView\n userstatschartview = CustomUserStatsChartView\n\n def __init__(self, appbuilder) ->None:\n super().__init__(appbuilder=appbuilder, actionmodelview=self.\n actionmodelview, authdbview=self.authdbview, authldapview=self.\n authldapview, authoauthview=self.authoauthview, authoidview=\n self.authoidview, authremoteuserview=self.authremoteuserview,\n permissionmodelview=self.permissionmodelview, registeruser_view\n =self.registeruser_view, registeruserdbview=self.\n registeruserdbview, registeruseroauthview=self.\n registeruseroauthview, registerusermodelview=self.\n registerusermodelview, registeruseroidview=self.\n registeruseroidview, resetmypasswordview=self.\n resetmypasswordview, resetpasswordview=self.resetpasswordview,\n rolemodelview=self.rolemodelview, 
user_model=self.user_model,\n userinfoeditview=self.userinfoeditview, userdbmodelview=self.\n userdbmodelview, userldapmodelview=self.userldapmodelview,\n useroauthmodelview=self.useroauthmodelview, useroidmodelview=\n self.useroidmodelview, userremoteusermodelview=self.\n userremoteusermodelview, userstatschartview=self.userstatschartview\n )\n for attr in dir(self):\n if not attr.endswith('view'):\n continue\n view = getattr(self, attr, None)\n if not view or not getattr(view, 'datamodel', None):\n continue\n view.datamodel = CustomSQLAInterface(view.datamodel.obj)\n self.perms = None\n\n def _get_root_dag_id(self, dag_id: str) ->str:\n if '.' in dag_id:\n dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,\n DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n\n def init_role(self, role_name, perms) ->None:\n \"\"\"\n Initialize the role with actions and related resources.\n\n :param role_name:\n :param perms:\n :return:\n \"\"\"\n warnings.warn(\n '`init_role` has been deprecated. Please use `bulk_sync_roles` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n self.bulk_sync_roles([{'role': role_name, 'perms': perms}])\n\n def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) ->None:\n \"\"\"Sync the provided roles and permissions.\"\"\"\n existing_roles = self._get_all_roles_with_permissions()\n non_dag_perms = self._get_all_non_dag_permissions()\n for config in roles:\n role_name = config['role']\n perms = config['perms']\n role = existing_roles.get(role_name) or self.add_role(role_name)\n for action_name, resource_name in perms:\n perm = non_dag_perms.get((action_name, resource_name)\n ) or self.create_permission(action_name, resource_name)\n if perm not in role.permissions:\n self.add_permission_to_role(role, perm)\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n '`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user\n )\n\n def get_editable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n '`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user\n )\n\n @provide_session\n def get_accessible_dags(self, user_actions: (Container[str] | None),\n user, session: Session=NEW_SESSION) ->Iterable[DagModel]:\n warnings.warn(\n '`get_accessible_dags` has been deprecated. 
Please use `get_accessible_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=3)\n dag_ids = self.get_accessible_dag_ids(user, user_actions, session)\n return session.scalars(select(DagModel).where(DagModel.dag_id.in_(\n dag_ids)))\n\n def get_readable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(self, user, user_actions: (Container[str] |\n None)=None, session: Session=NEW_SESSION) ->set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.\n ACTION_CAN_READ]\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.\n can_edit_all_dags(user) or permissions.ACTION_CAN_READ in\n user_actions and self.can_read_all_dags(user)):\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n user_query = session.scalar(select(User).options(joinedload(\n User.roles).subqueryload(Role.permissions).options(\n joinedload(Permission.action), joinedload(Permission.\n resource))).where(User.id == user.id))\n roles = user_query.roles\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.\n RESOURCE_DAG_PREFIX):])\n else:\n resources.add(resource)\n return {dag.dag_id for dag in session.execute(select(DagModel.\n dag_id).where(DagModel.dag_id.in_(resources)))}\n\n def can_access_some_dags(self, action: str, dag_id: (str | None)=None\n ) ->bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != '~':\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.\n resource_name_for_dag(root_dag_id))\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ,\n dag_resource_name, user=user)\n\n def can_edit_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG edit access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_EDIT,\n dag_resource_name, user=user)\n\n def can_delete_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG delete access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE,\n dag_resource_name, user=user)\n\n def 
prefixed_dag_id(self, dag_id: str) ->str:\n        \"\"\"Returns the permission name for a DAG id.\"\"\"\n        warnings.warn(\n            '`prefixed_dag_id` has been deprecated. Please use `airflow.security.permissions.resource_name_for_dag` instead.'\n            , RemovedInAirflow3Warning, stacklevel=2)\n        root_dag_id = self._get_root_dag_id(dag_id)\n        return permissions.resource_name_for_dag(root_dag_id)\n\n    def is_dag_resource(self, resource_name: str) ->bool:\n        \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n        if resource_name == permissions.RESOURCE_DAG:\n            return True\n        return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n    def has_access(self, action_name: str, resource_name: str, user=None\n        ) ->bool:\n        \"\"\"\n        Verify whether a given user could perform a certain action on the given resource.\n\n        Example actions might include can_read, can_write, can_delete, etc.\n\n        :param action_name: action_name on resource (e.g. can_read, can_edit).\n        :param resource_name: name of view-menu or resource.\n        :param user: user name\n        :return: Whether user could perform certain action on the resource.\n        :rtype: bool\n        \"\"\"\n        if not user:\n            user = g.user\n        if (action_name, resource_name) in user.perms:\n            return True\n        if self.is_dag_resource(resource_name):\n            if (action_name, permissions.RESOURCE_DAG) in user.perms:\n                return True\n            return (action_name, resource_name) in user.perms\n        return False\n\n    def _has_role(self, role_name_or_list: Container, user) ->bool:\n        \"\"\"Whether the user has this role name.\"\"\"\n        if not isinstance(role_name_or_list, list):\n            role_name_or_list = [role_name_or_list]\n        return any(r.name in role_name_or_list for r in user.roles)\n\n    def has_all_dags_access(self, user) ->bool:\n        \"\"\"\n        Has all the dag access in any of the 3 cases.\n\n        1. Role needs to be in (Admin, Viewer, User, Op).\n        2. Has can_read action on dags resource.\n        3. 
Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user\n ) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)\n\n def can_edit_all_dags(self, user=None) ->bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG, user)\n\n def can_read_all_dags(self, user=None) ->bool:\n \"\"\"Has can_read action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG, user)\n\n def clean_perms(self) ->None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug('Cleaning faulty perms')\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(or_(Permission.action == None,\n Permission.resource == None))\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info('Deleted %s faulty permissions', deleted_count)\n\n def _merge_perm(self, action_name: str, resource_name: str) ->None:\n \"\"\"\n Add the new (action, resource) to assoc_permission_role if it doesn't exist.\n\n It will add the related entry to ab_permission and ab_resource two meta tables as well.\n\n :param action_name: Name of the action\n :param resource_name: Name of the resource\n :return:\n \"\"\"\n action = self.get_action(action_name)\n resource = self.get_resource(resource_name)\n perm = None\n if action and resource:\n perm = self.appbuilder.get_session.scalar(select(self.\n permission_model).filter_by(action=action, resource=\n resource).limit(1))\n if not perm and action_name and resource_name:\n self.create_permission(action_name, resource_name)\n\n def add_homepage_access_to_custom_roles(self) ->None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name\n not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) ->set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(self.appbuilder.get_session.execute(select(self.\n action_model.name, self.resource_model.name).join(self.\n permission_model.action).join(self.permission_model.resource)))\n\n def _get_all_non_dag_permissions(self) ->dict[tuple[str, str], Permission]:\n \"\"\"\n Get permissions except those that are for specific DAGs.\n\n Returns a dict with a key of (action_name, resource_name) and value of permission\n with all permissions except those that are for specific DAGs.\n \"\"\"\n return {(action_name, resource_name): viewmodel for action_name,\n resource_name, viewmodel in self.appbuilder.get_session.execute\n (select(self.action_model.name, self.resource_model.name, self.\n permission_model).join(self.permission_model.action).join(self.\n permission_model.resource).where(~self.resource_model.name.like\n (f'{permissions.RESOURCE_DAG_PREFIX}%')))}\n\n def _get_all_roles_with_permissions(self) ->dict[str, Role]:\n \"\"\"Returns a dict with a key of role name and value of role with early loaded permissions.\"\"\"\n return {r.name: r for r in self.appbuilder.get_session.scalars(\n 
select(self.role_model).options(joinedload(self.role_model.\n permissions))).unique()}\n\n def create_dag_specific_permissions(self) ->None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n for dag in dags:\n root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag\n .dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) ->None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(select(Resource).where(Resource.\n name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))\n resource_ids = [resource.id for resource in dag_resources]\n perms = session.scalars(select(Permission).where(~Permission.\n resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n admin = self.find_role('Admin')\n admin.permissions = list(set(admin.permissions) | set(perms))\n session.commit()\n\n def sync_roles(self) ->None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. 
Init the custom role(dag-user) with related permissions.\n\n        :return: None.\n        \"\"\"\n        self.create_perm_vm_for_all_dag()\n        self.bulk_sync_roles(self.ROLE_CONFIGS)\n        self.add_homepage_access_to_custom_roles()\n        self.update_admin_permission()\n        self.clean_perms()\n\n    def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |\n        None)=None) ->None:\n        \"\"\"Populates resource-based permissions.\"\"\"\n        if not perms:\n            return\n        for action_name, resource_name in perms:\n            self.create_resource(resource_name)\n            self.create_permission(action_name, resource_name)\n\n    def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,\n        Collection[str]] | None)=None) ->None:\n        \"\"\"\n        Sync permissions for given dag id.\n\n        The dag id is assumed to exist in the dag bag, since only the home-page refresh button or the DagBag itself calls this function.\n\n        :param dag_id: the ID of the DAG whose permissions should be updated\n        :param access_control: a dict where each key is a rolename and\n            each value is a set() of action names (e.g.,\n            {'can_read'})\n        :return:\n        \"\"\"\n        dag_resource_name = permissions.resource_name_for_dag(dag_id)\n        for dag_action_name in self.DAG_ACTIONS:\n            self.create_permission(dag_action_name, dag_resource_name)\n        if access_control is not None:\n            self.log.info(\"Syncing DAG-level permissions for DAG '%s'\",\n                dag_resource_name)\n            self._sync_dag_view_permissions(dag_resource_name, access_control)\n        else:\n            self.log.info(\n                \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\"\n                , dag_resource_name)\n\n    def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[\n        str, Collection[str]]) ->None:\n        \"\"\"\n        Set the access policy on the given DAG's ViewModel.\n\n        :param dag_id: the ID of the DAG whose permissions should be updated\n        :param access_control: a dict where each key is a rolename and\n            each value is a set() of action names (e.g. 
{'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) ->(Permission |\n None):\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\",\n action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name !=\n 'Admin']\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\"Revoking '%s' on DAG '%s' for role '%s'\"\n , perm.action, dag_resource_name, role.name)\n self.remove_permission_from_role(role, perm)\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist\"\n )\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) ->None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)\n =None, dag_id: (str | None)=None) ->bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n for perm in perms:\n if perm in ((permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG)):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n elif not self.has_access(*perm):\n return False\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: (Session | None)=None) ->None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: (Session | None)=None):\n self.appbuilder = FakeAppBuilder(session)\n",
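Both the step-4 and step-5 strings end in the same DAG-level access-control machinery: `sync_perm_for_dag` creates the per-DAG permissions and `_sync_dag_view_permissions` applies the `access_control` mapping declared on a DAG. A minimal sketch of such a declaration follows; the DAG id, start date, and role names are hypothetical. Per the code above, each named role must already exist in FAB (otherwise an AirflowException is raised), and every action must come from `DAG_ACTIONS` (`can_read`, `can_edit`, `can_delete`).

# Hypothetical DAG whose access_control dict is consumed by
# _sync_dag_view_permissions during permission sync; roles not listed
# here have any stale per-DAG grants revoked (Admin excepted).
import datetime

from airflow.models.dag import DAG

with DAG(
    dag_id="example_access_control",               # made-up DAG id
    start_date=datetime.datetime(2023, 1, 1),
    schedule=None,
    access_control={
        "analyst": {"can_read"},                   # read-only visibility
        "pipeline_ops": {"can_read", "can_edit"},  # may also edit runs
    },
):
    pass

On sync, `create_dag_specific_permissions` registers the three `DAG_ACTIONS` on the DAG's prefixed resource and, when `access_control` is set, hands the mapping to `sync_perm_for_dag`, which grants the listed actions to each role and revokes anything stale for non-Admin roles.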
"step-5": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport warnings\nfrom typing import TYPE_CHECKING, Any, Collection, Container, Iterable, Sequence\n\nfrom flask import g\nfrom sqlalchemy import or_, select\nfrom sqlalchemy.orm import joinedload\n\nfrom airflow.auth.managers.fab.models import Permission, Resource, Role, User\nfrom airflow.auth.managers.fab.views.permissions import (\n ActionModelView,\n PermissionPairModelView,\n ResourceModelView,\n)\nfrom airflow.auth.managers.fab.views.roles_list import CustomRoleModelView\nfrom airflow.auth.managers.fab.views.user import (\n CustomUserDBModelView,\n CustomUserLDAPModelView,\n CustomUserOAuthModelView,\n CustomUserOIDModelView,\n CustomUserRemoteUserModelView,\n)\nfrom airflow.auth.managers.fab.views.user_edit import (\n CustomResetMyPasswordView,\n CustomResetPasswordView,\n CustomUserInfoEditView,\n)\nfrom airflow.auth.managers.fab.views.user_stats import CustomUserStatsChartView\nfrom airflow.exceptions import AirflowException, RemovedInAirflow3Warning\nfrom airflow.models import DagBag, DagModel\nfrom airflow.security import permissions\nfrom airflow.utils.log.logging_mixin import LoggingMixin\nfrom airflow.utils.session import NEW_SESSION, provide_session\nfrom airflow.www.extensions.init_auth_manager import get_auth_manager\nfrom airflow.www.fab_security.sqla.manager import SecurityManager\nfrom airflow.www.utils import CustomSQLAInterface\n\nEXISTING_ROLES = {\n \"Admin\",\n \"Viewer\",\n \"User\",\n \"Op\",\n \"Public\",\n}\n\nif TYPE_CHECKING:\n from sqlalchemy.orm import Session\n\n SecurityManagerOverride: type = object\nelse:\n # Fetch the security manager override from the auth manager\n SecurityManagerOverride = get_auth_manager().get_security_manager_override_class()\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager, LoggingMixin):\n \"\"\"Custom security manager, which introduces a permission model adapted to Airflow.\"\"\"\n\n ###########################################################################\n # PERMISSIONS\n ###########################################################################\n\n # [START security_viewer_perms]\n VIEWER_PERMISSIONS = [\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_CLUSTER_ACTIVITY),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),\n (permissions.ACTION_CAN_READ, 
permissions.RESOURCE_DAG_WARNING),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_DEPENDENCIES),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DATASET),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CLUSTER_ACTIVITY),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),\n ]\n # [END security_viewer_perms]\n\n # [START security_user_perms]\n USER_PERMISSIONS = [\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),\n ]\n # [END security_user_perms]\n\n # [START security_op_perms]\n OP_PERMISSIONS = [\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONFIG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_READ, 
permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),\n ]\n # [END security_op_perms]\n\n ADMIN_PERMISSIONS = [\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TRIGGER),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TRIGGER),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),\n ]\n\n # global resource for dag-level access\n DAG_RESOURCES = {permissions.RESOURCE_DAG}\n DAG_ACTIONS = permissions.DAG_ACTIONS\n\n ###########################################################################\n # DEFAULT ROLE CONFIGURATIONS\n ###########################################################################\n\n ROLE_CONFIGS: list[dict[str, Any]] = [\n {\"role\": \"Public\", \"perms\": []},\n {\"role\": \"Viewer\", \"perms\": VIEWER_PERMISSIONS},\n {\n \"role\": \"User\",\n \"perms\": VIEWER_PERMISSIONS + USER_PERMISSIONS,\n },\n {\n \"role\": \"Op\",\n \"perms\": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,\n },\n {\n \"role\": \"Admin\",\n \"perms\": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,\n },\n ]\n\n actionmodelview = ActionModelView\n permissionmodelview = PermissionPairModelView\n rolemodelview = CustomRoleModelView\n resourcemodelview = ResourceModelView\n userdbmodelview = CustomUserDBModelView\n resetmypasswordview = CustomResetMyPasswordView\n resetpasswordview = CustomResetPasswordView\n userinfoeditview = CustomUserInfoEditView\n userldapmodelview = CustomUserLDAPModelView\n useroauthmodelview = CustomUserOAuthModelView\n userremoteusermodelview = CustomUserRemoteUserModelView\n useroidmodelview = CustomUserOIDModelView\n userstatschartview = CustomUserStatsChartView\n\n def __init__(self, appbuilder) -> None:\n super().__init__(\n appbuilder=appbuilder,\n actionmodelview=self.actionmodelview,\n authdbview=self.authdbview,\n authldapview=self.authldapview,\n authoauthview=self.authoauthview,\n authoidview=self.authoidview,\n authremoteuserview=self.authremoteuserview,\n permissionmodelview=self.permissionmodelview,\n registeruser_view=self.registeruser_view,\n registeruserdbview=self.registeruserdbview,\n registeruseroauthview=self.registeruseroauthview,\n registerusermodelview=self.registerusermodelview,\n registeruseroidview=self.registeruseroidview,\n resetmypasswordview=self.resetmypasswordview,\n resetpasswordview=self.resetpasswordview,\n rolemodelview=self.rolemodelview,\n user_model=self.user_model,\n userinfoeditview=self.userinfoeditview,\n userdbmodelview=self.userdbmodelview,\n userldapmodelview=self.userldapmodelview,\n useroauthmodelview=self.useroauthmodelview,\n useroidmodelview=self.useroidmodelview,\n userremoteusermodelview=self.userremoteusermodelview,\n userstatschartview=self.userstatschartview,\n )\n\n # Go and fix up the SQLAInterface used from the stock one to our subclass.\n # This is needed to support the \"hack\" where we had to edit\n # FieldConverter.conversion_table in place in airflow.www.utils\n for attr in dir(self):\n if not attr.endswith(\"view\"):\n continue\n view = getattr(self, attr, 
None)\n if not view or not getattr(view, \"datamodel\", None):\n continue\n view.datamodel = CustomSQLAInterface(view.datamodel.obj)\n self.perms = None\n\n def _get_root_dag_id(self, dag_id: str) -> str:\n if \".\" in dag_id:\n dm = self.appbuilder.get_session.execute(\n select(DagModel.dag_id, DagModel.root_dag_id).where(DagModel.dag_id == dag_id)\n ).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n\n def init_role(self, role_name, perms) -> None:\n \"\"\"\n Initialize the role with actions and related resources.\n\n :param role_name:\n :param perms:\n :return:\n \"\"\"\n warnings.warn(\n \"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n self.bulk_sync_roles([{\"role\": role_name, \"perms\": perms}])\n\n def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None:\n \"\"\"Sync the provided roles and permissions.\"\"\"\n existing_roles = self._get_all_roles_with_permissions()\n non_dag_perms = self._get_all_non_dag_permissions()\n\n for config in roles:\n role_name = config[\"role\"]\n perms = config[\"perms\"]\n role = existing_roles.get(role_name) or self.add_role(role_name)\n\n for action_name, resource_name in perms:\n perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(\n action_name, resource_name\n )\n\n if perm not in role.permissions:\n self.add_permission_to_role(role, perm)\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) -> Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n \"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)\n\n def get_editable_dags(self, user) -> Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n \"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)\n\n @provide_session\n def get_accessible_dags(\n self,\n user_actions: Container[str] | None,\n user,\n session: Session = NEW_SESSION,\n ) -> Iterable[DagModel]:\n warnings.warn(\n \"`get_accessible_dags` has been deprecated. 
Please use `get_accessible_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=3,\n )\n dag_ids = self.get_accessible_dag_ids(user, user_actions, session)\n return session.scalars(select(DagModel).where(DagModel.dag_id.in_(dag_ids)))\n\n def get_readable_dag_ids(self, user) -> set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) -> set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(\n self,\n user,\n user_actions: Container[str] | None = None,\n session: Session = NEW_SESSION,\n ) -> set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]\n\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (\n permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)\n ):\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n user_query = session.scalar(\n select(User)\n .options(\n joinedload(User.roles)\n .subqueryload(Role.permissions)\n .options(joinedload(Permission.action), joinedload(Permission.resource))\n )\n .where(User.id == user.id)\n )\n roles = user_query.roles\n\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])\n else:\n resources.add(resource)\n return {\n dag.dag_id\n for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))\n }\n\n def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != \"~\":\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))\n\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) -> bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)\n\n def can_edit_dag(self, dag_id: str, user=None) -> bool:\n \"\"\"Determines whether a user has DAG edit access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)\n\n def can_delete_dag(self, dag_id: str, user=None) -> bool:\n \"\"\"Determines whether a user has DAG delete access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE, 
dag_resource_name, user=user)\n\n    def prefixed_dag_id(self, dag_id: str) -> str:\n        \"\"\"Returns the permission name for a DAG id.\"\"\"\n        warnings.warn(\n            \"`prefixed_dag_id` has been deprecated. \"\n            \"Please use `airflow.security.permissions.resource_name_for_dag` instead.\",\n            RemovedInAirflow3Warning,\n            stacklevel=2,\n        )\n        root_dag_id = self._get_root_dag_id(dag_id)\n        return permissions.resource_name_for_dag(root_dag_id)\n\n    def is_dag_resource(self, resource_name: str) -> bool:\n        \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n        if resource_name == permissions.RESOURCE_DAG:\n            return True\n        return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n    def has_access(self, action_name: str, resource_name: str, user=None) -> bool:\n        \"\"\"\n        Verify whether a given user could perform a certain action on the given resource.\n\n        Example actions might include can_read, can_write, can_delete, etc.\n\n        :param action_name: action_name on resource (e.g. can_read, can_edit).\n        :param resource_name: name of view-menu or resource.\n        :param user: user name\n        :return: Whether user could perform certain action on the resource.\n        :rtype: bool\n        \"\"\"\n        if not user:\n            user = g.user\n        if (action_name, resource_name) in user.perms:\n            return True\n\n        if self.is_dag_resource(resource_name):\n            if (action_name, permissions.RESOURCE_DAG) in user.perms:\n                return True\n            return (action_name, resource_name) in user.perms\n\n        return False\n\n    def _has_role(self, role_name_or_list: Container, user) -> bool:\n        \"\"\"Whether the user has this role name.\"\"\"\n        if not isinstance(role_name_or_list, list):\n            role_name_or_list = [role_name_or_list]\n        return any(r.name in role_name_or_list for r in user.roles)\n\n    def has_all_dags_access(self, user) -> bool:\n        \"\"\"\n        Has all the dag access in any of the 3 cases.\n\n        1. Role needs to be in (Admin, Viewer, User, Op).\n        2. Has can_read action on dags resource.\n        3. Has can_edit action on dags resource.\n        \"\"\"\n        if not user:\n            user = g.user\n        return (\n            self._has_role([\"Admin\", \"Viewer\", \"Op\", \"User\"], user)\n            or self.can_read_all_dags(user)\n            or self.can_edit_all_dags(user)\n        )\n\n    def can_edit_all_dags(self, user=None) -> bool:\n        \"\"\"Has can_edit action on DAG resource.\"\"\"\n        return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)\n\n    def can_read_all_dags(self, user=None) -> bool:\n        \"\"\"Has can_read action on DAG resource.\"\"\"\n        return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)\n\n    def clean_perms(self) -> None:\n        \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n        self.log.debug(\"Cleaning faulty perms\")\n        sesh = self.appbuilder.get_session\n        perms = sesh.query(Permission).filter(\n            or_(\n                Permission.action == None,  # noqa\n                Permission.resource == None,  # noqa\n            )\n        )\n        # Since FAB doesn't define ON DELETE CASCADE on these tables, we need\n        # to delete the _object_ so that SQLA knows to delete the many-to-many\n        # relationship object too. 
:(\n\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info(\"Deleted %s faulty permissions\", deleted_count)\n\n def _merge_perm(self, action_name: str, resource_name: str) -> None:\n \"\"\"\n Add the new (action, resource) to assoc_permission_role if it doesn't exist.\n\n It will add the related entry to ab_permission and ab_resource two meta tables as well.\n\n :param action_name: Name of the action\n :param resource_name: Name of the resource\n :return:\n \"\"\"\n action = self.get_action(action_name)\n resource = self.get_resource(resource_name)\n perm = None\n if action and resource:\n perm = self.appbuilder.get_session.scalar(\n select(self.permission_model).filter_by(action=action, resource=resource).limit(1)\n )\n if not perm and action_name and resource_name:\n self.create_permission(action_name, resource_name)\n\n def add_homepage_access_to_custom_roles(self) -> None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) -> set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(\n self.appbuilder.get_session.execute(\n select(self.action_model.name, self.resource_model.name)\n .join(self.permission_model.action)\n .join(self.permission_model.resource)\n )\n )\n\n def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]:\n \"\"\"\n Get permissions except those that are for specific DAGs.\n\n Returns a dict with a key of (action_name, resource_name) and value of permission\n with all permissions except those that are for specific DAGs.\n \"\"\"\n return {\n (action_name, resource_name): viewmodel\n for action_name, resource_name, viewmodel in (\n self.appbuilder.get_session.execute(\n select(self.action_model.name, self.resource_model.name, self.permission_model)\n .join(self.permission_model.action)\n .join(self.permission_model.resource)\n .where(~self.resource_model.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n )\n }\n\n def _get_all_roles_with_permissions(self) -> dict[str, Role]:\n \"\"\"Returns a dict with a key of role name and value of role with early loaded permissions.\"\"\"\n return {\n r.name: r\n for r in self.appbuilder.get_session.scalars(\n select(self.role_model).options(joinedload(self.role_model.permissions))\n ).unique()\n }\n\n def create_dag_specific_permissions(self) -> None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. 
See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n\n for dag in dags:\n root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) -> None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()\n\n def sync_roles(self) -> None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()\n # init existing roles, the rest role could be created through UI.\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: Iterable[tuple[str, str]] | None = None) -> None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(\n self,\n dag_id: str,\n access_control: dict[str, Collection[str]] | None = None,\n ) -> None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\", dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\",\n dag_resource_name,\n )\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n 
:param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. {'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) -> Permission | None:\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\", action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name != \"Admin\"]\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\n \"Revoking '%s' on DAG '%s' for role '%s'\",\n perm.action,\n dag_resource_name,\n role.name,\n )\n self.remove_permission_from_role(role, perm)\n\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named \"\n f\"'{rolename}', but that role does not exist\"\n )\n\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes \"\n f\"the following invalid permissions: {invalid_action_names}; \"\n f\"The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) -> None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n # create perm for global logical dag\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(\n self,\n perms: Sequence[tuple[str, str]] | None = None,\n dag_id: str | None = None,\n ) -> bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n\n for perm in perms:\n if perm in (\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),\n ):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n\n elif not self.has_access(*perm):\n return False\n\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: Session | None = None) -> None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, 
session: Session | None = None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-ids": [
33,
36,
40,
47,
49
]
}
|
[
33,
36,
40,
47,
49
] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'FormHello.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FormHello(object):
def setupUi(self, FormHello):
FormHello.setObjectName("FormHello")
FormHello.resize(705, 477)
self.LabelHello = QtWidgets.QLabel(FormHello)
self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.LabelHello.setFont(font)
self.LabelHello.setObjectName("LabelHello")
self.btnClose = QtWidgets.QPushButton(FormHello)
self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))
self.btnClose.setObjectName("btnClose")
self.retranslateUi(FormHello)
QtCore.QMetaObject.connectSlotsByName(FormHello)
def retranslateUi(self, FormHello):
_translate = QtCore.QCoreApplication.translate
FormHello.setWindowTitle(_translate("FormHello", "Demo2_2"))
self.LabelHello.setText(_translate("FormHello", " Hello, by UI Designer"))
self.btnClose.setText(_translate("FormHello", "关闭"))
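
# Usage sketch (not part of the pyuic5 output; the names `app`, `form`, and
# `ui` below are illustrative). The generated class is applied to a QWidget host:
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    form = QtWidgets.QWidget()
    ui = Ui_FormHello()
    ui.setupUi(form)
    ui.btnClose.clicked.connect(form.close)  # wire the close button to the host widget
    form.show()
    sys.exit(app.exec_())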
|
normal
|
{
"blob_id": "fc20a2bf09d510892a4d144fbbd2cb2012c3ad98",
"index": 8579,
"step-1": "<mask token>\n\n\nclass Ui_FormHello(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_FormHello(object):\n\n def setupUi(self, FormHello):\n FormHello.setObjectName('FormHello')\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName('LabelHello')\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName('btnClose')\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Ui_FormHello(object):\n\n def setupUi(self, FormHello):\n FormHello.setObjectName('FormHello')\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName('LabelHello')\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName('btnClose')\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n\n def retranslateUi(self, FormHello):\n _translate = QtCore.QCoreApplication.translate\n FormHello.setWindowTitle(_translate('FormHello', 'Demo2_2'))\n self.LabelHello.setText(_translate('FormHello',\n ' Hello, by UI Designer'))\n self.btnClose.setText(_translate('FormHello', '关闭'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_FormHello(object):\n\n def setupUi(self, FormHello):\n FormHello.setObjectName('FormHello')\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName('LabelHello')\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName('btnClose')\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n\n def retranslateUi(self, FormHello):\n _translate = QtCore.QCoreApplication.translate\n FormHello.setWindowTitle(_translate('FormHello', 'Demo2_2'))\n self.LabelHello.setText(_translate('FormHello',\n ' Hello, by UI Designer'))\n self.btnClose.setText(_translate('FormHello', '关闭'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'FormHello.ui'\n#\n# Created by: PyQt5 UI code generator 5.15.4\n#\n# WARNING: Any manual changes made to this file will be lost when pyuic5 is\n# run again. Do not edit this file unless you know what you are doing.\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_FormHello(object):\n def setupUi(self, FormHello):\n FormHello.setObjectName(\"FormHello\")\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName(\"LabelHello\")\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName(\"btnClose\")\n\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n\n def retranslateUi(self, FormHello):\n _translate = QtCore.QCoreApplication.translate\n FormHello.setWindowTitle(_translate(\"FormHello\", \"Demo2_2\"))\n self.LabelHello.setText(_translate(\"FormHello\", \" Hello, by UI Designer\"))\n self.btnClose.setText(_translate(\"FormHello\", \"关闭\"))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#%%
### Splitting date data
# Extract parts from year-month-day date data
import pandas as pd
df = pd.read_csv('../../datasets/part5/stock-data.csv')
# Convert the string date data to a pandas Timestamp
df['new_Date'] = pd.to_datetime(df['Date'])  # add as a new column to df
print(df.head())
print()
# Use the dt accessor to split the new_Date column's year-month-day into year, month, and day
df['Year'] = df['new_Date'].dt.year
df['Month'] = df['new_Date'].dt.month
df['Day'] = df['new_Date'].dt.day
print(df.head())
print('------------------')
# Convert Timestamp to Period to change the year-month-day representation
# Apply the to_period() method to extract year-month or just the year
df['Date_yr'] = df['new_Date'].dt.to_period(freq='A')  # store values representing the year
df['Date_m'] = df['new_Date'].dt.to_period(freq='M')   # store values representing year-month
print(df.head())
print('------------------')
# Set the desired column as the row index
df.set_index('Date_m', inplace=True)
print(df.head())
# %%
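# %%
# Self-contained check (a sketch: the CSV path above is environment-specific,
# so this inline frame stands in for it; only the 'Date' column name is reused)
sample = pd.DataFrame({'Date': ['2018-07-02', '2018-06-29', '2018-06-28']})
sample['new_Date'] = pd.to_datetime(sample['Date'])
sample['Date_m'] = sample['new_Date'].dt.to_period(freq='M')
print(sample.set_index('Date_m'))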
|
normal
|
{
"blob_id": "d89e1d653c6db322feb6edba93cbfc622bf47aa2",
"index": 2781,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(df.head())\nprint()\n<mask token>\nprint(df.head())\nprint('------------------')\n<mask token>\nprint(df.head())\nprint('------------------')\ndf.set_index('Date_m', inplace=True)\nprint(df.head())\n",
"step-3": "<mask token>\ndf = pd.read_csv('../../datasets/part5/stock-data.csv')\ndf['new_Date'] = pd.to_datetime(df['Date'])\nprint(df.head())\nprint()\ndf['Year'] = df['new_Date'].dt.year\ndf['Month'] = df['new_Date'].dt.month\ndf['Day'] = df['new_Date'].dt.day\nprint(df.head())\nprint('------------------')\ndf['Date_yr'] = df['new_Date'].dt.to_period(freq='A')\ndf['Date_m'] = df['new_Date'].dt.to_period(freq='M')\nprint(df.head())\nprint('------------------')\ndf.set_index('Date_m', inplace=True)\nprint(df.head())\n",
"step-4": "import pandas as pd\ndf = pd.read_csv('../../datasets/part5/stock-data.csv')\ndf['new_Date'] = pd.to_datetime(df['Date'])\nprint(df.head())\nprint()\ndf['Year'] = df['new_Date'].dt.year\ndf['Month'] = df['new_Date'].dt.month\ndf['Day'] = df['new_Date'].dt.day\nprint(df.head())\nprint('------------------')\ndf['Date_yr'] = df['new_Date'].dt.to_period(freq='A')\ndf['Date_m'] = df['new_Date'].dt.to_period(freq='M')\nprint(df.head())\nprint('------------------')\ndf.set_index('Date_m', inplace=True)\nprint(df.head())\n",
"step-5": "#%%\n\n### 날짜 데이터 분리\n# 연-월-일 날짜 데이터에서 일부 분리 추출\n\nimport pandas as pd\n\ndf = pd.read_csv('../../datasets/part5/stock-data.csv')\n\n# 문자열인 날짜 데이터를 판다스 Timestamp로 변환\ndf['new_Date'] = pd.to_datetime(df['Date']) # df에 새로운 열로 추가\nprint(df.head())\nprint()\n\n# dt 속성을 이용하여 new_Data 열의 연-월-일 정보를 년, 월, 일로 구분\ndf['Year'] = df['new_Date'].dt.year\ndf['Month'] = df['new_Date'].dt.month\ndf['Day'] = df['new_Date'].dt.day\nprint(df.head())\nprint('------------------')\n\n# Timestamp를 Period로 변환하여 연-월-일 표기 변경하기\n# to_period() 메소드를 적용하여, 연-월-일 중 연-월 또는 연도를 추출 \ndf['Date_yr'] = df['new_Date'].dt.to_period(freq='A') # 연도를 나타내는 값 저장\ndf['Date_m'] = df['new_Date'].dt.to_period(freq='M') # 연-월을 나타내는 값 저장\nprint(df.head())\nprint('------------------')\n\n# 원하는 열을 행 인덱스로 지정\ndf.set_index('Date_m', inplace=True)\nprint(df.head())\n# %%\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import asyncio  # needed for the TimeoutError handler below
import discord, requests
from random import choice
TOKEN = 'TOKEN'
CONTACT_EMAIL = None #'Contact email for getting 10000 words/day instead of 1000'
translate_command = '$t'
id_start = '<@!'
client = discord.Client()
def unescape(text):
    # Reconstructed: the HTML entity names (&#39;, &lt;, &gt;) were rendered
    # away in this copy; the translation API returns HTML-escaped text
    return text.replace('&#39;', '\'').replace('&lt;', '<').replace('&gt;', '>')  # to improve
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
@client.event
async def on_message(message):
if message.content.startswith(translate_command):
lang = message.content[len(translate_command):message.content.find(' ')]
ttt = message.content[len(translate_command)+len(lang)+1:]
s = ttt.find(id_start)
while s != -1:
e = ttt.find('>',s)
ttt = ttt[:s]+client.get_user(int(ttt[s+len(id_start):e])).name+ttt[e:]
s = ttt.find(id_start)
body = {
'q': ttt,
'langpair': lang+'|en' if len(lang) == 2 else lang[:2]+'|'+lang[2:],
'de': CONTACT_EMAIL
}
r = requests.get('https://api.mymemory.translated.net/get', params=body)
message_sent = await message.channel.send(unescape(r.json()['responseData']['translatedText']))
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '❌'
try:
reaction, user = await client.wait_for('reaction_add', timeout=600.0, check=check)
except asyncio.TimeoutError:
pass
else:
await message_sent.delete()
client.run(TOKEN)
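
# Command format (illustrative, inferred from the parsing above):
#   "$tfr bonjour"    -> langpair "fr|en"  (2-letter code: translate into English)
#   "$tfres bonjour"  -> langpair "fr|es"  (4 letters read as source|target)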
|
normal
|
{
"blob_id": "1ab69874a89311b22220dda541dfe03462a98a55",
"index": 2243,
"step-1": "<mask token>\n\n\ndef unescape(text):\n return text.replace(''', \"'\").replace('<', '<').replace('>', '>')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef unescape(text):\n return text.replace(''', \"'\").replace('<', '<').replace('>', '>')\n\n\[email protected]\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\n\[email protected]\nasync def on_message(message):\n if message.content.startswith(translate_command):\n lang = message.content[len(translate_command):message.content.find(' ')\n ]\n ttt = message.content[len(translate_command) + len(lang) + 1:]\n s = ttt.find(id_start)\n while s != -1:\n e = ttt.find('>', s)\n ttt = ttt[:s] + client.get_user(int(ttt[s + len(id_start):e])\n ).name + ttt[e:]\n s = ttt.find(id_start)\n body = {'q': ttt, 'langpair': lang + '|en' if len(lang) == 2 else \n lang[:2] + '|' + lang[2:], 'de': CONTACT_EMAIL}\n r = requests.get('https://api.mymemory.translated.net/get', params=body\n )\n message_sent = await message.channel.send(unescape(r.json()[\n 'responseData']['translatedText']))\n\n def check(reaction, user):\n return user == message.author and str(reaction.emoji) == '❌'\n try:\n reaction, user = await client.wait_for('reaction_add', timeout=\n 600.0, check=check)\n except asyncio.TimeoutError:\n pass\n else:\n await message_sent.delete()\n\n\nclient.run(TOKEN)\n",
"step-3": "<mask token>\nTOKEN = 'TOKEN'\nCONTACT_EMAIL = None\ntranslate_command = '$t'\nid_start = '<@!'\nclient = discord.Client()\n\n\ndef unescape(text):\n return text.replace(''', \"'\").replace('<', '<').replace('>', '>')\n\n\[email protected]\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\n\[email protected]\nasync def on_message(message):\n if message.content.startswith(translate_command):\n lang = message.content[len(translate_command):message.content.find(' ')\n ]\n ttt = message.content[len(translate_command) + len(lang) + 1:]\n s = ttt.find(id_start)\n while s != -1:\n e = ttt.find('>', s)\n ttt = ttt[:s] + client.get_user(int(ttt[s + len(id_start):e])\n ).name + ttt[e:]\n s = ttt.find(id_start)\n body = {'q': ttt, 'langpair': lang + '|en' if len(lang) == 2 else \n lang[:2] + '|' + lang[2:], 'de': CONTACT_EMAIL}\n r = requests.get('https://api.mymemory.translated.net/get', params=body\n )\n message_sent = await message.channel.send(unescape(r.json()[\n 'responseData']['translatedText']))\n\n def check(reaction, user):\n return user == message.author and str(reaction.emoji) == '❌'\n try:\n reaction, user = await client.wait_for('reaction_add', timeout=\n 600.0, check=check)\n except asyncio.TimeoutError:\n pass\n else:\n await message_sent.delete()\n\n\nclient.run(TOKEN)\n",
"step-4": "import discord, requests\nfrom random import choice\nTOKEN = 'TOKEN'\nCONTACT_EMAIL = None\ntranslate_command = '$t'\nid_start = '<@!'\nclient = discord.Client()\n\n\ndef unescape(text):\n return text.replace(''', \"'\").replace('<', '<').replace('>', '>')\n\n\[email protected]\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\n\[email protected]\nasync def on_message(message):\n if message.content.startswith(translate_command):\n lang = message.content[len(translate_command):message.content.find(' ')\n ]\n ttt = message.content[len(translate_command) + len(lang) + 1:]\n s = ttt.find(id_start)\n while s != -1:\n e = ttt.find('>', s)\n ttt = ttt[:s] + client.get_user(int(ttt[s + len(id_start):e])\n ).name + ttt[e:]\n s = ttt.find(id_start)\n body = {'q': ttt, 'langpair': lang + '|en' if len(lang) == 2 else \n lang[:2] + '|' + lang[2:], 'de': CONTACT_EMAIL}\n r = requests.get('https://api.mymemory.translated.net/get', params=body\n )\n message_sent = await message.channel.send(unescape(r.json()[\n 'responseData']['translatedText']))\n\n def check(reaction, user):\n return user == message.author and str(reaction.emoji) == '❌'\n try:\n reaction, user = await client.wait_for('reaction_add', timeout=\n 600.0, check=check)\n except asyncio.TimeoutError:\n pass\n else:\n await message_sent.delete()\n\n\nclient.run(TOKEN)\n",
"step-5": "import discord, requests\r\nfrom random import choice\r\n\r\nTOKEN = 'TOKEN'\r\nCONTACT_EMAIL = None #'Contact email for getting 10000 words/day instead of 1000'\r\n\r\ntranslate_command = '$t'\r\nid_start = '<@!'\r\n\r\nclient = discord.Client()\r\n\r\ndef unescape(text):\r\n return text.replace(''', '\\'').replace('<','<').replace('>', '>') # to improve\r\n\r\[email protected]\r\nasync def on_ready():\r\n print(f'{client.user} has connected to Discord!')\r\n\r\[email protected]\r\nasync def on_message(message):\r\n if message.content.startswith(translate_command):\r\n lang = message.content[len(translate_command):message.content.find(' ')]\r\n ttt = message.content[len(translate_command)+len(lang)+1:]\r\n s = ttt.find(id_start)\r\n while s != -1:\r\n e = ttt.find('>',s)\r\n ttt = ttt[:s]+client.get_user(int(ttt[s+len(id_start):e])).name+ttt[e:]\r\n s = ttt.find(id_start)\r\n body = {\r\n 'q': ttt,\r\n 'langpair': lang+'|en' if len(lang) == 2 else lang[:2]+'|'+lang[2:],\r\n 'de': CONTACT_EMAIL\r\n }\r\n r = requests.get('https://api.mymemory.translated.net/get', params=body)\r\n \r\n message_sent = await message.channel.send(unescape(r.json()['responseData']['translatedText']))\r\n \r\n def check(reaction, user):\r\n return user == message.author and str(reaction.emoji) == '❌'\r\n \r\n try:\r\n reaction, user = await client.wait_for('reaction_add', timeout=600.0, check=check)\r\n except asyncio.TimeoutError:\r\n pass\r\n else:\r\n await message_sent.delete()\r\n\r\nclient.run(TOKEN)\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from pylab import *
def f(x,y): return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)
n = 256
x = np.linspace(-3,3,n)
y = np.linspace(-3,3,n)
X,Y = np.meshgrid(x,y)
axes([0.025,0.025,0.95,0.95])
contourf(X, Y, f(X,Y), 8, alpha=.75, cmap=cm.hot)
C = contour(X, Y, f(X,Y), 8, colors='black', linewidths=.5)
clabel(C, inline=1, fontsize=10)
xticks([]), yticks([])
savefig('../figures/contour_ex.png',dpi=48)
show()
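
# Note (a sketch): the positional `8` above asks for 8 contour levels; an
# explicit list of levels is also accepted, e.g.:
# levels = np.linspace(f(X, Y).min(), f(X, Y).max(), 8)
# contourf(X, Y, f(X, Y), levels=levels, alpha=.75, cmap=cm.hot)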
|
normal
|
{
"blob_id": "e9c439eafac8fd689980ffcb562f3b5ee903dd56",
"index": 2604,
"step-1": "<mask token>\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\n<mask token>\naxes([0.025, 0.025, 0.95, 0.95])\ncontourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot)\n<mask token>\nclabel(C, inline=1, fontsize=10)\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png', dpi=48)\nshow()\n",
"step-3": "<mask token>\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\nn = 256\nx = np.linspace(-3, 3, n)\ny = np.linspace(-3, 3, n)\nX, Y = np.meshgrid(x, y)\naxes([0.025, 0.025, 0.95, 0.95])\ncontourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot)\nC = contour(X, Y, f(X, Y), 8, colors='black', linewidth=0.5)\nclabel(C, inline=1, fontsize=10)\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png', dpi=48)\nshow()\n",
"step-4": "from pylab import *\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\nn = 256\nx = np.linspace(-3, 3, n)\ny = np.linspace(-3, 3, n)\nX, Y = np.meshgrid(x, y)\naxes([0.025, 0.025, 0.95, 0.95])\ncontourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot)\nC = contour(X, Y, f(X, Y), 8, colors='black', linewidth=0.5)\nclabel(C, inline=1, fontsize=10)\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png', dpi=48)\nshow()\n",
"step-5": "from pylab import *\n\ndef f(x,y): return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)\n\nn = 256\nx = np.linspace(-3,3,n)\ny = np.linspace(-3,3,n)\nX,Y = np.meshgrid(x,y)\n\naxes([0.025,0.025,0.95,0.95])\n\ncontourf(X, Y, f(X,Y), 8, alpha=.75, cmap=cm.hot)\nC = contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5)\nclabel(C, inline=1, fontsize=10)\n\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png',dpi=48)\nshow()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class Solution:
    # @param arrive : list of integers
    # @param depart : list of integers
    # @param K : integer
    # @return a boolean
    def hotel(self, arrive, depart, K):
        self.count = 0
        # Sort arrivals and departures independently (selection-sort style swaps)
        for i in range(len(arrive)):
            for j in range(i, len(depart)):
                if arrive[j] < arrive[i]:
                    arrive[i], arrive[j] = arrive[j], arrive[i]
                if depart[j] < depart[i]:
                    depart[i], depart[j] = depart[j], depart[i]
        # Count overlaps: later arrivals that fall before an earlier departure
        for i in range(len(arrive)):
            self.x = i
            while self.x + 1 < len(arrive) and arrive[self.x + 1] < depart[self.x]:
                self.count = self.count + 1
                self.x = self.x + 1
        print("Count: ", self.count)
        print("K: ", K)
        print("Arrive: ", arrive)
        print("Depart: ", depart)
        # Bookable iff the number of overlaps stays below the room count
        return self.count < K


obj = Solution()
l1 = [1, 2, 3, 4]
l2 = [10, 2, 6, 14]
k = 1
print(obj.hotel(l1, l2, k))
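
# Alternative sketch (not the original approach): the standard booking check
# sorts arrivals and departures separately and sweeps both lists, tracking how
# many guests are in-house at once.
def hotel_sweep(arrive, depart, K):
    arrive, depart = sorted(arrive), sorted(depart)
    i = j = in_house = 0
    while i < len(arrive):
        if arrive[i] < depart[j]:
            in_house += 1          # a guest checks in before the next check-out
            if in_house > K:
                return False       # more simultaneous guests than rooms
            i += 1
        else:
            in_house -= 1          # a guest checks out
            j += 1
    return True

print(hotel_sweep([1, 2, 3, 4], [10, 2, 6, 14], 1))  # False: stays overlap with one room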
|
normal
|
{
"blob_id": "de6a6c2dc7bea255e5674663616c962c1d1625e0",
"index": 4138,
"step-1": "class Solution:\n # @param arrive : list of integers\n # @param depart : list of integers\n # @param K : integer\n # @return a boolean\n def hotel(self, arrive, depart, K):\n self.count = 0\n self.temp = 0\n for i in range(len(arrive)):\n for j in range(i, len(depart)):\n if arrive[j] < arrive[i]:\n self.temp = arrive[j]\n arrive[j] = arrive[i]\n arrive[i] = self.temp\n if depart[j] < depart[i]:\n self.temp = depart[j]\n depart[j] = depart[i]\n depart[i] = self.temp\n for i in range(len(arrive)):\n self.x = i \n while (arrive[self.x + 1] < depart[self.x]):\n self.count = self.count + 1\n self.x = self.x + 1\n print (\"Count: \",self.count)\n print (\"K: \", K)\n print (\"Arrive: \", arrive)\n print (\"Depart: \", depart)\n if self.count < K:\n return True\n else:\n return False\n beg = 0\n end = len(arrive)\n mid = (beg + mid) / 2\n \n for i in range(len(arrive)):\n \n\nobj = Solution()\nl1 = [1,2,3,4]\nl2 = [10, 2, 6, 14]\nk = 1\nprint obj.hotel(l1,l2,k)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import math
getal1 = 5
getal2 = 7
getal3 = 8
getal4 = -4
getal5 = 2
print(getal1 * getal2 + getal3)
print(getal1 * (getal2 + getal3))
print(getal2 + getal3 / getal1)
print((getal2 + getal3) / getal1)
print(getal2 + getal3 % getal1)
print(abs(getal4 * getal1))
print(pow(getal3, getal5))
print(round(getal5 / getal2, 2))
print(max(getal1, getal2, getal3, getal4, getal5))
print(min(getal1, getal2, getal3, getal4, getal5))
print(math.sqrt(getal5 * getal3))
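
# Expected output for the values above (worked by hand):
# 43    -> 5*7 + 8   (multiplication binds tighter than addition)
# 75    -> 5*(7+8)
# 8.6   -> 7 + 8/5
# 3.0   -> (7+8)/5
# 10    -> 7 + (8 % 5) == 7 + 3
# 20    -> abs(-4 * 5)
# 64    -> 8 ** 2
# 0.29  -> round(2/7, 2)
# 8     -> max of the five values
# -4    -> min of the five values
# 4.0   -> sqrt(2 * 8)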
|
normal
|
{
"blob_id": "30d75aafd9612ac02557b947fc4e3c2f7322a7fd",
"index": 3555,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(getal1 * getal2 + getal3)\nprint(getal1 * (getal2 + getal3))\nprint(getal2 + getal3 / getal1)\nprint((getal2 + getal3) / getal1)\nprint(getal2 + getal3 % getal1)\nprint(abs(getal4 * getal1))\nprint(pow(getal3, getal5))\nprint(round(getal5 / getal2, 2))\nprint(max(getal1, getal2, getal3, getal4, getal5))\nprint(min(getal1, getal2, getal3, getal4, getal5))\nprint(math.sqrt(getal5 * getal3))\n",
"step-3": "<mask token>\ngetal1 = 5\ngetal2 = 7\ngetal3 = 8\ngetal4 = -4\ngetal5 = 2\nprint(getal1 * getal2 + getal3)\nprint(getal1 * (getal2 + getal3))\nprint(getal2 + getal3 / getal1)\nprint((getal2 + getal3) / getal1)\nprint(getal2 + getal3 % getal1)\nprint(abs(getal4 * getal1))\nprint(pow(getal3, getal5))\nprint(round(getal5 / getal2, 2))\nprint(max(getal1, getal2, getal3, getal4, getal5))\nprint(min(getal1, getal2, getal3, getal4, getal5))\nprint(math.sqrt(getal5 * getal3))\n",
"step-4": "import math\ngetal1 = 5\ngetal2 = 7\ngetal3 = 8\ngetal4 = -4\ngetal5 = 2\nprint(getal1 * getal2 + getal3)\nprint(getal1 * (getal2 + getal3))\nprint(getal2 + getal3 / getal1)\nprint((getal2 + getal3) / getal1)\nprint(getal2 + getal3 % getal1)\nprint(abs(getal4 * getal1))\nprint(pow(getal3, getal5))\nprint(round(getal5 / getal2, 2))\nprint(max(getal1, getal2, getal3, getal4, getal5))\nprint(min(getal1, getal2, getal3, getal4, getal5))\nprint(math.sqrt(getal5 * getal3))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# SaveIsawQvector
import sys
import os
if os.path.exists("/opt/Mantid/bin"):
sys.path.append("/opt/mantidnightly/bin")
#sys.path.append("/opt/Mantid/bin") # Linux cluster
#sys.path.append('/opt/mantidunstable/bin')
else:
sys.path.append("C:/MantidInstall/bin") # Windows PC
# import mantid
from mantid.simpleapi import *
user_input = open('SaveIsawQvector.inp', 'r')
lineString = user_input.readline()
lineList = lineString.split()
data_directory = lineList[0]
lineString = user_input.readline()
lineList = lineString.split()
output_directory = lineList[0]
input_run_nums = open('monitorCtsAndAngles.dat', 'r')
min_tof = 2000
max_tof = 16500
start_time = 0.0
stop_time = 1.0e06
while True:
lineString = input_run_nums.readline()
lineList = lineString.split()
if len(lineList) == 0: break
run_num = lineList[0]
    print(run_num)
full_name = data_directory + run_num + '_event.nxs'
event_ws = 'TOPAZ_' + run_num
LoadEventNexus( Filename = full_name, OutputWorkspace = event_ws,
FilterByTofMin = min_tof, FilterByTofMax = max_tof,
FilterByTimeStart = start_time, FilterByTimeStop = stop_time )
outputFile = output_directory + run_num + '_SaveIsawQvector.bin'
SaveIsawQvector(InputWorkspace = event_ws,
Filename = outputFile)
DeleteWorkspace(Workspace = event_ws)
print('All done!')
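
# Assumed input layout (illustrative only; paths and run numbers are examples):
#   SaveIsawQvector.inp      line 1: <data_directory>, line 2: <output_directory>
#   monitorCtsAndAngles.dat  one run per line, first token is the run number,
#                            e.g. "12345 <monitor counts> <angles...>"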
|
normal
|
{
"blob_id": "b72bf00d156862c7bddecb396da3752be964ee66",
"index": 5463,
"step-1": "# SaveIsawQvector\r\n\r\nimport sys\nimport os\n\r\nif os.path.exists(\"/opt/Mantid/bin\"):\n sys.path.append(\"/opt/mantidnightly/bin\")\n #sys.path.append(\"/opt/Mantid/bin\") # Linux cluster\n #sys.path.append('/opt/mantidunstable/bin')\nelse:\n sys.path.append(\"C:/MantidInstall/bin\") # Windows PC\n\r\n# import mantid\r\nfrom mantid.simpleapi import *\n\nuser_input = open('SaveIsawQvector.inp', 'r')\n\nlineString = user_input.readline()\r\nlineList = lineString.split()\ndata_directory = lineList[0]\n\nlineString = user_input.readline()\r\nlineList = lineString.split()\noutput_directory = lineList[0]\r\n\ninput_run_nums = open('monitorCtsAndAngles.dat', 'r')\n\nmin_tof = 2000\r\nmax_tof = 16500\r\n\nstart_time = 0.0\nstop_time = 1.0e06\n\nwhile True:\r\n\n lineString = input_run_nums.readline()\n lineList = lineString.split()\n if len(lineList) == 0: break\n run_num = lineList[0]\n print run_num\r\n full_name = data_directory + run_num + '_event.nxs'\n\r\n event_ws = 'TOPAZ_' + run_num\n\r\n LoadEventNexus( Filename = full_name, OutputWorkspace = event_ws,\n FilterByTofMin = min_tof, FilterByTofMax = max_tof,\n FilterByTimeStart = start_time, FilterByTimeStop = stop_time )\n\n outputFile = output_directory + run_num + '_SaveIsawQvector.bin'\n\r\n SaveIsawQvector(InputWorkspace = event_ws, \n Filename = outputFile)\n \n DeleteWorkspace(Workspace = event_ws)\n\nprint 'All done!'\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
definition of a sensor
"""
import datetime
import pytz
class tlimit:
def __init__(self, name, text):
self.name = name
self.text = text
time_limit = [
tlimit("All", "All Data"),
tlimit("day", "Current day"),
tlimit("24hours", "Last 24 hours"),
tlimit("3days", "Three last days"),
tlimit("7days", "Seven last days"),
tlimit("month", "Current month"),
tlimit("30days", "Last 30 days"),
tlimit("year", "Current year"),
]
tz = pytz.timezone("Europe/Paris")
utz = pytz.timezone("UTC")
def request_meteodata(request: str):
"""
execute a request in the MeteoData database
:param request: the request to execute
    :return: the fetched result
"""
import MySQLdb
import platform
if platform.system() == "Windows":
MySQLParams = {
'host' : "192.168.5.1",
'user' : "MeteoRobot",
'passwd': "robot",
'db' : "MeteoData"
}
else:
MySQLParams = {
'host' : "localhost",
'user' : "MeteoRobot",
'passwd': "robot",
'db' : "MeteoData"
}
try:
con = MySQLdb.connect(**MySQLParams)
cur = con.cursor()
cur.execute(request)
con.commit()
data = cur.fetchall()
except MySQLdb.Error as err:
print(str(err))
return []
except Exception as err:
print(str(err))
return []
con.close()
return data
class SensorData:
date = datetime.datetime(1970, 1, 1, 0, 0, 0)
server_room_temperature = 0.0
server_room_humidity = 0.0
def __init__(self, d, t, h):
self.date = d
self.server_room_temperature = t
self.server_room_humidity = h
def __str__(self):
return str(self.date) + " {:.2f}°C {:.1f}%".format(self.server_room_temperature, self.server_room_humidity)
def get_data(last):
"""
get the database data on the last period
:param last: duration of the period
:return: the data
"""
Table = "ServerRoom"
filter = ""
if last == "lastone":
data = request_meteodata("SELECT * from `ServerRoom` ORDER BY id DESC LIMIT 1 ")
if len(data) == 0:
return [SensorData(datetime.datetime.now(), 0, 0)]
res = []
for d in data:
res.append(SensorData(d[1], d[2], d[3]))
return res
if last != "All":
limit = datetime.datetime.now().astimezone(utz)
if last == "24hours":
limit -= datetime.timedelta(hours=24)
else:
limit = limit.replace(hour=0, minute=0, second=0, microsecond=0)
if last == "3days":
limit -= datetime.timedelta(days=3)
elif last == "7days":
limit -= datetime.timedelta(days=7)
elif last == "month":
limit = limit.replace(day=1)
elif last == "30days":
limit -= datetime.timedelta(days=30)
elif last == "year":
limit = limit.replace(day=1, month=1)
filter = " WHERE `date` > '" + str(limit) + "'"
order = " ORDER BY `date` ASC"
req = "SELECT * FROM `" + Table + "`" + filter + order
data = request_meteodata(req)
if len(data) == 0:
print("no data: get all")
req = "SELECT * FROM `" + Table + "`" + order
data = request_meteodata(req)
res = []
for d in data:
res.append(SensorData(d[1], d[2], d[3]))
return res
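
# Example of the generated request for last="7days" (illustrative timestamp;
# str() of the tz-aware limit includes the UTC offset):
#   SELECT * FROM `ServerRoom` WHERE `date` > '2024-03-01 00:00:00+00:00' ORDER BY `date` ASC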
def smooth_data(data, smooth_width):
"""
smooth the curve plotted by data
:param data: the input data
:param smooth_width: the width of the mobile average
:return: the smoothed data
"""
out = []
for i, dat in enumerate(data):
low = max(0, i - smooth_width)
high = min((len(data) - 1), low + 2 * smooth_width)
n = 0
s_temperature = 0
s_humidity = 0
for d in data[low:high]:
n += 1
s_temperature += d.server_room_temperature
s_humidity += d.server_room_humidity
s_temperature /= float(max(1, n))
s_humidity /= float(max(1, n))
out.append(SensorData(dat.date, s_temperature, s_humidity))
return out
def resample_data(data, entity_number):
"""
    limit the amount of data
    :param data: input data
    :param entity_number: maximum number of entities in output
    :return: the resampled data
"""
if len(data) <= entity_number:
        # not that many entities: nothing to do
return data
interval = int(len(data)/entity_number + 1)
out = []
for i, dat in enumerate(data):
if i % interval == 0:
out.append(dat)
return out
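
# Worked example: with 2500 rows and entity_number=1000,
# interval = int(2500/1000 + 1) = 3, so rows 0, 3, 6, ... are kept (834 rows).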
class displaydata:
"""
    Class to encapsulate the meteo result to display
"""
def __init__(self):
self.temperature = "0"
self.temp_tendance = ""
self.temp_max = "0"
self.temp_min = "0"
self.temp_max_date = "0"
self.temp_min_date = "0"
self.temp_mean = "0"
self.humidity = "0"
self.hum_tendance = ""
self.hum_max = "0"
self.hum_min = "0"
self.hum_max_date = "0"
self.hum_min_date = "0"
self.hum_mean = "0"
def __tendance(self, dt, seuil):
if len(dt) < 3:
return "mdi-arrow-left-right-bold-outline tgreen"
if len(dt) > 20:
p1 = dt[-20]
p2 = dt[-10]
p3 = dt[-1]
else:
p1 = dt[0]
            p2 = dt[len(dt) // 2]  # integer index; len(dt)/2 would be a float in Python 3
p3 = dt[-1]
if abs(p3 - p2) < seuil:
return "mdi-arrow-left-right-bold-outline tgreen"
elif (abs(p2 - p1) < seuil and p3 > p2) or (abs(p3 - p2) < seuil and p2 > p1):
return "mdi-arrow-top-right-bold-outline torange"
elif (abs(p2 - p1) < seuil and p3 < p2) or (abs(p3 - p2) < seuil and p2 < p1):
return "mdi-arrow-bottom-right-bold-outline tlightblue"
elif p1 > p2 > p3:
return "mdi-arrow-bottom-right-bold-outline tlightblue"
elif p1 < p2 < p3:
return "mdi-arrow-up-bold-outline tred"
else:
return "mdi-arrow-left-right-bold-outline tgreen"
def compute_from_data(self, dta, dha, date):
self.temp_max = -2000
self.temp_min = 2000
self.temp_mean = 0
for i, t in enumerate(dta):
self.temp_mean += t
if t > self.temp_max:
self.temp_max = t
self.temp_max_date = date[i]
if t < self.temp_min:
self.temp_min = t
self.temp_min_date = date[i]
if len(dta) > 0:
self.temp_mean = "{:.2f}".format(self.temp_mean / float(len(dta)))
self.temp_max = "{:.2f}".format(self.temp_max)
self.temp_min = "{:.2f}".format(self.temp_min)
self.temperature = "{:.2f}".format(dta[-1])
self.temp_tendance = self.__tendance(dta, 0.05)
self.hum_max = -2000
self.hum_min = 2000
self.hum_mean = 0
for i, t in enumerate(dha):
self.hum_mean += t
if t > self.hum_max:
self.hum_max = t
self.hum_max_date = date[i]
if t < self.hum_min:
self.hum_min = t
self.hum_min_date = date[i]
if len(dha) > 0:
self.hum_mean = "{:.2f}".format(self.hum_mean / float(len(dha)))
self.hum_max = "{:.2f}".format(self.hum_max)
self.hum_min = "{:.2f}".format(self.hum_min)
self.hum_tendance = self.__tendance(dha, 0.05)
self.humidity = "{:.2f}".format(dha[-1])
def getData(ll, smoo):
data = resample_data(get_data(ll), 1000)
if smoo > 0:
data = smooth_data(data, smoo)
print(len(data))
dates = []
temperatures = []
humidity = []
i = 0
for sset in data:
i += 1
dates.append(sset.date.strftime("%Y-%m-%d %H:%M:%S"))
temperatures.append(sset.server_room_temperature)
humidity.append(sset.server_room_humidity)
d = displaydata()
d.compute_from_data(temperatures, humidity, dates)
return dates, temperatures, humidity, d
def get_actual_data():
data = get_data("lastone")
return "{:.2f}".format(data[0].server_room_temperature), "{:.2f}".format(data[0].server_room_humidity)
|
normal
|
{
"blob_id": "cb9ea8791009a29a24a76bc2b161e7f8599fec1b",
"index": 5780,
"step-1": "<mask token>\n\n\nclass tlimit:\n\n def __init__(self, name, text):\n self.name = name\n self.text = text\n\n\n<mask token>\n\n\nclass SensorData:\n date = datetime.datetime(1970, 1, 1, 0, 0, 0)\n server_room_temperature = 0.0\n server_room_humidity = 0.0\n\n def __init__(self, d, t, h):\n self.date = d\n self.server_room_temperature = t\n self.server_room_humidity = h\n\n def __str__(self):\n return str(self.date) + ' {:.2f}°C {:.1f}%'.format(self.\n server_room_temperature, self.server_room_humidity)\n\n\n<mask token>\n\n\ndef smooth_data(data, smooth_width):\n \"\"\"\n smooth the curve plotted by data\n :param data: the input data\n :param smooth_width: the width of the mobile average\n :return: the smoothed data\n \"\"\"\n out = []\n for i, dat in enumerate(data):\n low = max(0, i - smooth_width)\n high = min(len(data) - 1, low + 2 * smooth_width)\n n = 0\n s_temperature = 0\n s_humidity = 0\n for d in data[low:high]:\n n += 1\n s_temperature += d.server_room_temperature\n s_humidity += d.server_room_humidity\n s_temperature /= float(max(1, n))\n s_humidity /= float(max(1, n))\n out.append(SensorData(dat.date, s_temperature, s_humidity))\n return out\n\n\n<mask token>\n\n\nclass displaydata:\n \"\"\"\n lass to encapsulate the meteo result to display\n \"\"\"\n\n def __init__(self):\n self.temperature = '0'\n self.temp_tendance = ''\n self.temp_max = '0'\n self.temp_min = '0'\n self.temp_max_date = '0'\n self.temp_min_date = '0'\n self.temp_mean = '0'\n self.humidity = '0'\n self.hum_tendance = ''\n self.hum_max = '0'\n self.hum_min = '0'\n self.hum_max_date = '0'\n self.hum_min_date = '0'\n self.hum_mean = '0'\n\n def __tendance(self, dt, seuil):\n if len(dt) < 3:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n if len(dt) > 20:\n p1 = dt[-20]\n p2 = dt[-10]\n p3 = dt[-1]\n else:\n p1 = dt[0]\n p2 = dt[len(dt) / 2]\n p3 = dt[-1]\n if abs(p3 - p2) < seuil:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n elif abs(p2 - p1) < seuil and p3 > p2 or abs(p3 - p2\n ) < seuil and p2 > p1:\n return 'mdi-arrow-top-right-bold-outline torange'\n elif abs(p2 - p1) < seuil and p3 < p2 or abs(p3 - p2\n ) < seuil and p2 < p1:\n return 'mdi-arrow-bottom-right-bold-outline tlightblue'\n elif p1 > p2 > p3:\n return 'mdi-arrow-bottom-right-bold-outline tlightblue'\n elif p1 < p2 < p3:\n return 'mdi-arrow-up-bold-outline tred'\n else:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n\n def compute_from_data(self, dta, dha, date):\n self.temp_max = -2000\n self.temp_min = 2000\n self.temp_mean = 0\n for i, t in enumerate(dta):\n self.temp_mean += t\n if t > self.temp_max:\n self.temp_max = t\n self.temp_max_date = date[i]\n if t < self.temp_min:\n self.temp_min = t\n self.temp_min_date = date[i]\n if len(dta) > 0:\n self.temp_mean = '{:.2f}'.format(self.temp_mean / float(len(dta)))\n self.temp_max = '{:.2f}'.format(self.temp_max)\n self.temp_min = '{:.2f}'.format(self.temp_min)\n self.temperature = '{:.2f}'.format(dta[-1])\n self.temp_tendance = self.__tendance(dta, 0.05)\n self.hum_max = -2000\n self.hum_min = 2000\n self.hum_mean = 0\n for i, t in enumerate(dha):\n self.hum_mean += t\n if t > self.hum_max:\n self.hum_max = t\n self.hum_max_date = date[i]\n if t < self.hum_min:\n self.hum_min = t\n self.hum_min_date = date[i]\n if len(dha) > 0:\n self.hum_mean = '{:.2f}'.format(self.hum_mean / float(len(dha)))\n self.hum_max = '{:.2f}'.format(self.hum_max)\n self.hum_min = '{:.2f}'.format(self.hum_min)\n self.hum_tendance = self.__tendance(dha, 0.05)\n self.humidity = 
'{:.2f}'.format(dha[-1])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass tlimit:\n\n def __init__(self, name, text):\n self.name = name\n self.text = text\n\n\n<mask token>\n\n\nclass SensorData:\n date = datetime.datetime(1970, 1, 1, 0, 0, 0)\n server_room_temperature = 0.0\n server_room_humidity = 0.0\n\n def __init__(self, d, t, h):\n self.date = d\n self.server_room_temperature = t\n self.server_room_humidity = h\n\n def __str__(self):\n return str(self.date) + ' {:.2f}°C {:.1f}%'.format(self.\n server_room_temperature, self.server_room_humidity)\n\n\n<mask token>\n\n\ndef smooth_data(data, smooth_width):\n \"\"\"\n smooth the curve plotted by data\n :param data: the input data\n :param smooth_width: the width of the mobile average\n :return: the smoothed data\n \"\"\"\n out = []\n for i, dat in enumerate(data):\n low = max(0, i - smooth_width)\n high = min(len(data) - 1, low + 2 * smooth_width)\n n = 0\n s_temperature = 0\n s_humidity = 0\n for d in data[low:high]:\n n += 1\n s_temperature += d.server_room_temperature\n s_humidity += d.server_room_humidity\n s_temperature /= float(max(1, n))\n s_humidity /= float(max(1, n))\n out.append(SensorData(dat.date, s_temperature, s_humidity))\n return out\n\n\n<mask token>\n\n\nclass displaydata:\n \"\"\"\n lass to encapsulate the meteo result to display\n \"\"\"\n\n def __init__(self):\n self.temperature = '0'\n self.temp_tendance = ''\n self.temp_max = '0'\n self.temp_min = '0'\n self.temp_max_date = '0'\n self.temp_min_date = '0'\n self.temp_mean = '0'\n self.humidity = '0'\n self.hum_tendance = ''\n self.hum_max = '0'\n self.hum_min = '0'\n self.hum_max_date = '0'\n self.hum_min_date = '0'\n self.hum_mean = '0'\n\n def __tendance(self, dt, seuil):\n if len(dt) < 3:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n if len(dt) > 20:\n p1 = dt[-20]\n p2 = dt[-10]\n p3 = dt[-1]\n else:\n p1 = dt[0]\n p2 = dt[len(dt) / 2]\n p3 = dt[-1]\n if abs(p3 - p2) < seuil:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n elif abs(p2 - p1) < seuil and p3 > p2 or abs(p3 - p2\n ) < seuil and p2 > p1:\n return 'mdi-arrow-top-right-bold-outline torange'\n elif abs(p2 - p1) < seuil and p3 < p2 or abs(p3 - p2\n ) < seuil and p2 < p1:\n return 'mdi-arrow-bottom-right-bold-outline tlightblue'\n elif p1 > p2 > p3:\n return 'mdi-arrow-bottom-right-bold-outline tlightblue'\n elif p1 < p2 < p3:\n return 'mdi-arrow-up-bold-outline tred'\n else:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n\n def compute_from_data(self, dta, dha, date):\n self.temp_max = -2000\n self.temp_min = 2000\n self.temp_mean = 0\n for i, t in enumerate(dta):\n self.temp_mean += t\n if t > self.temp_max:\n self.temp_max = t\n self.temp_max_date = date[i]\n if t < self.temp_min:\n self.temp_min = t\n self.temp_min_date = date[i]\n if len(dta) > 0:\n self.temp_mean = '{:.2f}'.format(self.temp_mean / float(len(dta)))\n self.temp_max = '{:.2f}'.format(self.temp_max)\n self.temp_min = '{:.2f}'.format(self.temp_min)\n self.temperature = '{:.2f}'.format(dta[-1])\n self.temp_tendance = self.__tendance(dta, 0.05)\n self.hum_max = -2000\n self.hum_min = 2000\n self.hum_mean = 0\n for i, t in enumerate(dha):\n self.hum_mean += t\n if t > self.hum_max:\n self.hum_max = t\n self.hum_max_date = date[i]\n if t < self.hum_min:\n self.hum_min = t\n self.hum_min_date = date[i]\n if len(dha) > 0:\n self.hum_mean = '{:.2f}'.format(self.hum_mean / float(len(dha)))\n self.hum_max = '{:.2f}'.format(self.hum_max)\n self.hum_min = '{:.2f}'.format(self.hum_min)\n self.hum_tendance = self.__tendance(dha, 0.05)\n self.humidity = 
'{:.2f}'.format(dha[-1])\n\n\ndef getData(ll, smoo):\n data = resample_data(get_data(ll), 1000)\n if smoo > 0:\n data = smooth_data(data, smoo)\n print(len(data))\n dates = []\n temperatures = []\n humidity = []\n i = 0\n for sset in data:\n i += 1\n dates.append(sset.date.strftime('%Y-%m-%d %H:%M:%S'))\n temperatures.append(sset.server_room_temperature)\n humidity.append(sset.server_room_humidity)\n d = displaydata()\n d.compute_from_data(temperatures, humidity, dates)\n return dates, temperatures, humidity, d\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass tlimit:\n\n def __init__(self, name, text):\n self.name = name\n self.text = text\n\n\n<mask token>\n\n\nclass SensorData:\n date = datetime.datetime(1970, 1, 1, 0, 0, 0)\n server_room_temperature = 0.0\n server_room_humidity = 0.0\n\n def __init__(self, d, t, h):\n self.date = d\n self.server_room_temperature = t\n self.server_room_humidity = h\n\n def __str__(self):\n return str(self.date) + ' {:.2f}°C {:.1f}%'.format(self.\n server_room_temperature, self.server_room_humidity)\n\n\ndef get_data(last):\n \"\"\"\n get the database data on the last period\n :param last: duration of the period\n :return: the data\n \"\"\"\n Table = 'ServerRoom'\n filter = ''\n if last == 'lastone':\n data = request_meteodata(\n 'SELECT * from `ServerRoom` ORDER BY id DESC LIMIT 1 ')\n if len(data) == 0:\n return [SensorData(datetime.datetime.now(), 0, 0)]\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n if last != 'All':\n limit = datetime.datetime.now().astimezone(utz)\n if last == '24hours':\n limit -= datetime.timedelta(hours=24)\n else:\n limit = limit.replace(hour=0, minute=0, second=0, microsecond=0)\n if last == '3days':\n limit -= datetime.timedelta(days=3)\n elif last == '7days':\n limit -= datetime.timedelta(days=7)\n elif last == 'month':\n limit = limit.replace(day=1)\n elif last == '30days':\n limit -= datetime.timedelta(days=30)\n elif last == 'year':\n limit = limit.replace(day=1, month=1)\n filter = \" WHERE `date` > '\" + str(limit) + \"'\"\n order = ' ORDER BY `date` ASC'\n req = 'SELECT * FROM `' + Table + '`' + filter + order\n data = request_meteodata(req)\n if len(data) == 0:\n print('no data: get all')\n req = 'SELECT * FROM `' + Table + '`' + order\n data = request_meteodata(req)\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n\n\ndef smooth_data(data, smooth_width):\n \"\"\"\n smooth the curve plotted by data\n :param data: the input data\n :param smooth_width: the width of the mobile average\n :return: the smoothed data\n \"\"\"\n out = []\n for i, dat in enumerate(data):\n low = max(0, i - smooth_width)\n high = min(len(data) - 1, low + 2 * smooth_width)\n n = 0\n s_temperature = 0\n s_humidity = 0\n for d in data[low:high]:\n n += 1\n s_temperature += d.server_room_temperature\n s_humidity += d.server_room_humidity\n s_temperature /= float(max(1, n))\n s_humidity /= float(max(1, n))\n out.append(SensorData(dat.date, s_temperature, s_humidity))\n return out\n\n\n<mask token>\n\n\nclass displaydata:\n \"\"\"\n lass to encapsulate the meteo result to display\n \"\"\"\n\n def __init__(self):\n self.temperature = '0'\n self.temp_tendance = ''\n self.temp_max = '0'\n self.temp_min = '0'\n self.temp_max_date = '0'\n self.temp_min_date = '0'\n self.temp_mean = '0'\n self.humidity = '0'\n self.hum_tendance = ''\n self.hum_max = '0'\n self.hum_min = '0'\n self.hum_max_date = '0'\n self.hum_min_date = '0'\n self.hum_mean = '0'\n\n def __tendance(self, dt, seuil):\n if len(dt) < 3:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n if len(dt) > 20:\n p1 = dt[-20]\n p2 = dt[-10]\n p3 = dt[-1]\n else:\n p1 = dt[0]\n p2 = dt[len(dt) / 2]\n p3 = dt[-1]\n if abs(p3 - p2) < seuil:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n elif abs(p2 - p1) < seuil and p3 > p2 or abs(p3 - p2\n ) < seuil and p2 > p1:\n return 'mdi-arrow-top-right-bold-outline torange'\n elif abs(p2 - p1) < seuil and p3 < p2 or abs(p3 - p2\n ) < seuil and p2 < p1:\n return 'mdi-arrow-bottom-right-bold-outline 
tlightblue'\n elif p1 > p2 > p3:\n return 'mdi-arrow-bottom-right-bold-outline tlightblue'\n elif p1 < p2 < p3:\n return 'mdi-arrow-up-bold-outline tred'\n else:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n\n def compute_from_data(self, dta, dha, date):\n self.temp_max = -2000\n self.temp_min = 2000\n self.temp_mean = 0\n for i, t in enumerate(dta):\n self.temp_mean += t\n if t > self.temp_max:\n self.temp_max = t\n self.temp_max_date = date[i]\n if t < self.temp_min:\n self.temp_min = t\n self.temp_min_date = date[i]\n if len(dta) > 0:\n self.temp_mean = '{:.2f}'.format(self.temp_mean / float(len(dta)))\n self.temp_max = '{:.2f}'.format(self.temp_max)\n self.temp_min = '{:.2f}'.format(self.temp_min)\n self.temperature = '{:.2f}'.format(dta[-1])\n self.temp_tendance = self.__tendance(dta, 0.05)\n self.hum_max = -2000\n self.hum_min = 2000\n self.hum_mean = 0\n for i, t in enumerate(dha):\n self.hum_mean += t\n if t > self.hum_max:\n self.hum_max = t\n self.hum_max_date = date[i]\n if t < self.hum_min:\n self.hum_min = t\n self.hum_min_date = date[i]\n if len(dha) > 0:\n self.hum_mean = '{:.2f}'.format(self.hum_mean / float(len(dha)))\n self.hum_max = '{:.2f}'.format(self.hum_max)\n self.hum_min = '{:.2f}'.format(self.hum_min)\n self.hum_tendance = self.__tendance(dha, 0.05)\n self.humidity = '{:.2f}'.format(dha[-1])\n\n\ndef getData(ll, smoo):\n data = resample_data(get_data(ll), 1000)\n if smoo > 0:\n data = smooth_data(data, smoo)\n print(len(data))\n dates = []\n temperatures = []\n humidity = []\n i = 0\n for sset in data:\n i += 1\n dates.append(sset.date.strftime('%Y-%m-%d %H:%M:%S'))\n temperatures.append(sset.server_room_temperature)\n humidity.append(sset.server_room_humidity)\n d = displaydata()\n d.compute_from_data(temperatures, humidity, dates)\n return dates, temperatures, humidity, d\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass tlimit:\n\n def __init__(self, name, text):\n self.name = name\n self.text = text\n\n\ntime_limit = [tlimit('All', 'All Data'), tlimit('day', 'Current day'),\n tlimit('24hours', 'Last 24 hours'), tlimit('3days', 'Three last days'),\n tlimit('7days', 'Seven last days'), tlimit('month', 'Current month'),\n tlimit('30days', 'Last 30 days'), tlimit('year', 'Current year')]\ntz = pytz.timezone('Europe/Paris')\nutz = pytz.timezone('UTC')\n\n\ndef request_meteodata(request: str):\n \"\"\"\n execute a request in the MeteoData database\n :param request: the request to execute\n :return: the feteched result\n \"\"\"\n import MySQLdb\n import platform\n if platform.system() == 'Windows':\n MySQLParams = {'host': '192.168.5.1', 'user': 'MeteoRobot',\n 'passwd': 'robot', 'db': 'MeteoData'}\n else:\n MySQLParams = {'host': 'localhost', 'user': 'MeteoRobot', 'passwd':\n 'robot', 'db': 'MeteoData'}\n try:\n con = MySQLdb.connect(**MySQLParams)\n cur = con.cursor()\n cur.execute(request)\n con.commit()\n data = cur.fetchall()\n except MySQLdb.Error as err:\n print(str(err))\n return []\n except Exception as err:\n print(str(err))\n return []\n con.close()\n return data\n\n\nclass SensorData:\n date = datetime.datetime(1970, 1, 1, 0, 0, 0)\n server_room_temperature = 0.0\n server_room_humidity = 0.0\n\n def __init__(self, d, t, h):\n self.date = d\n self.server_room_temperature = t\n self.server_room_humidity = h\n\n def __str__(self):\n return str(self.date) + ' {:.2f}°C {:.1f}%'.format(self.\n server_room_temperature, self.server_room_humidity)\n\n\ndef get_data(last):\n \"\"\"\n get the database data on the last period\n :param last: duration of the period\n :return: the data\n \"\"\"\n Table = 'ServerRoom'\n filter = ''\n if last == 'lastone':\n data = request_meteodata(\n 'SELECT * from `ServerRoom` ORDER BY id DESC LIMIT 1 ')\n if len(data) == 0:\n return [SensorData(datetime.datetime.now(), 0, 0)]\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n if last != 'All':\n limit = datetime.datetime.now().astimezone(utz)\n if last == '24hours':\n limit -= datetime.timedelta(hours=24)\n else:\n limit = limit.replace(hour=0, minute=0, second=0, microsecond=0)\n if last == '3days':\n limit -= datetime.timedelta(days=3)\n elif last == '7days':\n limit -= datetime.timedelta(days=7)\n elif last == 'month':\n limit = limit.replace(day=1)\n elif last == '30days':\n limit -= datetime.timedelta(days=30)\n elif last == 'year':\n limit = limit.replace(day=1, month=1)\n filter = \" WHERE `date` > '\" + str(limit) + \"'\"\n order = ' ORDER BY `date` ASC'\n req = 'SELECT * FROM `' + Table + '`' + filter + order\n data = request_meteodata(req)\n if len(data) == 0:\n print('no data: get all')\n req = 'SELECT * FROM `' + Table + '`' + order\n data = request_meteodata(req)\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n\n\ndef smooth_data(data, smooth_width):\n \"\"\"\n smooth the curve plotted by data\n :param data: the input data\n :param smooth_width: the width of the mobile average\n :return: the smoothed data\n \"\"\"\n out = []\n for i, dat in enumerate(data):\n low = max(0, i - smooth_width)\n high = min(len(data) - 1, low + 2 * smooth_width)\n n = 0\n s_temperature = 0\n s_humidity = 0\n for d in data[low:high]:\n n += 1\n s_temperature += d.server_room_temperature\n s_humidity += d.server_room_humidity\n s_temperature /= float(max(1, n))\n s_humidity /= float(max(1, n))\n out.append(SensorData(dat.date, 
s_temperature, s_humidity))\n return out\n\n\ndef resample_data(data, entity_number):\n \"\"\"\n limit the amount of dat\n :param data: input data\n :param entity_number: maximum number of entity in output\n :return: he resampled data\n \"\"\"\n if len(data) <= entity_number:\n return data\n interval = int(len(data) / entity_number + 1)\n out = []\n for i, dat in enumerate(data):\n if i % interval == 0:\n out.append(dat)\n return out\n\n\nclass displaydata:\n \"\"\"\n lass to encapsulate the meteo result to display\n \"\"\"\n\n def __init__(self):\n self.temperature = '0'\n self.temp_tendance = ''\n self.temp_max = '0'\n self.temp_min = '0'\n self.temp_max_date = '0'\n self.temp_min_date = '0'\n self.temp_mean = '0'\n self.humidity = '0'\n self.hum_tendance = ''\n self.hum_max = '0'\n self.hum_min = '0'\n self.hum_max_date = '0'\n self.hum_min_date = '0'\n self.hum_mean = '0'\n\n def __tendance(self, dt, seuil):\n if len(dt) < 3:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n if len(dt) > 20:\n p1 = dt[-20]\n p2 = dt[-10]\n p3 = dt[-1]\n else:\n p1 = dt[0]\n p2 = dt[len(dt) / 2]\n p3 = dt[-1]\n if abs(p3 - p2) < seuil:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n elif abs(p2 - p1) < seuil and p3 > p2 or abs(p3 - p2\n ) < seuil and p2 > p1:\n return 'mdi-arrow-top-right-bold-outline torange'\n elif abs(p2 - p1) < seuil and p3 < p2 or abs(p3 - p2\n ) < seuil and p2 < p1:\n return 'mdi-arrow-bottom-right-bold-outline tlightblue'\n elif p1 > p2 > p3:\n return 'mdi-arrow-bottom-right-bold-outline tlightblue'\n elif p1 < p2 < p3:\n return 'mdi-arrow-up-bold-outline tred'\n else:\n return 'mdi-arrow-left-right-bold-outline tgreen'\n\n def compute_from_data(self, dta, dha, date):\n self.temp_max = -2000\n self.temp_min = 2000\n self.temp_mean = 0\n for i, t in enumerate(dta):\n self.temp_mean += t\n if t > self.temp_max:\n self.temp_max = t\n self.temp_max_date = date[i]\n if t < self.temp_min:\n self.temp_min = t\n self.temp_min_date = date[i]\n if len(dta) > 0:\n self.temp_mean = '{:.2f}'.format(self.temp_mean / float(len(dta)))\n self.temp_max = '{:.2f}'.format(self.temp_max)\n self.temp_min = '{:.2f}'.format(self.temp_min)\n self.temperature = '{:.2f}'.format(dta[-1])\n self.temp_tendance = self.__tendance(dta, 0.05)\n self.hum_max = -2000\n self.hum_min = 2000\n self.hum_mean = 0\n for i, t in enumerate(dha):\n self.hum_mean += t\n if t > self.hum_max:\n self.hum_max = t\n self.hum_max_date = date[i]\n if t < self.hum_min:\n self.hum_min = t\n self.hum_min_date = date[i]\n if len(dha) > 0:\n self.hum_mean = '{:.2f}'.format(self.hum_mean / float(len(dha)))\n self.hum_max = '{:.2f}'.format(self.hum_max)\n self.hum_min = '{:.2f}'.format(self.hum_min)\n self.hum_tendance = self.__tendance(dha, 0.05)\n self.humidity = '{:.2f}'.format(dha[-1])\n\n\ndef getData(ll, smoo):\n data = resample_data(get_data(ll), 1000)\n if smoo > 0:\n data = smooth_data(data, smoo)\n print(len(data))\n dates = []\n temperatures = []\n humidity = []\n i = 0\n for sset in data:\n i += 1\n dates.append(sset.date.strftime('%Y-%m-%d %H:%M:%S'))\n temperatures.append(sset.server_room_temperature)\n humidity.append(sset.server_room_humidity)\n d = displaydata()\n d.compute_from_data(temperatures, humidity, dates)\n return dates, temperatures, humidity, d\n\n\ndef get_actual_data():\n data = get_data('lastone')\n return '{:.2f}'.format(data[0].server_room_temperature), '{:.2f}'.format(\n data[0].server_room_humidity)\n",
"step-5": "\"\"\"\ndefinition of a sensor\n\"\"\"\nimport datetime\nimport pytz\n\nclass tlimit:\n\n def __init__(self, name, text):\n self.name = name\n self.text = text\n\n\ntime_limit = [\n tlimit(\"All\", \"All Data\"),\n tlimit(\"day\", \"Current day\"),\n tlimit(\"24hours\", \"Last 24 hours\"),\n tlimit(\"3days\", \"Three last days\"),\n tlimit(\"7days\", \"Seven last days\"),\n tlimit(\"month\", \"Current month\"),\n tlimit(\"30days\", \"Last 30 days\"),\n tlimit(\"year\", \"Current year\"),\n]\n\ntz = pytz.timezone(\"Europe/Paris\")\nutz = pytz.timezone(\"UTC\")\n\n\ndef request_meteodata(request: str):\n \"\"\"\n execute a request in the MeteoData database\n :param request: the request to execute\n :return: the feteched result\n \"\"\"\n import MySQLdb\n import platform\n if platform.system() == \"Windows\":\n MySQLParams = {\n 'host' : \"192.168.5.1\",\n 'user' : \"MeteoRobot\",\n 'passwd': \"robot\",\n 'db' : \"MeteoData\"\n }\n else:\n MySQLParams = {\n 'host' : \"localhost\",\n 'user' : \"MeteoRobot\",\n 'passwd': \"robot\",\n 'db' : \"MeteoData\"\n }\n try:\n con = MySQLdb.connect(**MySQLParams)\n cur = con.cursor()\n cur.execute(request)\n con.commit()\n data = cur.fetchall()\n except MySQLdb.Error as err:\n print(str(err))\n return []\n except Exception as err:\n print(str(err))\n return []\n con.close()\n return data\n\n\nclass SensorData:\n date = datetime.datetime(1970, 1, 1, 0, 0, 0)\n server_room_temperature = 0.0\n server_room_humidity = 0.0\n\n def __init__(self, d, t, h):\n self.date = d\n self.server_room_temperature = t\n self.server_room_humidity = h\n\n def __str__(self):\n return str(self.date) + \" {:.2f}°C {:.1f}%\".format(self.server_room_temperature, self.server_room_humidity)\n\n\ndef get_data(last):\n \"\"\"\n get the database data on the last period\n :param last: duration of the period\n :return: the data\n \"\"\"\n Table = \"ServerRoom\"\n filter = \"\"\n if last == \"lastone\":\n data = request_meteodata(\"SELECT * from `ServerRoom` ORDER BY id DESC LIMIT 1 \")\n if len(data) == 0:\n return [SensorData(datetime.datetime.now(), 0, 0)]\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n if last != \"All\":\n limit = datetime.datetime.now().astimezone(utz)\n if last == \"24hours\":\n limit -= datetime.timedelta(hours=24)\n else:\n limit = limit.replace(hour=0, minute=0, second=0, microsecond=0)\n if last == \"3days\":\n limit -= datetime.timedelta(days=3)\n elif last == \"7days\":\n limit -= datetime.timedelta(days=7)\n elif last == \"month\":\n limit = limit.replace(day=1)\n elif last == \"30days\":\n limit -= datetime.timedelta(days=30)\n elif last == \"year\":\n limit = limit.replace(day=1, month=1)\n filter = \" WHERE `date` > '\" + str(limit) + \"'\"\n order = \" ORDER BY `date` ASC\"\n req = \"SELECT * FROM `\" + Table + \"`\" + filter + order\n data = request_meteodata(req)\n if len(data) == 0:\n print(\"no data: get all\")\n req = \"SELECT * FROM `\" + Table + \"`\" + order\n data = request_meteodata(req)\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n\n\ndef smooth_data(data, smooth_width):\n \"\"\"\n smooth the curve plotted by data\n :param data: the input data\n :param smooth_width: the width of the mobile average\n :return: the smoothed data\n \"\"\"\n out = []\n for i, dat in enumerate(data):\n low = max(0, i - smooth_width)\n high = min((len(data) - 1), low + 2 * smooth_width)\n n = 0\n s_temperature = 0\n s_humidity = 0\n for d in data[low:high]:\n n += 1\n 
s_temperature += d.server_room_temperature\n s_humidity += d.server_room_humidity\n s_temperature /= float(max(1, n))\n s_humidity /= float(max(1, n))\n out.append(SensorData(dat.date, s_temperature, s_humidity))\n return out\n\n\ndef resample_data(data, entity_number):\n \"\"\"\n limit the amount of dat\n :param data: input data\n :param entity_number: maximum number of entity in output\n :return: he resampled data\n \"\"\"\n if len(data) <= entity_number:\n # not that many entity: nothing to do\n return data\n interval = int(len(data)/entity_number + 1)\n out = []\n for i, dat in enumerate(data):\n if i % interval == 0:\n out.append(dat)\n return out\n\n\nclass displaydata:\n \"\"\"\n lass to encapsulate the meteo result to display\n \"\"\"\n def __init__(self):\n self.temperature = \"0\"\n self.temp_tendance = \"\"\n self.temp_max = \"0\"\n self.temp_min = \"0\"\n self.temp_max_date = \"0\"\n self.temp_min_date = \"0\"\n self.temp_mean = \"0\"\n self.humidity = \"0\"\n self.hum_tendance = \"\"\n self.hum_max = \"0\"\n self.hum_min = \"0\"\n self.hum_max_date = \"0\"\n self.hum_min_date = \"0\"\n self.hum_mean = \"0\"\n\n def __tendance(self, dt, seuil):\n if len(dt) < 3:\n return \"mdi-arrow-left-right-bold-outline tgreen\"\n if len(dt) > 20:\n p1 = dt[-20]\n p2 = dt[-10]\n p3 = dt[-1]\n else:\n p1 = dt[0]\n p2 = dt[len(dt)/2]\n p3 = dt[-1]\n if abs(p3 - p2) < seuil:\n return \"mdi-arrow-left-right-bold-outline tgreen\"\n elif (abs(p2 - p1) < seuil and p3 > p2) or (abs(p3 - p2) < seuil and p2 > p1):\n return \"mdi-arrow-top-right-bold-outline torange\"\n elif (abs(p2 - p1) < seuil and p3 < p2) or (abs(p3 - p2) < seuil and p2 < p1):\n return \"mdi-arrow-bottom-right-bold-outline tlightblue\"\n elif p1 > p2 > p3:\n return \"mdi-arrow-bottom-right-bold-outline tlightblue\"\n elif p1 < p2 < p3:\n return \"mdi-arrow-up-bold-outline tred\"\n else:\n return \"mdi-arrow-left-right-bold-outline tgreen\"\n\n def compute_from_data(self, dta, dha, date):\n self.temp_max = -2000\n self.temp_min = 2000\n self.temp_mean = 0\n for i, t in enumerate(dta):\n self.temp_mean += t\n if t > self.temp_max:\n self.temp_max = t\n self.temp_max_date = date[i]\n if t < self.temp_min:\n self.temp_min = t\n self.temp_min_date = date[i]\n if len(dta) > 0:\n self.temp_mean = \"{:.2f}\".format(self.temp_mean / float(len(dta)))\n self.temp_max = \"{:.2f}\".format(self.temp_max)\n self.temp_min = \"{:.2f}\".format(self.temp_min)\n self.temperature = \"{:.2f}\".format(dta[-1])\n self.temp_tendance = self.__tendance(dta, 0.05)\n\n self.hum_max = -2000\n self.hum_min = 2000\n self.hum_mean = 0\n for i, t in enumerate(dha):\n self.hum_mean += t\n if t > self.hum_max:\n self.hum_max = t\n self.hum_max_date = date[i]\n if t < self.hum_min:\n self.hum_min = t\n self.hum_min_date = date[i]\n if len(dha) > 0:\n self.hum_mean = \"{:.2f}\".format(self.hum_mean / float(len(dha)))\n self.hum_max = \"{:.2f}\".format(self.hum_max)\n self.hum_min = \"{:.2f}\".format(self.hum_min)\n self.hum_tendance = self.__tendance(dha, 0.05)\n self.humidity = \"{:.2f}\".format(dha[-1])\n\n\ndef getData(ll, smoo):\n data = resample_data(get_data(ll), 1000)\n if smoo > 0:\n data = smooth_data(data, smoo)\n print(len(data))\n dates = []\n temperatures = []\n humidity = []\n i = 0\n for sset in data:\n i += 1\n dates.append(sset.date.strftime(\"%Y-%m-%d %H:%M:%S\"))\n temperatures.append(sset.server_room_temperature)\n humidity.append(sset.server_room_humidity)\n d = displaydata()\n d.compute_from_data(temperatures, humidity, dates)\n return dates, 
temperatures, humidity, d\n\n\ndef get_actual_data():\n data = get_data(\"lastone\")\n return \"{:.2f}\".format(data[0].server_room_temperature), \"{:.2f}\".format(data[0].server_room_humidity)\n",
"step-ids": [
12,
13,
14,
18,
20
]
}
|
[
12,
13,
14,
18,
20
] |
from collections import defaultdict

# Map each perfect square to its integer root for O(1) hypotenuse lookups.
squares = dict()
for i in range(2000):
    squares[i * i] = i

# Count, for each perimeter a + b + c, how many right triangles (a < b) hit it.
perims = defaultdict(int)
for a in range(1, 1001):
    for b in range(a + 1, 1001):
        if a * a + b * b not in squares:
            continue
        c = squares[a * a + b * b]
        perims[a + b + c] += 1

# Report perimeters up to 1000 reachable by more than one triangle,
# ordered by how many triangles share them.
for perim, v in sorted(perims.items(), key=lambda x: x[1]):
    if v > 1 and perim <= 1000:
        print(perim, v)
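
# A short follow-up sketch (assumption: the underlying exercise asks for the
# single perimeter <= 1000 with the most right-triangle solutions, as in
# Project Euler problem 39, whose known answer is 840). It reuses `perims`.
candidates = [p for p in perims if p <= 1000]
if candidates:
    best = max(candidates, key=lambda p: perims[p])
    print('best perimeter:', best, '->', perims[best], 'solutions')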
|
normal
|
{
"blob_id": "a3299a2945a638c74c2d16bc28079ed692718fbd",
"index": 2703,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2000):\n squares[i * i] = i\n<mask token>\nfor a in range(1, 1001):\n for b in range(a + 1, 1001):\n if a * a + b * b not in squares:\n continue\n c = squares[a * a + b * b]\n perims[a + b + c] += 1\nfor perim, v in sorted(perims.items(), key=lambda x: x[1]):\n if v > 1 and perim <= 1000:\n print(perim, v)\n",
"step-3": "<mask token>\nsquares = dict()\nfor i in range(2000):\n squares[i * i] = i\nperims = defaultdict(int)\nfor a in range(1, 1001):\n for b in range(a + 1, 1001):\n if a * a + b * b not in squares:\n continue\n c = squares[a * a + b * b]\n perims[a + b + c] += 1\nfor perim, v in sorted(perims.items(), key=lambda x: x[1]):\n if v > 1 and perim <= 1000:\n print(perim, v)\n",
"step-4": "from collections import defaultdict\nsquares = dict()\nfor i in range(2000):\n squares[i * i] = i\nperims = defaultdict(int)\nfor a in range(1, 1001):\n for b in range(a + 1, 1001):\n if a * a + b * b not in squares:\n continue\n c = squares[a * a + b * b]\n perims[a + b + c] += 1\nfor perim, v in sorted(perims.items(), key=lambda x: x[1]):\n if v > 1 and perim <= 1000:\n print(perim, v)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from PyQt5.QtWidgets import QPushButton,QWidget,QApplication,QGridLayout,QListWidget,QLineEdit,QVBoxLayout,QLabel
import pyqtgraph as pg
import sys
import numpy as np
from tools import DataModel,HoldPositions
from load_sina import LoadNet
import time
from get_day_histroy import history
import pandas as pd
from volume import Volume
from PyQt5.QtCore import QThread, pyqtSignal, QDateTime
class addItemThread(QThread):
update_qvix = pyqtSignal(pd.DataFrame)
update_north = pyqtSignal(pd.DataFrame)
update_vol = pyqtSignal(pd.Series,pd.Series)
update_month = pyqtSignal(pd.DataFrame)
update_iv =pyqtSignal(pd.DataFrame,pd.DataFrame)
update_greek = pyqtSignal(list)
def __init__(self,*args, **kwargs):
super(addItemThread, self).__init__(*args, **kwargs)
self.data_model =DataModel()
self.num = 0
def run(self, *args, **kwargs):
while True:
df =LoadNet().get_QVIX()
self.update_qvix.emit(df)
df_north =LoadNet().get_north()
self.update_north.emit(df_north)
df_vol ,cha= Volume().update()
data ,last = LoadNet().get_50_163()
ser = (data['current']-last)/last
self.update_vol.emit(df_vol,ser)
if not self.data_model.df_op.empty:
df_month = self.data_model.iv_month_50300()
self.update_month.emit(df_month)
df_iv50,df_iv300 = self.data_model.get_iv()
self.update_iv.emit(df_iv50,df_iv300)
hp = HoldPositions()
greek = hp.update(self.data_model.df_op)
self.update_greek.emit(greek)
time.sleep(3)
class Example(QWidget):
def __init__(self):
super(Example, self).__init__()
mthread = addItemThread()
mthread.update_qvix.connect(self.update_qvix)
mthread.update_north.connect(self.update_north)
mthread.update_vol.connect(self.update_volume)
mthread.update_month.connect(self.update_month)
mthread.update_iv.connect(self.update_iv)
mthread.update_greek.connect(self.update_greek)
mthread.start()
self.initUI()
def initUI(self):
self.setGeometry(400,400,1200,620)
        self.setWindowTitle("不被仓位左右思想,没找到弱点不要重仓")  # i.e. "Don't let your position sway your thinking; no heavy positions until you find a weakness."
self.gridLayout = QGridLayout(self)
self.plot()
        '''
        bottom
        '''
self.label_greek = QLabel('label_greek')
self.label_greek.setStyleSheet("background-color:rgb(250,250,250)")
self.gridLayout.addWidget(self.label_greek, 2, 0,1,3)
'''
right
'''
# wight_r = QWidget(self)
# layout_r = QVBoxLayout()
# wight_r.setLayout(layout_r)
# btn_calculated = QPushButton('计算收益')
# layout_r.addWidget(btn_calculated)
# self.gridLayout.addWidget(wight_r, 0, 3,2,1)
def plot(self):
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
pw_iv50 = pg.PlotWidget(title='50-IV')
self.plt_iv50_1 = pw_iv50.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=12,symbolBrush=(0,255,0))
self.plt_iv50_2 = pw_iv50.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=12,symbolBrush=(0,255,0))
self.plt_iv50_3 = pw_iv50.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=10,symbolBrush=(0,170,0))
self.plt_iv50_4 = pw_iv50.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=10,symbolBrush=(0,170,0))
self.plt_iv50_5 = pw_iv50.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=8,symbolBrush=(0,85,0))
self.plt_iv50_6 = pw_iv50.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=8,symbolBrush=(0,85,0))
self.plt_iv50_7 = pw_iv50.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=6,symbolBrush=(0,0,0))
self.plt_iv50_8 = pw_iv50.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=6,symbolBrush=(0,0,0))
self.gridLayout.addWidget(pw_iv50, 0, 0)
plt300 = pg.PlotWidget(title='300-IV')
self.plt_iv300_1 = plt300.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=12,symbolBrush=(0,255,0))
self.plt_iv300_2 = plt300.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=12,symbolBrush=(0,255,0))
self.plt_iv300_3 = plt300.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=10,symbolBrush=(0,170,0))
self.plt_iv300_4 = plt300.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=10,symbolBrush=(0,170,0))
self.plt_iv300_5 = plt300.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=8,symbolBrush=(0,85,0))
self.plt_iv300_6 = plt300.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=8,symbolBrush=(0,85,0))
self.plt_iv300_7 = plt300.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=6,symbolBrush=(0,0,0))
self.plt_iv300_8 = plt300.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=6,symbolBrush=(0,0,0))
self.gridLayout.addWidget(plt300, 0, 1)
pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')
pw_month.showGrid(x=False,y=True)
pw_month.addLegend(offset=(30, 100))
self.plt_month50 = pw_month.plot(name="50")
self.plt_month300 = pw_month.plot(name="300")
self.gridLayout.addWidget(pw_month, 0, 2)
pw_qvix = pg.PlotWidget( title='QVIX')
pw_qvix.showGrid(x=True,y=True)
pw_qvix.addLegend()
self.plt_qvix = pw_qvix.plot(pen=pg.mkPen("d",width=4),name="iv")
self.gridLayout.addWidget(pw_qvix, 1, 0)
pw_north = pg.PlotWidget( title='NORTH')
pw_north.showGrid(x=False,y=True)
pw_north.addLegend()
self.plt_north_hgt =pw_north.plot(pen=pg.mkPen("b",width=2),name="hgt")
self.plt_north_sgt =pw_north.plot(pen=pg.mkPen("g",width=1),name="sgt")
self.plt_north_all =pw_north.plot(pen=pg.mkPen("d",width=1),name="all")
self.gridLayout.addWidget(pw_north, 1, 1)
pw_volume = pg.PlotWidget( title='VOLUME')
pw_volume.showGrid(x=False,y=True)
self.plt_volume =pw_volume.plot(name="volume")
self.stock_50 =pw_volume.plot(name="stock_50")
self.gridLayout.addWidget(pw_volume, 1, 2)
def update_qvix(self,df):
df = df.drop(['Pre','max','min'],axis=1)
self.plt_qvix.setData(df.index.values, df['QVIX'])
def update_north(self,df):
self.plt_north_hgt.setData( df['hgt'].astype(float)/10000)
self.plt_north_sgt.setData( df['sgt'].astype(float)/10000)
self.plt_north_all.setData(df['all'].astype(float)/10000)
def update_volume(self,data,ser):
self.plt_volume.setPen(pg.mkPen("b",width=3))
self.plt_volume.setData(data.values)
self.stock_50.setData(ser)
def update_month(self,data):
data.columns=['data','50iv','data2','300iv']
self.plt_month50.setData(data['50iv'])
self.plt_month50.setPen(pg.mkPen("r",width=2))
self.plt_month300.setData(data['300iv'])
self.plt_month300.setPen(pg.mkPen("b",width=1))
def update_iv(self,data50,data300):
data50.sort_index(inplace=True)
data50 = data50.astype(float)
data50[data50<1]=np.nan
self.plt_iv50_1.setData(data50.iloc[:,0])
self.plt_iv50_2.setData(data50.iloc[:,5])
self.plt_iv50_3.setData(data50.iloc[:,1])
self.plt_iv50_4.setData(data50.iloc[:,6])
self.plt_iv50_5.setData(data50.iloc[:,2])
self.plt_iv50_6.setData(data50.iloc[:,7])
self.plt_iv50_7.setData(data50.iloc[:,3])
self.plt_iv50_8.setData(data50.iloc[:,8])
data300.sort_index(inplace=True)
data300 = data300.astype(float)
data300[data300<1]=np.nan
self.plt_iv300_1.setData(data300.iloc[:,0])
self.plt_iv300_2.setData(data300.iloc[:,5])
self.plt_iv300_3.setData(data300.iloc[:,1])
self.plt_iv300_4.setData(data300.iloc[:,6])
self.plt_iv300_5.setData(data300.iloc[:,2])
self.plt_iv300_6.setData(data300.iloc[:,7])
self.plt_iv300_7.setData(data300.iloc[:,3])
self.plt_iv300_8.setData(data300.iloc[:,8])
def update_greek(self,gk):
        text = 'DELTA: {}  GAMMA: {}  VEGA: {}  THETA: {}'.format(gk[0], gk[1], gk[2], gk[3])
self.label_greek.setText(text)
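
# A minimal, self-contained sketch of the QThread + pyqtSignal pattern used by
# addItemThread above (TickThread and _signal_demo are illustrative names, not
# part of the app): a worker thread emits a typed signal, and the connected
# slot runs safely on the GUI thread.
class TickThread(QThread):
    tick = pyqtSignal(int)  # typed payload, queued across the thread boundary

    def run(self):
        for n in range(5):
            self.tick.emit(n)
            time.sleep(1)

def _signal_demo():
    # Wire the signal to a slot, start the worker, and let Qt deliver ticks.
    app = QApplication(sys.argv)
    label = QLabel('waiting...')
    worker = TickThread()
    worker.tick.connect(lambda n: label.setText('tick %d' % n))
    worker.start()
    label.show()
    sys.exit(app.exec_())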
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
ex.show()
sys.exit(app.exec_())
|
normal
|
{
"blob_id": "8ddb7abb480ea8ee674c59719c0946f133ef0a4b",
"index": 1303,
"step-1": "<mask token>\n\n\nclass addItemThread(QThread):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Example(QWidget):\n\n def __init__(self):\n super(Example, self).__init__()\n mthread = addItemThread()\n mthread.update_qvix.connect(self.update_qvix)\n mthread.update_north.connect(self.update_north)\n mthread.update_vol.connect(self.update_volume)\n mthread.update_month.connect(self.update_month)\n mthread.update_iv.connect(self.update_iv)\n mthread.update_greek.connect(self.update_greek)\n mthread.start()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(400, 400, 1200, 620)\n self.setWindowTitle('不被仓位左右思想,没找到弱点不要重仓')\n self.gridLayout = QGridLayout(self)\n self.plot()\n \"\"\"\n buttom\n \"\"\"\n self.label_greek = QLabel('label_greek')\n self.label_greek.setStyleSheet('background-color:rgb(250,250,250)')\n self.gridLayout.addWidget(self.label_greek, 2, 0, 1, 3)\n \"\"\"\n right\n \"\"\"\n\n def plot(self):\n pg.setConfigOption('background', 'w')\n pg.setConfigOption('foreground', 'k')\n pw_iv50 = pg.PlotWidget(title='50-IV')\n self.plt_iv50_1 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv50_2 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv50_3 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv50_4 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv50_5 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv50_6 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv50_7 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.plt_iv50_8 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.gridLayout.addWidget(pw_iv50, 0, 0)\n plt300 = pg.PlotWidget(title='300-IV')\n self.plt_iv300_1 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv300_2 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv300_3 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv300_4 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv300_5 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv300_6 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv300_7 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.plt_iv300_8 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.gridLayout.addWidget(plt300, 0, 1)\n pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')\n pw_month.showGrid(x=False, y=True)\n pw_month.addLegend(offset=(30, 100))\n self.plt_month50 = pw_month.plot(name='50')\n self.plt_month300 = pw_month.plot(name='300')\n self.gridLayout.addWidget(pw_month, 0, 2)\n pw_qvix = pg.PlotWidget(title='QVIX')\n pw_qvix.showGrid(x=True, y=True)\n pw_qvix.addLegend()\n self.plt_qvix = pw_qvix.plot(pen=pg.mkPen('d', width=4), name='iv')\n 
self.gridLayout.addWidget(pw_qvix, 1, 0)\n pw_north = pg.PlotWidget(title='NORTH')\n pw_north.showGrid(x=False, y=True)\n pw_north.addLegend()\n self.plt_north_hgt = pw_north.plot(pen=pg.mkPen('b', width=2), name\n ='hgt')\n self.plt_north_sgt = pw_north.plot(pen=pg.mkPen('g', width=1), name\n ='sgt')\n self.plt_north_all = pw_north.plot(pen=pg.mkPen('d', width=1), name\n ='all')\n self.gridLayout.addWidget(pw_north, 1, 1)\n pw_volume = pg.PlotWidget(title='VOLUME')\n pw_volume.showGrid(x=False, y=True)\n self.plt_volume = pw_volume.plot(name='volume')\n self.stock_50 = pw_volume.plot(name='stock_50')\n self.gridLayout.addWidget(pw_volume, 1, 2)\n\n def update_qvix(self, df):\n df = df.drop(['Pre', 'max', 'min'], axis=1)\n self.plt_qvix.setData(df.index.values, df['QVIX'])\n\n def update_north(self, df):\n self.plt_north_hgt.setData(df['hgt'].astype(float) / 10000)\n self.plt_north_sgt.setData(df['sgt'].astype(float) / 10000)\n self.plt_north_all.setData(df['all'].astype(float) / 10000)\n\n def update_volume(self, data, ser):\n self.plt_volume.setPen(pg.mkPen('b', width=3))\n self.plt_volume.setData(data.values)\n self.stock_50.setData(ser)\n\n def update_month(self, data):\n data.columns = ['data', '50iv', 'data2', '300iv']\n self.plt_month50.setData(data['50iv'])\n self.plt_month50.setPen(pg.mkPen('r', width=2))\n self.plt_month300.setData(data['300iv'])\n self.plt_month300.setPen(pg.mkPen('b', width=1))\n\n def update_iv(self, data50, data300):\n data50.sort_index(inplace=True)\n data50 = data50.astype(float)\n data50[data50 < 1] = np.nan\n self.plt_iv50_1.setData(data50.iloc[:, 0])\n self.plt_iv50_2.setData(data50.iloc[:, 5])\n self.plt_iv50_3.setData(data50.iloc[:, 1])\n self.plt_iv50_4.setData(data50.iloc[:, 6])\n self.plt_iv50_5.setData(data50.iloc[:, 2])\n self.plt_iv50_6.setData(data50.iloc[:, 7])\n self.plt_iv50_7.setData(data50.iloc[:, 3])\n self.plt_iv50_8.setData(data50.iloc[:, 8])\n data300.sort_index(inplace=True)\n data300 = data300.astype(float)\n data300[data300 < 1] = np.nan\n self.plt_iv300_1.setData(data300.iloc[:, 0])\n self.plt_iv300_2.setData(data300.iloc[:, 5])\n self.plt_iv300_3.setData(data300.iloc[:, 1])\n self.plt_iv300_4.setData(data300.iloc[:, 6])\n self.plt_iv300_5.setData(data300.iloc[:, 2])\n self.plt_iv300_6.setData(data300.iloc[:, 7])\n self.plt_iv300_7.setData(data300.iloc[:, 3])\n self.plt_iv300_8.setData(data300.iloc[:, 8])\n\n def update_greek(self, gk):\n text = 'DELTA:{}GAMMA:{}VEGA:{}THETA:{}'.format(gk[0], gk[1], gk[2],\n gk[3])\n self.label_greek.setText(text)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass addItemThread(QThread):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(addItemThread, self).__init__(*args, **kwargs)\n self.data_model = DataModel()\n self.num = 0\n <mask token>\n\n\nclass Example(QWidget):\n\n def __init__(self):\n super(Example, self).__init__()\n mthread = addItemThread()\n mthread.update_qvix.connect(self.update_qvix)\n mthread.update_north.connect(self.update_north)\n mthread.update_vol.connect(self.update_volume)\n mthread.update_month.connect(self.update_month)\n mthread.update_iv.connect(self.update_iv)\n mthread.update_greek.connect(self.update_greek)\n mthread.start()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(400, 400, 1200, 620)\n self.setWindowTitle('不被仓位左右思想,没找到弱点不要重仓')\n self.gridLayout = QGridLayout(self)\n self.plot()\n \"\"\"\n buttom\n \"\"\"\n self.label_greek = QLabel('label_greek')\n self.label_greek.setStyleSheet('background-color:rgb(250,250,250)')\n self.gridLayout.addWidget(self.label_greek, 2, 0, 1, 3)\n \"\"\"\n right\n \"\"\"\n\n def plot(self):\n pg.setConfigOption('background', 'w')\n pg.setConfigOption('foreground', 'k')\n pw_iv50 = pg.PlotWidget(title='50-IV')\n self.plt_iv50_1 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv50_2 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv50_3 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv50_4 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv50_5 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv50_6 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv50_7 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.plt_iv50_8 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.gridLayout.addWidget(pw_iv50, 0, 0)\n plt300 = pg.PlotWidget(title='300-IV')\n self.plt_iv300_1 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv300_2 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv300_3 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv300_4 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv300_5 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv300_6 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv300_7 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.plt_iv300_8 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.gridLayout.addWidget(plt300, 0, 1)\n pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')\n pw_month.showGrid(x=False, y=True)\n pw_month.addLegend(offset=(30, 100))\n self.plt_month50 = pw_month.plot(name='50')\n self.plt_month300 = pw_month.plot(name='300')\n self.gridLayout.addWidget(pw_month, 0, 2)\n pw_qvix = pg.PlotWidget(title='QVIX')\n 
pw_qvix.showGrid(x=True, y=True)\n pw_qvix.addLegend()\n self.plt_qvix = pw_qvix.plot(pen=pg.mkPen('d', width=4), name='iv')\n self.gridLayout.addWidget(pw_qvix, 1, 0)\n pw_north = pg.PlotWidget(title='NORTH')\n pw_north.showGrid(x=False, y=True)\n pw_north.addLegend()\n self.plt_north_hgt = pw_north.plot(pen=pg.mkPen('b', width=2), name\n ='hgt')\n self.plt_north_sgt = pw_north.plot(pen=pg.mkPen('g', width=1), name\n ='sgt')\n self.plt_north_all = pw_north.plot(pen=pg.mkPen('d', width=1), name\n ='all')\n self.gridLayout.addWidget(pw_north, 1, 1)\n pw_volume = pg.PlotWidget(title='VOLUME')\n pw_volume.showGrid(x=False, y=True)\n self.plt_volume = pw_volume.plot(name='volume')\n self.stock_50 = pw_volume.plot(name='stock_50')\n self.gridLayout.addWidget(pw_volume, 1, 2)\n\n def update_qvix(self, df):\n df = df.drop(['Pre', 'max', 'min'], axis=1)\n self.plt_qvix.setData(df.index.values, df['QVIX'])\n\n def update_north(self, df):\n self.plt_north_hgt.setData(df['hgt'].astype(float) / 10000)\n self.plt_north_sgt.setData(df['sgt'].astype(float) / 10000)\n self.plt_north_all.setData(df['all'].astype(float) / 10000)\n\n def update_volume(self, data, ser):\n self.plt_volume.setPen(pg.mkPen('b', width=3))\n self.plt_volume.setData(data.values)\n self.stock_50.setData(ser)\n\n def update_month(self, data):\n data.columns = ['data', '50iv', 'data2', '300iv']\n self.plt_month50.setData(data['50iv'])\n self.plt_month50.setPen(pg.mkPen('r', width=2))\n self.plt_month300.setData(data['300iv'])\n self.plt_month300.setPen(pg.mkPen('b', width=1))\n\n def update_iv(self, data50, data300):\n data50.sort_index(inplace=True)\n data50 = data50.astype(float)\n data50[data50 < 1] = np.nan\n self.plt_iv50_1.setData(data50.iloc[:, 0])\n self.plt_iv50_2.setData(data50.iloc[:, 5])\n self.plt_iv50_3.setData(data50.iloc[:, 1])\n self.plt_iv50_4.setData(data50.iloc[:, 6])\n self.plt_iv50_5.setData(data50.iloc[:, 2])\n self.plt_iv50_6.setData(data50.iloc[:, 7])\n self.plt_iv50_7.setData(data50.iloc[:, 3])\n self.plt_iv50_8.setData(data50.iloc[:, 8])\n data300.sort_index(inplace=True)\n data300 = data300.astype(float)\n data300[data300 < 1] = np.nan\n self.plt_iv300_1.setData(data300.iloc[:, 0])\n self.plt_iv300_2.setData(data300.iloc[:, 5])\n self.plt_iv300_3.setData(data300.iloc[:, 1])\n self.plt_iv300_4.setData(data300.iloc[:, 6])\n self.plt_iv300_5.setData(data300.iloc[:, 2])\n self.plt_iv300_6.setData(data300.iloc[:, 7])\n self.plt_iv300_7.setData(data300.iloc[:, 3])\n self.plt_iv300_8.setData(data300.iloc[:, 8])\n\n def update_greek(self, gk):\n text = 'DELTA:{}GAMMA:{}VEGA:{}THETA:{}'.format(gk[0], gk[1], gk[2],\n gk[3])\n self.label_greek.setText(text)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass addItemThread(QThread):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(addItemThread, self).__init__(*args, **kwargs)\n self.data_model = DataModel()\n self.num = 0\n\n def run(self, *args, **kwargs):\n while True:\n df = LoadNet().get_QVIX()\n self.update_qvix.emit(df)\n df_north = LoadNet().get_north()\n self.update_north.emit(df_north)\n df_vol, cha = Volume().update()\n data, last = LoadNet().get_50_163()\n ser = (data['current'] - last) / last\n self.update_vol.emit(df_vol, ser)\n if not self.data_model.df_op.empty:\n df_month = self.data_model.iv_month_50300()\n self.update_month.emit(df_month)\n df_iv50, df_iv300 = self.data_model.get_iv()\n self.update_iv.emit(df_iv50, df_iv300)\n hp = HoldPositions()\n greek = hp.update(self.data_model.df_op)\n self.update_greek.emit(greek)\n time.sleep(3)\n\n\nclass Example(QWidget):\n\n def __init__(self):\n super(Example, self).__init__()\n mthread = addItemThread()\n mthread.update_qvix.connect(self.update_qvix)\n mthread.update_north.connect(self.update_north)\n mthread.update_vol.connect(self.update_volume)\n mthread.update_month.connect(self.update_month)\n mthread.update_iv.connect(self.update_iv)\n mthread.update_greek.connect(self.update_greek)\n mthread.start()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(400, 400, 1200, 620)\n self.setWindowTitle('不被仓位左右思想,没找到弱点不要重仓')\n self.gridLayout = QGridLayout(self)\n self.plot()\n \"\"\"\n buttom\n \"\"\"\n self.label_greek = QLabel('label_greek')\n self.label_greek.setStyleSheet('background-color:rgb(250,250,250)')\n self.gridLayout.addWidget(self.label_greek, 2, 0, 1, 3)\n \"\"\"\n right\n \"\"\"\n\n def plot(self):\n pg.setConfigOption('background', 'w')\n pg.setConfigOption('foreground', 'k')\n pw_iv50 = pg.PlotWidget(title='50-IV')\n self.plt_iv50_1 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv50_2 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv50_3 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv50_4 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv50_5 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv50_6 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv50_7 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.plt_iv50_8 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.gridLayout.addWidget(pw_iv50, 0, 0)\n plt300 = pg.PlotWidget(title='300-IV')\n self.plt_iv300_1 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv300_2 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv300_3 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv300_4 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv300_5 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv300_6 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 
1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv300_7 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.plt_iv300_8 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.gridLayout.addWidget(plt300, 0, 1)\n pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')\n pw_month.showGrid(x=False, y=True)\n pw_month.addLegend(offset=(30, 100))\n self.plt_month50 = pw_month.plot(name='50')\n self.plt_month300 = pw_month.plot(name='300')\n self.gridLayout.addWidget(pw_month, 0, 2)\n pw_qvix = pg.PlotWidget(title='QVIX')\n pw_qvix.showGrid(x=True, y=True)\n pw_qvix.addLegend()\n self.plt_qvix = pw_qvix.plot(pen=pg.mkPen('d', width=4), name='iv')\n self.gridLayout.addWidget(pw_qvix, 1, 0)\n pw_north = pg.PlotWidget(title='NORTH')\n pw_north.showGrid(x=False, y=True)\n pw_north.addLegend()\n self.plt_north_hgt = pw_north.plot(pen=pg.mkPen('b', width=2), name\n ='hgt')\n self.plt_north_sgt = pw_north.plot(pen=pg.mkPen('g', width=1), name\n ='sgt')\n self.plt_north_all = pw_north.plot(pen=pg.mkPen('d', width=1), name\n ='all')\n self.gridLayout.addWidget(pw_north, 1, 1)\n pw_volume = pg.PlotWidget(title='VOLUME')\n pw_volume.showGrid(x=False, y=True)\n self.plt_volume = pw_volume.plot(name='volume')\n self.stock_50 = pw_volume.plot(name='stock_50')\n self.gridLayout.addWidget(pw_volume, 1, 2)\n\n def update_qvix(self, df):\n df = df.drop(['Pre', 'max', 'min'], axis=1)\n self.plt_qvix.setData(df.index.values, df['QVIX'])\n\n def update_north(self, df):\n self.plt_north_hgt.setData(df['hgt'].astype(float) / 10000)\n self.plt_north_sgt.setData(df['sgt'].astype(float) / 10000)\n self.plt_north_all.setData(df['all'].astype(float) / 10000)\n\n def update_volume(self, data, ser):\n self.plt_volume.setPen(pg.mkPen('b', width=3))\n self.plt_volume.setData(data.values)\n self.stock_50.setData(ser)\n\n def update_month(self, data):\n data.columns = ['data', '50iv', 'data2', '300iv']\n self.plt_month50.setData(data['50iv'])\n self.plt_month50.setPen(pg.mkPen('r', width=2))\n self.plt_month300.setData(data['300iv'])\n self.plt_month300.setPen(pg.mkPen('b', width=1))\n\n def update_iv(self, data50, data300):\n data50.sort_index(inplace=True)\n data50 = data50.astype(float)\n data50[data50 < 1] = np.nan\n self.plt_iv50_1.setData(data50.iloc[:, 0])\n self.plt_iv50_2.setData(data50.iloc[:, 5])\n self.plt_iv50_3.setData(data50.iloc[:, 1])\n self.plt_iv50_4.setData(data50.iloc[:, 6])\n self.plt_iv50_5.setData(data50.iloc[:, 2])\n self.plt_iv50_6.setData(data50.iloc[:, 7])\n self.plt_iv50_7.setData(data50.iloc[:, 3])\n self.plt_iv50_8.setData(data50.iloc[:, 8])\n data300.sort_index(inplace=True)\n data300 = data300.astype(float)\n data300[data300 < 1] = np.nan\n self.plt_iv300_1.setData(data300.iloc[:, 0])\n self.plt_iv300_2.setData(data300.iloc[:, 5])\n self.plt_iv300_3.setData(data300.iloc[:, 1])\n self.plt_iv300_4.setData(data300.iloc[:, 6])\n self.plt_iv300_5.setData(data300.iloc[:, 2])\n self.plt_iv300_6.setData(data300.iloc[:, 7])\n self.plt_iv300_7.setData(data300.iloc[:, 3])\n self.plt_iv300_8.setData(data300.iloc[:, 8])\n\n def update_greek(self, gk):\n text = 'DELTA:{}GAMMA:{}VEGA:{}THETA:{}'.format(gk[0], gk[1], gk[2],\n gk[3])\n self.label_greek.setText(text)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass addItemThread(QThread):\n update_qvix = pyqtSignal(pd.DataFrame)\n update_north = pyqtSignal(pd.DataFrame)\n update_vol = pyqtSignal(pd.Series, pd.Series)\n update_month = pyqtSignal(pd.DataFrame)\n update_iv = pyqtSignal(pd.DataFrame, pd.DataFrame)\n update_greek = pyqtSignal(list)\n\n def __init__(self, *args, **kwargs):\n super(addItemThread, self).__init__(*args, **kwargs)\n self.data_model = DataModel()\n self.num = 0\n\n def run(self, *args, **kwargs):\n while True:\n df = LoadNet().get_QVIX()\n self.update_qvix.emit(df)\n df_north = LoadNet().get_north()\n self.update_north.emit(df_north)\n df_vol, cha = Volume().update()\n data, last = LoadNet().get_50_163()\n ser = (data['current'] - last) / last\n self.update_vol.emit(df_vol, ser)\n if not self.data_model.df_op.empty:\n df_month = self.data_model.iv_month_50300()\n self.update_month.emit(df_month)\n df_iv50, df_iv300 = self.data_model.get_iv()\n self.update_iv.emit(df_iv50, df_iv300)\n hp = HoldPositions()\n greek = hp.update(self.data_model.df_op)\n self.update_greek.emit(greek)\n time.sleep(3)\n\n\nclass Example(QWidget):\n\n def __init__(self):\n super(Example, self).__init__()\n mthread = addItemThread()\n mthread.update_qvix.connect(self.update_qvix)\n mthread.update_north.connect(self.update_north)\n mthread.update_vol.connect(self.update_volume)\n mthread.update_month.connect(self.update_month)\n mthread.update_iv.connect(self.update_iv)\n mthread.update_greek.connect(self.update_greek)\n mthread.start()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(400, 400, 1200, 620)\n self.setWindowTitle('不被仓位左右思想,没找到弱点不要重仓')\n self.gridLayout = QGridLayout(self)\n self.plot()\n \"\"\"\n buttom\n \"\"\"\n self.label_greek = QLabel('label_greek')\n self.label_greek.setStyleSheet('background-color:rgb(250,250,250)')\n self.gridLayout.addWidget(self.label_greek, 2, 0, 1, 3)\n \"\"\"\n right\n \"\"\"\n\n def plot(self):\n pg.setConfigOption('background', 'w')\n pg.setConfigOption('foreground', 'k')\n pw_iv50 = pg.PlotWidget(title='50-IV')\n self.plt_iv50_1 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv50_2 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv50_3 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv50_4 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv50_5 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv50_6 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv50_7 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.plt_iv50_8 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.gridLayout.addWidget(pw_iv50, 0, 0)\n plt300 = pg.PlotWidget(title='300-IV')\n self.plt_iv300_1 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv300_2 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=12, symbolBrush=(0, 255, 0))\n self.plt_iv300_3 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv300_4 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=10, symbolBrush=(0, 170, 0))\n self.plt_iv300_5 
= plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv300_6 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=8, symbolBrush=(0, 85, 0))\n self.plt_iv300_7 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.plt_iv300_8 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=\n 1), symbolSize=6, symbolBrush=(0, 0, 0))\n self.gridLayout.addWidget(plt300, 0, 1)\n pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')\n pw_month.showGrid(x=False, y=True)\n pw_month.addLegend(offset=(30, 100))\n self.plt_month50 = pw_month.plot(name='50')\n self.plt_month300 = pw_month.plot(name='300')\n self.gridLayout.addWidget(pw_month, 0, 2)\n pw_qvix = pg.PlotWidget(title='QVIX')\n pw_qvix.showGrid(x=True, y=True)\n pw_qvix.addLegend()\n self.plt_qvix = pw_qvix.plot(pen=pg.mkPen('d', width=4), name='iv')\n self.gridLayout.addWidget(pw_qvix, 1, 0)\n pw_north = pg.PlotWidget(title='NORTH')\n pw_north.showGrid(x=False, y=True)\n pw_north.addLegend()\n self.plt_north_hgt = pw_north.plot(pen=pg.mkPen('b', width=2), name\n ='hgt')\n self.plt_north_sgt = pw_north.plot(pen=pg.mkPen('g', width=1), name\n ='sgt')\n self.plt_north_all = pw_north.plot(pen=pg.mkPen('d', width=1), name\n ='all')\n self.gridLayout.addWidget(pw_north, 1, 1)\n pw_volume = pg.PlotWidget(title='VOLUME')\n pw_volume.showGrid(x=False, y=True)\n self.plt_volume = pw_volume.plot(name='volume')\n self.stock_50 = pw_volume.plot(name='stock_50')\n self.gridLayout.addWidget(pw_volume, 1, 2)\n\n def update_qvix(self, df):\n df = df.drop(['Pre', 'max', 'min'], axis=1)\n self.plt_qvix.setData(df.index.values, df['QVIX'])\n\n def update_north(self, df):\n self.plt_north_hgt.setData(df['hgt'].astype(float) / 10000)\n self.plt_north_sgt.setData(df['sgt'].astype(float) / 10000)\n self.plt_north_all.setData(df['all'].astype(float) / 10000)\n\n def update_volume(self, data, ser):\n self.plt_volume.setPen(pg.mkPen('b', width=3))\n self.plt_volume.setData(data.values)\n self.stock_50.setData(ser)\n\n def update_month(self, data):\n data.columns = ['data', '50iv', 'data2', '300iv']\n self.plt_month50.setData(data['50iv'])\n self.plt_month50.setPen(pg.mkPen('r', width=2))\n self.plt_month300.setData(data['300iv'])\n self.plt_month300.setPen(pg.mkPen('b', width=1))\n\n def update_iv(self, data50, data300):\n data50.sort_index(inplace=True)\n data50 = data50.astype(float)\n data50[data50 < 1] = np.nan\n self.plt_iv50_1.setData(data50.iloc[:, 0])\n self.plt_iv50_2.setData(data50.iloc[:, 5])\n self.plt_iv50_3.setData(data50.iloc[:, 1])\n self.plt_iv50_4.setData(data50.iloc[:, 6])\n self.plt_iv50_5.setData(data50.iloc[:, 2])\n self.plt_iv50_6.setData(data50.iloc[:, 7])\n self.plt_iv50_7.setData(data50.iloc[:, 3])\n self.plt_iv50_8.setData(data50.iloc[:, 8])\n data300.sort_index(inplace=True)\n data300 = data300.astype(float)\n data300[data300 < 1] = np.nan\n self.plt_iv300_1.setData(data300.iloc[:, 0])\n self.plt_iv300_2.setData(data300.iloc[:, 5])\n self.plt_iv300_3.setData(data300.iloc[:, 1])\n self.plt_iv300_4.setData(data300.iloc[:, 6])\n self.plt_iv300_5.setData(data300.iloc[:, 2])\n self.plt_iv300_6.setData(data300.iloc[:, 7])\n self.plt_iv300_7.setData(data300.iloc[:, 3])\n self.plt_iv300_8.setData(data300.iloc[:, 8])\n\n def update_greek(self, gk):\n text = 'DELTA:{}GAMMA:{}VEGA:{}THETA:{}'.format(gk[0], gk[1], gk[2],\n gk[3])\n self.label_greek.setText(text)\n\n\n<mask token>\n",
"step-5": "from PyQt5.QtWidgets import QPushButton,QWidget,QApplication,QGridLayout,QListWidget,QLineEdit,QVBoxLayout,QLabel\nimport pyqtgraph as pg\nimport sys\nimport numpy as np\nfrom tools import DataModel,HoldPositions\nfrom load_sina import LoadNet\nimport time\nfrom get_day_histroy import history\nimport pandas as pd \nfrom volume import Volume\nfrom PyQt5.QtCore import QThread, pyqtSignal, QDateTime\n\nclass addItemThread(QThread):\n update_qvix = pyqtSignal(pd.DataFrame)\n update_north = pyqtSignal(pd.DataFrame)\n update_vol = pyqtSignal(pd.Series,pd.Series)\n update_month = pyqtSignal(pd.DataFrame)\n update_iv =pyqtSignal(pd.DataFrame,pd.DataFrame)\n update_greek = pyqtSignal(list)\n \n def __init__(self,*args, **kwargs):\n super(addItemThread, self).__init__(*args, **kwargs)\n self.data_model =DataModel()\n self.num = 0\n \n def run(self, *args, **kwargs):\n while True:\n df =LoadNet().get_QVIX()\n self.update_qvix.emit(df)\n\n df_north =LoadNet().get_north()\n self.update_north.emit(df_north)\n\n df_vol ,cha= Volume().update()\n data ,last = LoadNet().get_50_163()\n ser = (data['current']-last)/last\n self.update_vol.emit(df_vol,ser)\n \n if not self.data_model.df_op.empty:\n df_month = self.data_model.iv_month_50300()\n self.update_month.emit(df_month)\n\n df_iv50,df_iv300 = self.data_model.get_iv()\n self.update_iv.emit(df_iv50,df_iv300)\n\n hp = HoldPositions()\n greek = hp.update(self.data_model.df_op)\n self.update_greek.emit(greek)\n\n time.sleep(3)\n\n\nclass Example(QWidget):\n def __init__(self):\n super(Example, self).__init__()\n mthread = addItemThread()\n mthread.update_qvix.connect(self.update_qvix)\n mthread.update_north.connect(self.update_north)\n mthread.update_vol.connect(self.update_volume)\n mthread.update_month.connect(self.update_month)\n mthread.update_iv.connect(self.update_iv)\n mthread.update_greek.connect(self.update_greek)\n mthread.start()\n self.initUI()\n \n\n def initUI(self):\n self.setGeometry(400,400,1200,620)\n self.setWindowTitle(\"不被仓位左右思想,没找到弱点不要重仓\")\n self.gridLayout = QGridLayout(self)\n self.plot()\n \n '''\n buttom\n '''\n self.label_greek = QLabel('label_greek')\n self.label_greek.setStyleSheet(\"background-color:rgb(250,250,250)\")\n self.gridLayout.addWidget(self.label_greek, 2, 0,1,3)\n '''\n right\n '''\n # wight_r = QWidget(self)\n # layout_r = QVBoxLayout()\n # wight_r.setLayout(layout_r)\n # btn_calculated = QPushButton('计算收益')\n # layout_r.addWidget(btn_calculated)\n # self.gridLayout.addWidget(wight_r, 0, 3,2,1)\n\n def plot(self):\n pg.setConfigOption('background', 'w')\n pg.setConfigOption('foreground', 'k')\n\n pw_iv50 = pg.PlotWidget(title='50-IV')\n self.plt_iv50_1 = pw_iv50.plot(symbol=\"o\",pen=pg.mkPen(\"r\",width=1),symbolSize=12,symbolBrush=(0,255,0))\n self.plt_iv50_2 = pw_iv50.plot(symbol=\"o\",pen=pg.mkPen(\"g\",width=1),symbolSize=12,symbolBrush=(0,255,0))\n self.plt_iv50_3 = pw_iv50.plot(symbol=\"o\",pen=pg.mkPen(\"r\",width=1),symbolSize=10,symbolBrush=(0,170,0))\n self.plt_iv50_4 = pw_iv50.plot(symbol=\"o\",pen=pg.mkPen(\"g\",width=1),symbolSize=10,symbolBrush=(0,170,0))\n self.plt_iv50_5 = pw_iv50.plot(symbol=\"o\",pen=pg.mkPen(\"r\",width=1),symbolSize=8,symbolBrush=(0,85,0))\n self.plt_iv50_6 = pw_iv50.plot(symbol=\"o\",pen=pg.mkPen(\"g\",width=1),symbolSize=8,symbolBrush=(0,85,0))\n self.plt_iv50_7 = pw_iv50.plot(symbol=\"o\",pen=pg.mkPen(\"r\",width=1),symbolSize=6,symbolBrush=(0,0,0))\n self.plt_iv50_8 = pw_iv50.plot(symbol=\"o\",pen=pg.mkPen(\"g\",width=1),symbolSize=6,symbolBrush=(0,0,0))\n 
self.gridLayout.addWidget(pw_iv50, 0, 0)\n \n\n plt300 = pg.PlotWidget(title='300-IV')\n self.plt_iv300_1 = plt300.plot(symbol=\"o\",pen=pg.mkPen(\"r\",width=1),symbolSize=12,symbolBrush=(0,255,0))\n self.plt_iv300_2 = plt300.plot(symbol=\"o\",pen=pg.mkPen(\"g\",width=1),symbolSize=12,symbolBrush=(0,255,0))\n self.plt_iv300_3 = plt300.plot(symbol=\"o\",pen=pg.mkPen(\"r\",width=1),symbolSize=10,symbolBrush=(0,170,0))\n self.plt_iv300_4 = plt300.plot(symbol=\"o\",pen=pg.mkPen(\"g\",width=1),symbolSize=10,symbolBrush=(0,170,0))\n self.plt_iv300_5 = plt300.plot(symbol=\"o\",pen=pg.mkPen(\"r\",width=1),symbolSize=8,symbolBrush=(0,85,0))\n self.plt_iv300_6 = plt300.plot(symbol=\"o\",pen=pg.mkPen(\"g\",width=1),symbolSize=8,symbolBrush=(0,85,0))\n self.plt_iv300_7 = plt300.plot(symbol=\"o\",pen=pg.mkPen(\"r\",width=1),symbolSize=6,symbolBrush=(0,0,0))\n self.plt_iv300_8 = plt300.plot(symbol=\"o\",pen=pg.mkPen(\"g\",width=1),symbolSize=6,symbolBrush=(0,0,0))\n self.gridLayout.addWidget(plt300, 0, 1)\n\n pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')\n pw_month.showGrid(x=False,y=True)\n pw_month.addLegend(offset=(30, 100))\n self.plt_month50 = pw_month.plot(name=\"50\")\n self.plt_month300 = pw_month.plot(name=\"300\")\n self.gridLayout.addWidget(pw_month, 0, 2)\n\n pw_qvix = pg.PlotWidget( title='QVIX')\n pw_qvix.showGrid(x=True,y=True)\n pw_qvix.addLegend()\n self.plt_qvix = pw_qvix.plot(pen=pg.mkPen(\"d\",width=4),name=\"iv\")\n self.gridLayout.addWidget(pw_qvix, 1, 0)\n\n pw_north = pg.PlotWidget( title='NORTH')\n pw_north.showGrid(x=False,y=True)\n pw_north.addLegend()\n self.plt_north_hgt =pw_north.plot(pen=pg.mkPen(\"b\",width=2),name=\"hgt\")\n self.plt_north_sgt =pw_north.plot(pen=pg.mkPen(\"g\",width=1),name=\"sgt\")\n self.plt_north_all =pw_north.plot(pen=pg.mkPen(\"d\",width=1),name=\"all\")\n self.gridLayout.addWidget(pw_north, 1, 1)\n\n pw_volume = pg.PlotWidget( title='VOLUME')\n pw_volume.showGrid(x=False,y=True)\n self.plt_volume =pw_volume.plot(name=\"volume\")\n self.stock_50 =pw_volume.plot(name=\"stock_50\")\n self.gridLayout.addWidget(pw_volume, 1, 2)\n\n def update_qvix(self,df):\n df = df.drop(['Pre','max','min'],axis=1)\n self.plt_qvix.setData(df.index.values, df['QVIX'])\n\n def update_north(self,df):\n self.plt_north_hgt.setData( df['hgt'].astype(float)/10000)\n self.plt_north_sgt.setData( df['sgt'].astype(float)/10000)\n self.plt_north_all.setData(df['all'].astype(float)/10000)\n\n def update_volume(self,data,ser):\n self.plt_volume.setPen(pg.mkPen(\"b\",width=3))\n self.plt_volume.setData(data.values)\n self.stock_50.setData(ser)\n\n def update_month(self,data):\n data.columns=['data','50iv','data2','300iv']\n self.plt_month50.setData(data['50iv'])\n self.plt_month50.setPen(pg.mkPen(\"r\",width=2))\n self.plt_month300.setData(data['300iv'])\n self.plt_month300.setPen(pg.mkPen(\"b\",width=1))\n\n def update_iv(self,data50,data300):\n data50.sort_index(inplace=True)\n data50 = data50.astype(float)\n data50[data50<1]=np.nan\n self.plt_iv50_1.setData(data50.iloc[:,0])\n self.plt_iv50_2.setData(data50.iloc[:,5])\n self.plt_iv50_3.setData(data50.iloc[:,1])\n self.plt_iv50_4.setData(data50.iloc[:,6])\n self.plt_iv50_5.setData(data50.iloc[:,2])\n self.plt_iv50_6.setData(data50.iloc[:,7])\n self.plt_iv50_7.setData(data50.iloc[:,3])\n self.plt_iv50_8.setData(data50.iloc[:,8])\n\n data300.sort_index(inplace=True)\n data300 = data300.astype(float)\n data300[data300<1]=np.nan\n self.plt_iv300_1.setData(data300.iloc[:,0])\n self.plt_iv300_2.setData(data300.iloc[:,5])\n 
self.plt_iv300_3.setData(data300.iloc[:,1])\n self.plt_iv300_4.setData(data300.iloc[:,6])\n self.plt_iv300_5.setData(data300.iloc[:,2])\n self.plt_iv300_6.setData(data300.iloc[:,7])\n self.plt_iv300_7.setData(data300.iloc[:,3])\n self.plt_iv300_8.setData(data300.iloc[:,8])\n \n def update_greek(self,gk):\n text = 'DELTA:{}GAMMA:{}VEGA:{}THETA:{}'.format(gk[0],gk[1],gk[2],gk[3])\n self.label_greek.setText(text)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n ex.show()\n sys.exit(app.exec_())",
"step-ids": [
11,
12,
13,
14,
17
]
}
|
[
11,
12,
13,
14,
17
] |
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Nirvana
#
# Created: 07/06/2014
# Copyright: (c) Nirvana 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import random


class Coin(object):
    """A fair coin that starts heads-up and can be tossed."""

    def __init__(self):
        self.sideup = "Heads"

    def toss(self):
        # randint(0, 1) picks either side with equal probability.
        if random.randint(0, 1) == 0:
            self.sideup = "Heads"
        else:
            self.sideup = "Tails"

    def get_sideup(self):
        return self.sideup


mycoin = Coin()
print(mycoin.sideup)
print(mycoin.get_sideup())
mycoin.toss()
print(mycoin.get_sideup())
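
# A small reproducibility sketch (assumption: a deterministic sequence is
# wanted, e.g. for tests). Seeding the module-level RNG fixes the tosses.
random.seed(42)
test_coin = Coin()
results = []
for _ in range(5):
    test_coin.toss()
    results.append(test_coin.get_sideup())
print(results)  # the same list on every run with this seed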
|
normal
|
{
"blob_id": "eb246beb05249f5dfde019b773698ba3bb1b1118",
"index": 544,
"step-1": "<mask token>\n\n\nclass Coin(object):\n\n def __init__(self):\n self.sideup = 'Heads'\n\n def toss(self):\n if random.randint(0, 1) == 0:\n self.sideup = 'Heads'\n else:\n self.sideup = 'Tails'\n\n def get_sideup(self):\n return self.sideup\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Coin(object):\n\n def __init__(self):\n self.sideup = 'Heads'\n\n def toss(self):\n if random.randint(0, 1) == 0:\n self.sideup = 'Heads'\n else:\n self.sideup = 'Tails'\n\n def get_sideup(self):\n return self.sideup\n\n\n<mask token>\nprint(mycoin.sideup)\nprint(mycoin.get_sideup())\nmycoin.toss()\nprint(mycoin.get_sideup())\n",
"step-3": "<mask token>\n\n\nclass Coin(object):\n\n def __init__(self):\n self.sideup = 'Heads'\n\n def toss(self):\n if random.randint(0, 1) == 0:\n self.sideup = 'Heads'\n else:\n self.sideup = 'Tails'\n\n def get_sideup(self):\n return self.sideup\n\n\nmycoin = Coin()\nprint(mycoin.sideup)\nprint(mycoin.get_sideup())\nmycoin.toss()\nprint(mycoin.get_sideup())\n",
"step-4": "import random\n\n\nclass Coin(object):\n\n def __init__(self):\n self.sideup = 'Heads'\n\n def toss(self):\n if random.randint(0, 1) == 0:\n self.sideup = 'Heads'\n else:\n self.sideup = 'Tails'\n\n def get_sideup(self):\n return self.sideup\n\n\nmycoin = Coin()\nprint(mycoin.sideup)\nprint(mycoin.get_sideup())\nmycoin.toss()\nprint(mycoin.get_sideup())\n",
"step-5": "#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: Nirvana\n#\n# Created: 07/06/2014\n# Copyright: (c) Nirvana 2014\n# Licence: <your licence>\n#-------------------------------------------------------------------------------\n\nimport random\n\nclass Coin(object):\n def __init__(self):\n self.sideup = \"Heads\"\n\n def toss(self):\n if random.randint(0,1)==0:\n self.sideup = \"Heads\"\n else:\n self.sideup = \"Tails\"\n\n def get_sideup(self):\n return self.sideup\n\nmycoin=Coin()\nprint (mycoin.sideup)\nprint (mycoin.get_sideup())\nmycoin.toss()\nprint (mycoin.get_sideup())\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from os.path import basename
from .FileInfo import FileInfo
class mrk_file(FileInfo):
"""
.mrk specific file container.
"""
def __init__(self, id_=None, file=None, parent=None):
super(mrk_file, self).__init__(id_, file, parent)
self._type = '.mrk'
#region class methods
def __getstate__(self):
data = super(mrk_file, self).__getstate__()
return data
def __setstate__(self, state):
super(mrk_file, self).__setstate__(state)
def __repr__(self):
# Have a separate representation for .mrk files as this is shown in the
# info for each con file under the list of associated mrk's.
return str(basename(self.file))
|
normal
|
{
"blob_id": "8e9aec7d3653137a05f94e4041d28f3423122751",
"index": 3990,
"step-1": "<mask token>\n\n\nclass mrk_file(FileInfo):\n <mask token>\n\n def __init__(self, id_=None, file=None, parent=None):\n super(mrk_file, self).__init__(id_, file, parent)\n self._type = '.mrk'\n <mask token>\n\n def __setstate__(self, state):\n super(mrk_file, self).__setstate__(state)\n\n def __repr__(self):\n return str(basename(self.file))\n",
"step-2": "<mask token>\n\n\nclass mrk_file(FileInfo):\n <mask token>\n\n def __init__(self, id_=None, file=None, parent=None):\n super(mrk_file, self).__init__(id_, file, parent)\n self._type = '.mrk'\n\n def __getstate__(self):\n data = super(mrk_file, self).__getstate__()\n return data\n\n def __setstate__(self, state):\n super(mrk_file, self).__setstate__(state)\n\n def __repr__(self):\n return str(basename(self.file))\n",
"step-3": "<mask token>\n\n\nclass mrk_file(FileInfo):\n \"\"\"\n .mrk specific file container.\n \"\"\"\n\n def __init__(self, id_=None, file=None, parent=None):\n super(mrk_file, self).__init__(id_, file, parent)\n self._type = '.mrk'\n\n def __getstate__(self):\n data = super(mrk_file, self).__getstate__()\n return data\n\n def __setstate__(self, state):\n super(mrk_file, self).__setstate__(state)\n\n def __repr__(self):\n return str(basename(self.file))\n",
"step-4": "from os.path import basename\nfrom .FileInfo import FileInfo\n\n\nclass mrk_file(FileInfo):\n \"\"\"\n .mrk specific file container.\n \"\"\"\n\n def __init__(self, id_=None, file=None, parent=None):\n super(mrk_file, self).__init__(id_, file, parent)\n self._type = '.mrk'\n\n def __getstate__(self):\n data = super(mrk_file, self).__getstate__()\n return data\n\n def __setstate__(self, state):\n super(mrk_file, self).__setstate__(state)\n\n def __repr__(self):\n return str(basename(self.file))\n",
"step-5": "from os.path import basename\n\nfrom .FileInfo import FileInfo\n\n\nclass mrk_file(FileInfo):\n \"\"\"\n .mrk specific file container.\n \"\"\"\n def __init__(self, id_=None, file=None, parent=None):\n super(mrk_file, self).__init__(id_, file, parent)\n self._type = '.mrk'\n\n#region class methods\n\n def __getstate__(self):\n data = super(mrk_file, self).__getstate__()\n\n return data\n\n def __setstate__(self, state):\n super(mrk_file, self).__setstate__(state)\n\n def __repr__(self):\n # Have a separate representation for .mrk files as this is shown in the\n # info for each con file under the list of associated mrk's.\n return str(basename(self.file))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
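mrk_file relies on the __getstate__/__setstate__ pickle hooks it inherits and overrides. FileInfo is not shown in this record, so the following sketch uses a simplified stand-in to illustrate what the pair is for: controlling exactly which attributes survive a pickle round-trip.

import pickle

class FileInfo(object):                      # simplified stand-in, not the real base class
    def __init__(self, id_=None, file=None, parent=None):
        self.id_, self.file, self.parent = id_, file, parent
    def __getstate__(self):
        state = self.__dict__.copy()
        state['parent'] = None               # drop the (possibly unpicklable) parent link
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)

info = FileInfo(id_=1, file='/tmp/example.mrk')
clone = pickle.loads(pickle.dumps(info))
print(clone.file, clone.parent)              # '/tmp/example.mrk' None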
from django.urls import path
from django.conf.urls.i18n import urlpatterns
from . import views
urlpatterns = [
path('signup/', views.signup, name='signup'),
path('home', views.home, name='home'),
path('collab/', views.collab, name='collab'),
]
|
normal
|
{
"blob_id": "351963bee76ecaa9fa5c8d659f6d7c6ca9b22531",
"index": 2182,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('signup/', views.signup, name='signup'), path('home',\n views.home, name='home'), path('collab/', views.collab, name='collab')]\n",
"step-3": "from django.urls import path\nfrom django.conf.urls.i18n import urlpatterns\nfrom . import views\nurlpatterns = [path('signup/', views.signup, name='signup'), path('home',\n views.home, name='home'), path('collab/', views.collab, name='collab')]\n",
"step-4": "from django.urls import path\nfrom django.conf.urls.i18n import urlpatterns\n\nfrom . import views\n\nurlpatterns = [\n path('signup/', views.signup, name='signup'),\n path('home', views.home, name='home'),\n path('collab/', views.collab, name='collab'),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
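Note that the i18n import in this record is immediately shadowed: django.conf.urls.i18n does export a urlpatterns list, but the module-level assignment below the imports replaces it, so the import has no effect. A minimal equivalent module needs only path() and the app's views:

from django.urls import path
from . import views

urlpatterns = [
    path('signup/', views.signup, name='signup'),
    path('home', views.home, name='home'),
    path('collab/', views.collab, name='collab'),
]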
import numpy as np
'''
1. Create a 0-D array, 1-D array, 2-D array, and 3-D array with the following values
0-D: [2]
1-D: [3, 4, 5, 6, 7]
2-D: [[8, 1, 3], [2, 3, 4], [6, 2, 5]]
3-D: [[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8, 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]]
print them
'''
D0 = np.array(2)
D1 = np.array([3, 4, 5, 6, 7])
D2 = np.array([[8, 1, 3], [2, 3, 4], [6, 2, 5]])
D3 = np.array([[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8, 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]])
print('D0')
print(D0)
print('D1')
print(D1)
print('D2')
print(D2)
print('D3')
print(D3)
'''
2. Use indexing to change every value of 8 to 100 in the 4 arrays
    array[index1, index2] = newValue
    for example: the 2-D array should become: [[100, 1, 3], [2, 3, 4], [6, 2, 5]]
print them
'''
D2[0, 0] = 100
print('D2')
print(D2)
D3[1, 0, 1] = 100
D3[1, 2, 0] = 100
print('D3')
print(D3)
'''
3. Print the sum of the following values
a. the value of 0-D array
b. the middle of 1-D array
c. the center of 2-D array
d. the center of 3-D array ( the center of middle 2-D array )
* The value should be 11
'''
print('*** the final sum result is: ')
print(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])
|
normal
|
{
"blob_id": "a868ecb6ea6a5c7a186ddd8fa4fb76d96efeb21d",
"index": 4140,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('D0')\nprint(D0)\nprint('D1')\nprint(D1)\nprint('D2')\nprint(D2)\nprint('D3')\nprint(D3)\n<mask token>\nprint('D2')\nprint(D2)\n<mask token>\nprint('D3')\nprint(D3)\n<mask token>\nprint('*** the final sum result is: ')\nprint(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])\n",
"step-3": "<mask token>\nD0 = np.array(2)\nD1 = np.array([3, 4, 5, 6, 7])\nD2 = np.array([[8, 1, 3], [2, 3, 4], [6, 2, 5]])\nD3 = np.array([[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8,\n 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]])\nprint('D0')\nprint(D0)\nprint('D1')\nprint(D1)\nprint('D2')\nprint(D2)\nprint('D3')\nprint(D3)\n<mask token>\nD2[0, 0] = 100\nprint('D2')\nprint(D2)\nD3[1, 0, 1] = 100\nD3[1, 2, 0] = 100\nprint('D3')\nprint(D3)\n<mask token>\nprint('*** the final sum result is: ')\nprint(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])\n",
"step-4": "import numpy as np\n<mask token>\nD0 = np.array(2)\nD1 = np.array([3, 4, 5, 6, 7])\nD2 = np.array([[8, 1, 3], [2, 3, 4], [6, 2, 5]])\nD3 = np.array([[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8,\n 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]])\nprint('D0')\nprint(D0)\nprint('D1')\nprint(D1)\nprint('D2')\nprint(D2)\nprint('D3')\nprint(D3)\n<mask token>\nD2[0, 0] = 100\nprint('D2')\nprint(D2)\nD3[1, 0, 1] = 100\nD3[1, 2, 0] = 100\nprint('D3')\nprint(D3)\n<mask token>\nprint('*** the final sum result is: ')\nprint(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])\n",
"step-5": "import numpy as np\r\n\r\n'''\r\n1. Create 0-D array, 1-D array, 2-D array, 3-D array with following value\r\n\r\n\t0-D: [2]\r\n\t1-D: [3, 4, 5, 6, 7]\r\n\t2-D: [[8, 1, 3], [2, 3, 4], [6, 2, 5]]\r\n\t3-D: [[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8, 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]]\r\n\r\n\tprint them\r\n'''\r\nD0 = np.array(2)\r\nD1 = np.array([3, 4, 5, 6, 7])\r\nD2 = np.array([[8, 1, 3], [2, 3, 4], [6, 2, 5]])\r\nD3 = np.array([[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8, 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]])\r\nprint('D0')\r\nprint(D0)\r\nprint('D1')\r\nprint(D1)\r\nprint('D2')\r\nprint(D2)\r\nprint('D3')\r\nprint(D3)\r\n\r\n'''\r\n2. Use index to change all value 8 to 100 in 4 arrays\r\n\r\n\tarray[index1, index2] = newValue\r\n for example: 2-D array should be changed as : [[100, 1, 3], [2, 3, 4], [6, 2, 5]]\r\n\r\n\tprint them\r\n'''\r\nD2[0, 0] = 100\r\nprint('D2')\r\nprint(D2)\r\nD3[1, 0, 1] = 100\r\nD3[1, 2, 0] = 100\r\nprint('D3')\r\nprint(D3)\r\n'''\r\n3. Print the sum of all following values\r\n\r\n\ta. the value of 0-D array\r\n\tb. the middle of 1-D array\r\n\tc. the center of 2-D array\r\n\td. the center of 3-D array ( the center of middle 2-D array )\r\n\r\n\t* The value should be 11\r\n'''\r\nprint('*** the final sum result is: ')\r\nprint(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
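The exercise above edits each 8 through an explicit index. Boolean-mask assignment does the same replacement in one step and works for any dimensionality, which is handy when the positions are not known in advance:

import numpy as np

arr = np.array([[8, 1, 3], [2, 3, 4], [6, 2, 5]])
arr[arr == 8] = 100          # replace every matching element at once
print(arr)                   # [[100 1 3] [2 3 4] [6 2 5]]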
from django.contrib import admin
from .models import Account
# Register your models here.
class AuthenticationCustom(admin.ModelAdmin):
list_display = ("email", "id")
search_fields = ["email", "mobile"]
admin.site.register(Account, AuthenticationCustom)
|
normal
|
{
"blob_id": "4957e62deec6192aabdf7144f02b28c7ce60ed4b",
"index": 4250,
"step-1": "<mask token>\n\n\nclass AuthenticationCustom(admin.ModelAdmin):\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AuthenticationCustom(admin.ModelAdmin):\n list_display = 'email', 'id'\n search_fields = ['email', 'mobile']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AuthenticationCustom(admin.ModelAdmin):\n list_display = 'email', 'id'\n search_fields = ['email', 'mobile']\n\n\nadmin.site.register(Account, AuthenticationCustom)\n",
"step-4": "from django.contrib import admin\nfrom .models import Account\n\n\nclass AuthenticationCustom(admin.ModelAdmin):\n list_display = 'email', 'id'\n search_fields = ['email', 'mobile']\n\n\nadmin.site.register(Account, AuthenticationCustom)\n",
"step-5": "from django.contrib import admin\nfrom .models import Account\n# Register your models here.\n\n\nclass AuthenticationCustom(admin.ModelAdmin):\n\tlist_display = (\"email\", \"id\")\n\n\tsearch_fields = [\"email\", \"mobile\"]\n\n\nadmin.site.register(Account, AuthenticationCustom)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
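Django also offers a decorator form of the same registration; the following is equivalent to the admin.site.register() call in the record:

from django.contrib import admin
from .models import Account

@admin.register(Account)
class AuthenticationCustom(admin.ModelAdmin):
    list_display = ("email", "id")
    search_fields = ["email", "mobile"]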
#!/usr/bin/env python
# coding: utf-8
from unittest import TestCase
from optimoida.logging import (
SUCCESS, FAILURE, logger)
class LoggerTestCase(TestCase):
def test_flag_value(self):
self.assertEqual(SUCCESS, "\x1b[34mSUCCESS\x1b[0m")
self.assertEqual(FAILURE, "\x1b[31mFAILURE\x1b[0m")
def test_logger(self):
msg = "test"
self.assertEqual(logger.info(msg), "\x1b[97m[~] \x1b[0mtest")
self.assertEqual(
logger.info(msg, SUCCESS),
"\x1b[97m[~] \x1b[0m\x1b[34mSUCCESS\x1b[0m test")
self.assertEqual(logger.warn(msg), "\x1b[33m[!] \x1b[0mtest")
self.assertEqual(logger.error(msg), "\x1b[31m[-] \x1b[0mtest")
self.assertEqual(
logger.error(msg, FAILURE),
"\x1b[31m[-] \x1b[0m\x1b[31mFAILURE\x1b[0m test")
|
normal
|
{
"blob_id": "ac8c8dc4bcccef7942dd48d54902e13e811f950c",
"index": 5059,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LoggerTestCase(TestCase):\n\n def test_flag_value(self):\n self.assertEqual(SUCCESS, '\\x1b[34mSUCCESS\\x1b[0m')\n self.assertEqual(FAILURE, '\\x1b[31mFAILURE\\x1b[0m')\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LoggerTestCase(TestCase):\n\n def test_flag_value(self):\n self.assertEqual(SUCCESS, '\\x1b[34mSUCCESS\\x1b[0m')\n self.assertEqual(FAILURE, '\\x1b[31mFAILURE\\x1b[0m')\n\n def test_logger(self):\n msg = 'test'\n self.assertEqual(logger.info(msg), '\\x1b[97m[~] \\x1b[0mtest')\n self.assertEqual(logger.info(msg, SUCCESS),\n '\\x1b[97m[~] \\x1b[0m\\x1b[34mSUCCESS\\x1b[0m test')\n self.assertEqual(logger.warn(msg), '\\x1b[33m[!] \\x1b[0mtest')\n self.assertEqual(logger.error(msg), '\\x1b[31m[-] \\x1b[0mtest')\n self.assertEqual(logger.error(msg, FAILURE),\n '\\x1b[31m[-] \\x1b[0m\\x1b[31mFAILURE\\x1b[0m test')\n",
"step-4": "from unittest import TestCase\nfrom optimoida.logging import SUCCESS, FAILURE, logger\n\n\nclass LoggerTestCase(TestCase):\n\n def test_flag_value(self):\n self.assertEqual(SUCCESS, '\\x1b[34mSUCCESS\\x1b[0m')\n self.assertEqual(FAILURE, '\\x1b[31mFAILURE\\x1b[0m')\n\n def test_logger(self):\n msg = 'test'\n self.assertEqual(logger.info(msg), '\\x1b[97m[~] \\x1b[0mtest')\n self.assertEqual(logger.info(msg, SUCCESS),\n '\\x1b[97m[~] \\x1b[0m\\x1b[34mSUCCESS\\x1b[0m test')\n self.assertEqual(logger.warn(msg), '\\x1b[33m[!] \\x1b[0mtest')\n self.assertEqual(logger.error(msg), '\\x1b[31m[-] \\x1b[0mtest')\n self.assertEqual(logger.error(msg, FAILURE),\n '\\x1b[31m[-] \\x1b[0m\\x1b[31mFAILURE\\x1b[0m test')\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom unittest import TestCase\nfrom optimoida.logging import (\n SUCCESS, FAILURE, logger)\n\n\nclass LoggerTestCase(TestCase):\n\n def test_flag_value(self):\n\n self.assertEqual(SUCCESS, \"\\x1b[34mSUCCESS\\x1b[0m\")\n self.assertEqual(FAILURE, \"\\x1b[31mFAILURE\\x1b[0m\")\n\n def test_logger(self):\n\n msg = \"test\"\n\n self.assertEqual(logger.info(msg), \"\\x1b[97m[~] \\x1b[0mtest\")\n self.assertEqual(\n logger.info(msg, SUCCESS),\n \"\\x1b[97m[~] \\x1b[0m\\x1b[34mSUCCESS\\x1b[0m test\")\n\n self.assertEqual(logger.warn(msg), \"\\x1b[33m[!] \\x1b[0mtest\")\n\n self.assertEqual(logger.error(msg), \"\\x1b[31m[-] \\x1b[0mtest\")\n self.assertEqual(\n logger.error(msg, FAILURE),\n \"\\x1b[31m[-] \\x1b[0m\\x1b[31mFAILURE\\x1b[0m test\")\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
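The literals asserted above are ANSI SGR escape sequences: \x1b[<n>m switches the terminal style (34 is blue foreground, 31 red, 33 yellow, 97 bright white) and \x1b[0m resets it. A two-line demo:

BLUE, RED, RESET = "\x1b[34m", "\x1b[31m", "\x1b[0m"
print(BLUE + "SUCCESS" + RESET, RED + "FAILURE" + RESET)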
# coding=gbk
from numpy import *
import fp_growth
'''
#Create a single tree node
rootNode=fp_growth.treeNode('pyramid',9,None)
#Add a child node to it
rootNode.children['eye']=fp_growth.treeNode('eye',13,None)
rootNode.disp()
#Load the example transaction database
simpData=fp_growth.loadSimpData()
#print("simpData:")
#print(simpData)
#Format the data for tree construction
initSet=fp_growth.createInitSet(simpData)
#print("initSet:")
#print(initSet)
myFPtree,myHeaderTab=fp_growth.createTree(initSet,3)
#print("myFPtree:")
#print(myFPtree)
#myFPtree.disp()
print("myFPtree:")
#print(myFPtree)
myFPtree.disp()
print("myHeaderTab:")
for item in myHeaderTab.items():
print(item)
path=fp_growth.findPrefixPath('r',myHeaderTab['r'][1])
print("path:")
print(path)
#Create an empty list to store all the frequent itemsets
freqItems=[]
fp_growth.minTree(myFPtree,myHeaderTab,3,set([]),freqItems)
'''
parsedDat=[line.split() for line in open('kosarak.dat').readlines()]
initSet=fp_growth.createInitSet(parsedDat)
myFPtree,myHeaderTab=fp_growth.createTree(initSet,100000)
myFreqList=[]
fp_growth.minTree(myFPtree,myHeaderTab,100000,set([]),myFreqList)
print(len(myFreqList))
|
normal
|
{
"blob_id": "e8b0e6e5e68933703e2ac8c9b2b62d68c0c2f53d",
"index": 8295,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfp_growth.minTree(myFPtree, myHeaderTab, 100000, set([]), myFreqList)\nprint(len(myFreqList))\n",
"step-3": "<mask token>\nparsedDat = [line.split() for line in open('kosarak.dat').readlines()]\ninitSet = fp_growth.createInitSet(parsedDat)\nmyFPtree, myHeaderTab = fp_growth.createTree(initSet, 100000)\nmyFreqList = []\nfp_growth.minTree(myFPtree, myHeaderTab, 100000, set([]), myFreqList)\nprint(len(myFreqList))\n",
"step-4": "from numpy import *\nimport fp_growth\n<mask token>\nparsedDat = [line.split() for line in open('kosarak.dat').readlines()]\ninitSet = fp_growth.createInitSet(parsedDat)\nmyFPtree, myHeaderTab = fp_growth.createTree(initSet, 100000)\nmyFreqList = []\nfp_growth.minTree(myFPtree, myHeaderTab, 100000, set([]), myFreqList)\nprint(len(myFreqList))\n",
"step-5": "# coding=gbk\nfrom numpy import *\n\nimport fp_growth\n\n\n'''\n#创建树的一个单节点\nrootNode=fp_growth.treeNode('pyramid',9,None)\n#为其增加一个子节点\nrootNode.children['eye']=fp_growth.treeNode('eye',13,None)\n\nrootNode.disp()\n\n\n\n#导入事务数据库实例\nsimpData=fp_growth.loadSimpData()\n#print(\"simpData:\")\n#print(simpData)\n\n#对数据进行格式化处理\ninitSet=fp_growth.createInitSet(simpData)\n#print(\"initSet:\")\n#print(initSet)\n\nmyFPtree,myHeaderTab=fp_growth.createTree(initSet,3)\n\n#print(\"myFPtree:\")\n#print(myFPtree)\n#myFPtree.disp()\n\nprint(\"myFPtree:\")\n#print(myFPtree)\nmyFPtree.disp()\n\n\nprint(\"myHeaderTab:\")\nfor item in myHeaderTab.items():\n\tprint(item)\n\t\npath=fp_growth.findPrefixPath('r',myHeaderTab['r'][1])\nprint(\"path:\")\t\nprint(path)\n\n#建立一个空列表来存储所有的频繁项集\nfreqItems=[]\nfp_growth.minTree(myFPtree,myHeaderTab,3,set([]),freqItems)\n\n\n'''\n\nparsedDat=[line.split() for line in open('kosarak.dat').readlines()]\ninitSet=fp_growth.createInitSet(parsedDat)\nmyFPtree,myHeaderTab=fp_growth.createTree(initSet,100000)\nmyFreqList=[]\nfp_growth.minTree(myFPtree,myHeaderTab,100000,set([]),myFreqList)\nprint(len(myFreqList))\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
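Since the script loads every line of kosarak.dat into memory before building the tree, a cheap, module-independent sanity check of the 100000 support threshold is to stream the file and count item frequencies first; only the items printed here can appear in any frequent itemset:

from collections import Counter

counts = Counter()
with open('kosarak.dat') as f:
    for line in f:
        counts.update(set(line.split()))   # set() ignores repeats within one transaction
print([item for item, c in counts.items() if c >= 100000])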
import os
import time
import re
import json
from os.path import join, getsize
from aiohttp import web
from utils import helper
TBL_HEAD = '''
<table class="table table-striped table-hover table-sm">
<thead>
<tr>
<th scope="col">Directory</th>
<th scope="col">Size</th>
</tr>
</thead>
<tbody>
'''
TBL_FOOTER = '''
</tbody>
</table>
'''
def stats_count_info(request):
root_path = request.app['PATH-DB']
cpt = 0
d = dict()
dirs_data = dict()
for root, dirs, files in os.walk(root_path, topdown=False):
cpt += len(files)
size = sum(getsize(join(root, name)) for name in files)
subdir_size = sum(dirs_data[join(root,d)] for d in dirs)
size = dirs_data[root] = size + subdir_size
if root.find('.meta') != -1:
# we ignore (internal) meta directories
continue
d[root] = size
ret = ''
ret += "<h2>Files Count</h2>Number of files: {}<br /><br />".format(cpt)
ret += "<h2>Disk Consumption</h2>"
ret += "Database disk consumption overall: {} MB<br /><br />".format(d[root_path] // (1024*1024))
ret += "<h4>Resouce Usage Listed by Objects</h4><br />"
ret += TBL_HEAD
for k in sorted(d, key=d.get, reverse=True):
ret += '<tr>'
ret += "<td>{}</td><td>{}</td>".format(k, d[k])
ret += TBL_FOOTER
return ret
def generate_disk_info_page(request):
page = request.app['BLOB-HEADER']
page += stats_count_info(request)
page += request.app['BLOB-FOOTER']
return web.Response(body=page, content_type='text/html')
def handle(request):
return generate_disk_info_page(request)
|
normal
|
{
"blob_id": "7c9b51ae7cde9c3a00888dac6df710b93af6dd7f",
"index": 4836,
"step-1": "<mask token>\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\n<mask token>\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-2": "<mask token>\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-3": "<mask token>\nTBL_HEAD = \"\"\"\n<table class=\"table table-striped table-hover table-sm\">\n <thead>\n <tr>\n <th scope=\"col\">Directory</th>\n <th scope=\"col\">Size</th>\n </tr>\n </thead>\n <tbody>\n\"\"\"\nTBL_FOOTER = \"\"\"\n </tbody>\n</table>\n\"\"\"\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-4": "import os\nimport time\nimport re\nimport json\nfrom os.path import join, getsize\nfrom aiohttp import web\nfrom utils import helper\nTBL_HEAD = \"\"\"\n<table class=\"table table-striped table-hover table-sm\">\n <thead>\n <tr>\n <th scope=\"col\">Directory</th>\n <th scope=\"col\">Size</th>\n </tr>\n </thead>\n <tbody>\n\"\"\"\nTBL_FOOTER = \"\"\"\n </tbody>\n</table>\n\"\"\"\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-5": "import os\nimport time\nimport re\nimport json\nfrom os.path import join, getsize\n\nfrom aiohttp import web\n\nfrom utils import helper\n\nTBL_HEAD = '''\n<table class=\"table table-striped table-hover table-sm\">\n <thead>\n <tr>\n <th scope=\"col\">Directory</th>\n <th scope=\"col\">Size</th>\n </tr>\n </thead>\n <tbody>\n'''\n\nTBL_FOOTER = '''\n </tbody>\n</table>\n'''\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root,d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n # we ignore (internal) meta directories\n continue\n d[root] = size\n\n ret = ''\n ret += \"<h2>Files Count</h2>Number of files: {}<br /><br />\".format(cpt)\n ret += \"<h2>Disk Consumption</h2>\"\n ret += \"Database disk consumption overall: {} MB<br /><br />\".format(d[root_path] // (1024*1024))\n ret += \"<h4>Resouce Usage Listed by Objects</h4><br />\"\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += \"<td>{}</td><td>{}</td>\".format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
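The size accounting above works only because os.walk runs with topdown=False: children are yielded before their parents, so each directory can sum the cached totals of its immediate subdirectories. A standalone sketch of the same bottom-up idea (the .get() guards against symlinked directories the walk lists but never descends into):

import os
from os.path import join, getsize

def dir_sizes(root_path):
    sizes = {}
    for root, dirs, files in os.walk(root_path, topdown=False):
        size = sum(getsize(join(root, name)) for name in files)
        size += sum(sizes.get(join(root, d), 0) for d in dirs)
        sizes[root] = size
    return sizes

print(dir_sizes('.'))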
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import uuid
import mock
import mox
import six
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
from ..resources import cloud_loadbalancer as lb # noqa
# The following fakes are for pyrax
cert = """\n-----BEGIN CERTIFICATE-----
MIIFBjCCAu4CCQDWdcR5LY/+/jANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
cyBQdHkgTHRkMB4XDTE0MTAxNjE3MDYxNVoXDTE1MTAxNjE3MDYxNVowRTELMAkG
A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
IFdpZGdpdHMgUHR5IEx0ZDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
AMm5NcP0tMKHblT6Ud1k8TxZ9/8uOHwUNPbvFsvSyCupj0J0vGCTjbuC2I5T/CXR
tnLEIt/EarlNAqcjbDCWtSyEKs3zDmmkreoIDEa8pyAQ2ycsCXGMxDN97F3/wlLZ
agUNM0FwGHLZWBg62bM6l+bpTUcX0PqSyv/aVMhJ8EPDX0Dx1RYsVwUzIe/HWC7x
vCmtDApAp1Fwq7AwlRaKU17sGwPWJ8+I8PyouBdqNuslHm7LQ0XvBA5DfkQA6feB
ZeJIyOtctM9WFWQI5fKOsyt5P306B3Zztw9VZLAmZ8qHex+R1WY1zXxDAwKEQz/X
8bRqMA/VU8OxJcK0AmY/1v/TFmAlRh2XBCIc+5UGtCcftWvZJAsKur8Hg5pPluGv
ptyqSgSsSKtOVWkyTANP1LyOkpBA8Kmkeo2CKXu1SCFypY5Q6E+Fy8Y8RaHJPvzR
NHcm1tkBvHOKyRso6FjvxuJEyIC9EyUK010nwQm7Qui11VgCSHBoaKVvkIbFfQdK
aCes0oQO5dqY0+fC/IFDhrxlvSd2Wk7KjuNjNu9kVN9Ama2pRTxhYKaN+GsHfoL7
ra6G9HjbUVULAdjCko3zOKEUzFLLf1VZYk7hDhyv9kovk0b8sr5WowxW7+9Wy0NK
WL5f2QgVCcoHw9bGhyuYQCdBfztNmKOWe9pGj6bQAx4pAgMBAAEwDQYJKoZIhvcN
AQEFBQADggIBALFSj3G2TEL/UWtNcPeY2fbxSGBrboFx3ur8+zTkdZzvfC8H9/UK
w0aRH0rK4+lKYDqF6A9bUHP17DaJm1lF9In38VVMOuur0ehUIn1S2U3OvlDLN68S
p5D4wGKMcUfUQ6pzhSKJCMvGX561TKHCc5fZhPruy75Xq2DcwJENE189foKLFvJs
ca4sIARqP6v1vfARcfH5leSsdIq8hy6VfL0BRATXfNHZh4SNbyDJYYTxrEUPHYXW
pzW6TziZXYNMG2ZRdHF/mDJuFzw2EklOrPC9MySCZv2i9swnqyuwNYh/SAMhodTv
ZDGy4nbjWNe5BflTMBceh45VpyTcnQulFhZQFwP79fK10BoDrOc1mEefhIqT+fPI
LJepLOf7CSXtYBcWbmMCLHNh+PrlCiA1QMTyd/AC1vvoiyCbs3M419XbXcBSDEh8
tACplmhf6z1vDkElWiDr8y0kujJ/Gie24iLTun6oHG+f+o6bbQ9w196T0olLcGx0
oAYL0Olqli6cWHhraVAzZ5t5PH4X9TiESuQ+PMjqGImCIUscXY4objdnB5dfPHoz
eF5whPl36/GK8HUixCibkCyqEOBBuNqhOz7nVLM0eg5L+TE5coizEBagxVCovYSj
fQ9zkIgaC5oeH6L0C1FFG1vRNSWokheBk14ztVoJCJyFr6p0/6pD7SeR
-----END CERTIFICATE-----\n"""
private_key = """\n-----BEGIN PRIVATE KEY-----
MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDJuTXD9LTCh25U
+lHdZPE8Wff/Ljh8FDT27xbL0sgrqY9CdLxgk427gtiOU/wl0bZyxCLfxGq5TQKn
I2wwlrUshCrN8w5ppK3qCAxGvKcgENsnLAlxjMQzfexd/8JS2WoFDTNBcBhy2VgY
OtmzOpfm6U1HF9D6ksr/2lTISfBDw19A8dUWLFcFMyHvx1gu8bwprQwKQKdRcKuw
MJUWilNe7BsD1ifPiPD8qLgXajbrJR5uy0NF7wQOQ35EAOn3gWXiSMjrXLTPVhVk
COXyjrMreT99Ogd2c7cPVWSwJmfKh3sfkdVmNc18QwMChEM/1/G0ajAP1VPDsSXC
tAJmP9b/0xZgJUYdlwQiHPuVBrQnH7Vr2SQLCrq/B4OaT5bhr6bcqkoErEirTlVp
MkwDT9S8jpKQQPCppHqNgil7tUghcqWOUOhPhcvGPEWhyT780TR3JtbZAbxziskb
KOhY78biRMiAvRMlCtNdJ8EJu0LotdVYAkhwaGilb5CGxX0HSmgnrNKEDuXamNPn
wvyBQ4a8Zb0ndlpOyo7jYzbvZFTfQJmtqUU8YWCmjfhrB36C+62uhvR421FVCwHY
wpKN8zihFMxSy39VWWJO4Q4cr/ZKL5NG/LK+VqMMVu/vVstDSli+X9kIFQnKB8PW
xocrmEAnQX87TZijlnvaRo+m0AMeKQIDAQABAoICAA8DuBrDxgiMqAuvLhS6hLIn
SCw4NoAVyPNwTFQTdk65qi4aHkNZ+DyyuoetfKEcAOZ97tKU/hSYxM/H9S+QqB+O
HtmBc9stJLy8qJ1DQXVDi+xYfMN05M2oW8WLWd1szVVe7Ce8vjUeNE5pYvbSL6hC
STw3a5ibAH0WtSTLTBTfH+HnniKuXjPG4InGXqvv1j+L38+LjGilaEIO+6nX1ejE
ziX09LWfzcAglsM3ZqsN8jvw6Sr1ZWniYC2Tm9aOTRUQsdPC7LpZ//GYL/Vj5bYg
qjcZ8KBCcKe1hW8PDL6oYuOwqR+YdZkAK+MuEQtZeWYiWT10dW2la9gYKe2OZuQ1
7q3zZ6zLP+XP+0N7DRMTTuk2gurBVX7VldzIzvjmW8X+8Q5QO+EAqKr2yordK3S1
uYcKmyL4Nd6rSFjRo0zSqHMNOyKt3b1r3m/eR2W623rT5uTjgNYpiwCNxnxmcjpK
Sq7JzZKz9NLbEKQWsP9gQ3G6pp3XfLtoOHEDkSKMmQxd8mzK6Ja/9iC+JGqRTJN+
STe1vL9L2DC7GnjOH1h2TwLoLtQWSGebf/GBxju0e5pAL0UYWBNjAwcpOoRU9J5J
y9E7sNbbXTmK2rg3B/5VKGQckBWfurg7CjAmHGgz9xxceJQLKvT1O5zHZc+v4TVB
XDZjtz8L2k3wFLDynDY5AoIBAQDm2fFgx4vk+gRFXPoLNN34Jw2fT+xuwD/H7K0e
0Cas0NfyNil/Kbp+rhMHuVXTt86BIY+z8GO4wwn+YdDgihBwobAh2G9T/P6wNm+Q
NcIeRioml8V/CP7lOQONQJ6sLTRYnNLfB96uMFe+13DO/PjFybee5VflfBUrJK1M
DqRLwm9wEIf5p0CWYI/ZJaDNN71B09BB/jdT/e7Ro1hXHlq3W4tKqRDPfuUqwy3H
ocYQ1SUk3oFdSiYFd6PijNkfTnrtyToa0xUL9uGL+De1LfgV+uvqkOduQqnpm/5+
XQC1qbTUjq+4WEsuPjYf2E0WAVFGzwzWcdb0LnMIUJHwPvpLAoIBAQDfsvCZlcFM
nGBk1zUnV3+21CPK+5+X3zLHr/4otQHlGMFL6ZiQManvKMX6a/cT3rG+LvECcXGD
jSsTu7JIt9l8VTpbPaS76htTmQYaAZERitBx1C8zDMuI2O4bjFLUGUX73RyTZdRm
G68IX+7Q7SL8zr/fHjcnk+3yj0L1soAVPC7lY3se7vQ/SCre97E+noP5yOhrpnRt
dij7NYy79xcvUZfc/z0//Ia4JSCcIvv2HO7JZIPzUCVO4sjbUOGsgR9pwwQkwYeP
b5P0MVaPgFnOgo/rz6Uqe+LpeY83SUwc2q8W8bskzTLZEnwSV5bxCY+gIn9KCZSG
8QxuftgIiQDbAoIBAQDQ2oTC5kXulzOd/YxK7z2S8OImLAzf9ha+LaZCplcXKqr0
e4P3hC0xxxN4fXjk3vp5YX+9b9MIqYw1FRIA02gkPmQ3erTd65oQmm88rSY+dYRU
/iKz19OkVnycIsZrR0qAkQFGvrv8I8h+5DMvUTdQ2jrCCwQGnsgYDEqs8OI7mGFx
pcMfXu3UHvCFqMFeaPtUvuk/i1tLJgYWrA2UY+X21V+j4GlREKEMmyCj5/xl5jCA
tr2bRSY49BDVOlCFPl+BGfjzo9z6whU0qRDdXgWA/U7LHOYEn1NSAsuwTzwBHtR3
KdBYm6kI4Ufeb7buHasGwPQAX2X17MAt2ZbvIEsZAoIBAQC4g5dzh5PGhmH4K48b
YU/l1TukzUIJekAfd+ozV4I1nuKppAeEQILD0yTh9zX4vMJtdbiz5DDWapWylCpt
UsBgjsgwxDriCSr7HIhs4QfwqUhf67325MHpoc1dCbS0YBhatDpC1kaI5qLMTJzm
1gL69epLtleWHK2zWjnIAbEmUtr3uMOwczciD3vVKAeZ+BQx72bOjKESPNl2w+fO
jvQfwrR5xEqYQco5j95DC5Q6oAjSM0enZV8wn10/kYpjyKnJieMcEkmnpUgrrpqQ
iTUKYqUlw8OftEopfGwGFT5junmbek57/4nGhTmzw22sac9/LZVC034ghClV5uh4
udDrAoIBAQCJHfBPJmJMT/WtSATTceVDgZiyezWNgH2yLJMqDP6sEuImnLAg2L9M
Yc6LqMcHLj7CyXfy2AEAuYTZwXFSRmVKl6Ycad7sS/hIL1ykvDveRU9VNImexDBq
AJR4GKr6jbRZnBztnRYZTsGA+TcrFc6SwdSPXgz7JQT9uw+JkhLi59m141XBdeRc
NQ/LFgOaxjvRUID81izQaYEyADId7asy+2QVazMDafuALJ23WSUMSXajCXaC6/7N
53RWrOAb+kFRgjuHM8pQkpgnY/Ds0MZxpakFw3Y7PAEL99xyYdR+rE3JOMjPlgr0
LpTt0Xs1OFZxaNpolW5Qis4os7UmmIRV
-----END PRIVATE KEY-----\n"""
class FakeException(Exception):
pass
class FakeClient(object):
user_agent = "Fake"
USER_AGENT = "Fake"
class FakeManager(object):
api = FakeClient()
def list(self):
pass
def get(self, item):
pass
def delete(self, item):
pass
def create(self, *args, **kwargs):
pass
def find(self, *args, **kwargs):
pass
def action(self, item, action_type, body=None):
pass
class FakeLoadBalancerManager(object):
def __init__(self, api=None, *args, **kwargs):
pass
def set_content_caching(self, *args, **kwargs):
pass
class FakeNode(object):
def __init__(self, address=None, port=None, condition=None, weight=None,
status=None, parent=None, type=None, id=None):
if not (address and port):
# This mimics the check that pyrax does on Node instantiation
raise TypeError("You must include an address and "
"a port when creating a node.")
self.address = address
self.port = port
self.condition = condition
self.weight = weight
self.status = status
self.parent = parent
self.type = type
self.id = id
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def update(self):
pass
def delete(self):
pass
class FakeVirtualIP(object):
def __init__(self, address=None, port=None, condition=None,
ipVersion=None, type=None, id=None):
self.address = address
self.port = port
self.condition = condition
self.ipVersion = ipVersion
self.type = type
self.id = id
self.ip_version = ipVersion
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
class FakeLoadBalancerClient(object):
def __init__(self, *args, **kwargs):
self.Node = FakeNode
self.VirtualIP = FakeVirtualIP
pass
def get(self, *args, **kwargs):
pass
def create(self, *args, **kwargs):
pass
class FakeLoadBalancer(object):
def __init__(self, name=None, info=None, *args, **kwargs):
name = name or uuid.uuid4()
info = info or {"fake": "fake"}
self.id = uuid.uuid4()
self.manager = FakeLoadBalancerManager()
self.Node = FakeNode
self.VirtualIP = FakeVirtualIP
self.nodes = []
self.algorithm = "ROUND_ROBIN"
self.session_persistence = "HTTP_COOKIE"
self.connection_logging = False
self.timeout = None
self.httpsRedirect = False
self.protocol = None
self.port = None
self.name = None
self.halfClosed = None
self.content_caching = False
def get(self, *args, **kwargs):
pass
def add_nodes(self, *args, **kwargs):
pass
def add_ssl_termination(self, *args, **kwargs):
pass
def set_error_page(self, *args, **kwargs):
pass
def clear_error_page(self, *args, **kwargs):
pass
def add_access_list(self, *args, **kwargs):
pass
def update(self, *args, **kwargs):
pass
def add_health_monitor(self, *args, **kwargs):
pass
def delete_health_monitor(self, *args, **kwargs):
pass
def delete_ssl_termination(self, *args, **kwargs):
pass
def set_metadata(self, *args, **kwargs):
pass
def delete_metadata(self, *args, **kwargs):
pass
def add_connection_throttle(self, *args, **kwargs):
pass
def delete_connection_throttle(self, *args, **kwargs):
pass
def delete(self, *args, **kwargs):
pass
def get_health_monitor(self, *args, **kwargs):
return {}
def get_metadata(self, *args, **kwargs):
return {}
def get_error_page(self, *args, **kwargs):
pass
def get_connection_throttle(self, *args, **kwargs):
pass
def get_ssl_termination(self, *args, **kwargs):
pass
def get_access_list(self, *args, **kwargs):
pass
class LoadBalancerWithFakeClient(lb.CloudLoadBalancer):
def cloud_lb(self):
return FakeLoadBalancerClient()
def override_resource():
return {
'Rackspace::Cloud::LoadBalancer': LoadBalancerWithFakeClient
}
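# Re-registering the resource type makes Heat instantiate
# LoadBalancerWithFakeClient (and therefore FakeLoadBalancerClient) whenever
# a test template declares a Rackspace::Cloud::LoadBalancer.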
class LoadBalancerTest(common.HeatTestCase):
def setUp(self):
super(LoadBalancerTest, self).setUp()
self.lb_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "fawef",
"Resources": {
self._get_lb_resource_name(): {
"Type": "Rackspace::Cloud::LoadBalancer",
"Properties": {
"name": "test-clb",
"nodes": [{"addresses": ["166.78.103.141"],
"port": 80,
"condition": "ENABLED"}],
"protocol": "HTTP",
"port": 80,
"virtualIps": [
{"type": "PUBLIC", "ipVersion": "IPV6"}],
"algorithm": 'LEAST_CONNECTIONS',
"connectionThrottle": {'maxConnectionRate': 1000},
'timeout': 110,
'contentCaching': 'DISABLED'
}
}
}
}
self.lb_name = 'test-clb'
self.expected_body = {
"nodes": [FakeNode(address=u"166.78.103.141", port=80,
condition=u"ENABLED", type=u"PRIMARY",
weight=1)],
"protocol": u'HTTP',
"port": 80,
"virtual_ips": [FakeVirtualIP(type=u"PUBLIC", ipVersion=u"IPV6")],
"algorithm": u'LEAST_CONNECTIONS',
"connectionThrottle": {'maxConnectionRate': 1000,
'maxConnections': None,
'rateInterval': None,
'minConnections': None},
"connectionLogging": None,
"halfClosed": None,
"healthMonitor": None,
"metadata": None,
"sessionPersistence": None,
"timeout": 110,
"httpsRedirect": False
}
lb.resource_mapping = override_resource
resource._register_class("Rackspace::Cloud::LoadBalancer",
LoadBalancerWithFakeClient)
def _get_lb_resource_name(self):
return "lb-" + str(uuid.uuid4())
def __getattribute__(self, name):
if name == 'expected_body' or name == 'lb_template':
return copy.deepcopy(super(LoadBalancerTest, self)
.__getattribute__(name))
return super(LoadBalancerTest, self).__getattribute__(name)
def _mock_create(self, tmpl, stack, resource_name, lb_name, lb_body):
resource_defns = tmpl.resource_definitions(stack)
rsrc = LoadBalancerWithFakeClient(resource_name,
resource_defns[resource_name],
stack)
fake_lb = FakeLoadBalancer(name=lb_name)
fake_lb.status = 'ACTIVE'
fake_lb.resource_id = 1234
self.m.StubOutWithMock(rsrc.clb, 'create')
rsrc.clb.create(lb_name, **lb_body).AndReturn(fake_lb)
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(
fake_lb)
return (rsrc, fake_lb)
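    # _mock_create only records mox expectations (clb.create and clb.get);
    # each test arms them with self.m.ReplayAll() and checks them with
    # self.m.VerifyAll(), following mox's record/replay/verify cycle.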
def _get_first_resource_name(self, templ):
return next(k for k in templ['Resources'])
def _mock_loadbalancer(self, lb_template, expected_name, expected_body):
t = template_format.parse(json.dumps(lb_template))
self.stack = utils.parse_stack(t, stack_name=utils.random_name())
        rsrc, fake_lb = self._mock_create(self.stack.t, self.stack,
                                          self._get_first_resource_name(
                                              lb_template),
                                          expected_name,
                                          expected_body)
return (rsrc, fake_lb)
def _set_template(self, templ, **kwargs):
for k, v in six.iteritems(kwargs):
templ['Resources'][self._get_first_resource_name(templ)][
'Properties'][k] = v
return templ
def _set_expected(self, expected, **kwargs):
for k, v in six.iteritems(kwargs):
expected[k] = v
return expected
def test_process_node(self):
nodes = [{'addresses': ['1234'], 'port': 80, 'enabled': True},
{'addresses': ['4567', '8901', '8903'], 'port': 80,
'enabled': True},
{'addresses': [], 'port': 80, 'enabled': True}]
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
expected_nodes = [{'address': '1234', 'port': 80, 'enabled': True},
{'address': '4567', 'port': 80, 'enabled': True},
{'address': '8901', 'port': 80, 'enabled': True},
{'address': '8903', 'port': 80, 'enabled': True}]
self.assertEqual(expected_nodes, list(rsrc._process_nodes(nodes)))
def test_nodeless(self):
"""It's possible to create a LoadBalancer resource with no nodes."""
template = self._set_template(self.lb_template,
nodes=[])
expected_body = copy.deepcopy(self.expected_body)
expected_body['nodes'] = []
rsrc, fake_lb = self._mock_loadbalancer(
template, self.lb_name, expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
def test_alter_properties(self):
# test alter properties functions
template = self._set_template(self.lb_template,
sessionPersistence='HTTP_COOKIE',
connectionLogging=True,
metadata={'yolo': 'heeyyy_gurl'})
expected = self._set_expected(self.expected_body,
sessionPersistence={
'persistenceType': 'HTTP_COOKIE'},
connectionLogging={'enabled': True},
metadata=[
{'key': 'yolo',
'value': 'heeyyy_gurl'}])
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
def test_validate_vip(self):
snippet = {
"nodes": [],
"protocol": 'HTTP',
"port": 80,
"halfClosed": None,
"algorithm": u'LEAST_CONNECTIONS',
"virtualIps": [{"id": "1234"}]
}
stack = mock.Mock()
stack.db_resource_get.return_value = None
stack.has_cache_data.return_value = False
# happy path
resdef = rsrc_defn.ResourceDefinition("testvip",
lb.CloudLoadBalancer,
properties=snippet)
rsrc = lb.CloudLoadBalancer("testvip", resdef, stack)
self.assertIsNone(rsrc.validate())
# make sure the vip id prop is exclusive
snippet["virtualIps"][0]["type"] = "PUBLIC"
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertIn("Cannot specify type or version", str(exc))
# make sure you have to specify type and version if no id
snippet["virtualIps"] = [{}]
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertIn("Must specify VIP type and version", str(exc))
def test_validate_half_closed(self):
# test failure (invalid protocol)
template = self._set_template(self.lb_template, halfClosed=True)
expected = self._set_expected(self.expected_body, halfClosed=True)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertIn('The halfClosed property is only available for the TCP'
' or TCP_CLIENT_FIRST protocols', str(exc))
# test TCP protocol
template = self._set_template(template, protocol='TCP')
expected = self._set_expected(expected, protocol='TCP')
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
self.assertIsNone(rsrc.validate())
# test TCP_CLIENT_FIRST protocol
template = self._set_template(template,
protocol='TCP_CLIENT_FIRST')
expected = self._set_expected(expected,
protocol='TCP_CLIENT_FIRST')
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
self.assertIsNone(rsrc.validate())
def test_validate_health_monitor(self):
# test connect success
health_monitor = {
'type': 'CONNECT',
'attemptsBeforeDeactivation': 1,
'delay': 1,
'timeout': 1
}
template = self._set_template(self.lb_template,
healthMonitor=health_monitor)
expected = self._set_expected(self.expected_body,
healthMonitor=health_monitor)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
self.assertIsNone(rsrc.validate())
# test connect failure
# bodyRegex is only valid for type 'HTTP(S)'
health_monitor['bodyRegex'] = 'dfawefawe'
template = self._set_template(template,
healthMonitor=health_monitor)
expected = self._set_expected(expected,
healthMonitor=health_monitor)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertIn('Unknown Property bodyRegex', str(exc))
# test http fields
health_monitor['type'] = 'HTTP'
health_monitor['bodyRegex'] = 'bodyRegex'
health_monitor['statusRegex'] = 'statusRegex'
health_monitor['hostHeader'] = 'hostHeader'
health_monitor['path'] = 'path'
template = self._set_template(template,
healthMonitor=health_monitor)
expected = self._set_expected(expected,
healthMonitor=health_monitor)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
self.assertIsNone(rsrc.validate())
def test_validate_ssl_termination(self):
ssl_termination = {
'privatekey': 'ewfawe',
'intermediateCertificate': 'fwaefawe',
'secureTrafficOnly': True
}
# test ssl termination enabled without required fields failure
template = self._set_template(self.lb_template,
sslTermination=ssl_termination)
expected = self._set_expected(self.expected_body,
sslTermination=ssl_termination)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)
self.assertIn("Property certificate not assigned", six.text_type(exc))
ssl_termination['certificate'] = 'dfaewfwef'
template = self._set_template(template,
sslTermination=ssl_termination)
expected = self._set_expected(expected,
sslTermination=ssl_termination)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
self.assertIsNone(rsrc.validate())
def test_ssl_termination_unstripped_certificates(self):
ssl_termination_template = {
'securePort': 443,
'privatekey': 'afwefawe',
'certificate': ' \nfawefwea\n ',
'intermediateCertificate': "\n\nintermediate_certificate\n",
'secureTrafficOnly': False
}
ssl_termination_api = copy.deepcopy(ssl_termination_template)
template = self._set_template(self.lb_template,
sslTermination=ssl_termination_template)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
fake_lb.get_ssl_termination().AndReturn({})
fake_lb.get_ssl_termination().AndReturn({
'securePort': 443,
'certificate': 'fawefwea',
'intermediateCertificate': "intermediate_certificate",
'secureTrafficOnly': False,
'enabled': True,
})
self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
fake_lb.add_ssl_termination(**ssl_termination_api)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
def test_ssl_termination_intermediateCertificate_None(self):
ssl_termination_template = {
'securePort': 443,
'privatekey': 'afwefawe',
'certificate': ' \nfawefwea\n ',
'intermediateCertificate': None,
'secureTrafficOnly': False
}
template = self._set_template(self.lb_template,
sslTermination=ssl_termination_template)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
fake_lb.get_ssl_termination().AndReturn({})
fake_lb.get_ssl_termination().AndReturn({
'securePort': 443,
'certificate': 'fawefwea',
'secureTrafficOnly': False,
'enabled': True,
})
self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
add_ssl_termination_args = {
'securePort': 443,
'privatekey': 'afwefawe',
'certificate': ' \nfawefwea\n ',
'intermediateCertificate': '',
'secureTrafficOnly': False
}
fake_lb.add_ssl_termination(**add_ssl_termination_args)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
def test_post_creation_access_list(self):
access_list = [{"address": '192.168.1.1/0',
'type': 'ALLOW'},
{'address': '172.165.3.43',
'type': 'DENY'}]
api_access_list = [{"address": '192.168.1.1/0', 'id': 1234,
'type': 'ALLOW'},
{'address': '172.165.3.43', 'id': 3422,
'type': 'DENY'}]
template = self._set_template(self.lb_template,
accessList=access_list)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.StubOutWithMock(fake_lb, 'get_access_list')
fake_lb.get_access_list().AndReturn([])
fake_lb.get_access_list().AndReturn(api_access_list)
self.m.StubOutWithMock(fake_lb, 'add_access_list')
fake_lb.add_access_list(access_list)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
def test_ref_id(self):
"""The Reference ID of the resource is the resource ID."""
template = self._set_template(self.lb_template)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
self.assertEqual(rsrc.resource_id, rsrc.FnGetRefId())
def test_post_creation_error_page(self):
error_page = "REALLY BIG ERROR"
template = self._set_template(self.lb_template,
errorPage=error_page)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.StubOutWithMock(fake_lb, 'get_error_page')
fake_lb.get_error_page().AndReturn({u'errorpage': {u'content': u''}})
fake_lb.get_error_page().AndReturn(
{u'errorpage': {u'content': error_page}})
self.m.StubOutWithMock(fake_lb, 'set_error_page')
fake_lb.set_error_page(error_page)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
def test_post_creation_ssl_termination(self):
ssl_termination_template = {
'securePort': 443,
'privatekey': 'afwefawe',
'certificate': 'fawefwea',
'intermediateCertificate': "intermediate_certificate",
'secureTrafficOnly': False
}
ssl_termination_api = copy.deepcopy(ssl_termination_template)
template = self._set_template(self.lb_template,
sslTermination=ssl_termination_template)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
fake_lb.get_ssl_termination().AndReturn({})
fake_lb.get_ssl_termination().AndReturn({
'securePort': 443,
'certificate': 'fawefwea',
'intermediateCertificate': "intermediate_certificate",
'secureTrafficOnly': False,
'enabled': True,
})
self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
fake_lb.add_ssl_termination(**ssl_termination_api)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
def test_post_creation_content_caching(self):
template = self._set_template(self.lb_template,
contentCaching='ENABLED')
rsrc = self._mock_loadbalancer(template, self.lb_name,
self.expected_body)[0]
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
def test_check(self):
stack = mock.Mock()
stack.db_resource_get.return_value = None
stack.has_cache_data.return_value = False
resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)
loadbalancer = lb.CloudLoadBalancer("test", resdef, stack)
loadbalancer._add_event = mock.Mock()
mock_cloud_lb = mock.Mock()
mock_get = mock.Mock(return_value=mock_cloud_lb)
loadbalancer.clb.get = mock_get
mock_cloud_lb.status = 'ACTIVE'
scheduler.TaskRunner(loadbalancer.check)()
self.assertEqual('CHECK', loadbalancer.action)
self.assertEqual('COMPLETE', loadbalancer.status)
mock_cloud_lb.status = 'FOOBAR'
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(loadbalancer.check))
self.assertEqual('CHECK', loadbalancer.action)
self.assertEqual('FAILED', loadbalancer.status)
self.assertIn('FOOBAR', str(exc))
mock_get.side_effect = lb.NotFound('boom')
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(loadbalancer.check))
self.assertEqual('CHECK', loadbalancer.action)
self.assertEqual('FAILED', loadbalancer.status)
self.assertIn('boom', str(exc))
def test_update_add_node_by_address(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
fake_lb.nodes = self.expected_body['nodes']
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
expected_ip = '172.168.1.4'
update_template['Properties']['nodes'] = [
{"addresses": ["166.78.103.141"],
"port": 80,
"condition": "ENABLED",
"type": "PRIMARY",
"weight": 1},
{"addresses": [expected_ip],
"port": 80,
"condition": "ENABLED",
"type": "PRIMARY",
"weight": 1}]
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.nodes = [
FakeNode(address=u"172.168.1.4", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
FakeNode(address=u"166.78.103.141", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
]
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb, 'add_nodes')
fake_lb.add_nodes([
fake_lb.Node(address=expected_ip,
port=80,
condition='ENABLED',
type="PRIMARY", weight=1)])
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_resolve_attr_noid(self):
stack = mock.Mock()
stack.db_resource_get.return_value = None
stack.has_cache_data.return_value = False
resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)
lbres = lb.CloudLoadBalancer("test", resdef, stack)
self.assertIsNone(lbres._resolve_attribute("PublicIp"))
def test_resolve_attr_virtualips(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
fake_lb.virtual_ips = [FakeVirtualIP(address='1.2.3.4',
type='PUBLIC',
ipVersion="IPv6",
id='test-id')]
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
expected = [{
'ip_version': 'IPv6',
'type': 'PUBLIC',
'id': 'test-id',
'address': '1.2.3.4'}]
self.m.ReplayAll()
self.assertEqual(expected, rsrc._resolve_attribute("virtualIps"))
self.m.VerifyAll()
def test_update_nodes_immutable(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
current_nodes = [
FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
FakeNode(address=u"3.3.3.3", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1)
]
fake_lb.nodes = current_nodes
fake_lb.tracker = "fake_lb"
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
expected_ip = '4.4.4.4'
update_template['Properties']['nodes'] = [
{"addresses": ["1.1.1.1"], "port": 80, "condition": "ENABLED",
"type": "PRIMARY", "weight": 1},
{"addresses": ["2.2.2.2"], "port": 80, "condition": "DISABLED",
"type": "PRIMARY", "weight": 1},
{"addresses": [expected_ip], "port": 80, "condition": "ENABLED",
"type": "PRIMARY", "weight": 1}
]
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.status = "PENDING_UPDATE"
fake_lb1.tracker = "fake_lb1"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb) # ACTIVE
# Add node `expected_ip`
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1) # PENDING_UPDATE
fake_lb2 = copy.deepcopy(fake_lb1)
fake_lb2.status = "ACTIVE"
fake_lb2.nodes = [
FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
FakeNode(address=u"3.3.3.3", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
]
fake_lb2.tracker = "fake_lb2"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2) # ACTIVE
# Delete node 3.3.3.3
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1) # PENDING_UPDATE
fake_lb3 = copy.deepcopy(fake_lb2)
fake_lb3.status = "ACTIVE"
fake_lb3.nodes = [
FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1)
]
fake_lb3.tracker = "fake_lb3"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3) # ACTIVE
# Update node 2.2.2.2
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1) # PENDING_UPDATE
fake_lb4 = copy.deepcopy(fake_lb3)
fake_lb4.status = "ACTIVE"
fake_lb4.nodes = [
FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1),
FakeNode(address=u"2.2.2.2", port=80, condition=u"DISABLED",
type="PRIMARY", weight=1),
FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED",
type="PRIMARY", weight=1)
]
fake_lb4.tracker = "fake_lb4"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb4) # ACTIVE
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_pending_update_status(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['name'] = "updated_name"
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.name = "updated_name"
fake_lb1.status = "PENDING_UPDATE" # lb is immutable
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.name = "updated_name"
fake_lb2.status = "ACTIVE"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_immutable_exception(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['name'] = "updated_name"
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb) # initial iteration
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb) # immutable
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.name = "updated_name"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1) # after update
self.m.StubOutWithMock(fake_lb, 'update')
msg = ("Load Balancer '%s' has a status of 'PENDING_UPDATE' and "
"is considered immutable." % rsrc.resource_id)
fake_lb.update(name="updated_name").AndRaise(Exception(msg))
fake_lb.update(name="updated_name").AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_immutable_exception(self):
access_list = [{"address": '192.168.1.1/0',
'type': 'ALLOW'},
{'address': '172.165.3.43',
'type': 'DENY'}]
template = self._set_template(self.lb_template,
accessList=access_list)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.StubOutWithMock(fake_lb, 'get_access_list')
fake_lb.get_access_list().AndReturn({})
fake_lb.get_access_list().AndReturn({})
fake_lb.get_access_list().AndReturn(access_list)
self.m.StubOutWithMock(fake_lb, 'add_access_list')
msg = ("Load Balancer '%s' has a status of 'PENDING_UPDATE' and "
"is considered immutable." % rsrc.resource_id)
fake_lb.add_access_list(access_list).AndRaise(Exception(msg))
fake_lb.add_access_list(access_list)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
def test_update_lb_name(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['name'] = "updated_name"
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.name = "updated_name"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb, 'update')
fake_lb.update(name="updated_name")
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_lb_multiple(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['name'] = "updated_name"
update_template['Properties']['algorithm'] = "RANDOM"
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.name = "updated_name"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.algorithm = "RANDOM"
fake_lb2.name = "updated_name"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
self.m.StubOutWithMock(fake_lb, 'update')
fake_lb.update(name="updated_name", algorithm="RANDOM")
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_lb_algorithm(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['algorithm'] = "RANDOM"
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.algorithm = "ROUND_ROBIN"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb1, 'update')
fake_lb1.update(algorithm="RANDOM")
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.algorithm = "RANDOM"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_lb_protocol(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['protocol'] = "IMAPS"
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.protocol = "IMAPS"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb, 'update')
fake_lb.update(protocol="IMAPS")
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_lb_redirect(self):
template = self._set_template(
self.lb_template, protocol="HTTPS")
expected = self._set_expected(
self.expected_body, protocol="HTTPS")
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['httpsRedirect'] = True
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.httpsRedirect = True
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb, 'update')
fake_lb.update(httpsRedirect=True)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_lb_redirect_https(self):
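        # httpsRedirect is valid at create time when the protocol is
        # already HTTPS, so no post-creation update is required.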
template = self._set_template(
self.lb_template, protocol="HTTPS", httpsRedirect=True)
expected = self._set_expected(
self.expected_body, protocol="HTTPS", httpsRedirect=True)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_lb_redirect_HTTP_with_SSL_term(self):
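        # With an HTTP protocol, httpsRedirect is only allowed alongside
        # secure-traffic-only SSL termination; the redirect is applied
        # post-creation, so the create body carries httpsRedirect=False.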
ssl_termination_template = {
'privatekey': private_key,
'intermediateCertificate': 'fwaefawe',
'secureTrafficOnly': True,
'securePort': 443,
'certificate': cert
}
ssl_termination_api = copy.deepcopy(ssl_termination_template)
ssl_termination_api['enabled'] = True
del ssl_termination_api['privatekey']
template = self._set_template(
self.lb_template, sslTermination=ssl_termination_template,
protocol="HTTP", httpsRedirect=True)
expected = self._set_expected(
self.expected_body, protocol="HTTP", httpsRedirect=False)
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected)
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'create')
rsrc.clb.create(self.lb_name, **expected).AndReturn(fake_lb)
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.httpsRedirect = True
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
fake_lb.get_ssl_termination().AndReturn({})
fake_lb.get_ssl_termination().AndReturn(ssl_termination_api)
self.m.StubOutWithMock(fake_lb1, 'get_ssl_termination')
fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)
fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)
fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
def test_update_lb_half_closed(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['halfClosed'] = True
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.halfClosed = True
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb, 'update')
fake_lb.update(halfClosed=True)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_lb_port(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['port'] = 1234
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.port = 1234
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb, 'update')
fake_lb.update(port=1234)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_lb_timeout(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['timeout'] = 120
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.timeout = 120
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb, 'update')
fake_lb.update(timeout=120)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_health_monitor_add(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['healthMonitor'] = {
'type': "HTTP", 'delay': 10, 'timeout': 10,
'attemptsBeforeDeactivation': 4, 'path': "/",
'statusRegex': "^[234][0-9][0-9]$", 'bodyRegex': ".* testing .*",
'hostHeader': "example.com"}
self.m.StubOutWithMock(fake_lb, 'get_health_monitor')
fake_lb.get_health_monitor().AndReturn({})
fake_lb.get_health_monitor().AndReturn(
{'type': "HTTP", 'delay': 10, 'timeout': 10,
'attemptsBeforeDeactivation': 4, 'path': "/",
'statusRegex': "^[234][0-9][0-9]$", 'bodyRegex': ".* testing .*",
'hostHeader': "example.com"})
self.m.StubOutWithMock(fake_lb, 'add_health_monitor')
fake_lb.add_health_monitor(
attemptsBeforeDeactivation=4, bodyRegex='.* testing .*', delay=10,
hostHeader='example.com', path='/',
statusRegex='^[234][0-9][0-9]$', timeout=10, type='HTTP')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_health_monitor_delete(self):
template = copy.deepcopy(self.lb_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
hm = {'type': "HTTP", 'delay': 10, 'timeout': 10,
'attemptsBeforeDeactivation': 4, 'path': "/",
'statusRegex': "^[234][0-9][0-9]$", 'bodyRegex': ".* testing .*",
'hostHeader': "example.com"}
template['Resources'][lb_name]['Properties']['healthMonitor'] = hm
expected_body = copy.deepcopy(self.expected_body)
expected_body['healthMonitor'] = hm
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
del update_template['Properties']['healthMonitor']
self.m.StubOutWithMock(fake_lb, 'get_health_monitor')
fake_lb.get_health_monitor().AndReturn(
{'type': "HTTP", 'delay': 10, 'timeout': 10,
'attemptsBeforeDeactivation': 4, 'path': "/",
'statusRegex': "^[234][0-9][0-9]$", 'bodyRegex': ".* testing .*",
'hostHeader': "example.com"})
fake_lb.get_health_monitor().AndReturn({})
self.m.StubOutWithMock(fake_lb, 'delete_health_monitor')
fake_lb.delete_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_session_persistence_add(self):
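        # No extra stubs are needed: the fake LB records session
        # persistence as a plain attribute, asserted on directly below.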
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['sessionPersistence'] = 'SOURCE_IP'
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('SOURCE_IP', fake_lb.session_persistence)
self.m.VerifyAll()
def test_update_session_persistence_delete(self):
template = copy.deepcopy(self.lb_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
template['Resources'][lb_name]['Properties'][
'sessionPersistence'] = "SOURCE_IP"
expected_body = copy.deepcopy(self.expected_body)
expected_body['sessionPersistence'] = {'persistenceType': "SOURCE_IP"}
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
del update_template['Properties']['sessionPersistence']
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('', fake_lb.session_persistence)
self.m.VerifyAll()
def test_update_ssl_termination_add(self):
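        # The mocked get_ssl_termination() response omits 'privatekey',
        # mirroring the real API, which never returns the private key.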
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['sslTermination'] = {
'securePort': 443, 'privatekey': private_key, 'certificate': cert,
'secureTrafficOnly': False, 'intermediateCertificate': ''}
self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
fake_lb.get_ssl_termination().AndReturn({})
fake_lb.get_ssl_termination().AndReturn({
'securePort': 443, 'certificate': cert,
'secureTrafficOnly': False, 'enabled': True})
self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
fake_lb.add_ssl_termination(
securePort=443, privatekey=private_key, certificate=cert,
secureTrafficOnly=False, intermediateCertificate='')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_ssl_termination_delete(self):
template = copy.deepcopy(self.lb_template)
ssl_termination_template = {
'securePort': 443, 'privatekey': private_key, 'certificate': cert,
'intermediateCertificate': '', 'secureTrafficOnly': False}
ssl_termination_api = copy.deepcopy(ssl_termination_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
template['Resources'][lb_name]['Properties']['sslTermination'] = (
ssl_termination_template)
# The SSL termination config is done post-creation, so no need
# to modify self.expected_body
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
fake_lb.get_ssl_termination().AndReturn({})
self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
fake_lb.add_ssl_termination(**ssl_termination_api)
fake_lb.get_ssl_termination().AndReturn({
'securePort': 443, 'certificate': cert,
'secureTrafficOnly': False, 'enabled': True})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.UnsetStubs()
update_template = copy.deepcopy(rsrc.t)
del update_template['Properties']['sslTermination']
self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)
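        # MultipleTimes() lets every status poll during this update share
        # one get() expectation instead of enumerating each call.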
self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
fake_lb.get_ssl_termination().AndReturn({
'securePort': 443, 'certificate': cert,
'secureTrafficOnly': False})
self.m.StubOutWithMock(fake_lb, 'delete_ssl_termination')
fake_lb.delete_ssl_termination()
fake_lb.get_ssl_termination().AndReturn({})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_metadata_add(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['metadata'] = {'a': 1, 'b': 2}
self.m.StubOutWithMock(fake_lb, 'get_metadata')
fake_lb.get_metadata().AndReturn({})
fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})
self.m.StubOutWithMock(fake_lb, 'set_metadata')
fake_lb.set_metadata({'a': 1, 'b': 2})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_metadata_delete(self):
template = copy.deepcopy(self.lb_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
template['Resources'][lb_name]['Properties']['metadata'] = {
'a': 1, 'b': 2}
expected_body = copy.deepcopy(self.expected_body)
expected_body['metadata'] = mox.SameElementsAs(
[{'key': 'a', 'value': 1},
{'key': 'b', 'value': 2}])
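        # Metadata is sent as a list of {'key': ..., 'value': ...} dicts in
        # no guaranteed order, hence the mox.SameElementsAs matcher.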
rsrc, fake_lb = self._mock_loadbalancer(
template, self.lb_name, expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
del update_template['Properties']['metadata']
self.m.StubOutWithMock(fake_lb, 'get_metadata')
fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})
fake_lb.get_metadata().AndReturn({})
self.m.StubOutWithMock(fake_lb, 'delete_metadata')
fake_lb.delete_metadata()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_errorpage_add(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error_page = (
'<html><head><title>Service Unavailable</title></head><body><h2>'
'Service Unavailable</h2>The service is unavailable</body></html>')
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['errorPage'] = error_page
self.m.StubOutWithMock(fake_lb, 'get_error_page')
fake_lb.get_error_page().AndReturn(
{'errorpage': {'content': 'foo'}})
fake_lb.get_error_page().AndReturn(
{'errorpage': {'content': error_page}})
self.m.StubOutWithMock(fake_lb, 'set_error_page')
fake_lb.set_error_page(error_page)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_errorpage_delete(self):
template = copy.deepcopy(self.lb_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
error_page = (
'<html><head><title>Service Unavailable</title></head><body><h2>'
'Service Unavailable</h2>The service is unavailable</body></html>')
template['Resources'][lb_name]['Properties']['errorPage'] = error_page
# The error page config is done post-creation, so no need to
# modify self.expected_body
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.StubOutWithMock(fake_lb, 'get_error_page')
fake_lb.get_error_page().AndReturn({})
self.m.StubOutWithMock(fake_lb, 'set_error_page')
fake_lb.set_error_page(error_page)
        fake_lb.get_error_page().AndReturn(
            {'errorpage': {'content': error_page}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.UnsetStubs()
update_template = copy.deepcopy(rsrc.t)
del update_template['Properties']['errorPage']
self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)
self.m.StubOutWithMock(fake_lb, 'clear_error_page')
fake_lb.clear_error_page()
self.m.StubOutWithMock(fake_lb, 'get_error_page')
fake_lb.get_error_page().AndReturn(
{'errorpage': {'content': error_page}})
fake_lb.get_error_page().AndReturn({'errorpage': {'content': ""}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_connection_logging_enable(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['connectionLogging'] = True
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertTrue(fake_lb.connection_logging)
self.m.VerifyAll()
def test_update_connection_logging_delete(self):
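        # The updated template drops connectionLogging entirely; the
        # resource reads before/after snapshots via get() and must end up
        # with logging disabled.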
template = copy.deepcopy(self.lb_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
template['Resources'][lb_name]['Properties'][
'connectionLogging'] = True
expected_body = copy.deepcopy(self.expected_body)
expected_body['connectionLogging'] = {'enabled': True}
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.connection_logging = True
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.connection_logging = False
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
update_template = copy.deepcopy(rsrc.t)
del update_template['Properties']['connectionLogging']
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertFalse(fake_lb.connection_logging)
self.m.VerifyAll()
def test_update_connection_logging_disable(self):
template = copy.deepcopy(self.lb_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
template['Resources'][lb_name]['Properties'][
'connectionLogging'] = True
expected_body = copy.deepcopy(self.expected_body)
expected_body['connectionLogging'] = {'enabled': True}
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['connectionLogging'] = False
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertFalse(fake_lb.connection_logging)
self.m.VerifyAll()
def test_update_connection_throttle_add(self):
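        # Throttle settings not named in the template are filled in as
        # None, so add_connection_throttle() is expected with explicit
        # None values for the unset keys.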
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['connectionThrottle'] = {
'maxConnections': 1000}
self.m.StubOutWithMock(fake_lb, 'add_connection_throttle')
self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')
fake_lb.get_connection_throttle().AndReturn(
{'maxConnectionRate': None, 'minConnections': None,
'rateInterval': None, 'maxConnections': 100})
fake_lb.add_connection_throttle(
maxConnections=1000, maxConnectionRate=None, minConnections=None,
rateInterval=None)
fake_lb.get_connection_throttle().AndReturn(
{'maxConnectionRate': None, 'minConnections': None,
'rateInterval': None, 'maxConnections': 1000})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_connection_throttle_delete(self):
template = copy.deepcopy(self.lb_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
template['Resources'][lb_name]['Properties'][
'connectionThrottle'] = {'maxConnections': 1000}
expected_body = copy.deepcopy(self.expected_body)
expected_body['connectionThrottle'] = {
'maxConnections': 1000, 'maxConnectionRate': None,
'rateInterval': None, 'minConnections': None}
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
del update_template['Properties']['connectionThrottle']
self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')
fake_lb.get_connection_throttle().AndReturn({
'maxConnections': 1000, 'maxConnectionRate': None,
'rateInterval': None, 'minConnections': None})
self.m.StubOutWithMock(fake_lb, 'delete_connection_throttle')
fake_lb.delete_connection_throttle()
fake_lb.get_connection_throttle().AndReturn({})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_content_caching_enable(self):
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['contentCaching'] = 'ENABLED'
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.content_caching = False
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.content_caching = True
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_content_caching_deleted(self):
template = copy.deepcopy(self.lb_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
template['Resources'][lb_name]['Properties'][
'contentCaching'] = 'ENABLED'
# Enabling the content cache is done post-creation, so no need
# to modify self.expected_body
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
del update_template['Properties']['contentCaching']
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.content_caching = True
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.content_caching = False
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_content_caching_disable(self):
template = copy.deepcopy(self.lb_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
template['Resources'][lb_name]['Properties'][
'contentCaching'] = 'ENABLED'
# Enabling the content cache is done post-creation, so no need
# to modify self.expected_body
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['contentCaching'] = 'DISABLED'
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.content_caching = True
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.content_caching = False
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
template = self._set_template(self.lb_template,
contentCaching='ENABLED')
rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_immutable(self):
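        # An "immutable" error from delete() is tolerated; deletion is
        # considered complete once get() raises NotFound.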
template = self._set_template(self.lb_template,
contentCaching='ENABLED')
rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))
self.m.StubOutWithMock(fake_lb, 'delete')
fake_lb.delete().AndRaise(Exception('immutable'))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_non_immutable_exc(self):
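        # Any other exception from delete() must surface as a
        # ResourceFailure rather than being retried.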
template = self._set_template(self.lb_template,
contentCaching='ENABLED')
rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
self.m.StubOutWithMock(fake_lb, 'delete')
fake_lb.delete().AndRaise(FakeException())
self.m.ReplayAll()
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertIn('FakeException', six.text_type(exc))
self.m.VerifyAll()
def test_delete_states(self):
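        # Deletion polls through ACTIVE -> PENDING_DELETE -> DELETED before
        # the resource reports DELETE COMPLETE.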
template = self._set_template(self.lb_template,
contentCaching='ENABLED')
rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,
self.expected_body)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.m.UnsetStubs()
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb3 = copy.deepcopy(fake_lb)
self.m.StubOutWithMock(rsrc.clb, 'get')
fake_lb1.status = 'ACTIVE'
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
fake_lb2.status = 'PENDING_DELETE'
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
fake_lb3.status = 'DELETED'
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_redir(self):
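        # httpsRedirect validates for protocol HTTPS outright, and for
        # HTTP only when paired with secure-traffic-only SSL termination
        # on port 443.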
mock_stack = mock.Mock()
mock_stack.db_resource_get.return_value = None
mock_stack.has_cache_data.return_value = False
props = {'httpsRedirect': True,
'protocol': 'HTTPS',
'port': 443,
'nodes': [],
'virtualIps': [{'id': '1234'}]}
mock_resdef = rsrc_defn.ResourceDefinition("test_lb",
LoadBalancerWithFakeClient,
properties=props)
mock_lb = lb.CloudLoadBalancer("test", mock_resdef, mock_stack)
self.assertIsNone(mock_lb.validate())
props['protocol'] = 'HTTP'
props['sslTermination'] = {
'secureTrafficOnly': True,
'securePort': 443,
'privatekey': "bobloblaw",
'certificate': 'mycert'
}
mock_resdef = rsrc_defn.ResourceDefinition("test_lb_2",
LoadBalancerWithFakeClient,
properties=props)
mock_lb = lb.CloudLoadBalancer("test_2", mock_resdef, mock_stack)
self.assertIsNone(mock_lb.validate())
def test_invalid_redir_proto(self):
mock_stack = mock.Mock()
mock_stack.db_resource_get.return_value = None
mock_stack.has_cache_data.return_value = False
props = {'httpsRedirect': True,
'protocol': 'TCP',
'port': 1234,
'nodes': [],
'virtualIps': [{'id': '1234'}]}
mock_resdef = rsrc_defn.ResourceDefinition("test_lb",
LoadBalancerWithFakeClient,
properties=props)
mock_lb = lb.CloudLoadBalancer("test", mock_resdef, mock_stack)
ex = self.assertRaises(exception.StackValidationFailed,
mock_lb.validate)
self.assertIn("HTTPS redirect is only available", six.text_type(ex))
def test_invalid_redir_ssl(self):
mock_stack = mock.Mock()
mock_stack.db_resource_get.return_value = None
mock_stack.has_cache_data.return_value = False
props = {'httpsRedirect': True,
'protocol': 'HTTP',
'port': 1234,
'nodes': [],
'virtualIps': [{'id': '1234'}]}
mock_resdef = rsrc_defn.ResourceDefinition("test_lb",
LoadBalancerWithFakeClient,
properties=props)
mock_lb = lb.CloudLoadBalancer("test", mock_resdef, mock_stack)
ex = self.assertRaises(exception.StackValidationFailed,
mock_lb.validate)
self.assertIn("HTTPS redirect is only available", six.text_type(ex))
props['sslTermination'] = {
'secureTrafficOnly': False,
'securePort': 443,
'privatekey': "bobloblaw",
'certificate': 'mycert'
}
mock_lb = lb.CloudLoadBalancer("test", mock_resdef, mock_stack)
ex = self.assertRaises(exception.StackValidationFailed,
mock_lb.validate)
self.assertIn("HTTPS redirect is only available", six.text_type(ex))
props['sslTermination'] = {
'secureTrafficOnly': True,
'securePort': 1234,
'privatekey': "bobloblaw",
'certificate': 'mycert'
}
mock_lb = lb.CloudLoadBalancer("test", mock_resdef, mock_stack)
ex = self.assertRaises(exception.StackValidationFailed,
mock_lb.validate)
self.assertIn("HTTPS redirect is only available", six.text_type(ex))
def test_update_nodes_condition_draining(self):
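        # DRAINING is a legal condition both for the existing node and for
        # the one being added.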
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
fake_lb.nodes = self.expected_body['nodes']
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
expected_ip = '172.168.1.4'
update_template['Properties']['nodes'] = [
{"addresses": ["166.78.103.141"],
"port": 80,
"condition": "DRAINING",
"type": "PRIMARY",
"weight": 1},
{"addresses": [expected_ip],
"port": 80,
"condition": "DRAINING",
"type": "PRIMARY",
"weight": 1}]
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
fake_lb1 = copy.deepcopy(fake_lb)
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb1, 'add_nodes')
fake_lb1.add_nodes([
fake_lb1.Node(address=expected_ip,
port=80,
condition='DRAINING',
type="PRIMARY", weight=1)])
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.nodes = [
FakeNode(address=u"166.78.103.141", port=80,
condition=u"DRAINING", type="PRIMARY", weight=1),
FakeNode(address=u"172.168.1.4", port=80,
condition=u"DRAINING", type="PRIMARY", weight=1),
]
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_nodes_add_same_address_different_port(self):
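        # Node identity is (address, port): reusing an address on a new
        # port is an add, not an update of the existing node.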
rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
self.lb_name,
self.expected_body)
fake_lb.nodes = self.expected_body['nodes']
fake_lb.tracker = "fake_lb"
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['nodes'] = [
{"addresses": ["166.78.103.141"],
"port": 80,
"condition": "ENABLED",
"type": "PRIMARY",
"weight": 1},
{"addresses": ["166.78.103.141"],
"port": 81,
"condition": "ENABLED",
"type": "PRIMARY",
"weight": 1}]
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
fake_lb1 = copy.deepcopy(fake_lb)
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb1, 'add_nodes')
fake_lb1.add_nodes([
fake_lb1.Node(address="166.78.103.141",
port=81,
condition='ENABLED',
type="PRIMARY", weight=1)])
fake_lb1.tracker = "fake_lb1"
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.nodes = [
FakeNode(address=u"166.78.103.141", port=80,
condition=u"ENABLED", type="PRIMARY", weight=1),
FakeNode(address=u"166.78.103.141", port=81,
condition=u"ENABLED", type="PRIMARY", weight=1),
]
fake_lb2.tracker = "fake_lb2"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update_nodes_defaults(self):
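        # Omitted node fields fall back to their defaults (PRIMARY,
        # ENABLED, weight 1), so the update is a no-op; add_nodes is
        # stubbed with no recorded expectation and must never be called.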
template = copy.deepcopy(self.lb_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
tmpl_node = template['Resources'][lb_name]['Properties']['nodes'][0]
tmpl_node['type'] = "PRIMARY"
tmpl_node['condition'] = "ENABLED"
tmpl_node['weight'] = 1
expected_body = copy.deepcopy(self.expected_body)
expected_body['nodes'] = [FakeNode(address=u"166.78.103.141", port=80,
condition=u"ENABLED",
type="PRIMARY", weight=1)]
rsrc, fake_lb = self._mock_loadbalancer(template,
self.lb_name,
expected_body)
fake_lb.nodes = self.expected_body['nodes']
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['nodes'] = [
{"addresses": ["166.78.103.141"],
"port": 80}]
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
fake_lb1 = copy.deepcopy(fake_lb)
rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb1)
self.m.StubOutWithMock(fake_lb1, 'add_nodes')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
|
normal
|
{
"blob_id": "2ab303a2f36cdd64e2119856312dd5e38ee728d6",
"index": 9632,
"step-1": "<mask token>\n\n\nclass LoadBalancerTest(common.HeatTestCase):\n\n def setUp(self):\n super(LoadBalancerTest, self).setUp()\n self.lb_template = {'AWSTemplateFormatVersion': '2010-09-09',\n 'Description': 'fawef', 'Resources': {self.\n _get_lb_resource_name(): {'Type':\n 'Rackspace::Cloud::LoadBalancer', 'Properties': {'name':\n 'test-clb', 'nodes': [{'addresses': ['166.78.103.141'], 'port':\n 80, 'condition': 'ENABLED'}], 'protocol': 'HTTP', 'port': 80,\n 'virtualIps': [{'type': 'PUBLIC', 'ipVersion': 'IPV6'}],\n 'algorithm': 'LEAST_CONNECTIONS', 'connectionThrottle': {\n 'maxConnectionRate': 1000}, 'timeout': 110, 'contentCaching':\n 'DISABLED'}}}}\n self.lb_name = 'test-clb'\n self.expected_body = {'nodes': [FakeNode(address=u'166.78.103.141',\n port=80, condition=u'ENABLED', type=u'PRIMARY', weight=1)],\n 'protocol': u'HTTP', 'port': 80, 'virtual_ips': [FakeVirtualIP(\n type=u'PUBLIC', ipVersion=u'IPV6')], 'algorithm':\n u'LEAST_CONNECTIONS', 'connectionThrottle': {\n 'maxConnectionRate': 1000, 'maxConnections': None,\n 'rateInterval': None, 'minConnections': None},\n 'connectionLogging': None, 'halfClosed': None, 'healthMonitor':\n None, 'metadata': None, 'sessionPersistence': None, 'timeout': \n 110, 'httpsRedirect': False}\n lb.resource_mapping = override_resource\n resource._register_class('Rackspace::Cloud::LoadBalancer',\n LoadBalancerWithFakeClient)\n\n def _get_lb_resource_name(self):\n return 'lb-' + str(uuid.uuid4())\n\n def __getattribute__(self, name):\n if name == 'expected_body' or name == 'lb_template':\n return copy.deepcopy(super(LoadBalancerTest, self).\n __getattribute__(name))\n return super(LoadBalancerTest, self).__getattribute__(name)\n\n def _mock_create(self, tmpl, stack, resource_name, lb_name, lb_body):\n resource_defns = tmpl.resource_definitions(stack)\n rsrc = LoadBalancerWithFakeClient(resource_name, resource_defns[\n resource_name], stack)\n fake_lb = FakeLoadBalancer(name=lb_name)\n fake_lb.status = 'ACTIVE'\n fake_lb.resource_id = 1234\n self.m.StubOutWithMock(rsrc.clb, 'create')\n rsrc.clb.create(lb_name, **lb_body).AndReturn(fake_lb)\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)\n return rsrc, fake_lb\n\n def _get_first_resource_name(self, templ):\n return next(k for k in templ['Resources'])\n\n def _mock_loadbalancer(self, lb_template, expected_name, expected_body):\n t = template_format.parse(json.dumps(lb_template))\n self.stack = utils.parse_stack(t, stack_name=utils.random_name())\n rsrc, fake_lb = self._mock_create(self.stack.t, self.stack, self.\n _get_first_resource_name(lb_template), expected_name, expected_body\n )\n return rsrc, fake_lb\n\n def _set_template(self, templ, **kwargs):\n for k, v in six.iteritems(kwargs):\n templ['Resources'][self._get_first_resource_name(templ)][\n 'Properties'][k] = v\n return templ\n\n def _set_expected(self, expected, **kwargs):\n for k, v in six.iteritems(kwargs):\n expected[k] = v\n return expected\n\n def test_process_node(self):\n nodes = [{'addresses': ['1234'], 'port': 80, 'enabled': True}, {\n 'addresses': ['4567', '8901', '8903'], 'port': 80, 'enabled': \n True}, {'addresses': [], 'port': 80, 'enabled': True}]\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n expected_nodes = [{'address': '1234', 'port': 80, 'enabled': True},\n {'address': '4567', 'port': 80, 'enabled': True}, {'address':\n '8901', 'port': 80, 'enabled': True}, {'address': '8903',\n 'port': 80, 'enabled': 
True}]\n self.assertEqual(expected_nodes, list(rsrc._process_nodes(nodes)))\n\n def test_nodeless(self):\n \"\"\"It's possible to create a LoadBalancer resource with no nodes.\"\"\"\n template = self._set_template(self.lb_template, nodes=[])\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['nodes'] = []\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_alter_properties(self):\n template = self._set_template(self.lb_template, sessionPersistence=\n 'HTTP_COOKIE', connectionLogging=True, metadata={'yolo':\n 'heeyyy_gurl'})\n expected = self._set_expected(self.expected_body,\n sessionPersistence={'persistenceType': 'HTTP_COOKIE'},\n connectionLogging={'enabled': True}, metadata=[{'key': 'yolo',\n 'value': 'heeyyy_gurl'}])\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n <mask token>\n <mask token>\n\n def test_validate_health_monitor(self):\n health_monitor = {'type': 'CONNECT', 'attemptsBeforeDeactivation': \n 1, 'delay': 1, 'timeout': 1}\n template = self._set_template(self.lb_template, healthMonitor=\n health_monitor)\n expected = self._set_expected(self.expected_body, healthMonitor=\n health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n health_monitor['bodyRegex'] = 'dfawefawe'\n template = self._set_template(template, healthMonitor=health_monitor)\n expected = self._set_expected(expected, healthMonitor=health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn('Unknown Property bodyRegex', str(exc))\n health_monitor['type'] = 'HTTP'\n health_monitor['bodyRegex'] = 'bodyRegex'\n health_monitor['statusRegex'] = 'statusRegex'\n health_monitor['hostHeader'] = 'hostHeader'\n health_monitor['path'] = 'path'\n template = self._set_template(template, healthMonitor=health_monitor)\n expected = self._set_expected(expected, healthMonitor=health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def test_validate_ssl_termination(self):\n ssl_termination = {'privatekey': 'ewfawe',\n 'intermediateCertificate': 'fwaefawe', 'secureTrafficOnly': True}\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination)\n expected = self._set_expected(self.expected_body, sslTermination=\n ssl_termination)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn('Property certificate not assigned', six.text_type(exc))\n ssl_termination['certificate'] = 'dfaewfwef'\n template = self._set_template(template, sslTermination=ssl_termination)\n expected = self._set_expected(expected, sslTermination=ssl_termination)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def test_ssl_termination_unstripped_certificates(self):\n ssl_termination_template = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': \"\"\" \nfawefwea\n \"\"\",\n 'intermediateCertificate': '\\n\\nintermediate_certificate\\n',\n 'secureTrafficOnly': False}\n ssl_termination_api = 
copy.deepcopy(ssl_termination_template)\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': 'fawefwea', 'intermediateCertificate':\n 'intermediate_certificate', 'secureTrafficOnly': False,\n 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n fake_lb.add_ssl_termination(**ssl_termination_api)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_ssl_termination_intermediateCertificate_None(self):\n ssl_termination_template = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': \"\"\" \nfawefwea\n \"\"\",\n 'intermediateCertificate': None, 'secureTrafficOnly': False}\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': 'fawefwea', 'secureTrafficOnly': False,\n 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n add_ssl_termination_args = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': \"\"\" \nfawefwea\n \"\"\",\n 'intermediateCertificate': '', 'secureTrafficOnly': False}\n fake_lb.add_ssl_termination(**add_ssl_termination_args)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_post_creation_access_list(self):\n access_list = [{'address': '192.168.1.1/0', 'type': 'ALLOW'}, {\n 'address': '172.165.3.43', 'type': 'DENY'}]\n api_access_list = [{'address': '192.168.1.1/0', 'id': 1234, 'type':\n 'ALLOW'}, {'address': '172.165.3.43', 'id': 3422, 'type': 'DENY'}]\n template = self._set_template(self.lb_template, accessList=access_list)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_access_list')\n fake_lb.get_access_list().AndReturn([])\n fake_lb.get_access_list().AndReturn(api_access_list)\n self.m.StubOutWithMock(fake_lb, 'add_access_list')\n fake_lb.add_access_list(access_list)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_ref_id(self):\n \"\"\"The Reference ID of the resource is the resource ID.\"\"\"\n template = self._set_template(self.lb_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n self.assertEqual(rsrc.resource_id, rsrc.FnGetRefId())\n\n def test_post_creation_error_page(self):\n error_page = 'REALLY BIG ERROR'\n template = self._set_template(self.lb_template, errorPage=error_page)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_error_page')\n fake_lb.get_error_page().AndReturn({u'errorpage': {u'content': u''}})\n fake_lb.get_error_page().AndReturn({u'errorpage': {u'content':\n error_page}})\n self.m.StubOutWithMock(fake_lb, 'set_error_page')\n fake_lb.set_error_page(error_page)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def 
test_post_creation_ssl_termination(self):\n ssl_termination_template = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': 'fawefwea',\n 'intermediateCertificate': 'intermediate_certificate',\n 'secureTrafficOnly': False}\n ssl_termination_api = copy.deepcopy(ssl_termination_template)\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': 'fawefwea', 'intermediateCertificate':\n 'intermediate_certificate', 'secureTrafficOnly': False,\n 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n fake_lb.add_ssl_termination(**ssl_termination_api)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_post_creation_content_caching(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc = self._mock_loadbalancer(template, self.lb_name, self.\n expected_body)[0]\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_check(self):\n stack = mock.Mock()\n stack.db_resource_get.return_value = None\n stack.has_cache_data.return_value = False\n resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)\n loadbalancer = lb.CloudLoadBalancer('test', resdef, stack)\n loadbalancer._add_event = mock.Mock()\n mock_cloud_lb = mock.Mock()\n mock_get = mock.Mock(return_value=mock_cloud_lb)\n loadbalancer.clb.get = mock_get\n mock_cloud_lb.status = 'ACTIVE'\n scheduler.TaskRunner(loadbalancer.check)()\n self.assertEqual('CHECK', loadbalancer.action)\n self.assertEqual('COMPLETE', loadbalancer.status)\n mock_cloud_lb.status = 'FOOBAR'\n exc = self.assertRaises(exception.ResourceFailure, scheduler.\n TaskRunner(loadbalancer.check))\n self.assertEqual('CHECK', loadbalancer.action)\n self.assertEqual('FAILED', loadbalancer.status)\n self.assertIn('FOOBAR', str(exc))\n mock_get.side_effect = lb.NotFound('boom')\n exc = self.assertRaises(exception.ResourceFailure, scheduler.\n TaskRunner(loadbalancer.check))\n self.assertEqual('CHECK', loadbalancer.action)\n self.assertEqual('FAILED', loadbalancer.status)\n self.assertIn('boom', str(exc))\n\n def test_update_add_node_by_address(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n fake_lb.nodes = self.expected_body['nodes']\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n expected_ip = '172.168.1.4'\n update_template['Properties']['nodes'] = [{'addresses': [\n '166.78.103.141'], 'port': 80, 'condition': 'ENABLED', 'type':\n 'PRIMARY', 'weight': 1}, {'addresses': [expected_ip], 'port': \n 80, 'condition': 'ENABLED', 'type': 'PRIMARY', 'weight': 1}]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.nodes = [FakeNode(address=u'172.168.1.4', port=80,\n condition=u'ENABLED', type='PRIMARY', weight=1), FakeNode(\n address=u'166.78.103.141', port=80, condition=u'ENABLED', type=\n 'PRIMARY', weight=1)]\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'add_nodes')\n fake_lb.add_nodes([fake_lb.Node(address=expected_ip, port=80,\n condition='ENABLED', type='PRIMARY', weight=1)])\n 
self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_resolve_attr_noid(self):\n stack = mock.Mock()\n stack.db_resource_get.return_value = None\n stack.has_cache_data.return_value = False\n resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)\n lbres = lb.CloudLoadBalancer('test', resdef, stack)\n self.assertIsNone(lbres._resolve_attribute('PublicIp'))\n\n def test_resolve_attr_virtualips(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n fake_lb.virtual_ips = [FakeVirtualIP(address='1.2.3.4', type=\n 'PUBLIC', ipVersion='IPv6', id='test-id')]\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n expected = [{'ip_version': 'IPv6', 'type': 'PUBLIC', 'id':\n 'test-id', 'address': '1.2.3.4'}]\n self.m.ReplayAll()\n self.assertEqual(expected, rsrc._resolve_attribute('virtualIps'))\n self.m.VerifyAll()\n\n def test_update_nodes_immutable(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n current_nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'3.3.3.3', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1)]\n fake_lb.nodes = current_nodes\n fake_lb.tracker = 'fake_lb'\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n expected_ip = '4.4.4.4'\n update_template['Properties']['nodes'] = [{'addresses': ['1.1.1.1'],\n 'port': 80, 'condition': 'ENABLED', 'type': 'PRIMARY', 'weight':\n 1}, {'addresses': ['2.2.2.2'], 'port': 80, 'condition':\n 'DISABLED', 'type': 'PRIMARY', 'weight': 1}, {'addresses': [\n expected_ip], 'port': 80, 'condition': 'ENABLED', 'type':\n 'PRIMARY', 'weight': 1}]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.status = 'PENDING_UPDATE'\n fake_lb1.tracker = 'fake_lb1'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb1)\n fake_lb2.status = 'ACTIVE'\n fake_lb2.nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'3.3.3.3', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'4.4.4.4', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1)]\n fake_lb2.tracker = 'fake_lb2'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb3 = copy.deepcopy(fake_lb2)\n fake_lb3.status = 'ACTIVE'\n fake_lb3.nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'4.4.4.4', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1)]\n fake_lb3.tracker = 'fake_lb3'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb4 = copy.deepcopy(fake_lb3)\n fake_lb4.status = 'ACTIVE'\n fake_lb4.nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'DISABLED', 
type='PRIMARY', weight=1),
            FakeNode(address=u'4.4.4.4', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1)]
        fake_lb4.tracker = 'fake_lb4'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb4)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_pending_update_status(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['name'] = 'updated_name'
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.name = 'updated_name'
        fake_lb1.status = 'PENDING_UPDATE'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.name = 'updated_name'
        fake_lb2.status = 'ACTIVE'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_immutable_exception(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['name'] = 'updated_name'
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.name = 'updated_name'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'update')
        msg = ("Load Balancer '%s' has a status of 'PENDING_UPDATE' and is "
               "considered immutable." % rsrc.resource_id)
        fake_lb.update(name='updated_name').AndRaise(Exception(msg))
        fake_lb.update(name='updated_name').AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_create_immutable_exception(self):
        access_list = [{'address': '192.168.1.1/0', 'type': 'ALLOW'},
                       {'address': '172.165.3.43', 'type': 'DENY'}]
        template = self._set_template(self.lb_template, accessList=access_list)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.StubOutWithMock(fake_lb, 'get_access_list')
        fake_lb.get_access_list().AndReturn({})
        fake_lb.get_access_list().AndReturn({})
        fake_lb.get_access_list().AndReturn(access_list)
        self.m.StubOutWithMock(fake_lb, 'add_access_list')
        msg = ("Load Balancer '%s' has a status of 'PENDING_UPDATE' and is "
               "considered immutable." % rsrc.resource_id)
        fake_lb.add_access_list(access_list).AndRaise(Exception(msg))
        fake_lb.add_access_list(access_list)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()

    def test_update_lb_name(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['name'] = 'updated_name'
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.name = 'updated_name'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'update')
        fake_lb.update(name='updated_name')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_lb_multiple(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['name'] = 'updated_name'
        update_template['Properties']['algorithm'] = 'RANDOM'
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.name = 'updated_name'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.algorithm = 'RANDOM'
        fake_lb2.name = 'updated_name'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.StubOutWithMock(fake_lb, 'update')
        fake_lb.update(name='updated_name', algorithm='RANDOM')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    <mask token>

    def test_update_lb_protocol(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['protocol'] = 'IMAPS'
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.protocol = 'IMAPS'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'update')
        fake_lb.update(protocol='IMAPS')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_lb_redirect(self):
        template = self._set_template(self.lb_template, protocol='HTTPS')
        expected = self._set_expected(self.expected_body, protocol='HTTPS')
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['httpsRedirect'] = True
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.httpsRedirect = True
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'update')
        fake_lb.update(httpsRedirect=True)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    <mask token>

    def test_lb_redirect_HTTP_with_SSL_term(self):
        ssl_termination_template = {
            'privatekey': private_key, 'intermediateCertificate': 'fwaefawe',
            'secureTrafficOnly': True, 'securePort': 443, 'certificate': cert}
        ssl_termination_api = copy.deepcopy(ssl_termination_template)
        ssl_termination_api['enabled'] = True
        del ssl_termination_api['privatekey']
        template = self._set_template(self.lb_template,
                                      sslTermination=ssl_termination_template,
                                      protocol='HTTP', httpsRedirect=True)
        expected = self._set_expected(self.expected_body, protocol='HTTP',
                                      httpsRedirect=False)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'create')
        rsrc.clb.create(self.lb_name, **expected).AndReturn(fake_lb)
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.httpsRedirect = True
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
        fake_lb.get_ssl_termination().AndReturn({})
        fake_lb.get_ssl_termination().AndReturn(ssl_termination_api)
        self.m.StubOutWithMock(fake_lb1, 'get_ssl_termination')
        fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)
        fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)
        fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)

    def test_update_lb_half_closed(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['halfClosed'] = True
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.halfClosed = True
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'update')
        fake_lb.update(halfClosed=True)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_lb_port(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['port'] = 1234
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.port = 1234
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'update')
        fake_lb.update(port=1234)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_lb_timeout(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['timeout'] = 120
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.timeout = 120
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'update')
        fake_lb.update(timeout=120)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_health_monitor_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['healthMonitor'] = {
            'type': 'HTTP', 'delay': 10, 'timeout': 10,
            'attemptsBeforeDeactivation': 4, 'path': '/',
            'statusRegex': '^[234][0-9][0-9]$', 'bodyRegex': '.* testing .*',
            'hostHeader': 'example.com'}
        self.m.StubOutWithMock(fake_lb, 'get_health_monitor')
        fake_lb.get_health_monitor().AndReturn({})
        fake_lb.get_health_monitor().AndReturn({
            'type': 'HTTP', 'delay': 10, 'timeout': 10,
            'attemptsBeforeDeactivation': 4, 'path': '/',
            'statusRegex': '^[234][0-9][0-9]$', 'bodyRegex': '.* testing .*',
            'hostHeader': 'example.com'})
        self.m.StubOutWithMock(fake_lb, 'add_health_monitor')
        fake_lb.add_health_monitor(
            attemptsBeforeDeactivation=4, bodyRegex='.* testing .*', delay=10,
            hostHeader='example.com', path='/',
            statusRegex='^[234][0-9][0-9]$', timeout=10, type='HTTP')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_health_monitor_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        hm = {'type': 'HTTP', 'delay': 10, 'timeout': 10,
              'attemptsBeforeDeactivation': 4, 'path': '/',
              'statusRegex': '^[234][0-9][0-9]$', 'bodyRegex': '.* testing .*',
              'hostHeader': 'example.com'}
        template['Resources'][lb_name]['Properties']['healthMonitor'] = hm
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['healthMonitor'] = hm
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['healthMonitor']
        self.m.StubOutWithMock(fake_lb, 'get_health_monitor')
        fake_lb.get_health_monitor().AndReturn({
            'type': 'HTTP', 'delay': 10, 'timeout': 10,
            'attemptsBeforeDeactivation': 4, 'path': '/',
            'statusRegex': '^[234][0-9][0-9]$', 'bodyRegex': '.* testing .*',
            'hostHeader': 'example.com'})
        fake_lb.get_health_monitor().AndReturn({})
        self.m.StubOutWithMock(fake_lb, 'delete_health_monitor')
        fake_lb.delete_health_monitor()
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_session_persistence_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['sessionPersistence'] = 'SOURCE_IP'
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.assertEqual('SOURCE_IP', fake_lb.session_persistence)
        self.m.VerifyAll()
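    # Illustration (not part of the original suite): the paired add/delete
    # update tests in this file all follow one rhythm -- stub the getter to
    # report the old state, expect exactly one mutating call, then stub the
    # getter again to report the new state so the update task can observe
    # completion. A minimal sketch of that contract, with hypothetical values:
    #
    #   fake_lb.get_health_monitor().AndReturn({})        # state before
    #   fake_lb.add_health_monitor(**desired)             # the one mutation
    #   fake_lb.get_health_monitor().AndReturn(desired)   # state after
    #
    # If the second get never reports the desired value, the resource keeps
    # polling and the test would hang or time out rather than pass.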
    def test_update_session_persistence_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties']['sessionPersistence'] = 'SOURCE_IP'
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['sessionPersistence'] = {'persistenceType': 'SOURCE_IP'}
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['sessionPersistence']
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.assertEqual('', fake_lb.session_persistence)
        self.m.VerifyAll()

    def test_update_ssl_termination_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['sslTermination'] = {
            'securePort': 443, 'privatekey': private_key, 'certificate': cert,
            'secureTrafficOnly': False, 'intermediateCertificate': ''}
        self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
        fake_lb.get_ssl_termination().AndReturn({})
        fake_lb.get_ssl_termination().AndReturn({
            'securePort': 443, 'certificate': cert,
            'secureTrafficOnly': False, 'enabled': True})
        self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
        fake_lb.add_ssl_termination(
            securePort=443, privatekey=private_key, certificate=cert,
            secureTrafficOnly=False, intermediateCertificate='')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_ssl_termination_delete(self):
        template = copy.deepcopy(self.lb_template)
        ssl_termination_template = {
            'securePort': 443, 'privatekey': private_key, 'certificate': cert,
            'intermediateCertificate': '', 'secureTrafficOnly': False}
        ssl_termination_api = copy.deepcopy(ssl_termination_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties']['sslTermination'] = ssl_termination_template
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
        fake_lb.get_ssl_termination().AndReturn({})
        self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
        fake_lb.add_ssl_termination(**ssl_termination_api)
        fake_lb.get_ssl_termination().AndReturn({
            'securePort': 443, 'certificate': cert,
            'secureTrafficOnly': False, 'enabled': True})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['sslTermination']
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)
        self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
        fake_lb.get_ssl_termination().AndReturn({
            'securePort': 443, 'certificate': cert, 'secureTrafficOnly': False})
        self.m.StubOutWithMock(fake_lb, 'delete_ssl_termination')
        fake_lb.delete_ssl_termination()
        fake_lb.get_ssl_termination().AndReturn({})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_metadata_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['metadata'] = {'a': 1, 'b': 2}
        self.m.StubOutWithMock(fake_lb, 'get_metadata')
        fake_lb.get_metadata().AndReturn({})
        fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})
        self.m.StubOutWithMock(fake_lb, 'set_metadata')
        fake_lb.set_metadata({'a': 1, 'b': 2})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_metadata_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties']['metadata'] = {'a': 1, 'b': 2}
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['metadata'] = mox.SameElementsAs(
            [{'key': 'a', 'value': 1}, {'key': 'b', 'value': 2}])
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['metadata']
        self.m.StubOutWithMock(fake_lb, 'get_metadata')
        fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})
        fake_lb.get_metadata().AndReturn({})
        self.m.StubOutWithMock(fake_lb, 'delete_metadata')
        fake_lb.delete_metadata()
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_errorpage_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        error_page = ('<html><head><title>Service Unavailable</title></head>'
                      '<body><h2>Service Unavailable</h2>'
                      'The service is unavailable</body></html>')
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['errorPage'] = error_page
        self.m.StubOutWithMock(fake_lb, 'get_error_page')
        fake_lb.get_error_page().AndReturn({'errorpage': {'content': 'foo'}})
        fake_lb.get_error_page().AndReturn({'errorpage': {'content': error_page}})
        self.m.StubOutWithMock(fake_lb, 'set_error_page')
        fake_lb.set_error_page(error_page)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_errorpage_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        error_page = ('<html><head><title>Service Unavailable</title></head>'
                      '<body><h2>Service Unavailable</h2>'
                      'The service is unavailable</body></html>')
        template['Resources'][lb_name]['Properties']['errorPage'] = error_page
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.StubOutWithMock(fake_lb, 'get_error_page')
        fake_lb.get_error_page().AndReturn({})
        self.m.StubOutWithMock(fake_lb, 'set_error_page')
        fake_lb.set_error_page(error_page)
        fake_lb.get_error_page().AndReturn({'errorpage': {'content': error_page}})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['errorPage']
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)
        self.m.StubOutWithMock(fake_lb, 'clear_error_page')
        fake_lb.clear_error_page()
        self.m.StubOutWithMock(fake_lb, 'get_error_page')
        fake_lb.get_error_page().AndReturn({'errorpage': {'content': error_page}})
        fake_lb.get_error_page().AndReturn({'errorpage': {'content': ''}})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_connection_logging_enable(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['connectionLogging'] = True
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.assertTrue(fake_lb.connection_logging)
        self.m.VerifyAll()

    <mask token>

    def test_update_connection_logging_disable(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties']['connectionLogging'] = True
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['connectionLogging'] = {'enabled': True}
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['connectionLogging'] = False
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.assertFalse(fake_lb.connection_logging)
        self.m.VerifyAll()

    def test_update_connection_throttle_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['connectionThrottle'] = {'maxConnections': 1000}
        self.m.StubOutWithMock(fake_lb, 'add_connection_throttle')
        self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')
        fake_lb.get_connection_throttle().AndReturn({
            'maxConnectionRate': None, 'minConnections': None,
            'rateInterval': None, 'maxConnections': 100})
        fake_lb.add_connection_throttle(
            maxConnections=1000, maxConnectionRate=None,
            minConnections=None, rateInterval=None)
        fake_lb.get_connection_throttle().AndReturn({
            'maxConnectionRate': None, 'minConnections': None,
            'rateInterval': None, 'maxConnections': 1000})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_connection_throttle_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties']['connectionThrottle'] = {
            'maxConnections': 1000}
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['connectionThrottle'] = {
            'maxConnections': 1000, 'maxConnectionRate': None,
            'rateInterval': None, 'minConnections': None}
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['connectionThrottle']
        self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')
        fake_lb.get_connection_throttle().AndReturn({
            'maxConnections': 1000, 'maxConnectionRate': None,
            'rateInterval': None, 'minConnections': None})
        self.m.StubOutWithMock(fake_lb, 'delete_connection_throttle')
        fake_lb.delete_connection_throttle()
        fake_lb.get_connection_throttle().AndReturn({})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_content_caching_enable(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['contentCaching'] = 'ENABLED'
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.content_caching = False
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.content_caching = True
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_content_caching_deleted(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties']['contentCaching'] = 'ENABLED'
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['contentCaching']
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.content_caching = True
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.content_caching = False
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    <mask token>

    def test_delete(self):
        template = self._set_template(self.lb_template, contentCaching='ENABLED')
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_delete_immutable(self):
        template = self._set_template(self.lb_template, contentCaching='ENABLED')
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))
        self.m.StubOutWithMock(fake_lb, 'delete')
        fake_lb.delete().AndRaise(Exception('immutable'))
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_delete_non_immutable_exc(self):
        template = self._set_template(self.lb_template, contentCaching='ENABLED')
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        self.m.StubOutWithMock(fake_lb, 'delete')
        fake_lb.delete().AndRaise(FakeException())
        self.m.ReplayAll()
        exc = self.assertRaises(exception.ResourceFailure,
                                scheduler.TaskRunner(rsrc.delete))
        self.assertIn('FakeException', six.text_type(exc))
        self.m.VerifyAll()

    def test_delete_states(self):
        template = self._set_template(self.lb_template, contentCaching='ENABLED')
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb3 = copy.deepcopy(fake_lb)
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1.status = 'ACTIVE'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2.status = 'PENDING_DELETE'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        fake_lb3.status = 'DELETED'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_redir(self):
        mock_stack = mock.Mock()
        mock_stack.db_resource_get.return_value = None
        mock_stack.has_cache_data.return_value = False
        props = {'httpsRedirect': True, 'protocol': 'HTTPS', 'port': 443,
                 'nodes': [], 'virtualIps': [{'id': '1234'}]}
        mock_resdef = rsrc_defn.ResourceDefinition(
            'test_lb', LoadBalancerWithFakeClient, properties=props)
        mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)
        self.assertIsNone(mock_lb.validate())
        props['protocol'] = 'HTTP'
        props['sslTermination'] = {'secureTrafficOnly': True, 'securePort': 443,
                                   'privatekey': 'bobloblaw', 'certificate': 'mycert'}
        mock_resdef = rsrc_defn.ResourceDefinition(
            'test_lb_2', LoadBalancerWithFakeClient, properties=props)
        mock_lb = lb.CloudLoadBalancer('test_2', mock_resdef, mock_stack)
        self.assertIsNone(mock_lb.validate())

    def test_invalid_redir_proto(self):
        mock_stack = mock.Mock()
        mock_stack.db_resource_get.return_value = None
        mock_stack.has_cache_data.return_value = False
        props = {'httpsRedirect': True, 'protocol': 'TCP', 'port': 1234,
                 'nodes': [], 'virtualIps': [{'id': '1234'}]}
        mock_resdef = rsrc_defn.ResourceDefinition(
            'test_lb', LoadBalancerWithFakeClient, properties=props)
        mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)
        ex = self.assertRaises(exception.StackValidationFailed, mock_lb.validate)
        self.assertIn('HTTPS redirect is only available', six.text_type(ex))

    def test_invalid_redir_ssl(self):
        mock_stack = mock.Mock()
        mock_stack.db_resource_get.return_value = None
        mock_stack.has_cache_data.return_value = False
        props = {'httpsRedirect': True, 'protocol': 'HTTP', 'port': 1234,
                 'nodes': [], 'virtualIps': [{'id': '1234'}]}
        mock_resdef = rsrc_defn.ResourceDefinition(
            'test_lb', LoadBalancerWithFakeClient, properties=props)
        mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)
        ex = self.assertRaises(exception.StackValidationFailed, mock_lb.validate)
        self.assertIn('HTTPS redirect is only available', six.text_type(ex))
        props['sslTermination'] = {'secureTrafficOnly': False, 'securePort': 443,
                                   'privatekey': 'bobloblaw', 'certificate': 'mycert'}
        mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)
        ex = self.assertRaises(exception.StackValidationFailed, mock_lb.validate)
        self.assertIn('HTTPS redirect is only available', six.text_type(ex))
        props['sslTermination'] = {'secureTrafficOnly': True, 'securePort': 1234,
                                   'privatekey': 'bobloblaw', 'certificate': 'mycert'}
        mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)
        ex = self.assertRaises(exception.StackValidationFailed, mock_lb.validate)
        self.assertIn('HTTPS redirect is only available', six.text_type(ex))

    def test_update_nodes_condition_draining(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        fake_lb.nodes = self.expected_body['nodes']
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        expected_ip = '172.168.1.4'
        update_template['Properties']['nodes'] = [
            {'addresses': ['166.78.103.141'], 'port': 80,
             'condition': 'DRAINING', 'type': 'PRIMARY', 'weight': 1},
            {'addresses': [expected_ip], 'port': 80,
             'condition': 'DRAINING', 'type': 'PRIMARY', 'weight': 1}]
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb1, 'add_nodes')
        fake_lb1.add_nodes([fake_lb1.Node(address=expected_ip, port=80,
                                          condition='DRAINING', type='PRIMARY',
                                          weight=1)])
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.nodes = [
            FakeNode(address=u'166.78.103.141', port=80, condition=u'DRAINING',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'172.168.1.4', port=80, condition=u'DRAINING',
                     type='PRIMARY', weight=1)]
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    <mask token>

    def test_update_nodes_defaults(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        tmpl_node = template['Resources'][lb_name]['Properties']['nodes'][0]
        tmpl_node['type'] = 'PRIMARY'
        tmpl_node['condition'] = 'ENABLED'
        tmpl_node['weight'] = 1
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['nodes'] = [FakeNode(address=u'166.78.103.141', port=80,
                                           condition=u'ENABLED', type='PRIMARY',
                                           weight=1)]
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected_body)
        fake_lb.nodes = self.expected_body['nodes']
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['nodes'] = [{'addresses': ['166.78.103.141'],
                                                   'port': 80}]
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb1, 'add_nodes')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
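# The tests above lean heavily on mox's record/replay/verify cycle. Below is a
# minimal, self-contained sketch of that cycle, kept import-safe inside a
# function. It is illustrative only: `Client` and `fetch` are hypothetical
# names, not part of the test suite in this file.
import mox


def demo_record_replay_verify():
    """Sketch of the mox workflow: stub, record, replay, call, verify."""
    class Client(object):
        def fetch(self, key):
            return 'real-' + key

    m = mox.Mox()
    client = Client()
    m.StubOutWithMock(client, 'fetch')
    client.fetch(mox.IgnoreArg()).AndReturn('stubbed')  # record an expectation
    m.ReplayAll()                                       # switch to replay mode
    result = client.fetch('anything')                   # must match the recording
    m.VerifyAll()                                       # fail if anything recorded never ran
    m.UnsetStubs()                                      # restore the real method
    return result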
"step-2": "<mask token>\n\n\nclass FakeLoadBalancer(object):\n\n def __init__(self, name=None, info=None, *args, **kwargs):\n name = name or uuid.uuid4()\n info = info or {'fake': 'fake'}\n self.id = uuid.uuid4()\n self.manager = FakeLoadBalancerManager()\n self.Node = FakeNode\n self.VirtualIP = FakeVirtualIP\n self.nodes = []\n self.algorithm = 'ROUND_ROBIN'\n self.session_persistence = 'HTTP_COOKIE'\n self.connection_logging = False\n self.timeout = None\n self.httpsRedirect = False\n self.protocol = None\n self.port = None\n self.name = None\n self.halfClosed = None\n self.content_caching = False\n\n def get(self, *args, **kwargs):\n pass\n\n def add_nodes(self, *args, **kwargs):\n pass\n\n def add_ssl_termination(self, *args, **kwargs):\n pass\n\n def set_error_page(self, *args, **kwargs):\n pass\n\n def clear_error_page(self, *args, **kwargs):\n pass\n\n def add_access_list(self, *args, **kwargs):\n pass\n <mask token>\n\n def add_health_monitor(self, *args, **kwargs):\n pass\n\n def delete_health_monitor(self, *args, **kwargs):\n pass\n\n def delete_ssl_termination(self, *args, **kwargs):\n pass\n\n def set_metadata(self, *args, **kwargs):\n pass\n <mask token>\n\n def add_connection_throttle(self, *args, **kwargs):\n pass\n\n def delete_connection_throttle(self, *args, **kwargs):\n pass\n\n def delete(self, *args, **kwargs):\n pass\n\n def get_health_monitor(self, *args, **kwargs):\n return {}\n\n def get_metadata(self, *args, **kwargs):\n return {}\n <mask token>\n <mask token>\n <mask token>\n\n def get_access_list(self, *args, **kwargs):\n pass\n\n\nclass LoadBalancerWithFakeClient(lb.CloudLoadBalancer):\n\n def cloud_lb(self):\n return FakeLoadBalancerClient()\n\n\n<mask token>\n\n\nclass LoadBalancerTest(common.HeatTestCase):\n\n def setUp(self):\n super(LoadBalancerTest, self).setUp()\n self.lb_template = {'AWSTemplateFormatVersion': '2010-09-09',\n 'Description': 'fawef', 'Resources': {self.\n _get_lb_resource_name(): {'Type':\n 'Rackspace::Cloud::LoadBalancer', 'Properties': {'name':\n 'test-clb', 'nodes': [{'addresses': ['166.78.103.141'], 'port':\n 80, 'condition': 'ENABLED'}], 'protocol': 'HTTP', 'port': 80,\n 'virtualIps': [{'type': 'PUBLIC', 'ipVersion': 'IPV6'}],\n 'algorithm': 'LEAST_CONNECTIONS', 'connectionThrottle': {\n 'maxConnectionRate': 1000}, 'timeout': 110, 'contentCaching':\n 'DISABLED'}}}}\n self.lb_name = 'test-clb'\n self.expected_body = {'nodes': [FakeNode(address=u'166.78.103.141',\n port=80, condition=u'ENABLED', type=u'PRIMARY', weight=1)],\n 'protocol': u'HTTP', 'port': 80, 'virtual_ips': [FakeVirtualIP(\n type=u'PUBLIC', ipVersion=u'IPV6')], 'algorithm':\n u'LEAST_CONNECTIONS', 'connectionThrottle': {\n 'maxConnectionRate': 1000, 'maxConnections': None,\n 'rateInterval': None, 'minConnections': None},\n 'connectionLogging': None, 'halfClosed': None, 'healthMonitor':\n None, 'metadata': None, 'sessionPersistence': None, 'timeout': \n 110, 'httpsRedirect': False}\n lb.resource_mapping = override_resource\n resource._register_class('Rackspace::Cloud::LoadBalancer',\n LoadBalancerWithFakeClient)\n\n def _get_lb_resource_name(self):\n return 'lb-' + str(uuid.uuid4())\n\n def __getattribute__(self, name):\n if name == 'expected_body' or name == 'lb_template':\n return copy.deepcopy(super(LoadBalancerTest, self).\n __getattribute__(name))\n return super(LoadBalancerTest, self).__getattribute__(name)\n\n def _mock_create(self, tmpl, stack, resource_name, lb_name, lb_body):\n resource_defns = tmpl.resource_definitions(stack)\n rsrc = 
class LoadBalancerTest(common.HeatTestCase):

    def setUp(self):
        super(LoadBalancerTest, self).setUp()
        self.lb_template = {
            'AWSTemplateFormatVersion': '2010-09-09',
            'Description': 'fawef',
            'Resources': {
                self._get_lb_resource_name(): {
                    'Type': 'Rackspace::Cloud::LoadBalancer',
                    'Properties': {
                        'name': 'test-clb',
                        'nodes': [{'addresses': ['166.78.103.141'],
                                   'port': 80,
                                   'condition': 'ENABLED'}],
                        'protocol': 'HTTP',
                        'port': 80,
                        'virtualIps': [{'type': 'PUBLIC', 'ipVersion': 'IPV6'}],
                        'algorithm': 'LEAST_CONNECTIONS',
                        'connectionThrottle': {'maxConnectionRate': 1000},
                        'timeout': 110,
                        'contentCaching': 'DISABLED'}}}}
        self.lb_name = 'test-clb'
        self.expected_body = {
            'nodes': [FakeNode(address=u'166.78.103.141', port=80,
                               condition=u'ENABLED', type=u'PRIMARY', weight=1)],
            'protocol': u'HTTP',
            'port': 80,
            'virtual_ips': [FakeVirtualIP(type=u'PUBLIC', ipVersion=u'IPV6')],
            'algorithm': u'LEAST_CONNECTIONS',
            'connectionThrottle': {'maxConnectionRate': 1000,
                                   'maxConnections': None,
                                   'rateInterval': None,
                                   'minConnections': None},
            'connectionLogging': None,
            'halfClosed': None,
            'healthMonitor': None,
            'metadata': None,
            'sessionPersistence': None,
            'timeout': 110,
            'httpsRedirect': False}
        lb.resource_mapping = override_resource
        resource._register_class('Rackspace::Cloud::LoadBalancer',
                                 LoadBalancerWithFakeClient)

    def _get_lb_resource_name(self):
        return 'lb-' + str(uuid.uuid4())

    def __getattribute__(self, name):
        if name == 'expected_body' or name == 'lb_template':
            return copy.deepcopy(super(LoadBalancerTest, self).__getattribute__(name))
        return super(LoadBalancerTest, self).__getattribute__(name)

    def _mock_create(self, tmpl, stack, resource_name, lb_name, lb_body):
        resource_defns = tmpl.resource_definitions(stack)
        rsrc = LoadBalancerWithFakeClient(resource_name,
                                          resource_defns[resource_name], stack)
        fake_lb = FakeLoadBalancer(name=lb_name)
        fake_lb.status = 'ACTIVE'
        fake_lb.resource_id = 1234
        self.m.StubOutWithMock(rsrc.clb, 'create')
        rsrc.clb.create(lb_name, **lb_body).AndReturn(fake_lb)
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)
        return rsrc, fake_lb

    def _get_first_resource_name(self, templ):
        return next(k for k in templ['Resources'])

    def _mock_loadbalancer(self, lb_template, expected_name, expected_body):
        t = template_format.parse(json.dumps(lb_template))
        self.stack = utils.parse_stack(t, stack_name=utils.random_name())
        rsrc, fake_lb = self._mock_create(self.stack.t, self.stack,
                                          self._get_first_resource_name(lb_template),
                                          expected_name, expected_body)
        return rsrc, fake_lb

    def _set_template(self, templ, **kwargs):
        for k, v in six.iteritems(kwargs):
            templ['Resources'][self._get_first_resource_name(templ)]['Properties'][k] = v
        return templ

    def _set_expected(self, expected, **kwargs):
        for k, v in six.iteritems(kwargs):
            expected[k] = v
        return expected

    def test_process_node(self):
        nodes = [{'addresses': ['1234'], 'port': 80, 'enabled': True},
                 {'addresses': ['4567', '8901', '8903'], 'port': 80, 'enabled': True},
                 {'addresses': [], 'port': 80, 'enabled': True}]
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        expected_nodes = [{'address': '1234', 'port': 80, 'enabled': True},
                          {'address': '4567', 'port': 80, 'enabled': True},
                          {'address': '8901', 'port': 80, 'enabled': True},
                          {'address': '8903', 'port': 80, 'enabled': True}]
        self.assertEqual(expected_nodes, list(rsrc._process_nodes(nodes)))
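    # Note (illustration, not original code): test_process_node pins down the
    # expansion _process_nodes performs -- each template node entry fans out to
    # one API node per address, and address-less entries disappear. A
    # functionally equivalent pure-Python sketch:
    #
    #   def expand(nodes):
    #       for node in nodes:
    #           addresses = node.pop('addresses')
    #           for addr in addresses:
    #               yield dict(node, address=addr)
    #
    # (dict(node, address=addr) assumes the remaining keys -- port, enabled --
    # pass through unchanged, which is what the expected_nodes fixture shows.)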
    def test_nodeless(self):
        """It's possible to create a LoadBalancer resource with no nodes."""
        template = self._set_template(self.lb_template, nodes=[])
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['nodes'] = []
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()

    def test_alter_properties(self):
        template = self._set_template(self.lb_template,
                                      sessionPersistence='HTTP_COOKIE',
                                      connectionLogging=True,
                                      metadata={'yolo': 'heeyyy_gurl'})
        expected = self._set_expected(self.expected_body,
                                      sessionPersistence={'persistenceType': 'HTTP_COOKIE'},
                                      connectionLogging={'enabled': True},
                                      metadata=[{'key': 'yolo', 'value': 'heeyyy_gurl'}])
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()

    def test_validate_vip(self):
        snippet = {'nodes': [], 'protocol': 'HTTP', 'port': 80,
                   'halfClosed': None, 'algorithm': u'LEAST_CONNECTIONS',
                   'virtualIps': [{'id': '1234'}]}
        stack = mock.Mock()
        stack.db_resource_get.return_value = None
        stack.has_cache_data.return_value = False
        resdef = rsrc_defn.ResourceDefinition('testvip', lb.CloudLoadBalancer,
                                              properties=snippet)
        rsrc = lb.CloudLoadBalancer('testvip', resdef, stack)
        self.assertIsNone(rsrc.validate())
        snippet['virtualIps'][0]['type'] = 'PUBLIC'
        exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)
        self.assertIn('Cannot specify type or version', str(exc))
        snippet['virtualIps'] = [{}]
        exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)
        self.assertIn('Must specify VIP type and version', str(exc))

    def test_validate_half_closed(self):
        template = self._set_template(self.lb_template, halfClosed=True)
        expected = self._set_expected(self.expected_body, halfClosed=True)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)
        self.assertIn('The halfClosed property is only available for the TCP '
                      'or TCP_CLIENT_FIRST protocols', str(exc))
        template = self._set_template(template, protocol='TCP')
        expected = self._set_expected(expected, protocol='TCP')
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        self.assertIsNone(rsrc.validate())
        template = self._set_template(template, protocol='TCP_CLIENT_FIRST')
        expected = self._set_expected(expected, protocol='TCP_CLIENT_FIRST')
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        self.assertIsNone(rsrc.validate())

    def test_validate_health_monitor(self):
        health_monitor = {'type': 'CONNECT', 'attemptsBeforeDeactivation': 1,
                          'delay': 1, 'timeout': 1}
        template = self._set_template(self.lb_template, healthMonitor=health_monitor)
        expected = self._set_expected(self.expected_body, healthMonitor=health_monitor)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        self.assertIsNone(rsrc.validate())
        health_monitor['bodyRegex'] = 'dfawefawe'
        template = self._set_template(template, healthMonitor=health_monitor)
        expected = self._set_expected(expected, healthMonitor=health_monitor)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)
        self.assertIn('Unknown Property bodyRegex', str(exc))
        health_monitor['type'] = 'HTTP'
        health_monitor['bodyRegex'] = 'bodyRegex'
        health_monitor['statusRegex'] = 'statusRegex'
        health_monitor['hostHeader'] = 'hostHeader'
        health_monitor['path'] = 'path'
        template = self._set_template(template, healthMonitor=health_monitor)
        expected = self._set_expected(expected, healthMonitor=health_monitor)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        self.assertIsNone(rsrc.validate())

    def test_validate_ssl_termination(self):
        ssl_termination = {'privatekey': 'ewfawe',
                           'intermediateCertificate': 'fwaefawe',
                           'secureTrafficOnly': True}
        template = self._set_template(self.lb_template, sslTermination=ssl_termination)
        expected = self._set_expected(self.expected_body, sslTermination=ssl_termination)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)
        self.assertIn('Property certificate not assigned', six.text_type(exc))
        ssl_termination['certificate'] = 'dfaewfwef'
        template = self._set_template(template, sslTermination=ssl_termination)
        expected = self._set_expected(expected, sslTermination=ssl_termination)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, expected)
        self.assertIsNone(rsrc.validate())

    def test_ssl_termination_unstripped_certificates(self):
        ssl_termination_template = {
            'securePort': 443,
            'privatekey': 'afwefawe',
            'certificate': """
fawefwea
""",
            'intermediateCertificate': '\n\nintermediate_certificate\n',
            'secureTrafficOnly': False}
        ssl_termination_api = copy.deepcopy(ssl_termination_template)
        template = self._set_template(self.lb_template,
                                      sslTermination=ssl_termination_template)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
        fake_lb.get_ssl_termination().AndReturn({})
        fake_lb.get_ssl_termination().AndReturn({
            'securePort': 443, 'certificate': 'fawefwea',
            'intermediateCertificate': 'intermediate_certificate',
            'secureTrafficOnly': False, 'enabled': True})
        self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
        fake_lb.add_ssl_termination(**ssl_termination_api)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()

    def test_ssl_termination_intermediateCertificate_None(self):
        ssl_termination_template = {
            'securePort': 443,
            'privatekey': 'afwefawe',
            'certificate': """
fawefwea
""",
            'intermediateCertificate': None,
            'secureTrafficOnly': False}
        template = self._set_template(self.lb_template,
                                      sslTermination=ssl_termination_template)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
        fake_lb.get_ssl_termination().AndReturn({})
        fake_lb.get_ssl_termination().AndReturn({
            'securePort': 443, 'certificate': 'fawefwea',
            'secureTrafficOnly': False, 'enabled': True})
        self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
        add_ssl_termination_args = {
            'securePort': 443,
            'privatekey': 'afwefawe',
            'certificate': """
fawefwea
""",
            'intermediateCertificate': '',
            'secureTrafficOnly': False}
        fake_lb.add_ssl_termination(**add_ssl_termination_args)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()

    def test_post_creation_access_list(self):
        access_list = [{'address': '192.168.1.1/0', 'type': 'ALLOW'},
                       {'address': '172.165.3.43', 'type': 'DENY'}]
        api_access_list = [{'address': '192.168.1.1/0', 'id': 1234, 'type': 'ALLOW'},
                           {'address': '172.165.3.43', 'id': 3422, 'type': 'DENY'}]
        template = self._set_template(self.lb_template, accessList=access_list)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.StubOutWithMock(fake_lb, 'get_access_list')
        fake_lb.get_access_list().AndReturn([])
        fake_lb.get_access_list().AndReturn(api_access_list)
        self.m.StubOutWithMock(fake_lb, 'add_access_list')
        fake_lb.add_access_list(access_list)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()

    def test_ref_id(self):
        """The Reference ID of the resource is the resource ID."""
        template = self._set_template(self.lb_template)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()
        self.assertEqual(rsrc.resource_id, rsrc.FnGetRefId())

    def test_post_creation_error_page(self):
        error_page = 'REALLY BIG ERROR'
        template = self._set_template(self.lb_template, errorPage=error_page)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.StubOutWithMock(fake_lb, 'get_error_page')
        fake_lb.get_error_page().AndReturn({u'errorpage': {u'content': u''}})
        fake_lb.get_error_page().AndReturn({u'errorpage': {u'content': error_page}})
        self.m.StubOutWithMock(fake_lb, 'set_error_page')
        fake_lb.set_error_page(error_page)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()

    def test_post_creation_ssl_termination(self):
        ssl_termination_template = {
            'securePort': 443, 'privatekey': 'afwefawe',
            'certificate': 'fawefwea',
            'intermediateCertificate': 'intermediate_certificate',
            'secureTrafficOnly': False}
        ssl_termination_api = copy.deepcopy(ssl_termination_template)
        template = self._set_template(self.lb_template,
                                      sslTermination=ssl_termination_template)
        rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name, self.expected_body)
        self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
        fake_lb.get_ssl_termination().AndReturn({})
        fake_lb.get_ssl_termination().AndReturn({
            'securePort': 443, 'certificate': 'fawefwea',
            'intermediateCertificate': 'intermediate_certificate',
            'secureTrafficOnly': False, 'enabled': True})
        self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
        fake_lb.add_ssl_termination(**ssl_termination_api)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()

    def test_post_creation_content_caching(self):
        template = self._set_template(self.lb_template, contentCaching='ENABLED')
        rsrc = self._mock_loadbalancer(template, self.lb_name, self.expected_body)[0]
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()

    def test_check(self):
        stack = mock.Mock()
        stack.db_resource_get.return_value = None
        stack.has_cache_data.return_value = False
        resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)
        loadbalancer = lb.CloudLoadBalancer('test', resdef, stack)
        loadbalancer._add_event = mock.Mock()
        mock_cloud_lb = mock.Mock()
        mock_get = mock.Mock(return_value=mock_cloud_lb)
        loadbalancer.clb.get = mock_get
        mock_cloud_lb.status = 'ACTIVE'
        scheduler.TaskRunner(loadbalancer.check)()
        self.assertEqual('CHECK', loadbalancer.action)
        self.assertEqual('COMPLETE', loadbalancer.status)
        mock_cloud_lb.status = 'FOOBAR'
        exc = self.assertRaises(exception.ResourceFailure,
                                scheduler.TaskRunner(loadbalancer.check))
        self.assertEqual('CHECK', loadbalancer.action)
        self.assertEqual('FAILED', loadbalancer.status)
        self.assertIn('FOOBAR', str(exc))
        mock_get.side_effect = lb.NotFound('boom')
        exc = self.assertRaises(exception.ResourceFailure,
                                scheduler.TaskRunner(loadbalancer.check))
        self.assertEqual('CHECK', loadbalancer.action)
        self.assertEqual('FAILED', loadbalancer.status)
        self.assertIn('boom', str(exc))

    def test_update_add_node_by_address(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        fake_lb.nodes = self.expected_body['nodes']
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        expected_ip = '172.168.1.4'
        update_template['Properties']['nodes'] = [
            {'addresses': ['166.78.103.141'], 'port': 80,
             'condition': 'ENABLED', 'type': 'PRIMARY', 'weight': 1},
            {'addresses': [expected_ip], 'port': 80,
             'condition': 'ENABLED', 'type': 'PRIMARY', 'weight': 1}]
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.nodes = [
            FakeNode(address=u'172.168.1.4', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'166.78.103.141', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1)]
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'add_nodes')
        fake_lb.add_nodes([fake_lb.Node(address=expected_ip, port=80,
                                        condition='ENABLED', type='PRIMARY',
                                        weight=1)])
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_resolve_attr_noid(self):
        stack = mock.Mock()
        stack.db_resource_get.return_value = None
        stack.has_cache_data.return_value = False
        resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)
        lbres = lb.CloudLoadBalancer('test', resdef, stack)
        self.assertIsNone(lbres._resolve_attribute('PublicIp'))

    def test_resolve_attr_virtualips(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        fake_lb.virtual_ips = [FakeVirtualIP(address='1.2.3.4', type='PUBLIC',
                                             ipVersion='IPv6', id='test-id')]
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        expected = [{'ip_version': 'IPv6', 'type': 'PUBLIC',
                     'id': 'test-id', 'address': '1.2.3.4'}]
        self.m.ReplayAll()
        self.assertEqual(expected, rsrc._resolve_attribute('virtualIps'))
        self.m.VerifyAll()

    def test_update_nodes_immutable(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        current_nodes = [
            FakeNode(address=u'1.1.1.1', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'2.2.2.2', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'3.3.3.3', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1)]
        fake_lb.nodes = current_nodes
        fake_lb.tracker = 'fake_lb'
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        expected_ip = '4.4.4.4'
        update_template['Properties']['nodes'] = [
            {'addresses': ['1.1.1.1'], 'port': 80, 'condition': 'ENABLED',
             'type': 'PRIMARY', 'weight': 1},
            {'addresses': ['2.2.2.2'], 'port': 80, 'condition': 'DISABLED',
             'type': 'PRIMARY', 'weight': 1},
            {'addresses': [expected_ip], 'port': 80, 'condition': 'ENABLED',
             'type': 'PRIMARY', 'weight': 1}]
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.status = 'PENDING_UPDATE'
        fake_lb1.tracker = 'fake_lb1'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2 = copy.deepcopy(fake_lb1)
        fake_lb2.status = 'ACTIVE'
        fake_lb2.nodes = [
            FakeNode(address=u'1.1.1.1', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'2.2.2.2', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'3.3.3.3', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'4.4.4.4', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1)]
        fake_lb2.tracker = 'fake_lb2'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb3 = copy.deepcopy(fake_lb2)
        fake_lb3.status = 'ACTIVE'
        fake_lb3.nodes = [
            FakeNode(address=u'1.1.1.1', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'2.2.2.2', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'4.4.4.4', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1)]
        fake_lb3.tracker = 'fake_lb3'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb4 = copy.deepcopy(fake_lb3)
        fake_lb4.status = 'ACTIVE'
        fake_lb4.nodes = [
            FakeNode(address=u'1.1.1.1', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'2.2.2.2', port=80, condition=u'DISABLED',
                     type='PRIMARY', weight=1),
            FakeNode(address=u'4.4.4.4', port=80, condition=u'ENABLED',
                     type='PRIMARY', weight=1)]
        fake_lb4.tracker = 'fake_lb4'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb4)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_pending_update_status(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['name'] = 'updated_name'
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.name = 'updated_name'
        fake_lb1.status = 'PENDING_UPDATE'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.name = 'updated_name'
        fake_lb2.status = 'ACTIVE'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_immutable_exception(self):
        rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['name'] = 'updated_name'
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.name = 'updated_name'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'update')
        msg = ("Load Balancer '%s' has a status of 'PENDING_UPDATE' and is "
               "considered immutable." % rsrc.resource_id)
        fake_lb.update(name='updated_name').AndRaise(Exception(msg))
        fake_lb.update(name='updated_name').AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
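    # Illustration (not part of the original suite): the immutable-error tests
    # record the same mutating call twice -- first raising the PENDING_UPDATE
    # message, then succeeding -- so they only pass if the resource swallows
    # that specific error and retries the call on a later poll. A hypothetical
    # sketch of the behavior being exercised:
    #
    #   try:
    #       fake_lb.update(name='updated_name')
    #   except Exception as exc:
    #       if 'considered immutable' not in six.text_type(exc):
    #           raise
    #       # otherwise: leave the update pending and try again next tick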
self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(name='updated_name')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_multiple(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['name'] = 'updated_name'\n update_template['Properties']['algorithm'] = 'RANDOM'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.name = 'updated_name'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.algorithm = 'RANDOM'\n fake_lb2.name = 'updated_name'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(name='updated_name', algorithm='RANDOM')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_algorithm(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['algorithm'] = 'RANDOM'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.algorithm = 'ROUND_ROBIN'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb1, 'update')\n fake_lb1.update(algorithm='RANDOM')\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.algorithm = 'RANDOM'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_protocol(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['protocol'] = 'IMAPS'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.protocol = 'IMAPS'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(protocol='IMAPS')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_redirect(self):\n template = self._set_template(self.lb_template, protocol='HTTPS')\n expected = self._set_expected(self.expected_body, protocol='HTTPS')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['httpsRedirect'] = True\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.httpsRedirect = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n 
fake_lb.update(httpsRedirect=True)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_lb_redirect_https(self):\n template = self._set_template(self.lb_template, protocol='HTTPS',\n httpsRedirect=True)\n expected = self._set_expected(self.expected_body, protocol='HTTPS',\n httpsRedirect=True)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_lb_redirect_HTTP_with_SSL_term(self):\n ssl_termination_template = {'privatekey': private_key,\n 'intermediateCertificate': 'fwaefawe', 'secureTrafficOnly': \n True, 'securePort': 443, 'certificate': cert}\n ssl_termination_api = copy.deepcopy(ssl_termination_template)\n ssl_termination_api['enabled'] = True\n del ssl_termination_api['privatekey']\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template, protocol='HTTP', httpsRedirect=True)\n expected = self._set_expected(self.expected_body, protocol='HTTP',\n httpsRedirect=False)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'create')\n rsrc.clb.create(self.lb_name, **expected).AndReturn(fake_lb)\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.httpsRedirect = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn(ssl_termination_api)\n self.m.StubOutWithMock(fake_lb1, 'get_ssl_termination')\n fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)\n fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)\n fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)\n\n def test_update_lb_half_closed(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['halfClosed'] = True\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.halfClosed = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(halfClosed=True)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_port(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['port'] = 1234\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.port = 1234\n 
    def test_update_lb_port(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['port'] = 1234
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.port = 1234
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'update')
        fake_lb.update(port=1234)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_lb_timeout(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['timeout'] = 120
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.timeout = 120
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb, 'update')
        fake_lb.update(timeout=120)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_health_monitor_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['healthMonitor'] = {
            'type': 'HTTP', 'delay': 10, 'timeout': 10,
            'attemptsBeforeDeactivation': 4, 'path': '/',
            'statusRegex': '^[234][0-9][0-9]$',
            'bodyRegex': '.* testing .*', 'hostHeader': 'example.com'}
        self.m.StubOutWithMock(fake_lb, 'get_health_monitor')
        fake_lb.get_health_monitor().AndReturn({})
        fake_lb.get_health_monitor().AndReturn({
            'type': 'HTTP', 'delay': 10, 'timeout': 10,
            'attemptsBeforeDeactivation': 4, 'path': '/',
            'statusRegex': '^[234][0-9][0-9]$',
            'bodyRegex': '.* testing .*', 'hostHeader': 'example.com'})
        self.m.StubOutWithMock(fake_lb, 'add_health_monitor')
        fake_lb.add_health_monitor(
            attemptsBeforeDeactivation=4, bodyRegex='.* testing .*',
            delay=10, hostHeader='example.com', path='/',
            statusRegex='^[234][0-9][0-9]$', timeout=10, type='HTTP')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_health_monitor_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        hm = {'type': 'HTTP', 'delay': 10, 'timeout': 10,
              'attemptsBeforeDeactivation': 4, 'path': '/',
              'statusRegex': '^[234][0-9][0-9]$',
              'bodyRegex': '.* testing .*', 'hostHeader': 'example.com'}
        template['Resources'][lb_name]['Properties']['healthMonitor'] = hm
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['healthMonitor'] = hm
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['healthMonitor']
        self.m.StubOutWithMock(fake_lb, 'get_health_monitor')
        fake_lb.get_health_monitor().AndReturn({
            'type': 'HTTP', 'delay': 10, 'timeout': 10,
            'attemptsBeforeDeactivation': 4, 'path': '/',
            'statusRegex': '^[234][0-9][0-9]$',
            'bodyRegex': '.* testing .*', 'hostHeader': 'example.com'})
        fake_lb.get_health_monitor().AndReturn({})
        self.m.StubOutWithMock(fake_lb, 'delete_health_monitor')
        fake_lb.delete_health_monitor()
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
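    # The add/delete cases stub each getter twice: the first AndReturn gives
    # the pre-update state, the second gives the post-update state that Heat
    # polls to decide the update has finished. Sketch of that convention
    # (hypothetical helper, mirroring the recordings above):
    def _stub_before_after(self, fake_lb, getter, before, after):
        self.m.StubOutWithMock(fake_lb, getter)
        getattr(fake_lb, getter)().AndReturn(before)
        getattr(fake_lb, getter)().AndReturn(after)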
    def test_update_session_persistence_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['sessionPersistence'] = 'SOURCE_IP'
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.assertEqual('SOURCE_IP', fake_lb.session_persistence)
        self.m.VerifyAll()

    def test_update_session_persistence_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties'][
            'sessionPersistence'] = 'SOURCE_IP'
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['sessionPersistence'] = {
            'persistenceType': 'SOURCE_IP'}
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['sessionPersistence']
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.assertEqual('', fake_lb.session_persistence)
        self.m.VerifyAll()

    def test_update_ssl_termination_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['sslTermination'] = {
            'securePort': 443, 'privatekey': private_key,
            'certificate': cert, 'secureTrafficOnly': False,
            'intermediateCertificate': ''}
        self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
        fake_lb.get_ssl_termination().AndReturn({})
        fake_lb.get_ssl_termination().AndReturn({
            'securePort': 443, 'certificate': cert,
            'secureTrafficOnly': False, 'enabled': True})
        self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
        fake_lb.add_ssl_termination(
            securePort=443, privatekey=private_key, certificate=cert,
            secureTrafficOnly=False, intermediateCertificate='')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
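    # Several scalar properties are wrapped into one-key dicts on the wire:
    # sessionPersistence 'SOURCE_IP' becomes {'persistenceType': 'SOURCE_IP'}
    # and connectionLogging True becomes {'enabled': True}, as the
    # expected_body fixtures above show. A sketch of that translation, with
    # hypothetical helper names:
    @staticmethod
    def _session_persistence_body(value):
        return {'persistenceType': value}

    @staticmethod
    def _connection_logging_body(enabled):
        return {'enabled': bool(enabled)}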
    def test_update_ssl_termination_delete(self):
        template = copy.deepcopy(self.lb_template)
        ssl_termination_template = {
            'securePort': 443, 'privatekey': private_key,
            'certificate': cert, 'intermediateCertificate': '',
            'secureTrafficOnly': False}
        ssl_termination_api = copy.deepcopy(ssl_termination_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties'][
            'sslTermination'] = ssl_termination_template
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, self.expected_body)
        self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
        fake_lb.get_ssl_termination().AndReturn({})
        self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
        fake_lb.add_ssl_termination(**ssl_termination_api)
        fake_lb.get_ssl_termination().AndReturn({
            'securePort': 443, 'certificate': cert,
            'secureTrafficOnly': False, 'enabled': True})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['sslTermination']
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)
        self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
        fake_lb.get_ssl_termination().AndReturn({
            'securePort': 443, 'certificate': cert,
            'secureTrafficOnly': False})
        self.m.StubOutWithMock(fake_lb, 'delete_ssl_termination')
        fake_lb.delete_ssl_termination()
        fake_lb.get_ssl_termination().AndReturn({})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_metadata_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['metadata'] = {'a': 1, 'b': 2}
        self.m.StubOutWithMock(fake_lb, 'get_metadata')
        fake_lb.get_metadata().AndReturn({})
        fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})
        self.m.StubOutWithMock(fake_lb, 'set_metadata')
        fake_lb.set_metadata({'a': 1, 'b': 2})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_metadata_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties']['metadata'] = {
            'a': 1, 'b': 2}
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['metadata'] = mox.SameElementsAs(
            [{'key': 'a', 'value': 1}, {'key': 'b', 'value': 2}])
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['metadata']
        self.m.StubOutWithMock(fake_lb, 'get_metadata')
        fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})
        fake_lb.get_metadata().AndReturn({})
        self.m.StubOutWithMock(fake_lb, 'delete_metadata')
        fake_lb.delete_metadata()
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_errorpage_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        error_page = (
            '<html><head><title>Service Unavailable</title></head>'
            '<body><h2>Service Unavailable</h2>The service is unavailable'
            '</body></html>')
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['errorPage'] = error_page
        self.m.StubOutWithMock(fake_lb, 'get_error_page')
        fake_lb.get_error_page().AndReturn({'errorpage': {'content': 'foo'}})
        fake_lb.get_error_page().AndReturn(
            {'errorpage': {'content': error_page}})
        self.m.StubOutWithMock(fake_lb, 'set_error_page')
        fake_lb.set_error_page(error_page)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
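    # metadata is a plain dict in the template but a list of key/value pairs
    # on the wire, as expected_body in test_update_metadata_delete shows.
    # Sketch of the outbound translation (hypothetical helper):
    @staticmethod
    def _metadata_to_api(meta):
        return [{'key': k, 'value': v} for k, v in six.iteritems(meta)]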
    def test_update_errorpage_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        error_page = (
            '<html><head><title>Service Unavailable</title></head>'
            '<body><h2>Service Unavailable</h2>The service is unavailable'
            '</body></html>')
        template['Resources'][lb_name]['Properties']['errorPage'] = error_page
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, self.expected_body)
        self.m.StubOutWithMock(fake_lb, 'get_error_page')
        fake_lb.get_error_page().AndReturn({})
        self.m.StubOutWithMock(fake_lb, 'set_error_page')
        fake_lb.set_error_page(error_page)
        fake_lb.get_error_page().AndReturn(
            {'errorpage': {'content': error_page}})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['errorPage']
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)
        self.m.StubOutWithMock(fake_lb, 'clear_error_page')
        fake_lb.clear_error_page()
        self.m.StubOutWithMock(fake_lb, 'get_error_page')
        fake_lb.get_error_page().AndReturn(
            {'errorpage': {'content': error_page}})
        fake_lb.get_error_page().AndReturn({'errorpage': {'content': ''}})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_connection_logging_enable(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['connectionLogging'] = True
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.assertTrue(fake_lb.connection_logging)
        self.m.VerifyAll()

    def test_update_connection_logging_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties'][
            'connectionLogging'] = True
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['connectionLogging'] = {'enabled': True}
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.connection_logging = True
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.connection_logging = False
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['connectionLogging']
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.assertFalse(fake_lb.connection_logging)
        self.m.VerifyAll()

    def test_update_connection_logging_disable(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties'][
            'connectionLogging'] = True
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['connectionLogging'] = {'enabled': True}
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['connectionLogging'] = False
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.assertFalse(fake_lb.connection_logging)
        self.m.VerifyAll()
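    # The errorPage update is considered finished once get_error_page()
    # reports the desired content ('' after a delete), which is why the
    # recordings above end with that value. Sketch of the completion check
    # (hypothetical helper, never called during replay):
    @staticmethod
    def _error_page_applied(fake_lb, desired):
        return fake_lb.get_error_page()['errorpage']['content'] == desired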
    def test_update_connection_throttle_add(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['connectionThrottle'] = {
            'maxConnections': 1000}
        self.m.StubOutWithMock(fake_lb, 'add_connection_throttle')
        self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')
        fake_lb.get_connection_throttle().AndReturn({
            'maxConnectionRate': None, 'minConnections': None,
            'rateInterval': None, 'maxConnections': 100})
        fake_lb.add_connection_throttle(
            maxConnections=1000, maxConnectionRate=None,
            minConnections=None, rateInterval=None)
        fake_lb.get_connection_throttle().AndReturn({
            'maxConnectionRate': None, 'minConnections': None,
            'rateInterval': None, 'maxConnections': 1000})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_connection_throttle_delete(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties'][
            'connectionThrottle'] = {'maxConnections': 1000}
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['connectionThrottle'] = {
            'maxConnections': 1000, 'maxConnectionRate': None,
            'rateInterval': None, 'minConnections': None}
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['connectionThrottle']
        self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')
        fake_lb.get_connection_throttle().AndReturn({
            'maxConnections': 1000, 'maxConnectionRate': None,
            'rateInterval': None, 'minConnections': None})
        self.m.StubOutWithMock(fake_lb, 'delete_connection_throttle')
        fake_lb.delete_connection_throttle()
        fake_lb.get_connection_throttle().AndReturn({})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_content_caching_enable(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['contentCaching'] = 'ENABLED'
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.content_caching = False
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.content_caching = True
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
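    # contentCaching is an 'ENABLED'/'DISABLED' string in the template while
    # the client object exposes a boolean content_caching flag, which is why
    # the stubs above flip fake_lb.content_caching rather than recording an
    # update() call. Sketch of the mapping (hypothetical helper):
    @staticmethod
    def _content_caching_enabled(prop_value):
        return prop_value == 'ENABLED'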
    def test_update_content_caching_deleted(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties'][
            'contentCaching'] = 'ENABLED'
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        del update_template['Properties']['contentCaching']
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.content_caching = True
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.content_caching = False
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_content_caching_disable(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        template['Resources'][lb_name]['Properties'][
            'contentCaching'] = 'ENABLED'
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['contentCaching'] = 'DISABLED'
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb1.content_caching = True
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.content_caching = False
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_delete(self):
        template = self._set_template(
            self.lb_template, contentCaching='ENABLED')
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.VerifyAll()
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_delete_immutable(self):
        template = self._set_template(
            self.lb_template, contentCaching='ENABLED')
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))
        self.m.StubOutWithMock(fake_lb, 'delete')
        fake_lb.delete().AndRaise(Exception('immutable'))
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_delete_non_immutable_exc(self):
        template = self._set_template(
            self.lb_template, contentCaching='ENABLED')
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
        self.m.StubOutWithMock(fake_lb, 'delete')
        fake_lb.delete().AndRaise(FakeException())
        self.m.ReplayAll()
        exc = self.assertRaises(
            exception.ResourceFailure, scheduler.TaskRunner(rsrc.delete))
        self.assertIn('FakeException', six.text_type(exc))
        self.m.VerifyAll()
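    # Deletion above is complete once clb.get() raises NotFound; an
    # 'immutable' error from delete() is tolerated while any other exception
    # surfaces as ResourceFailure. A minimal polling sketch under those
    # assumptions (hypothetical helper):
    @staticmethod
    def _lb_gone(clb, lb_id):
        try:
            clb.get(lb_id)
        except lb.NotFound:
            return True
        return False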
    def test_delete_states(self):
        template = self._set_template(
            self.lb_template, contentCaching='ENABLED')
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, self.expected_body)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.m.UnsetStubs()
        fake_lb1 = copy.deepcopy(fake_lb)
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb3 = copy.deepcopy(fake_lb)
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1.status = 'ACTIVE'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        fake_lb2.status = 'PENDING_DELETE'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        fake_lb3.status = 'DELETED'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_redir(self):
        mock_stack = mock.Mock()
        mock_stack.db_resource_get.return_value = None
        mock_stack.has_cache_data.return_value = False
        props = {'httpsRedirect': True, 'protocol': 'HTTPS', 'port': 443,
                 'nodes': [], 'virtualIps': [{'id': '1234'}]}
        mock_resdef = rsrc_defn.ResourceDefinition(
            'test_lb', LoadBalancerWithFakeClient, properties=props)
        mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)
        self.assertIsNone(mock_lb.validate())
        props['protocol'] = 'HTTP'
        props['sslTermination'] = {
            'secureTrafficOnly': True, 'securePort': 443,
            'privatekey': 'bobloblaw', 'certificate': 'mycert'}
        mock_resdef = rsrc_defn.ResourceDefinition(
            'test_lb_2', LoadBalancerWithFakeClient, properties=props)
        mock_lb = lb.CloudLoadBalancer('test_2', mock_resdef, mock_stack)
        self.assertIsNone(mock_lb.validate())

    def test_invalid_redir_proto(self):
        mock_stack = mock.Mock()
        mock_stack.db_resource_get.return_value = None
        mock_stack.has_cache_data.return_value = False
        props = {'httpsRedirect': True, 'protocol': 'TCP', 'port': 1234,
                 'nodes': [], 'virtualIps': [{'id': '1234'}]}
        mock_resdef = rsrc_defn.ResourceDefinition(
            'test_lb', LoadBalancerWithFakeClient, properties=props)
        mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)
        ex = self.assertRaises(
            exception.StackValidationFailed, mock_lb.validate)
        self.assertIn('HTTPS redirect is only available', six.text_type(ex))

    def test_invalid_redir_ssl(self):
        mock_stack = mock.Mock()
        mock_stack.db_resource_get.return_value = None
        mock_stack.has_cache_data.return_value = False
        props = {'httpsRedirect': True, 'protocol': 'HTTP', 'port': 1234,
                 'nodes': [], 'virtualIps': [{'id': '1234'}]}
        mock_resdef = rsrc_defn.ResourceDefinition(
            'test_lb', LoadBalancerWithFakeClient, properties=props)
        mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)
        ex = self.assertRaises(
            exception.StackValidationFailed, mock_lb.validate)
        self.assertIn('HTTPS redirect is only available', six.text_type(ex))
        props['sslTermination'] = {
            'secureTrafficOnly': False, 'securePort': 443,
            'privatekey': 'bobloblaw', 'certificate': 'mycert'}
        mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)
        ex = self.assertRaises(
            exception.StackValidationFailed, mock_lb.validate)
        self.assertIn('HTTPS redirect is only available', six.text_type(ex))
        props['sslTermination'] = {
            'secureTrafficOnly': True, 'securePort': 1234,
            'privatekey': 'bobloblaw', 'certificate': 'mycert'}
        mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)
        ex = self.assertRaises(
            exception.StackValidationFailed, mock_lb.validate)
        self.assertIn('HTTPS redirect is only available', six.text_type(ex))
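    # The validation cases above pin down the rule: httpsRedirect is valid
    # for protocol HTTPS, or for HTTP when SSL termination is configured
    # with secureTrafficOnly=True on securePort 443. Sketch of that
    # predicate (illustrative names only, not the resource's own code):
    @staticmethod
    def _redirect_allowed(props):
        if props.get('protocol') == 'HTTPS':
            return True
        ssl_term = props.get('sslTermination') or {}
        return (props.get('protocol') == 'HTTP'
                and ssl_term.get('secureTrafficOnly') is True
                and ssl_term.get('securePort') == 443)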
    def test_update_nodes_condition_draining(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        fake_lb.nodes = self.expected_body['nodes']
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        expected_ip = '172.168.1.4'
        update_template['Properties']['nodes'] = [
            {'addresses': ['166.78.103.141'], 'port': 80,
             'condition': 'DRAINING', 'type': 'PRIMARY', 'weight': 1},
            {'addresses': [expected_ip], 'port': 80,
             'condition': 'DRAINING', 'type': 'PRIMARY', 'weight': 1}]
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb1, 'add_nodes')
        fake_lb1.add_nodes([
            fake_lb1.Node(address=expected_ip, port=80,
                          condition='DRAINING', type='PRIMARY', weight=1)])
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.nodes = [
            FakeNode(address=u'166.78.103.141', port=80,
                     condition=u'DRAINING', type='PRIMARY', weight=1),
            FakeNode(address=u'172.168.1.4', port=80,
                     condition=u'DRAINING', type='PRIMARY', weight=1)]
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_update_nodes_add_same_address_different_port(self):
        rsrc, fake_lb = self._mock_loadbalancer(
            self.lb_template, self.lb_name, self.expected_body)
        fake_lb.nodes = self.expected_body['nodes']
        fake_lb.tracker = 'fake_lb'
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['nodes'] = [
            {'addresses': ['166.78.103.141'], 'port': 80,
             'condition': 'ENABLED', 'type': 'PRIMARY', 'weight': 1},
            {'addresses': ['166.78.103.141'], 'port': 81,
             'condition': 'ENABLED', 'type': 'PRIMARY', 'weight': 1}]
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb1, 'add_nodes')
        fake_lb1.add_nodes([
            fake_lb1.Node(address='166.78.103.141', port=81,
                          condition='ENABLED', type='PRIMARY', weight=1)])
        fake_lb1.tracker = 'fake_lb1'
        fake_lb2 = copy.deepcopy(fake_lb)
        fake_lb2.nodes = [
            FakeNode(address=u'166.78.103.141', port=80,
                     condition=u'ENABLED', type='PRIMARY', weight=1),
            FakeNode(address=u'166.78.103.141', port=81,
                     condition=u'ENABLED', type='PRIMARY', weight=1)]
        fake_lb2.tracker = 'fake_lb2'
        rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
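    # Node identity in these cases is the (address, port) pair: re-listing
    # the same address with a new port (above) is recorded as add_nodes(),
    # not an in-place update of the existing node. Sketch of that identity
    # rule over template-style node properties (hypothetical helper):
    @staticmethod
    def _node_key(node_props):
        return (node_props['addresses'][0], node_props['port'])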
    def test_update_nodes_defaults(self):
        template = copy.deepcopy(self.lb_template)
        lb_name = list(six.iterkeys(template['Resources']))[0]
        tmpl_node = template['Resources'][lb_name]['Properties']['nodes'][0]
        tmpl_node['type'] = 'PRIMARY'
        tmpl_node['condition'] = 'ENABLED'
        tmpl_node['weight'] = 1
        expected_body = copy.deepcopy(self.expected_body)
        expected_body['nodes'] = [
            FakeNode(address=u'166.78.103.141', port=80,
                     condition=u'ENABLED', type='PRIMARY', weight=1)]
        rsrc, fake_lb = self._mock_loadbalancer(
            template, self.lb_name, expected_body)
        fake_lb.nodes = self.expected_body['nodes']
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['nodes'] = [
            {'addresses': ['166.78.103.141'], 'port': 80}]
        self.m.UnsetStubs()
        self.m.StubOutWithMock(rsrc.clb, 'get')
        fake_lb1 = copy.deepcopy(fake_lb)
        rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb1)
        self.m.StubOutWithMock(fake_lb1, 'add_nodes')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
"step-3": "<mask token>\n\n\nclass FakeLoadBalancerClient(object):\n\n def __init__(self, *args, **kwargs):\n self.Node = FakeNode\n self.VirtualIP = FakeVirtualIP\n pass\n\n def get(self, *args, **kwargs):\n pass\n <mask token>\n\n\nclass FakeLoadBalancer(object):\n\n def __init__(self, name=None, info=None, *args, **kwargs):\n name = name or uuid.uuid4()\n info = info or {'fake': 'fake'}\n self.id = uuid.uuid4()\n self.manager = FakeLoadBalancerManager()\n self.Node = FakeNode\n self.VirtualIP = FakeVirtualIP\n self.nodes = []\n self.algorithm = 'ROUND_ROBIN'\n self.session_persistence = 'HTTP_COOKIE'\n self.connection_logging = False\n self.timeout = None\n self.httpsRedirect = False\n self.protocol = None\n self.port = None\n self.name = None\n self.halfClosed = None\n self.content_caching = False\n\n def get(self, *args, **kwargs):\n pass\n\n def add_nodes(self, *args, **kwargs):\n pass\n\n def add_ssl_termination(self, *args, **kwargs):\n pass\n\n def set_error_page(self, *args, **kwargs):\n pass\n\n def clear_error_page(self, *args, **kwargs):\n pass\n\n def add_access_list(self, *args, **kwargs):\n pass\n\n def update(self, *args, **kwargs):\n pass\n\n def add_health_monitor(self, *args, **kwargs):\n pass\n\n def delete_health_monitor(self, *args, **kwargs):\n pass\n\n def delete_ssl_termination(self, *args, **kwargs):\n pass\n\n def set_metadata(self, *args, **kwargs):\n pass\n\n def delete_metadata(self, *args, **kwargs):\n pass\n\n def add_connection_throttle(self, *args, **kwargs):\n pass\n\n def delete_connection_throttle(self, *args, **kwargs):\n pass\n\n def delete(self, *args, **kwargs):\n pass\n\n def get_health_monitor(self, *args, **kwargs):\n return {}\n\n def get_metadata(self, *args, **kwargs):\n return {}\n\n def get_error_page(self, *args, **kwargs):\n pass\n\n def get_connection_throttle(self, *args, **kwargs):\n pass\n\n def get_ssl_termination(self, *args, **kwargs):\n pass\n\n def get_access_list(self, *args, **kwargs):\n pass\n\n\nclass LoadBalancerWithFakeClient(lb.CloudLoadBalancer):\n\n def cloud_lb(self):\n return FakeLoadBalancerClient()\n\n\n<mask token>\n\n\nclass LoadBalancerTest(common.HeatTestCase):\n\n def setUp(self):\n super(LoadBalancerTest, self).setUp()\n self.lb_template = {'AWSTemplateFormatVersion': '2010-09-09',\n 'Description': 'fawef', 'Resources': {self.\n _get_lb_resource_name(): {'Type':\n 'Rackspace::Cloud::LoadBalancer', 'Properties': {'name':\n 'test-clb', 'nodes': [{'addresses': ['166.78.103.141'], 'port':\n 80, 'condition': 'ENABLED'}], 'protocol': 'HTTP', 'port': 80,\n 'virtualIps': [{'type': 'PUBLIC', 'ipVersion': 'IPV6'}],\n 'algorithm': 'LEAST_CONNECTIONS', 'connectionThrottle': {\n 'maxConnectionRate': 1000}, 'timeout': 110, 'contentCaching':\n 'DISABLED'}}}}\n self.lb_name = 'test-clb'\n self.expected_body = {'nodes': [FakeNode(address=u'166.78.103.141',\n port=80, condition=u'ENABLED', type=u'PRIMARY', weight=1)],\n 'protocol': u'HTTP', 'port': 80, 'virtual_ips': [FakeVirtualIP(\n type=u'PUBLIC', ipVersion=u'IPV6')], 'algorithm':\n u'LEAST_CONNECTIONS', 'connectionThrottle': {\n 'maxConnectionRate': 1000, 'maxConnections': None,\n 'rateInterval': None, 'minConnections': None},\n 'connectionLogging': None, 'halfClosed': None, 'healthMonitor':\n None, 'metadata': None, 'sessionPersistence': None, 'timeout': \n 110, 'httpsRedirect': False}\n lb.resource_mapping = override_resource\n resource._register_class('Rackspace::Cloud::LoadBalancer',\n LoadBalancerWithFakeClient)\n\n def _get_lb_resource_name(self):\n return 'lb-' + 
str(uuid.uuid4())\n\n def __getattribute__(self, name):\n if name == 'expected_body' or name == 'lb_template':\n return copy.deepcopy(super(LoadBalancerTest, self).\n __getattribute__(name))\n return super(LoadBalancerTest, self).__getattribute__(name)\n\n def _mock_create(self, tmpl, stack, resource_name, lb_name, lb_body):\n resource_defns = tmpl.resource_definitions(stack)\n rsrc = LoadBalancerWithFakeClient(resource_name, resource_defns[\n resource_name], stack)\n fake_lb = FakeLoadBalancer(name=lb_name)\n fake_lb.status = 'ACTIVE'\n fake_lb.resource_id = 1234\n self.m.StubOutWithMock(rsrc.clb, 'create')\n rsrc.clb.create(lb_name, **lb_body).AndReturn(fake_lb)\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)\n return rsrc, fake_lb\n\n def _get_first_resource_name(self, templ):\n return next(k for k in templ['Resources'])\n\n def _mock_loadbalancer(self, lb_template, expected_name, expected_body):\n t = template_format.parse(json.dumps(lb_template))\n self.stack = utils.parse_stack(t, stack_name=utils.random_name())\n rsrc, fake_lb = self._mock_create(self.stack.t, self.stack, self.\n _get_first_resource_name(lb_template), expected_name, expected_body\n )\n return rsrc, fake_lb\n\n def _set_template(self, templ, **kwargs):\n for k, v in six.iteritems(kwargs):\n templ['Resources'][self._get_first_resource_name(templ)][\n 'Properties'][k] = v\n return templ\n\n def _set_expected(self, expected, **kwargs):\n for k, v in six.iteritems(kwargs):\n expected[k] = v\n return expected\n\n def test_process_node(self):\n nodes = [{'addresses': ['1234'], 'port': 80, 'enabled': True}, {\n 'addresses': ['4567', '8901', '8903'], 'port': 80, 'enabled': \n True}, {'addresses': [], 'port': 80, 'enabled': True}]\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n expected_nodes = [{'address': '1234', 'port': 80, 'enabled': True},\n {'address': '4567', 'port': 80, 'enabled': True}, {'address':\n '8901', 'port': 80, 'enabled': True}, {'address': '8903',\n 'port': 80, 'enabled': True}]\n self.assertEqual(expected_nodes, list(rsrc._process_nodes(nodes)))\n\n def test_nodeless(self):\n \"\"\"It's possible to create a LoadBalancer resource with no nodes.\"\"\"\n template = self._set_template(self.lb_template, nodes=[])\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['nodes'] = []\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_alter_properties(self):\n template = self._set_template(self.lb_template, sessionPersistence=\n 'HTTP_COOKIE', connectionLogging=True, metadata={'yolo':\n 'heeyyy_gurl'})\n expected = self._set_expected(self.expected_body,\n sessionPersistence={'persistenceType': 'HTTP_COOKIE'},\n connectionLogging={'enabled': True}, metadata=[{'key': 'yolo',\n 'value': 'heeyyy_gurl'}])\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_validate_vip(self):\n snippet = {'nodes': [], 'protocol': 'HTTP', 'port': 80,\n 'halfClosed': None, 'algorithm': u'LEAST_CONNECTIONS',\n 'virtualIps': [{'id': '1234'}]}\n stack = mock.Mock()\n stack.db_resource_get.return_value = None\n stack.has_cache_data.return_value = False\n resdef = rsrc_defn.ResourceDefinition('testvip', lb.\n CloudLoadBalancer, properties=snippet)\n rsrc = 
lb.CloudLoadBalancer('testvip', resdef, stack)\n self.assertIsNone(rsrc.validate())\n snippet['virtualIps'][0]['type'] = 'PUBLIC'\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn('Cannot specify type or version', str(exc))\n snippet['virtualIps'] = [{}]\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn('Must specify VIP type and version', str(exc))\n\n def test_validate_half_closed(self):\n template = self._set_template(self.lb_template, halfClosed=True)\n expected = self._set_expected(self.expected_body, halfClosed=True)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn(\n 'The halfClosed property is only available for the TCP or TCP_CLIENT_FIRST protocols'\n , str(exc))\n template = self._set_template(template, protocol='TCP')\n expected = self._set_expected(expected, protocol='TCP')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n template = self._set_template(template, protocol='TCP_CLIENT_FIRST')\n expected = self._set_expected(expected, protocol='TCP_CLIENT_FIRST')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def test_validate_health_monitor(self):\n health_monitor = {'type': 'CONNECT', 'attemptsBeforeDeactivation': \n 1, 'delay': 1, 'timeout': 1}\n template = self._set_template(self.lb_template, healthMonitor=\n health_monitor)\n expected = self._set_expected(self.expected_body, healthMonitor=\n health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n health_monitor['bodyRegex'] = 'dfawefawe'\n template = self._set_template(template, healthMonitor=health_monitor)\n expected = self._set_expected(expected, healthMonitor=health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn('Unknown Property bodyRegex', str(exc))\n health_monitor['type'] = 'HTTP'\n health_monitor['bodyRegex'] = 'bodyRegex'\n health_monitor['statusRegex'] = 'statusRegex'\n health_monitor['hostHeader'] = 'hostHeader'\n health_monitor['path'] = 'path'\n template = self._set_template(template, healthMonitor=health_monitor)\n expected = self._set_expected(expected, healthMonitor=health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def test_validate_ssl_termination(self):\n ssl_termination = {'privatekey': 'ewfawe',\n 'intermediateCertificate': 'fwaefawe', 'secureTrafficOnly': True}\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination)\n expected = self._set_expected(self.expected_body, sslTermination=\n ssl_termination)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn('Property certificate not assigned', six.text_type(exc))\n ssl_termination['certificate'] = 'dfaewfwef'\n template = self._set_template(template, sslTermination=ssl_termination)\n expected = self._set_expected(expected, sslTermination=ssl_termination)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def 
test_ssl_termination_unstripped_certificates(self):\n ssl_termination_template = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': \"\"\" \nfawefwea\n \"\"\",\n 'intermediateCertificate': '\\n\\nintermediate_certificate\\n',\n 'secureTrafficOnly': False}\n ssl_termination_api = copy.deepcopy(ssl_termination_template)\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': 'fawefwea', 'intermediateCertificate':\n 'intermediate_certificate', 'secureTrafficOnly': False,\n 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n fake_lb.add_ssl_termination(**ssl_termination_api)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_ssl_termination_intermediateCertificate_None(self):\n ssl_termination_template = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': \"\"\" \nfawefwea\n \"\"\",\n 'intermediateCertificate': None, 'secureTrafficOnly': False}\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': 'fawefwea', 'secureTrafficOnly': False,\n 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n add_ssl_termination_args = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': \"\"\" \nfawefwea\n \"\"\",\n 'intermediateCertificate': '', 'secureTrafficOnly': False}\n fake_lb.add_ssl_termination(**add_ssl_termination_args)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_post_creation_access_list(self):\n access_list = [{'address': '192.168.1.1/0', 'type': 'ALLOW'}, {\n 'address': '172.165.3.43', 'type': 'DENY'}]\n api_access_list = [{'address': '192.168.1.1/0', 'id': 1234, 'type':\n 'ALLOW'}, {'address': '172.165.3.43', 'id': 3422, 'type': 'DENY'}]\n template = self._set_template(self.lb_template, accessList=access_list)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_access_list')\n fake_lb.get_access_list().AndReturn([])\n fake_lb.get_access_list().AndReturn(api_access_list)\n self.m.StubOutWithMock(fake_lb, 'add_access_list')\n fake_lb.add_access_list(access_list)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_ref_id(self):\n \"\"\"The Reference ID of the resource is the resource ID.\"\"\"\n template = self._set_template(self.lb_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n self.assertEqual(rsrc.resource_id, rsrc.FnGetRefId())\n\n def test_post_creation_error_page(self):\n error_page = 'REALLY BIG ERROR'\n template = self._set_template(self.lb_template, errorPage=error_page)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_error_page')\n fake_lb.get_error_page().AndReturn({u'errorpage': 
{u'content': u''}})\n fake_lb.get_error_page().AndReturn({u'errorpage': {u'content':\n error_page}})\n self.m.StubOutWithMock(fake_lb, 'set_error_page')\n fake_lb.set_error_page(error_page)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_post_creation_ssl_termination(self):\n ssl_termination_template = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': 'fawefwea',\n 'intermediateCertificate': 'intermediate_certificate',\n 'secureTrafficOnly': False}\n ssl_termination_api = copy.deepcopy(ssl_termination_template)\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': 'fawefwea', 'intermediateCertificate':\n 'intermediate_certificate', 'secureTrafficOnly': False,\n 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n fake_lb.add_ssl_termination(**ssl_termination_api)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_post_creation_content_caching(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc = self._mock_loadbalancer(template, self.lb_name, self.\n expected_body)[0]\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_check(self):\n stack = mock.Mock()\n stack.db_resource_get.return_value = None\n stack.has_cache_data.return_value = False\n resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)\n loadbalancer = lb.CloudLoadBalancer('test', resdef, stack)\n loadbalancer._add_event = mock.Mock()\n mock_cloud_lb = mock.Mock()\n mock_get = mock.Mock(return_value=mock_cloud_lb)\n loadbalancer.clb.get = mock_get\n mock_cloud_lb.status = 'ACTIVE'\n scheduler.TaskRunner(loadbalancer.check)()\n self.assertEqual('CHECK', loadbalancer.action)\n self.assertEqual('COMPLETE', loadbalancer.status)\n mock_cloud_lb.status = 'FOOBAR'\n exc = self.assertRaises(exception.ResourceFailure, scheduler.\n TaskRunner(loadbalancer.check))\n self.assertEqual('CHECK', loadbalancer.action)\n self.assertEqual('FAILED', loadbalancer.status)\n self.assertIn('FOOBAR', str(exc))\n mock_get.side_effect = lb.NotFound('boom')\n exc = self.assertRaises(exception.ResourceFailure, scheduler.\n TaskRunner(loadbalancer.check))\n self.assertEqual('CHECK', loadbalancer.action)\n self.assertEqual('FAILED', loadbalancer.status)\n self.assertIn('boom', str(exc))\n\n def test_update_add_node_by_address(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n fake_lb.nodes = self.expected_body['nodes']\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n expected_ip = '172.168.1.4'\n update_template['Properties']['nodes'] = [{'addresses': [\n '166.78.103.141'], 'port': 80, 'condition': 'ENABLED', 'type':\n 'PRIMARY', 'weight': 1}, {'addresses': [expected_ip], 'port': \n 80, 'condition': 'ENABLED', 'type': 'PRIMARY', 'weight': 1}]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.nodes = [FakeNode(address=u'172.168.1.4', port=80,\n condition=u'ENABLED', type='PRIMARY', weight=1), FakeNode(\n address=u'166.78.103.141', port=80, 
condition=u'ENABLED', type=\n 'PRIMARY', weight=1)]\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'add_nodes')\n fake_lb.add_nodes([fake_lb.Node(address=expected_ip, port=80,\n condition='ENABLED', type='PRIMARY', weight=1)])\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_resolve_attr_noid(self):\n stack = mock.Mock()\n stack.db_resource_get.return_value = None\n stack.has_cache_data.return_value = False\n resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)\n lbres = lb.CloudLoadBalancer('test', resdef, stack)\n self.assertIsNone(lbres._resolve_attribute('PublicIp'))\n\n def test_resolve_attr_virtualips(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n fake_lb.virtual_ips = [FakeVirtualIP(address='1.2.3.4', type=\n 'PUBLIC', ipVersion='IPv6', id='test-id')]\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n expected = [{'ip_version': 'IPv6', 'type': 'PUBLIC', 'id':\n 'test-id', 'address': '1.2.3.4'}]\n self.m.ReplayAll()\n self.assertEqual(expected, rsrc._resolve_attribute('virtualIps'))\n self.m.VerifyAll()\n\n def test_update_nodes_immutable(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n current_nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'3.3.3.3', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1)]\n fake_lb.nodes = current_nodes\n fake_lb.tracker = 'fake_lb'\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n expected_ip = '4.4.4.4'\n update_template['Properties']['nodes'] = [{'addresses': ['1.1.1.1'],\n 'port': 80, 'condition': 'ENABLED', 'type': 'PRIMARY', 'weight':\n 1}, {'addresses': ['2.2.2.2'], 'port': 80, 'condition':\n 'DISABLED', 'type': 'PRIMARY', 'weight': 1}, {'addresses': [\n expected_ip], 'port': 80, 'condition': 'ENABLED', 'type':\n 'PRIMARY', 'weight': 1}]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.status = 'PENDING_UPDATE'\n fake_lb1.tracker = 'fake_lb1'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb1)\n fake_lb2.status = 'ACTIVE'\n fake_lb2.nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'3.3.3.3', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'4.4.4.4', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1)]\n fake_lb2.tracker = 'fake_lb2'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb3 = copy.deepcopy(fake_lb2)\n fake_lb3.status = 'ACTIVE'\n fake_lb3.nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'4.4.4.4', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1)]\n fake_lb3.tracker = 'fake_lb3'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3)\n 
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb4 = copy.deepcopy(fake_lb3)\n fake_lb4.status = 'ACTIVE'\n fake_lb4.nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'DISABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'4.4.4.4', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1)]\n fake_lb4.tracker = 'fake_lb4'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb4)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_pending_update_status(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['name'] = 'updated_name'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.name = 'updated_name'\n fake_lb1.status = 'PENDING_UPDATE'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.name = 'updated_name'\n fake_lb2.status = 'ACTIVE'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_immutable_exception(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['name'] = 'updated_name'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.name = 'updated_name'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n msg = (\n \"Load Balancer '%s' has a status of 'PENDING_UPDATE' and is considered immutable.\"\n % rsrc.resource_id)\n fake_lb.update(name='updated_name').AndRaise(Exception(msg))\n fake_lb.update(name='updated_name').AndReturn(None)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_create_immutable_exception(self):\n access_list = [{'address': '192.168.1.1/0', 'type': 'ALLOW'}, {\n 'address': '172.165.3.43', 'type': 'DENY'}]\n template = self._set_template(self.lb_template, accessList=access_list)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_access_list')\n fake_lb.get_access_list().AndReturn({})\n fake_lb.get_access_list().AndReturn({})\n fake_lb.get_access_list().AndReturn(access_list)\n self.m.StubOutWithMock(fake_lb, 'add_access_list')\n msg = (\n \"Load Balancer '%s' has a status of 'PENDING_UPDATE' and is considered immutable.\"\n % rsrc.resource_id)\n fake_lb.add_access_list(access_list).AndRaise(Exception(msg))\n fake_lb.add_access_list(access_list)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_update_lb_name(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, 
self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['name'] = 'updated_name'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.name = 'updated_name'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(name='updated_name')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_multiple(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['name'] = 'updated_name'\n update_template['Properties']['algorithm'] = 'RANDOM'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.name = 'updated_name'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.algorithm = 'RANDOM'\n fake_lb2.name = 'updated_name'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(name='updated_name', algorithm='RANDOM')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_algorithm(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['algorithm'] = 'RANDOM'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.algorithm = 'ROUND_ROBIN'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb1, 'update')\n fake_lb1.update(algorithm='RANDOM')\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.algorithm = 'RANDOM'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_protocol(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['protocol'] = 'IMAPS'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.protocol = 'IMAPS'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(protocol='IMAPS')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_redirect(self):\n template = self._set_template(self.lb_template, protocol='HTTPS')\n expected = self._set_expected(self.expected_body, protocol='HTTPS')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.ReplayAll()\n 
scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['httpsRedirect'] = True\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.httpsRedirect = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(httpsRedirect=True)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_lb_redirect_https(self):\n template = self._set_template(self.lb_template, protocol='HTTPS',\n httpsRedirect=True)\n expected = self._set_expected(self.expected_body, protocol='HTTPS',\n httpsRedirect=True)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_lb_redirect_HTTP_with_SSL_term(self):\n ssl_termination_template = {'privatekey': private_key,\n 'intermediateCertificate': 'fwaefawe', 'secureTrafficOnly': \n True, 'securePort': 443, 'certificate': cert}\n ssl_termination_api = copy.deepcopy(ssl_termination_template)\n ssl_termination_api['enabled'] = True\n del ssl_termination_api['privatekey']\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template, protocol='HTTP', httpsRedirect=True)\n expected = self._set_expected(self.expected_body, protocol='HTTP',\n httpsRedirect=False)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'create')\n rsrc.clb.create(self.lb_name, **expected).AndReturn(fake_lb)\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.httpsRedirect = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn(ssl_termination_api)\n self.m.StubOutWithMock(fake_lb1, 'get_ssl_termination')\n fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)\n fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)\n fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)\n\n def test_update_lb_half_closed(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['halfClosed'] = True\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.halfClosed = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(halfClosed=True)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_port(self):\n rsrc, 
fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['port'] = 1234\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.port = 1234\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(port=1234)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_timeout(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['timeout'] = 120\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.timeout = 120\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(timeout=120)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_health_monitor_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['healthMonitor'] = {'type': 'HTTP',\n 'delay': 10, 'timeout': 10, 'attemptsBeforeDeactivation': 4,\n 'path': '/', 'statusRegex': '^[234][0-9][0-9]$', 'bodyRegex':\n '.* testing .*', 'hostHeader': 'example.com'}\n self.m.StubOutWithMock(fake_lb, 'get_health_monitor')\n fake_lb.get_health_monitor().AndReturn({})\n fake_lb.get_health_monitor().AndReturn({'type': 'HTTP', 'delay': 10,\n 'timeout': 10, 'attemptsBeforeDeactivation': 4, 'path': '/',\n 'statusRegex': '^[234][0-9][0-9]$', 'bodyRegex':\n '.* testing .*', 'hostHeader': 'example.com'})\n self.m.StubOutWithMock(fake_lb, 'add_health_monitor')\n fake_lb.add_health_monitor(attemptsBeforeDeactivation=4, bodyRegex=\n '.* testing .*', delay=10, hostHeader='example.com', path='/',\n statusRegex='^[234][0-9][0-9]$', timeout=10, type='HTTP')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_health_monitor_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n hm = {'type': 'HTTP', 'delay': 10, 'timeout': 10,\n 'attemptsBeforeDeactivation': 4, 'path': '/', 'statusRegex':\n '^[234][0-9][0-9]$', 'bodyRegex': '.* testing .*', 'hostHeader':\n 'example.com'}\n template['Resources'][lb_name]['Properties']['healthMonitor'] = hm\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['healthMonitor'] = hm\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['healthMonitor']\n self.m.StubOutWithMock(fake_lb, 'get_health_monitor')\n fake_lb.get_health_monitor().AndReturn({'type': 'HTTP', 'delay': 10,\n 'timeout': 
10, 'attemptsBeforeDeactivation': 4, 'path': '/',\n 'statusRegex': '^[234][0-9][0-9]$', 'bodyRegex':\n '.* testing .*', 'hostHeader': 'example.com'})\n fake_lb.get_health_monitor().AndReturn({})\n self.m.StubOutWithMock(fake_lb, 'delete_health_monitor')\n fake_lb.delete_health_monitor()\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_session_persistence_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['sessionPersistence'] = 'SOURCE_IP'\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.assertEqual('SOURCE_IP', fake_lb.session_persistence)\n self.m.VerifyAll()\n\n def test_update_session_persistence_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['sessionPersistence'\n ] = 'SOURCE_IP'\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['sessionPersistence'] = {'persistenceType': 'SOURCE_IP'}\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['sessionPersistence']\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.assertEqual('', fake_lb.session_persistence)\n self.m.VerifyAll()\n\n def test_update_ssl_termination_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['sslTermination'] = {'securePort': \n 443, 'privatekey': private_key, 'certificate': cert,\n 'secureTrafficOnly': False, 'intermediateCertificate': ''}\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': cert, 'secureTrafficOnly': False, 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n fake_lb.add_ssl_termination(securePort=443, privatekey=private_key,\n certificate=cert, secureTrafficOnly=False,\n intermediateCertificate='')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_ssl_termination_delete(self):\n template = copy.deepcopy(self.lb_template)\n ssl_termination_template = {'securePort': 443, 'privatekey':\n private_key, 'certificate': cert, 'intermediateCertificate': '',\n 'secureTrafficOnly': False}\n ssl_termination_api = copy.deepcopy(ssl_termination_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['sslTermination'\n ] = ssl_termination_template\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n 
fake_lb.add_ssl_termination(**ssl_termination_api)\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': cert, 'secureTrafficOnly': False, 'enabled': True})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['sslTermination']\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': cert, 'secureTrafficOnly': False})\n self.m.StubOutWithMock(fake_lb, 'delete_ssl_termination')\n fake_lb.delete_ssl_termination()\n fake_lb.get_ssl_termination().AndReturn({})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_metadata_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['metadata'] = {'a': 1, 'b': 2}\n self.m.StubOutWithMock(fake_lb, 'get_metadata')\n fake_lb.get_metadata().AndReturn({})\n fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})\n self.m.StubOutWithMock(fake_lb, 'set_metadata')\n fake_lb.set_metadata({'a': 1, 'b': 2})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_metadata_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['metadata'] = {'a': 1,\n 'b': 2}\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['metadata'] = mox.SameElementsAs([{'key': 'a',\n 'value': 1}, {'key': 'b', 'value': 2}])\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['metadata']\n self.m.StubOutWithMock(fake_lb, 'get_metadata')\n fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})\n fake_lb.get_metadata().AndReturn({})\n self.m.StubOutWithMock(fake_lb, 'delete_metadata')\n fake_lb.delete_metadata()\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_errorpage_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n error_page = (\n '<html><head><title>Service Unavailable</title></head><body><h2>Service Unavailable</h2>The service is unavailable</body></html>'\n )\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['errorPage'] = error_page\n self.m.StubOutWithMock(fake_lb, 'get_error_page')\n fake_lb.get_error_page().AndReturn({'errorpage': {'content': 'foo'}})\n fake_lb.get_error_page().AndReturn({'errorpage': {'content':\n error_page}})\n self.m.StubOutWithMock(fake_lb, 'set_error_page')\n fake_lb.set_error_page(error_page)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def 
test_update_errorpage_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n error_page = (\n '<html><head><title>Service Unavailable</title></head><body><h2>Service Unavailable</h2>The service is unavailable</body></html>'\n )\n template['Resources'][lb_name]['Properties']['errorPage'] = error_page\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_error_page')\n fake_lb.get_error_page().AndReturn({})\n self.m.StubOutWithMock(fake_lb, 'set_error_page')\n fake_lb.set_error_page(error_page)\n fake_lb.get_error_page().AndReturn({'errorpage': {'content':\n error_page}})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['errorPage']\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)\n self.m.StubOutWithMock(fake_lb, 'clear_error_page')\n fake_lb.clear_error_page()\n self.m.StubOutWithMock(fake_lb, 'get_error_page')\n fake_lb.get_error_page().AndReturn({'errorpage': {'content':\n error_page}})\n fake_lb.get_error_page().AndReturn({'errorpage': {'content': ''}})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_connection_logging_enable(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['connectionLogging'] = True\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.assertTrue(fake_lb.connection_logging)\n self.m.VerifyAll()\n\n def test_update_connection_logging_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['connectionLogging'\n ] = True\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['connectionLogging'] = {'enabled': True}\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.connection_logging = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.connection_logging = False\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['connectionLogging']\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.assertFalse(fake_lb.connection_logging)\n self.m.VerifyAll()\n\n def test_update_connection_logging_disable(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['connectionLogging'\n ] = True\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['connectionLogging'] = {'enabled': True}\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n 
update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['connectionLogging'] = False\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.assertFalse(fake_lb.connection_logging)\n self.m.VerifyAll()\n\n def test_update_connection_throttle_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['connectionThrottle'] = {'maxConnections'\n : 1000}\n self.m.StubOutWithMock(fake_lb, 'add_connection_throttle')\n self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')\n fake_lb.get_connection_throttle().AndReturn({'maxConnectionRate':\n None, 'minConnections': None, 'rateInterval': None,\n 'maxConnections': 100})\n fake_lb.add_connection_throttle(maxConnections=1000,\n maxConnectionRate=None, minConnections=None, rateInterval=None)\n fake_lb.get_connection_throttle().AndReturn({'maxConnectionRate':\n None, 'minConnections': None, 'rateInterval': None,\n 'maxConnections': 1000})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_connection_throttle_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['connectionThrottle'] = {\n 'maxConnections': 1000}\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['connectionThrottle'] = {'maxConnections': 1000,\n 'maxConnectionRate': None, 'rateInterval': None,\n 'minConnections': None}\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['connectionThrottle']\n self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')\n fake_lb.get_connection_throttle().AndReturn({'maxConnections': 1000,\n 'maxConnectionRate': None, 'rateInterval': None,\n 'minConnections': None})\n self.m.StubOutWithMock(fake_lb, 'delete_connection_throttle')\n fake_lb.delete_connection_throttle()\n fake_lb.get_connection_throttle().AndReturn({})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_content_caching_enable(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['contentCaching'] = 'ENABLED'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.content_caching = False\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.content_caching = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_content_caching_deleted(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['contentCaching'\n ] = 
'ENABLED'\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['contentCaching']\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.content_caching = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.content_caching = False\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_content_caching_disable(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['contentCaching'\n ] = 'ENABLED'\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['contentCaching'] = 'DISABLED'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.content_caching = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.content_caching = False\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_delete(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.delete)()\n self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_delete_immutable(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))\n self.m.StubOutWithMock(fake_lb, 'delete')\n fake_lb.delete().AndRaise(Exception('immutable'))\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.delete)()\n self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_delete_non_immutable_exc(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n self.m.StubOutWithMock(fake_lb, 'delete')\n fake_lb.delete().AndRaise(FakeException())\n self.m.ReplayAll()\n exc = self.assertRaises(exception.ResourceFailure, scheduler.\n TaskRunner(rsrc.delete))\n 
self.assertIn('FakeException', six.text_type(exc))\n self.m.VerifyAll()\n\n def test_delete_states(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb3 = copy.deepcopy(fake_lb)\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1.status = 'ACTIVE'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2.status = 'PENDING_DELETE'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n fake_lb3.status = 'DELETED'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.delete)()\n self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_redir(self):\n mock_stack = mock.Mock()\n mock_stack.db_resource_get.return_value = None\n mock_stack.has_cache_data.return_value = False\n props = {'httpsRedirect': True, 'protocol': 'HTTPS', 'port': 443,\n 'nodes': [], 'virtualIps': [{'id': '1234'}]}\n mock_resdef = rsrc_defn.ResourceDefinition('test_lb',\n LoadBalancerWithFakeClient, properties=props)\n mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)\n self.assertIsNone(mock_lb.validate())\n props['protocol'] = 'HTTP'\n props['sslTermination'] = {'secureTrafficOnly': True, 'securePort':\n 443, 'privatekey': 'bobloblaw', 'certificate': 'mycert'}\n mock_resdef = rsrc_defn.ResourceDefinition('test_lb_2',\n LoadBalancerWithFakeClient, properties=props)\n mock_lb = lb.CloudLoadBalancer('test_2', mock_resdef, mock_stack)\n self.assertIsNone(mock_lb.validate())\n\n def test_invalid_redir_proto(self):\n mock_stack = mock.Mock()\n mock_stack.db_resource_get.return_value = None\n mock_stack.has_cache_data.return_value = False\n props = {'httpsRedirect': True, 'protocol': 'TCP', 'port': 1234,\n 'nodes': [], 'virtualIps': [{'id': '1234'}]}\n mock_resdef = rsrc_defn.ResourceDefinition('test_lb',\n LoadBalancerWithFakeClient, properties=props)\n mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)\n ex = self.assertRaises(exception.StackValidationFailed, mock_lb.\n validate)\n self.assertIn('HTTPS redirect is only available', six.text_type(ex))\n\n def test_invalid_redir_ssl(self):\n mock_stack = mock.Mock()\n mock_stack.db_resource_get.return_value = None\n mock_stack.has_cache_data.return_value = False\n props = {'httpsRedirect': True, 'protocol': 'HTTP', 'port': 1234,\n 'nodes': [], 'virtualIps': [{'id': '1234'}]}\n mock_resdef = rsrc_defn.ResourceDefinition('test_lb',\n LoadBalancerWithFakeClient, properties=props)\n mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)\n ex = self.assertRaises(exception.StackValidationFailed, mock_lb.\n validate)\n self.assertIn('HTTPS redirect is only available', six.text_type(ex))\n props['sslTermination'] = {'secureTrafficOnly': False, 'securePort':\n 443, 'privatekey': 'bobloblaw', 'certificate': 'mycert'}\n mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)\n ex = self.assertRaises(exception.StackValidationFailed, mock_lb.\n validate)\n self.assertIn('HTTPS redirect is only available', six.text_type(ex))\n props['sslTermination'] = {'secureTrafficOnly': True, 'securePort':\n 1234, 'privatekey': 'bobloblaw', 'certificate': 'mycert'}\n mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)\n ex = self.assertRaises(exception.StackValidationFailed, mock_lb.\n 
validate)\n self.assertIn('HTTPS redirect is only available', six.text_type(ex))\n\n def test_update_nodes_condition_draining(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n fake_lb.nodes = self.expected_body['nodes']\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n expected_ip = '172.168.1.4'\n update_template['Properties']['nodes'] = [{'addresses': [\n '166.78.103.141'], 'port': 80, 'condition': 'DRAINING', 'type':\n 'PRIMARY', 'weight': 1}, {'addresses': [expected_ip], 'port': \n 80, 'condition': 'DRAINING', 'type': 'PRIMARY', 'weight': 1}]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb1, 'add_nodes')\n fake_lb1.add_nodes([fake_lb1.Node(address=expected_ip, port=80,\n condition='DRAINING', type='PRIMARY', weight=1)])\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.nodes = [FakeNode(address=u'166.78.103.141', port=80,\n condition=u'DRAINING', type='PRIMARY', weight=1), FakeNode(\n address=u'172.168.1.4', port=80, condition=u'DRAINING', type=\n 'PRIMARY', weight=1)]\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_nodes_add_same_address_different_port(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n fake_lb.nodes = self.expected_body['nodes']\n fake_lb.tracker = 'fake_lb'\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['nodes'] = [{'addresses': [\n '166.78.103.141'], 'port': 80, 'condition': 'ENABLED', 'type':\n 'PRIMARY', 'weight': 1}, {'addresses': ['166.78.103.141'],\n 'port': 81, 'condition': 'ENABLED', 'type': 'PRIMARY', 'weight': 1}\n ]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb1, 'add_nodes')\n fake_lb1.add_nodes([fake_lb1.Node(address='166.78.103.141', port=81,\n condition='ENABLED', type='PRIMARY', weight=1)])\n fake_lb1.tracker = 'fake_lb1'\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.nodes = [FakeNode(address=u'166.78.103.141', port=80,\n condition=u'ENABLED', type='PRIMARY', weight=1), FakeNode(\n address=u'166.78.103.141', port=81, condition=u'ENABLED', type=\n 'PRIMARY', weight=1)]\n fake_lb2.tracker = 'fake_lb2'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_nodes_defaults(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n tmpl_node = template['Resources'][lb_name]['Properties']['nodes'][0]\n tmpl_node['type'] = 'PRIMARY'\n tmpl_node['condition'] = 'ENABLED'\n tmpl_node['weight'] = 1\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['nodes'] = [FakeNode(address=u'166.78.103.141', port=\n 80, condition=u'ENABLED', type='PRIMARY', weight=1)]\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n fake_lb.nodes = self.expected_body['nodes']\n self.m.ReplayAll()\n 
scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['nodes'] = [{'addresses': [\n '166.78.103.141'], 'port': 80}]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb1, 'add_nodes')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n",
"step-4": "<mask token>\n\n\nclass FakeVirtualIP(object):\n\n def __init__(self, address=None, port=None, condition=None, ipVersion=\n None, type=None, id=None):\n self.address = address\n self.port = port\n self.condition = condition\n self.ipVersion = ipVersion\n self.type = type\n self.id = id\n self.ip_version = ipVersion\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\nclass FakeLoadBalancerClient(object):\n\n def __init__(self, *args, **kwargs):\n self.Node = FakeNode\n self.VirtualIP = FakeVirtualIP\n pass\n\n def get(self, *args, **kwargs):\n pass\n\n def create(self, *args, **kwargs):\n pass\n\n\nclass FakeLoadBalancer(object):\n\n def __init__(self, name=None, info=None, *args, **kwargs):\n name = name or uuid.uuid4()\n info = info or {'fake': 'fake'}\n self.id = uuid.uuid4()\n self.manager = FakeLoadBalancerManager()\n self.Node = FakeNode\n self.VirtualIP = FakeVirtualIP\n self.nodes = []\n self.algorithm = 'ROUND_ROBIN'\n self.session_persistence = 'HTTP_COOKIE'\n self.connection_logging = False\n self.timeout = None\n self.httpsRedirect = False\n self.protocol = None\n self.port = None\n self.name = None\n self.halfClosed = None\n self.content_caching = False\n\n def get(self, *args, **kwargs):\n pass\n\n def add_nodes(self, *args, **kwargs):\n pass\n\n def add_ssl_termination(self, *args, **kwargs):\n pass\n\n def set_error_page(self, *args, **kwargs):\n pass\n\n def clear_error_page(self, *args, **kwargs):\n pass\n\n def add_access_list(self, *args, **kwargs):\n pass\n\n def update(self, *args, **kwargs):\n pass\n\n def add_health_monitor(self, *args, **kwargs):\n pass\n\n def delete_health_monitor(self, *args, **kwargs):\n pass\n\n def delete_ssl_termination(self, *args, **kwargs):\n pass\n\n def set_metadata(self, *args, **kwargs):\n pass\n\n def delete_metadata(self, *args, **kwargs):\n pass\n\n def add_connection_throttle(self, *args, **kwargs):\n pass\n\n def delete_connection_throttle(self, *args, **kwargs):\n pass\n\n def delete(self, *args, **kwargs):\n pass\n\n def get_health_monitor(self, *args, **kwargs):\n return {}\n\n def get_metadata(self, *args, **kwargs):\n return {}\n\n def get_error_page(self, *args, **kwargs):\n pass\n\n def get_connection_throttle(self, *args, **kwargs):\n pass\n\n def get_ssl_termination(self, *args, **kwargs):\n pass\n\n def get_access_list(self, *args, **kwargs):\n pass\n\n\nclass LoadBalancerWithFakeClient(lb.CloudLoadBalancer):\n\n def cloud_lb(self):\n return FakeLoadBalancerClient()\n\n\n<mask token>\n\n\nclass LoadBalancerTest(common.HeatTestCase):\n\n def setUp(self):\n super(LoadBalancerTest, self).setUp()\n self.lb_template = {'AWSTemplateFormatVersion': '2010-09-09',\n 'Description': 'fawef', 'Resources': {self.\n _get_lb_resource_name(): {'Type':\n 'Rackspace::Cloud::LoadBalancer', 'Properties': {'name':\n 'test-clb', 'nodes': [{'addresses': ['166.78.103.141'], 'port':\n 80, 'condition': 'ENABLED'}], 'protocol': 'HTTP', 'port': 80,\n 'virtualIps': [{'type': 'PUBLIC', 'ipVersion': 'IPV6'}],\n 'algorithm': 'LEAST_CONNECTIONS', 'connectionThrottle': {\n 'maxConnectionRate': 1000}, 'timeout': 110, 'contentCaching':\n 'DISABLED'}}}}\n self.lb_name = 'test-clb'\n self.expected_body = {'nodes': [FakeNode(address=u'166.78.103.141',\n port=80, condition=u'ENABLED', type=u'PRIMARY', weight=1)],\n 'protocol': u'HTTP', 'port': 80, 'virtual_ips': [FakeVirtualIP(\n type=u'PUBLIC', ipVersion=u'IPV6')], 'algorithm':\n u'LEAST_CONNECTIONS', 
'connectionThrottle': {\n 'maxConnectionRate': 1000, 'maxConnections': None,\n 'rateInterval': None, 'minConnections': None},\n 'connectionLogging': None, 'halfClosed': None, 'healthMonitor':\n None, 'metadata': None, 'sessionPersistence': None, 'timeout': \n 110, 'httpsRedirect': False}\n lb.resource_mapping = override_resource\n resource._register_class('Rackspace::Cloud::LoadBalancer',\n LoadBalancerWithFakeClient)\n\n def _get_lb_resource_name(self):\n return 'lb-' + str(uuid.uuid4())\n\n def __getattribute__(self, name):\n if name == 'expected_body' or name == 'lb_template':\n return copy.deepcopy(super(LoadBalancerTest, self).\n __getattribute__(name))\n return super(LoadBalancerTest, self).__getattribute__(name)\n\n def _mock_create(self, tmpl, stack, resource_name, lb_name, lb_body):\n resource_defns = tmpl.resource_definitions(stack)\n rsrc = LoadBalancerWithFakeClient(resource_name, resource_defns[\n resource_name], stack)\n fake_lb = FakeLoadBalancer(name=lb_name)\n fake_lb.status = 'ACTIVE'\n fake_lb.resource_id = 1234\n self.m.StubOutWithMock(rsrc.clb, 'create')\n rsrc.clb.create(lb_name, **lb_body).AndReturn(fake_lb)\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)\n return rsrc, fake_lb\n\n def _get_first_resource_name(self, templ):\n return next(k for k in templ['Resources'])\n\n def _mock_loadbalancer(self, lb_template, expected_name, expected_body):\n t = template_format.parse(json.dumps(lb_template))\n self.stack = utils.parse_stack(t, stack_name=utils.random_name())\n rsrc, fake_lb = self._mock_create(self.stack.t, self.stack, self.\n _get_first_resource_name(lb_template), expected_name, expected_body\n )\n return rsrc, fake_lb\n\n def _set_template(self, templ, **kwargs):\n for k, v in six.iteritems(kwargs):\n templ['Resources'][self._get_first_resource_name(templ)][\n 'Properties'][k] = v\n return templ\n\n def _set_expected(self, expected, **kwargs):\n for k, v in six.iteritems(kwargs):\n expected[k] = v\n return expected\n\n def test_process_node(self):\n nodes = [{'addresses': ['1234'], 'port': 80, 'enabled': True}, {\n 'addresses': ['4567', '8901', '8903'], 'port': 80, 'enabled': \n True}, {'addresses': [], 'port': 80, 'enabled': True}]\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n expected_nodes = [{'address': '1234', 'port': 80, 'enabled': True},\n {'address': '4567', 'port': 80, 'enabled': True}, {'address':\n '8901', 'port': 80, 'enabled': True}, {'address': '8903',\n 'port': 80, 'enabled': True}]\n self.assertEqual(expected_nodes, list(rsrc._process_nodes(nodes)))\n\n def test_nodeless(self):\n \"\"\"It's possible to create a LoadBalancer resource with no nodes.\"\"\"\n template = self._set_template(self.lb_template, nodes=[])\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['nodes'] = []\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_alter_properties(self):\n template = self._set_template(self.lb_template, sessionPersistence=\n 'HTTP_COOKIE', connectionLogging=True, metadata={'yolo':\n 'heeyyy_gurl'})\n expected = self._set_expected(self.expected_body,\n sessionPersistence={'persistenceType': 'HTTP_COOKIE'},\n connectionLogging={'enabled': True}, metadata=[{'key': 'yolo',\n 'value': 'heeyyy_gurl'}])\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.ReplayAll()\n 
scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_validate_vip(self):\n snippet = {'nodes': [], 'protocol': 'HTTP', 'port': 80,\n 'halfClosed': None, 'algorithm': u'LEAST_CONNECTIONS',\n 'virtualIps': [{'id': '1234'}]}\n stack = mock.Mock()\n stack.db_resource_get.return_value = None\n stack.has_cache_data.return_value = False\n resdef = rsrc_defn.ResourceDefinition('testvip', lb.\n CloudLoadBalancer, properties=snippet)\n rsrc = lb.CloudLoadBalancer('testvip', resdef, stack)\n self.assertIsNone(rsrc.validate())\n snippet['virtualIps'][0]['type'] = 'PUBLIC'\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn('Cannot specify type or version', str(exc))\n snippet['virtualIps'] = [{}]\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn('Must specify VIP type and version', str(exc))\n\n def test_validate_half_closed(self):\n template = self._set_template(self.lb_template, halfClosed=True)\n expected = self._set_expected(self.expected_body, halfClosed=True)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn(\n 'The halfClosed property is only available for the TCP or TCP_CLIENT_FIRST protocols'\n , str(exc))\n template = self._set_template(template, protocol='TCP')\n expected = self._set_expected(expected, protocol='TCP')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n template = self._set_template(template, protocol='TCP_CLIENT_FIRST')\n expected = self._set_expected(expected, protocol='TCP_CLIENT_FIRST')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def test_validate_health_monitor(self):\n health_monitor = {'type': 'CONNECT', 'attemptsBeforeDeactivation': \n 1, 'delay': 1, 'timeout': 1}\n template = self._set_template(self.lb_template, healthMonitor=\n health_monitor)\n expected = self._set_expected(self.expected_body, healthMonitor=\n health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n health_monitor['bodyRegex'] = 'dfawefawe'\n template = self._set_template(template, healthMonitor=health_monitor)\n expected = self._set_expected(expected, healthMonitor=health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn('Unknown Property bodyRegex', str(exc))\n health_monitor['type'] = 'HTTP'\n health_monitor['bodyRegex'] = 'bodyRegex'\n health_monitor['statusRegex'] = 'statusRegex'\n health_monitor['hostHeader'] = 'hostHeader'\n health_monitor['path'] = 'path'\n template = self._set_template(template, healthMonitor=health_monitor)\n expected = self._set_expected(expected, healthMonitor=health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def test_validate_ssl_termination(self):\n ssl_termination = {'privatekey': 'ewfawe',\n 'intermediateCertificate': 'fwaefawe', 'secureTrafficOnly': True}\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination)\n expected = self._set_expected(self.expected_body, sslTermination=\n ssl_termination)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n exc = 
self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn('Property certificate not assigned', six.text_type(exc))\n ssl_termination['certificate'] = 'dfaewfwef'\n template = self._set_template(template, sslTermination=ssl_termination)\n expected = self._set_expected(expected, sslTermination=ssl_termination)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def test_ssl_termination_unstripped_certificates(self):\n ssl_termination_template = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': \"\"\" \nfawefwea\n \"\"\",\n 'intermediateCertificate': '\\n\\nintermediate_certificate\\n',\n 'secureTrafficOnly': False}\n ssl_termination_api = copy.deepcopy(ssl_termination_template)\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': 'fawefwea', 'intermediateCertificate':\n 'intermediate_certificate', 'secureTrafficOnly': False,\n 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n fake_lb.add_ssl_termination(**ssl_termination_api)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_ssl_termination_intermediateCertificate_None(self):\n ssl_termination_template = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': \"\"\" \nfawefwea\n \"\"\",\n 'intermediateCertificate': None, 'secureTrafficOnly': False}\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': 'fawefwea', 'secureTrafficOnly': False,\n 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n add_ssl_termination_args = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': \"\"\" \nfawefwea\n \"\"\",\n 'intermediateCertificate': '', 'secureTrafficOnly': False}\n fake_lb.add_ssl_termination(**add_ssl_termination_args)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_post_creation_access_list(self):\n access_list = [{'address': '192.168.1.1/0', 'type': 'ALLOW'}, {\n 'address': '172.165.3.43', 'type': 'DENY'}]\n api_access_list = [{'address': '192.168.1.1/0', 'id': 1234, 'type':\n 'ALLOW'}, {'address': '172.165.3.43', 'id': 3422, 'type': 'DENY'}]\n template = self._set_template(self.lb_template, accessList=access_list)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_access_list')\n fake_lb.get_access_list().AndReturn([])\n fake_lb.get_access_list().AndReturn(api_access_list)\n self.m.StubOutWithMock(fake_lb, 'add_access_list')\n fake_lb.add_access_list(access_list)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_ref_id(self):\n \"\"\"The Reference ID of the resource is the resource ID.\"\"\"\n template = self._set_template(self.lb_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n 
scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n self.assertEqual(rsrc.resource_id, rsrc.FnGetRefId())\n\n def test_post_creation_error_page(self):\n error_page = 'REALLY BIG ERROR'\n template = self._set_template(self.lb_template, errorPage=error_page)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_error_page')\n fake_lb.get_error_page().AndReturn({u'errorpage': {u'content': u''}})\n fake_lb.get_error_page().AndReturn({u'errorpage': {u'content':\n error_page}})\n self.m.StubOutWithMock(fake_lb, 'set_error_page')\n fake_lb.set_error_page(error_page)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_post_creation_ssl_termination(self):\n ssl_termination_template = {'securePort': 443, 'privatekey':\n 'afwefawe', 'certificate': 'fawefwea',\n 'intermediateCertificate': 'intermediate_certificate',\n 'secureTrafficOnly': False}\n ssl_termination_api = copy.deepcopy(ssl_termination_template)\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': 'fawefwea', 'intermediateCertificate':\n 'intermediate_certificate', 'secureTrafficOnly': False,\n 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n fake_lb.add_ssl_termination(**ssl_termination_api)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_post_creation_content_caching(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc = self._mock_loadbalancer(template, self.lb_name, self.\n expected_body)[0]\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_check(self):\n stack = mock.Mock()\n stack.db_resource_get.return_value = None\n stack.has_cache_data.return_value = False\n resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)\n loadbalancer = lb.CloudLoadBalancer('test', resdef, stack)\n loadbalancer._add_event = mock.Mock()\n mock_cloud_lb = mock.Mock()\n mock_get = mock.Mock(return_value=mock_cloud_lb)\n loadbalancer.clb.get = mock_get\n mock_cloud_lb.status = 'ACTIVE'\n scheduler.TaskRunner(loadbalancer.check)()\n self.assertEqual('CHECK', loadbalancer.action)\n self.assertEqual('COMPLETE', loadbalancer.status)\n mock_cloud_lb.status = 'FOOBAR'\n exc = self.assertRaises(exception.ResourceFailure, scheduler.\n TaskRunner(loadbalancer.check))\n self.assertEqual('CHECK', loadbalancer.action)\n self.assertEqual('FAILED', loadbalancer.status)\n self.assertIn('FOOBAR', str(exc))\n mock_get.side_effect = lb.NotFound('boom')\n exc = self.assertRaises(exception.ResourceFailure, scheduler.\n TaskRunner(loadbalancer.check))\n self.assertEqual('CHECK', loadbalancer.action)\n self.assertEqual('FAILED', loadbalancer.status)\n self.assertIn('boom', str(exc))\n\n def test_update_add_node_by_address(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n fake_lb.nodes = self.expected_body['nodes']\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n expected_ip = '172.168.1.4'\n update_template['Properties']['nodes'] = [{'addresses': [\n '166.78.103.141'], 'port': 80, 'condition': 
'ENABLED', 'type':\n 'PRIMARY', 'weight': 1}, {'addresses': [expected_ip], 'port': \n 80, 'condition': 'ENABLED', 'type': 'PRIMARY', 'weight': 1}]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.nodes = [FakeNode(address=u'172.168.1.4', port=80,\n condition=u'ENABLED', type='PRIMARY', weight=1), FakeNode(\n address=u'166.78.103.141', port=80, condition=u'ENABLED', type=\n 'PRIMARY', weight=1)]\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'add_nodes')\n fake_lb.add_nodes([fake_lb.Node(address=expected_ip, port=80,\n condition='ENABLED', type='PRIMARY', weight=1)])\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_resolve_attr_noid(self):\n stack = mock.Mock()\n stack.db_resource_get.return_value = None\n stack.has_cache_data.return_value = False\n resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)\n lbres = lb.CloudLoadBalancer('test', resdef, stack)\n self.assertIsNone(lbres._resolve_attribute('PublicIp'))\n\n def test_resolve_attr_virtualips(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n fake_lb.virtual_ips = [FakeVirtualIP(address='1.2.3.4', type=\n 'PUBLIC', ipVersion='IPv6', id='test-id')]\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n expected = [{'ip_version': 'IPv6', 'type': 'PUBLIC', 'id':\n 'test-id', 'address': '1.2.3.4'}]\n self.m.ReplayAll()\n self.assertEqual(expected, rsrc._resolve_attribute('virtualIps'))\n self.m.VerifyAll()\n\n def test_update_nodes_immutable(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n current_nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'3.3.3.3', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1)]\n fake_lb.nodes = current_nodes\n fake_lb.tracker = 'fake_lb'\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n expected_ip = '4.4.4.4'\n update_template['Properties']['nodes'] = [{'addresses': ['1.1.1.1'],\n 'port': 80, 'condition': 'ENABLED', 'type': 'PRIMARY', 'weight':\n 1}, {'addresses': ['2.2.2.2'], 'port': 80, 'condition':\n 'DISABLED', 'type': 'PRIMARY', 'weight': 1}, {'addresses': [\n expected_ip], 'port': 80, 'condition': 'ENABLED', 'type':\n 'PRIMARY', 'weight': 1}]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.status = 'PENDING_UPDATE'\n fake_lb1.tracker = 'fake_lb1'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb1)\n fake_lb2.status = 'ACTIVE'\n fake_lb2.nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'3.3.3.3', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'4.4.4.4', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1)]\n fake_lb2.tracker = 'fake_lb2'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb3 = 
copy.deepcopy(fake_lb2)\n fake_lb3.status = 'ACTIVE'\n fake_lb3.nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'ENABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'4.4.4.4', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1)]\n fake_lb3.tracker = 'fake_lb3'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb4 = copy.deepcopy(fake_lb3)\n fake_lb4.status = 'ACTIVE'\n fake_lb4.nodes = [FakeNode(address=u'1.1.1.1', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1), FakeNode(address=\n u'2.2.2.2', port=80, condition=u'DISABLED', type='PRIMARY',\n weight=1), FakeNode(address=u'4.4.4.4', port=80, condition=\n u'ENABLED', type='PRIMARY', weight=1)]\n fake_lb4.tracker = 'fake_lb4'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb4)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_pending_update_status(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['name'] = 'updated_name'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.name = 'updated_name'\n fake_lb1.status = 'PENDING_UPDATE'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.name = 'updated_name'\n fake_lb2.status = 'ACTIVE'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_immutable_exception(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['name'] = 'updated_name'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.name = 'updated_name'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n msg = (\n \"Load Balancer '%s' has a status of 'PENDING_UPDATE' and is considered immutable.\"\n % rsrc.resource_id)\n fake_lb.update(name='updated_name').AndRaise(Exception(msg))\n fake_lb.update(name='updated_name').AndReturn(None)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_create_immutable_exception(self):\n access_list = [{'address': '192.168.1.1/0', 'type': 'ALLOW'}, {\n 'address': '172.165.3.43', 'type': 'DENY'}]\n template = self._set_template(self.lb_template, accessList=access_list)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_access_list')\n fake_lb.get_access_list().AndReturn({})\n fake_lb.get_access_list().AndReturn({})\n fake_lb.get_access_list().AndReturn(access_list)\n self.m.StubOutWithMock(fake_lb, 
'add_access_list')\n msg = (\n \"Load Balancer '%s' has a status of 'PENDING_UPDATE' and is considered immutable.\"\n % rsrc.resource_id)\n fake_lb.add_access_list(access_list).AndRaise(Exception(msg))\n fake_lb.add_access_list(access_list)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_update_lb_name(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['name'] = 'updated_name'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.name = 'updated_name'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(name='updated_name')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_multiple(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['name'] = 'updated_name'\n update_template['Properties']['algorithm'] = 'RANDOM'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.name = 'updated_name'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.algorithm = 'RANDOM'\n fake_lb2.name = 'updated_name'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(name='updated_name', algorithm='RANDOM')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_algorithm(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['algorithm'] = 'RANDOM'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.algorithm = 'ROUND_ROBIN'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb1, 'update')\n fake_lb1.update(algorithm='RANDOM')\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.algorithm = 'RANDOM'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_protocol(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['protocol'] = 'IMAPS'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.protocol = 'IMAPS'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(protocol='IMAPS')\n 
self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_redirect(self):\n template = self._set_template(self.lb_template, protocol='HTTPS')\n expected = self._set_expected(self.expected_body, protocol='HTTPS')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['httpsRedirect'] = True\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.httpsRedirect = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(httpsRedirect=True)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_lb_redirect_https(self):\n template = self._set_template(self.lb_template, protocol='HTTPS',\n httpsRedirect=True)\n expected = self._set_expected(self.expected_body, protocol='HTTPS',\n httpsRedirect=True)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_lb_redirect_HTTP_with_SSL_term(self):\n ssl_termination_template = {'privatekey': private_key,\n 'intermediateCertificate': 'fwaefawe', 'secureTrafficOnly': \n True, 'securePort': 443, 'certificate': cert}\n ssl_termination_api = copy.deepcopy(ssl_termination_template)\n ssl_termination_api['enabled'] = True\n del ssl_termination_api['privatekey']\n template = self._set_template(self.lb_template, sslTermination=\n ssl_termination_template, protocol='HTTP', httpsRedirect=True)\n expected = self._set_expected(self.expected_body, protocol='HTTP',\n httpsRedirect=False)\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected)\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'create')\n rsrc.clb.create(self.lb_name, **expected).AndReturn(fake_lb)\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.httpsRedirect = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn(ssl_termination_api)\n self.m.StubOutWithMock(fake_lb1, 'get_ssl_termination')\n fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)\n fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)\n fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)\n\n def test_update_lb_half_closed(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['halfClosed'] = True\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n 
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.halfClosed = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(halfClosed=True)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_port(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['port'] = 1234\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.port = 1234\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(port=1234)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_lb_timeout(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['timeout'] = 120\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.timeout = 120\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb, 'update')\n fake_lb.update(timeout=120)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_health_monitor_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['healthMonitor'] = {'type': 'HTTP',\n 'delay': 10, 'timeout': 10, 'attemptsBeforeDeactivation': 4,\n 'path': '/', 'statusRegex': '^[234][0-9][0-9]$', 'bodyRegex':\n '.* testing .*', 'hostHeader': 'example.com'}\n self.m.StubOutWithMock(fake_lb, 'get_health_monitor')\n fake_lb.get_health_monitor().AndReturn({})\n fake_lb.get_health_monitor().AndReturn({'type': 'HTTP', 'delay': 10,\n 'timeout': 10, 'attemptsBeforeDeactivation': 4, 'path': '/',\n 'statusRegex': '^[234][0-9][0-9]$', 'bodyRegex':\n '.* testing .*', 'hostHeader': 'example.com'})\n self.m.StubOutWithMock(fake_lb, 'add_health_monitor')\n fake_lb.add_health_monitor(attemptsBeforeDeactivation=4, bodyRegex=\n '.* testing .*', delay=10, hostHeader='example.com', path='/',\n statusRegex='^[234][0-9][0-9]$', timeout=10, type='HTTP')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_health_monitor_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n hm = {'type': 'HTTP', 'delay': 10, 'timeout': 10,\n 'attemptsBeforeDeactivation': 4, 'path': '/', 'statusRegex':\n '^[234][0-9][0-9]$', 'bodyRegex': '.* testing .*', 'hostHeader':\n 'example.com'}\n template['Resources'][lb_name]['Properties']['healthMonitor'] = hm\n expected_body = 
copy.deepcopy(self.expected_body)\n expected_body['healthMonitor'] = hm\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['healthMonitor']\n self.m.StubOutWithMock(fake_lb, 'get_health_monitor')\n fake_lb.get_health_monitor().AndReturn({'type': 'HTTP', 'delay': 10,\n 'timeout': 10, 'attemptsBeforeDeactivation': 4, 'path': '/',\n 'statusRegex': '^[234][0-9][0-9]$', 'bodyRegex':\n '.* testing .*', 'hostHeader': 'example.com'})\n fake_lb.get_health_monitor().AndReturn({})\n self.m.StubOutWithMock(fake_lb, 'delete_health_monitor')\n fake_lb.delete_health_monitor()\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_session_persistence_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['sessionPersistence'] = 'SOURCE_IP'\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.assertEqual('SOURCE_IP', fake_lb.session_persistence)\n self.m.VerifyAll()\n\n def test_update_session_persistence_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['sessionPersistence'\n ] = 'SOURCE_IP'\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['sessionPersistence'] = {'persistenceType': 'SOURCE_IP'}\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['sessionPersistence']\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.assertEqual('', fake_lb.session_persistence)\n self.m.VerifyAll()\n\n def test_update_ssl_termination_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['sslTermination'] = {'securePort': \n 443, 'privatekey': private_key, 'certificate': cert,\n 'secureTrafficOnly': False, 'intermediateCertificate': ''}\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': cert, 'secureTrafficOnly': False, 'enabled': True})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n fake_lb.add_ssl_termination(securePort=443, privatekey=private_key,\n certificate=cert, secureTrafficOnly=False,\n intermediateCertificate='')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_ssl_termination_delete(self):\n template = copy.deepcopy(self.lb_template)\n ssl_termination_template = {'securePort': 443, 'privatekey':\n private_key, 'certificate': cert, 'intermediateCertificate': '',\n 'secureTrafficOnly': False}\n ssl_termination_api = 
copy.deepcopy(ssl_termination_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['sslTermination'\n ] = ssl_termination_template\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n fake_lb.add_ssl_termination(**ssl_termination_api)\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': cert, 'secureTrafficOnly': False, 'enabled': True})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['sslTermination']\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({'securePort': 443,\n 'certificate': cert, 'secureTrafficOnly': False})\n self.m.StubOutWithMock(fake_lb, 'delete_ssl_termination')\n fake_lb.delete_ssl_termination()\n fake_lb.get_ssl_termination().AndReturn({})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_metadata_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['metadata'] = {'a': 1, 'b': 2}\n self.m.StubOutWithMock(fake_lb, 'get_metadata')\n fake_lb.get_metadata().AndReturn({})\n fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})\n self.m.StubOutWithMock(fake_lb, 'set_metadata')\n fake_lb.set_metadata({'a': 1, 'b': 2})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_metadata_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['metadata'] = {'a': 1,\n 'b': 2}\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['metadata'] = mox.SameElementsAs([{'key': 'a',\n 'value': 1}, {'key': 'b', 'value': 2}])\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['metadata']\n self.m.StubOutWithMock(fake_lb, 'get_metadata')\n fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})\n fake_lb.get_metadata().AndReturn({})\n self.m.StubOutWithMock(fake_lb, 'delete_metadata')\n fake_lb.delete_metadata()\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_errorpage_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n error_page = (\n '<html><head><title>Service Unavailable</title></head><body><h2>Service Unavailable</h2>The service is unavailable</body></html>'\n )\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['errorPage'] = error_page\n self.m.StubOutWithMock(fake_lb, 
'get_error_page')\n fake_lb.get_error_page().AndReturn({'errorpage': {'content': 'foo'}})\n fake_lb.get_error_page().AndReturn({'errorpage': {'content':\n error_page}})\n self.m.StubOutWithMock(fake_lb, 'set_error_page')\n fake_lb.set_error_page(error_page)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_errorpage_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n error_page = (\n '<html><head><title>Service Unavailable</title></head><body><h2>Service Unavailable</h2>The service is unavailable</body></html>'\n )\n template['Resources'][lb_name]['Properties']['errorPage'] = error_page\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_error_page')\n fake_lb.get_error_page().AndReturn({})\n self.m.StubOutWithMock(fake_lb, 'set_error_page')\n fake_lb.set_error_page(error_page)\n fake_lb.get_error_page().AndReturn({'errorpage': {'content':\n error_page}})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['errorPage']\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb)\n self.m.StubOutWithMock(fake_lb, 'clear_error_page')\n fake_lb.clear_error_page()\n self.m.StubOutWithMock(fake_lb, 'get_error_page')\n fake_lb.get_error_page().AndReturn({'errorpage': {'content':\n error_page}})\n fake_lb.get_error_page().AndReturn({'errorpage': {'content': ''}})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_connection_logging_enable(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['connectionLogging'] = True\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.assertTrue(fake_lb.connection_logging)\n self.m.VerifyAll()\n\n def test_update_connection_logging_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['connectionLogging'\n ] = True\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['connectionLogging'] = {'enabled': True}\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.connection_logging = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.connection_logging = False\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['connectionLogging']\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.assertFalse(fake_lb.connection_logging)\n self.m.VerifyAll()\n\n def test_update_connection_logging_disable(self):\n template = 
copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['connectionLogging'\n ] = True\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['connectionLogging'] = {'enabled': True}\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['connectionLogging'] = False\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.assertFalse(fake_lb.connection_logging)\n self.m.VerifyAll()\n\n def test_update_connection_throttle_add(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['connectionThrottle'] = {'maxConnections'\n : 1000}\n self.m.StubOutWithMock(fake_lb, 'add_connection_throttle')\n self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')\n fake_lb.get_connection_throttle().AndReturn({'maxConnectionRate':\n None, 'minConnections': None, 'rateInterval': None,\n 'maxConnections': 100})\n fake_lb.add_connection_throttle(maxConnections=1000,\n maxConnectionRate=None, minConnections=None, rateInterval=None)\n fake_lb.get_connection_throttle().AndReturn({'maxConnectionRate':\n None, 'minConnections': None, 'rateInterval': None,\n 'maxConnections': 1000})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_connection_throttle_delete(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['connectionThrottle'] = {\n 'maxConnections': 1000}\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['connectionThrottle'] = {'maxConnections': 1000,\n 'maxConnectionRate': None, 'rateInterval': None,\n 'minConnections': None}\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['connectionThrottle']\n self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')\n fake_lb.get_connection_throttle().AndReturn({'maxConnections': 1000,\n 'maxConnectionRate': None, 'rateInterval': None,\n 'minConnections': None})\n self.m.StubOutWithMock(fake_lb, 'delete_connection_throttle')\n fake_lb.delete_connection_throttle()\n fake_lb.get_connection_throttle().AndReturn({})\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_content_caching_enable(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['contentCaching'] = 'ENABLED'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.content_caching = False\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.content_caching = True\n 
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_content_caching_deleted(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['contentCaching'\n ] = 'ENABLED'\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n del update_template['Properties']['contentCaching']\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.content_caching = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.content_caching = False\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_content_caching_disable(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n template['Resources'][lb_name]['Properties']['contentCaching'\n ] = 'ENABLED'\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['contentCaching'] = 'DISABLED'\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb1.content_caching = True\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.content_caching = False\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_delete(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.delete)()\n self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_delete_immutable(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))\n self.m.StubOutWithMock(fake_lb, 'delete')\n fake_lb.delete().AndRaise(Exception('immutable'))\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.delete)()\n self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_delete_non_immutable_exc(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc, fake_lb = self._mock_loadbalancer(template, 
self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)\n self.m.StubOutWithMock(fake_lb, 'delete')\n fake_lb.delete().AndRaise(FakeException())\n self.m.ReplayAll()\n exc = self.assertRaises(exception.ResourceFailure, scheduler.\n TaskRunner(rsrc.delete))\n self.assertIn('FakeException', six.text_type(exc))\n self.m.VerifyAll()\n\n def test_delete_states(self):\n template = self._set_template(self.lb_template, contentCaching=\n 'ENABLED')\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.UnsetStubs()\n fake_lb1 = copy.deepcopy(fake_lb)\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb3 = copy.deepcopy(fake_lb)\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1.status = 'ACTIVE'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n fake_lb2.status = 'PENDING_DELETE'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n fake_lb3.status = 'DELETED'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.delete)()\n self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_redir(self):\n mock_stack = mock.Mock()\n mock_stack.db_resource_get.return_value = None\n mock_stack.has_cache_data.return_value = False\n props = {'httpsRedirect': True, 'protocol': 'HTTPS', 'port': 443,\n 'nodes': [], 'virtualIps': [{'id': '1234'}]}\n mock_resdef = rsrc_defn.ResourceDefinition('test_lb',\n LoadBalancerWithFakeClient, properties=props)\n mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)\n self.assertIsNone(mock_lb.validate())\n props['protocol'] = 'HTTP'\n props['sslTermination'] = {'secureTrafficOnly': True, 'securePort':\n 443, 'privatekey': 'bobloblaw', 'certificate': 'mycert'}\n mock_resdef = rsrc_defn.ResourceDefinition('test_lb_2',\n LoadBalancerWithFakeClient, properties=props)\n mock_lb = lb.CloudLoadBalancer('test_2', mock_resdef, mock_stack)\n self.assertIsNone(mock_lb.validate())\n\n def test_invalid_redir_proto(self):\n mock_stack = mock.Mock()\n mock_stack.db_resource_get.return_value = None\n mock_stack.has_cache_data.return_value = False\n props = {'httpsRedirect': True, 'protocol': 'TCP', 'port': 1234,\n 'nodes': [], 'virtualIps': [{'id': '1234'}]}\n mock_resdef = rsrc_defn.ResourceDefinition('test_lb',\n LoadBalancerWithFakeClient, properties=props)\n mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)\n ex = self.assertRaises(exception.StackValidationFailed, mock_lb.\n validate)\n self.assertIn('HTTPS redirect is only available', six.text_type(ex))\n\n def test_invalid_redir_ssl(self):\n mock_stack = mock.Mock()\n mock_stack.db_resource_get.return_value = None\n mock_stack.has_cache_data.return_value = False\n props = {'httpsRedirect': True, 'protocol': 'HTTP', 'port': 1234,\n 'nodes': [], 'virtualIps': [{'id': '1234'}]}\n mock_resdef = rsrc_defn.ResourceDefinition('test_lb',\n LoadBalancerWithFakeClient, properties=props)\n mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)\n ex = self.assertRaises(exception.StackValidationFailed, mock_lb.\n validate)\n self.assertIn('HTTPS redirect is only available', six.text_type(ex))\n props['sslTermination'] = {'secureTrafficOnly': False, 'securePort':\n 443, 'privatekey': 'bobloblaw', 'certificate': 'mycert'}\n mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)\n 
ex = self.assertRaises(exception.StackValidationFailed, mock_lb.\n validate)\n self.assertIn('HTTPS redirect is only available', six.text_type(ex))\n props['sslTermination'] = {'secureTrafficOnly': True, 'securePort':\n 1234, 'privatekey': 'bobloblaw', 'certificate': 'mycert'}\n mock_lb = lb.CloudLoadBalancer('test', mock_resdef, mock_stack)\n ex = self.assertRaises(exception.StackValidationFailed, mock_lb.\n validate)\n self.assertIn('HTTPS redirect is only available', six.text_type(ex))\n\n def test_update_nodes_condition_draining(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n fake_lb.nodes = self.expected_body['nodes']\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n expected_ip = '172.168.1.4'\n update_template['Properties']['nodes'] = [{'addresses': [\n '166.78.103.141'], 'port': 80, 'condition': 'DRAINING', 'type':\n 'PRIMARY', 'weight': 1}, {'addresses': [expected_ip], 'port': \n 80, 'condition': 'DRAINING', 'type': 'PRIMARY', 'weight': 1}]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb1, 'add_nodes')\n fake_lb1.add_nodes([fake_lb1.Node(address=expected_ip, port=80,\n condition='DRAINING', type='PRIMARY', weight=1)])\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.nodes = [FakeNode(address=u'166.78.103.141', port=80,\n condition=u'DRAINING', type='PRIMARY', weight=1), FakeNode(\n address=u'172.168.1.4', port=80, condition=u'DRAINING', type=\n 'PRIMARY', weight=1)]\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_nodes_add_same_address_different_port(self):\n rsrc, fake_lb = self._mock_loadbalancer(self.lb_template, self.\n lb_name, self.expected_body)\n fake_lb.nodes = self.expected_body['nodes']\n fake_lb.tracker = 'fake_lb'\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['nodes'] = [{'addresses': [\n '166.78.103.141'], 'port': 80, 'condition': 'ENABLED', 'type':\n 'PRIMARY', 'weight': 1}, {'addresses': ['166.78.103.141'],\n 'port': 81, 'condition': 'ENABLED', 'type': 'PRIMARY', 'weight': 1}\n ]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb1, 'add_nodes')\n fake_lb1.add_nodes([fake_lb1.Node(address='166.78.103.141', port=81,\n condition='ENABLED', type='PRIMARY', weight=1)])\n fake_lb1.tracker = 'fake_lb1'\n fake_lb2 = copy.deepcopy(fake_lb)\n fake_lb2.nodes = [FakeNode(address=u'166.78.103.141', port=80,\n condition=u'ENABLED', type='PRIMARY', weight=1), FakeNode(\n address=u'166.78.103.141', port=81, condition=u'ENABLED', type=\n 'PRIMARY', weight=1)]\n fake_lb2.tracker = 'fake_lb2'\n rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n\n def test_update_nodes_defaults(self):\n template = copy.deepcopy(self.lb_template)\n lb_name = list(six.iterkeys(template['Resources']))[0]\n tmpl_node = template['Resources'][lb_name]['Properties']['nodes'][0]\n tmpl_node['type'] = 'PRIMARY'\n 
tmpl_node['condition'] = 'ENABLED'\n tmpl_node['weight'] = 1\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['nodes'] = [FakeNode(address=u'166.78.103.141', port=\n 80, condition=u'ENABLED', type='PRIMARY', weight=1)]\n rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,\n expected_body)\n fake_lb.nodes = self.expected_body['nodes']\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n update_template = copy.deepcopy(rsrc.t)\n update_template['Properties']['nodes'] = [{'addresses': [\n '166.78.103.141'], 'port': 80}]\n self.m.UnsetStubs()\n self.m.StubOutWithMock(rsrc.clb, 'get')\n fake_lb1 = copy.deepcopy(fake_lb)\n rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb1)\n self.m.StubOutWithMock(fake_lb1, 'add_nodes')\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.update, update_template)()\n self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)\n self.m.VerifyAll()\n",
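Editor's note: the escaped strings above and below are alternate serializations of the same Heat test module for Rackspace::Cloud::LoadBalancer, and every test in it follows mox's record/replay/verify cycle. The sketch below is a minimal, self-contained illustration of that cycle, assuming only that the mox package is installed; FakeAPI, its get method, and the status strings are hypothetical stand-ins for illustration, not part of the Heat code under test.

import mox


class FakeAPI(object):
    # Hypothetical stand-in for rsrc.clb; the stub below replaces get(),
    # so this body should never actually run during the demo.
    def get(self, lb_id):
        raise RuntimeError("real call should never run under the stub")


def demo():
    m = mox.Mox()
    api = FakeAPI()

    # Record phase: stub out get() and script the exact sequence of
    # return values, mirroring how the tests above drive a load
    # balancer through PENDING_UPDATE back to ACTIVE.
    m.StubOutWithMock(api, 'get')
    api.get(mox.IgnoreArg()).AndReturn('ACTIVE')
    api.get(mox.IgnoreArg()).AndReturn('PENDING_UPDATE')
    api.get(mox.IgnoreArg()).AndReturn('ACTIVE')

    # Replay phase: from here on, each call is matched against the
    # recorded expectations in order.
    m.ReplayAll()
    statuses = [api.get(1234) for _ in range(3)]
    assert statuses == ['ACTIVE', 'PENDING_UPDATE', 'ACTIVE']

    # Verify phase: fails if any recorded expectation went unused.
    m.VerifyAll()
    m.UnsetStubs()


if __name__ == '__main__':
    demo()

In the tests themselves, the same three phases appear as self.m.StubOutWithMock(...) during recording, self.m.ReplayAll() before exercising scheduler.TaskRunner, and self.m.VerifyAll() at the end, with deep copies of fake_lb scripting each successive status and node list the resource polls.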
"step-5": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport copy\nimport json\nimport uuid\n\nimport mock\nimport mox\nimport six\n\nfrom heat.common import exception\nfrom heat.common import template_format\nfrom heat.engine import resource\nfrom heat.engine import rsrc_defn\nfrom heat.engine import scheduler\nfrom heat.tests import common\nfrom heat.tests import utils\n\nfrom ..resources import cloud_loadbalancer as lb # noqa\n\n# The following fakes are for pyrax\n\n\ncert = \"\"\"\\n-----BEGIN CERTIFICATE-----\nMIIFBjCCAu4CCQDWdcR5LY/+/jANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB\nVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0\ncyBQdHkgTHRkMB4XDTE0MTAxNjE3MDYxNVoXDTE1MTAxNjE3MDYxNVowRTELMAkG\nA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0\nIFdpZGdpdHMgUHR5IEx0ZDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB\nAMm5NcP0tMKHblT6Ud1k8TxZ9/8uOHwUNPbvFsvSyCupj0J0vGCTjbuC2I5T/CXR\ntnLEIt/EarlNAqcjbDCWtSyEKs3zDmmkreoIDEa8pyAQ2ycsCXGMxDN97F3/wlLZ\nagUNM0FwGHLZWBg62bM6l+bpTUcX0PqSyv/aVMhJ8EPDX0Dx1RYsVwUzIe/HWC7x\nvCmtDApAp1Fwq7AwlRaKU17sGwPWJ8+I8PyouBdqNuslHm7LQ0XvBA5DfkQA6feB\nZeJIyOtctM9WFWQI5fKOsyt5P306B3Zztw9VZLAmZ8qHex+R1WY1zXxDAwKEQz/X\n8bRqMA/VU8OxJcK0AmY/1v/TFmAlRh2XBCIc+5UGtCcftWvZJAsKur8Hg5pPluGv\nptyqSgSsSKtOVWkyTANP1LyOkpBA8Kmkeo2CKXu1SCFypY5Q6E+Fy8Y8RaHJPvzR\nNHcm1tkBvHOKyRso6FjvxuJEyIC9EyUK010nwQm7Qui11VgCSHBoaKVvkIbFfQdK\naCes0oQO5dqY0+fC/IFDhrxlvSd2Wk7KjuNjNu9kVN9Ama2pRTxhYKaN+GsHfoL7\nra6G9HjbUVULAdjCko3zOKEUzFLLf1VZYk7hDhyv9kovk0b8sr5WowxW7+9Wy0NK\nWL5f2QgVCcoHw9bGhyuYQCdBfztNmKOWe9pGj6bQAx4pAgMBAAEwDQYJKoZIhvcN\nAQEFBQADggIBALFSj3G2TEL/UWtNcPeY2fbxSGBrboFx3ur8+zTkdZzvfC8H9/UK\nw0aRH0rK4+lKYDqF6A9bUHP17DaJm1lF9In38VVMOuur0ehUIn1S2U3OvlDLN68S\np5D4wGKMcUfUQ6pzhSKJCMvGX561TKHCc5fZhPruy75Xq2DcwJENE189foKLFvJs\nca4sIARqP6v1vfARcfH5leSsdIq8hy6VfL0BRATXfNHZh4SNbyDJYYTxrEUPHYXW\npzW6TziZXYNMG2ZRdHF/mDJuFzw2EklOrPC9MySCZv2i9swnqyuwNYh/SAMhodTv\nZDGy4nbjWNe5BflTMBceh45VpyTcnQulFhZQFwP79fK10BoDrOc1mEefhIqT+fPI\nLJepLOf7CSXtYBcWbmMCLHNh+PrlCiA1QMTyd/AC1vvoiyCbs3M419XbXcBSDEh8\ntACplmhf6z1vDkElWiDr8y0kujJ/Gie24iLTun6oHG+f+o6bbQ9w196T0olLcGx0\noAYL0Olqli6cWHhraVAzZ5t5PH4X9TiESuQ+PMjqGImCIUscXY4objdnB5dfPHoz\neF5whPl36/GK8HUixCibkCyqEOBBuNqhOz7nVLM0eg5L+TE5coizEBagxVCovYSj\nfQ9zkIgaC5oeH6L0C1FFG1vRNSWokheBk14ztVoJCJyFr6p0/6pD7SeR\n-----END CERTIFICATE-----\\n\"\"\"\n\nprivate_key = \"\"\"\\n-----BEGIN PRIVATE 
KEY-----\nMIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDJuTXD9LTCh25U\n+lHdZPE8Wff/Ljh8FDT27xbL0sgrqY9CdLxgk427gtiOU/wl0bZyxCLfxGq5TQKn\nI2wwlrUshCrN8w5ppK3qCAxGvKcgENsnLAlxjMQzfexd/8JS2WoFDTNBcBhy2VgY\nOtmzOpfm6U1HF9D6ksr/2lTISfBDw19A8dUWLFcFMyHvx1gu8bwprQwKQKdRcKuw\nMJUWilNe7BsD1ifPiPD8qLgXajbrJR5uy0NF7wQOQ35EAOn3gWXiSMjrXLTPVhVk\nCOXyjrMreT99Ogd2c7cPVWSwJmfKh3sfkdVmNc18QwMChEM/1/G0ajAP1VPDsSXC\ntAJmP9b/0xZgJUYdlwQiHPuVBrQnH7Vr2SQLCrq/B4OaT5bhr6bcqkoErEirTlVp\nMkwDT9S8jpKQQPCppHqNgil7tUghcqWOUOhPhcvGPEWhyT780TR3JtbZAbxziskb\nKOhY78biRMiAvRMlCtNdJ8EJu0LotdVYAkhwaGilb5CGxX0HSmgnrNKEDuXamNPn\nwvyBQ4a8Zb0ndlpOyo7jYzbvZFTfQJmtqUU8YWCmjfhrB36C+62uhvR421FVCwHY\nwpKN8zihFMxSy39VWWJO4Q4cr/ZKL5NG/LK+VqMMVu/vVstDSli+X9kIFQnKB8PW\nxocrmEAnQX87TZijlnvaRo+m0AMeKQIDAQABAoICAA8DuBrDxgiMqAuvLhS6hLIn\nSCw4NoAVyPNwTFQTdk65qi4aHkNZ+DyyuoetfKEcAOZ97tKU/hSYxM/H9S+QqB+O\nHtmBc9stJLy8qJ1DQXVDi+xYfMN05M2oW8WLWd1szVVe7Ce8vjUeNE5pYvbSL6hC\nSTw3a5ibAH0WtSTLTBTfH+HnniKuXjPG4InGXqvv1j+L38+LjGilaEIO+6nX1ejE\nziX09LWfzcAglsM3ZqsN8jvw6Sr1ZWniYC2Tm9aOTRUQsdPC7LpZ//GYL/Vj5bYg\nqjcZ8KBCcKe1hW8PDL6oYuOwqR+YdZkAK+MuEQtZeWYiWT10dW2la9gYKe2OZuQ1\n7q3zZ6zLP+XP+0N7DRMTTuk2gurBVX7VldzIzvjmW8X+8Q5QO+EAqKr2yordK3S1\nuYcKmyL4Nd6rSFjRo0zSqHMNOyKt3b1r3m/eR2W623rT5uTjgNYpiwCNxnxmcjpK\nSq7JzZKz9NLbEKQWsP9gQ3G6pp3XfLtoOHEDkSKMmQxd8mzK6Ja/9iC+JGqRTJN+\nSTe1vL9L2DC7GnjOH1h2TwLoLtQWSGebf/GBxju0e5pAL0UYWBNjAwcpOoRU9J5J\ny9E7sNbbXTmK2rg3B/5VKGQckBWfurg7CjAmHGgz9xxceJQLKvT1O5zHZc+v4TVB\nXDZjtz8L2k3wFLDynDY5AoIBAQDm2fFgx4vk+gRFXPoLNN34Jw2fT+xuwD/H7K0e\n0Cas0NfyNil/Kbp+rhMHuVXTt86BIY+z8GO4wwn+YdDgihBwobAh2G9T/P6wNm+Q\nNcIeRioml8V/CP7lOQONQJ6sLTRYnNLfB96uMFe+13DO/PjFybee5VflfBUrJK1M\nDqRLwm9wEIf5p0CWYI/ZJaDNN71B09BB/jdT/e7Ro1hXHlq3W4tKqRDPfuUqwy3H\nocYQ1SUk3oFdSiYFd6PijNkfTnrtyToa0xUL9uGL+De1LfgV+uvqkOduQqnpm/5+\nXQC1qbTUjq+4WEsuPjYf2E0WAVFGzwzWcdb0LnMIUJHwPvpLAoIBAQDfsvCZlcFM\nnGBk1zUnV3+21CPK+5+X3zLHr/4otQHlGMFL6ZiQManvKMX6a/cT3rG+LvECcXGD\njSsTu7JIt9l8VTpbPaS76htTmQYaAZERitBx1C8zDMuI2O4bjFLUGUX73RyTZdRm\nG68IX+7Q7SL8zr/fHjcnk+3yj0L1soAVPC7lY3se7vQ/SCre97E+noP5yOhrpnRt\ndij7NYy79xcvUZfc/z0//Ia4JSCcIvv2HO7JZIPzUCVO4sjbUOGsgR9pwwQkwYeP\nb5P0MVaPgFnOgo/rz6Uqe+LpeY83SUwc2q8W8bskzTLZEnwSV5bxCY+gIn9KCZSG\n8QxuftgIiQDbAoIBAQDQ2oTC5kXulzOd/YxK7z2S8OImLAzf9ha+LaZCplcXKqr0\ne4P3hC0xxxN4fXjk3vp5YX+9b9MIqYw1FRIA02gkPmQ3erTd65oQmm88rSY+dYRU\n/iKz19OkVnycIsZrR0qAkQFGvrv8I8h+5DMvUTdQ2jrCCwQGnsgYDEqs8OI7mGFx\npcMfXu3UHvCFqMFeaPtUvuk/i1tLJgYWrA2UY+X21V+j4GlREKEMmyCj5/xl5jCA\ntr2bRSY49BDVOlCFPl+BGfjzo9z6whU0qRDdXgWA/U7LHOYEn1NSAsuwTzwBHtR3\nKdBYm6kI4Ufeb7buHasGwPQAX2X17MAt2ZbvIEsZAoIBAQC4g5dzh5PGhmH4K48b\nYU/l1TukzUIJekAfd+ozV4I1nuKppAeEQILD0yTh9zX4vMJtdbiz5DDWapWylCpt\nUsBgjsgwxDriCSr7HIhs4QfwqUhf67325MHpoc1dCbS0YBhatDpC1kaI5qLMTJzm\n1gL69epLtleWHK2zWjnIAbEmUtr3uMOwczciD3vVKAeZ+BQx72bOjKESPNl2w+fO\njvQfwrR5xEqYQco5j95DC5Q6oAjSM0enZV8wn10/kYpjyKnJieMcEkmnpUgrrpqQ\niTUKYqUlw8OftEopfGwGFT5junmbek57/4nGhTmzw22sac9/LZVC034ghClV5uh4\nudDrAoIBAQCJHfBPJmJMT/WtSATTceVDgZiyezWNgH2yLJMqDP6sEuImnLAg2L9M\nYc6LqMcHLj7CyXfy2AEAuYTZwXFSRmVKl6Ycad7sS/hIL1ykvDveRU9VNImexDBq\nAJR4GKr6jbRZnBztnRYZTsGA+TcrFc6SwdSPXgz7JQT9uw+JkhLi59m141XBdeRc\nNQ/LFgOaxjvRUID81izQaYEyADId7asy+2QVazMDafuALJ23WSUMSXajCXaC6/7N\n53RWrOAb+kFRgjuHM8pQkpgnY/Ds0MZxpakFw3Y7PAEL99xyYdR+rE3JOMjPlgr0\nLpTt0Xs1OFZxaNpolW5Qis4os7UmmIRV\n-----END PRIVATE KEY-----\\n\"\"\"\n\n\nclass FakeException(Exception):\n pass\n\n\nclass FakeClient(object):\n user_agent = \"Fake\"\n USER_AGENT = \"Fake\"\n\n\nclass FakeManager(object):\n api = FakeClient()\n\n def list(self):\n pass\n\n def get(self, item):\n pass\n\n 
def delete(self, item):\n pass\n\n def create(self, *args, **kwargs):\n pass\n\n def find(self, *args, **kwargs):\n pass\n\n def action(self, item, action_type, body=None):\n pass\n\n\nclass FakeLoadBalancerManager(object):\n def __init__(self, api=None, *args, **kwargs):\n pass\n\n def set_content_caching(self, *args, **kwargs):\n pass\n\n\nclass FakeNode(object):\n def __init__(self, address=None, port=None, condition=None, weight=None,\n status=None, parent=None, type=None, id=None):\n if not (address and port):\n # This mimics the check that pyrax does on Node instantiation\n raise TypeError(\"You must include an address and \"\n \"a port when creating a node.\")\n self.address = address\n self.port = port\n self.condition = condition\n self.weight = weight\n self.status = status\n self.parent = parent\n self.type = type\n self.id = id\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def update(self):\n pass\n\n def delete(self):\n pass\n\n\nclass FakeVirtualIP(object):\n def __init__(self, address=None, port=None, condition=None,\n ipVersion=None, type=None, id=None):\n self.address = address\n self.port = port\n self.condition = condition\n self.ipVersion = ipVersion\n self.type = type\n self.id = id\n self.ip_version = ipVersion\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\nclass FakeLoadBalancerClient(object):\n def __init__(self, *args, **kwargs):\n self.Node = FakeNode\n self.VirtualIP = FakeVirtualIP\n pass\n\n def get(self, *args, **kwargs):\n pass\n\n def create(self, *args, **kwargs):\n pass\n\n\nclass FakeLoadBalancer(object):\n def __init__(self, name=None, info=None, *args, **kwargs):\n name = name or uuid.uuid4()\n info = info or {\"fake\": \"fake\"}\n self.id = uuid.uuid4()\n self.manager = FakeLoadBalancerManager()\n self.Node = FakeNode\n self.VirtualIP = FakeVirtualIP\n self.nodes = []\n self.algorithm = \"ROUND_ROBIN\"\n self.session_persistence = \"HTTP_COOKIE\"\n self.connection_logging = False\n self.timeout = None\n self.httpsRedirect = False\n self.protocol = None\n self.port = None\n self.name = None\n self.halfClosed = None\n self.content_caching = False\n\n def get(self, *args, **kwargs):\n pass\n\n def add_nodes(self, *args, **kwargs):\n pass\n\n def add_ssl_termination(self, *args, **kwargs):\n pass\n\n def set_error_page(self, *args, **kwargs):\n pass\n\n def clear_error_page(self, *args, **kwargs):\n pass\n\n def add_access_list(self, *args, **kwargs):\n pass\n\n def update(self, *args, **kwargs):\n pass\n\n def add_health_monitor(self, *args, **kwargs):\n pass\n\n def delete_health_monitor(self, *args, **kwargs):\n pass\n\n def delete_ssl_termination(self, *args, **kwargs):\n pass\n\n def set_metadata(self, *args, **kwargs):\n pass\n\n def delete_metadata(self, *args, **kwargs):\n pass\n\n def add_connection_throttle(self, *args, **kwargs):\n pass\n\n def delete_connection_throttle(self, *args, **kwargs):\n pass\n\n def delete(self, *args, **kwargs):\n pass\n\n def get_health_monitor(self, *args, **kwargs):\n return {}\n\n def get_metadata(self, *args, **kwargs):\n return {}\n\n def get_error_page(self, *args, **kwargs):\n pass\n\n def get_connection_throttle(self, *args, **kwargs):\n pass\n\n def get_ssl_termination(self, *args, **kwargs):\n pass\n\n def get_access_list(self, *args, **kwargs):\n pass\n\n\nclass LoadBalancerWithFakeClient(lb.CloudLoadBalancer):\n def 
cloud_lb(self):\n return FakeLoadBalancerClient()\n\n\ndef override_resource():\n return {\n 'Rackspace::Cloud::LoadBalancer': LoadBalancerWithFakeClient\n }\n\n\nclass LoadBalancerTest(common.HeatTestCase):\n\n def setUp(self):\n super(LoadBalancerTest, self).setUp()\n\n self.lb_template = {\n \"AWSTemplateFormatVersion\": \"2010-09-09\",\n \"Description\": \"fawef\",\n \"Resources\": {\n self._get_lb_resource_name(): {\n \"Type\": \"Rackspace::Cloud::LoadBalancer\",\n \"Properties\": {\n \"name\": \"test-clb\",\n \"nodes\": [{\"addresses\": [\"166.78.103.141\"],\n \"port\": 80,\n \"condition\": \"ENABLED\"}],\n \"protocol\": \"HTTP\",\n \"port\": 80,\n \"virtualIps\": [\n {\"type\": \"PUBLIC\", \"ipVersion\": \"IPV6\"}],\n \"algorithm\": 'LEAST_CONNECTIONS',\n \"connectionThrottle\": {'maxConnectionRate': 1000},\n 'timeout': 110,\n 'contentCaching': 'DISABLED'\n }\n }\n }\n }\n\n self.lb_name = 'test-clb'\n self.expected_body = {\n \"nodes\": [FakeNode(address=u\"166.78.103.141\", port=80,\n condition=u\"ENABLED\", type=u\"PRIMARY\",\n weight=1)],\n \"protocol\": u'HTTP',\n \"port\": 80,\n \"virtual_ips\": [FakeVirtualIP(type=u\"PUBLIC\", ipVersion=u\"IPV6\")],\n \"algorithm\": u'LEAST_CONNECTIONS',\n \"connectionThrottle\": {'maxConnectionRate': 1000,\n 'maxConnections': None,\n 'rateInterval': None,\n 'minConnections': None},\n \"connectionLogging\": None,\n \"halfClosed\": None,\n \"healthMonitor\": None,\n \"metadata\": None,\n \"sessionPersistence\": None,\n \"timeout\": 110,\n \"httpsRedirect\": False\n\n }\n\n lb.resource_mapping = override_resource\n resource._register_class(\"Rackspace::Cloud::LoadBalancer\",\n LoadBalancerWithFakeClient)\n\n def _get_lb_resource_name(self):\n return \"lb-\" + str(uuid.uuid4())\n\n def __getattribute__(self, name):\n if name == 'expected_body' or name == 'lb_template':\n return copy.deepcopy(super(LoadBalancerTest, self)\n .__getattribute__(name))\n return super(LoadBalancerTest, self).__getattribute__(name)\n\n def _mock_create(self, tmpl, stack, resource_name, lb_name, lb_body):\n resource_defns = tmpl.resource_definitions(stack)\n rsrc = LoadBalancerWithFakeClient(resource_name,\n resource_defns[resource_name],\n stack)\n\n fake_lb = FakeLoadBalancer(name=lb_name)\n fake_lb.status = 'ACTIVE'\n fake_lb.resource_id = 1234\n\n self.m.StubOutWithMock(rsrc.clb, 'create')\n rsrc.clb.create(lb_name, **lb_body).AndReturn(fake_lb)\n\n self.m.StubOutWithMock(rsrc.clb, 'get')\n rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(\n fake_lb)\n\n return (rsrc, fake_lb)\n\n def _get_first_resource_name(self, templ):\n return next(k for k in templ['Resources'])\n\n def _mock_loadbalancer(self, lb_template, expected_name, expected_body):\n t = template_format.parse(json.dumps(lb_template))\n self.stack = utils.parse_stack(t, stack_name=utils.random_name())\n\n rsrc, fake_lb = self._mock_create(self.stack.t, self.stack,\n self.\n _get_first_resource_name(\n lb_template),\n expected_name,\n expected_body)\n return (rsrc, fake_lb)\n\n def _set_template(self, templ, **kwargs):\n for k, v in six.iteritems(kwargs):\n templ['Resources'][self._get_first_resource_name(templ)][\n 'Properties'][k] = v\n return templ\n\n def _set_expected(self, expected, **kwargs):\n for k, v in six.iteritems(kwargs):\n expected[k] = v\n return expected\n\n def test_process_node(self):\n nodes = [{'addresses': ['1234'], 'port': 80, 'enabled': True},\n {'addresses': ['4567', '8901', '8903'], 'port': 80,\n 'enabled': True},\n {'addresses': [], 'port': 80, 'enabled': True}]\n rsrc, 
fake_lb = self._mock_loadbalancer(self.lb_template,\n self.lb_name,\n self.expected_body)\n expected_nodes = [{'address': '1234', 'port': 80, 'enabled': True},\n {'address': '4567', 'port': 80, 'enabled': True},\n {'address': '8901', 'port': 80, 'enabled': True},\n {'address': '8903', 'port': 80, 'enabled': True}]\n self.assertEqual(expected_nodes, list(rsrc._process_nodes(nodes)))\n\n def test_nodeless(self):\n \"\"\"It's possible to create a LoadBalancer resource with no nodes.\"\"\"\n template = self._set_template(self.lb_template,\n nodes=[])\n expected_body = copy.deepcopy(self.expected_body)\n expected_body['nodes'] = []\n rsrc, fake_lb = self._mock_loadbalancer(\n template, self.lb_name, expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_alter_properties(self):\n # test alter properties functions\n template = self._set_template(self.lb_template,\n sessionPersistence='HTTP_COOKIE',\n connectionLogging=True,\n metadata={'yolo': 'heeyyy_gurl'})\n\n expected = self._set_expected(self.expected_body,\n sessionPersistence={\n 'persistenceType': 'HTTP_COOKIE'},\n connectionLogging={'enabled': True},\n metadata=[\n {'key': 'yolo',\n 'value': 'heeyyy_gurl'}])\n\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n expected)\n\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_validate_vip(self):\n snippet = {\n \"nodes\": [],\n \"protocol\": 'HTTP',\n \"port\": 80,\n \"halfClosed\": None,\n \"algorithm\": u'LEAST_CONNECTIONS',\n \"virtualIps\": [{\"id\": \"1234\"}]\n }\n stack = mock.Mock()\n stack.db_resource_get.return_value = None\n stack.has_cache_data.return_value = False\n # happy path\n resdef = rsrc_defn.ResourceDefinition(\"testvip\",\n lb.CloudLoadBalancer,\n properties=snippet)\n rsrc = lb.CloudLoadBalancer(\"testvip\", resdef, stack)\n self.assertIsNone(rsrc.validate())\n # make sure the vip id prop is exclusive\n snippet[\"virtualIps\"][0][\"type\"] = \"PUBLIC\"\n exc = self.assertRaises(exception.StackValidationFailed,\n rsrc.validate)\n self.assertIn(\"Cannot specify type or version\", str(exc))\n # make sure you have to specify type and version if no id\n snippet[\"virtualIps\"] = [{}]\n exc = self.assertRaises(exception.StackValidationFailed,\n rsrc.validate)\n self.assertIn(\"Must specify VIP type and version\", str(exc))\n\n def test_validate_half_closed(self):\n # test failure (invalid protocol)\n template = self._set_template(self.lb_template, halfClosed=True)\n expected = self._set_expected(self.expected_body, halfClosed=True)\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n expected)\n exc = self.assertRaises(exception.StackValidationFailed,\n rsrc.validate)\n self.assertIn('The halfClosed property is only available for the TCP'\n ' or TCP_CLIENT_FIRST protocols', str(exc))\n\n # test TCP protocol\n template = self._set_template(template, protocol='TCP')\n expected = self._set_expected(expected, protocol='TCP')\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n # test TCP_CLIENT_FIRST protocol\n template = self._set_template(template,\n protocol='TCP_CLIENT_FIRST')\n expected = self._set_expected(expected,\n protocol='TCP_CLIENT_FIRST')\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def test_validate_health_monitor(self):\n # test connect success\n health_monitor = {\n 'type': 'CONNECT',\n 
'attemptsBeforeDeactivation': 1,\n 'delay': 1,\n 'timeout': 1\n }\n template = self._set_template(self.lb_template,\n healthMonitor=health_monitor)\n expected = self._set_expected(self.expected_body,\n healthMonitor=health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n expected)\n\n self.assertIsNone(rsrc.validate())\n\n # test connect failure\n # bodyRegex is only valid for type 'HTTP(S)'\n health_monitor['bodyRegex'] = 'dfawefawe'\n template = self._set_template(template,\n healthMonitor=health_monitor)\n expected = self._set_expected(expected,\n healthMonitor=health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n expected)\n exc = self.assertRaises(exception.StackValidationFailed,\n rsrc.validate)\n self.assertIn('Unknown Property bodyRegex', str(exc))\n\n # test http fields\n health_monitor['type'] = 'HTTP'\n health_monitor['bodyRegex'] = 'bodyRegex'\n health_monitor['statusRegex'] = 'statusRegex'\n health_monitor['hostHeader'] = 'hostHeader'\n health_monitor['path'] = 'path'\n\n template = self._set_template(template,\n healthMonitor=health_monitor)\n expected = self._set_expected(expected,\n healthMonitor=health_monitor)\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def test_validate_ssl_termination(self):\n ssl_termination = {\n 'privatekey': 'ewfawe',\n 'intermediateCertificate': 'fwaefawe',\n 'secureTrafficOnly': True\n }\n\n # test ssl termination enabled without required fields failure\n template = self._set_template(self.lb_template,\n sslTermination=ssl_termination)\n expected = self._set_expected(self.expected_body,\n sslTermination=ssl_termination)\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n expected)\n\n exc = self.assertRaises(exception.StackValidationFailed, rsrc.validate)\n self.assertIn(\"Property certificate not assigned\", six.text_type(exc))\n\n ssl_termination['certificate'] = 'dfaewfwef'\n template = self._set_template(template,\n sslTermination=ssl_termination)\n expected = self._set_expected(expected,\n sslTermination=ssl_termination)\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n expected)\n self.assertIsNone(rsrc.validate())\n\n def test_ssl_termination_unstripped_certificates(self):\n ssl_termination_template = {\n 'securePort': 443,\n 'privatekey': 'afwefawe',\n 'certificate': ' \\nfawefwea\\n ',\n 'intermediateCertificate': \"\\n\\nintermediate_certificate\\n\",\n 'secureTrafficOnly': False\n }\n ssl_termination_api = copy.deepcopy(ssl_termination_template)\n\n template = self._set_template(self.lb_template,\n sslTermination=ssl_termination_template)\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n self.expected_body)\n self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')\n fake_lb.get_ssl_termination().AndReturn({})\n fake_lb.get_ssl_termination().AndReturn({\n 'securePort': 443,\n 'certificate': 'fawefwea',\n 'intermediateCertificate': \"intermediate_certificate\",\n 'secureTrafficOnly': False,\n 'enabled': True,\n })\n\n self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')\n fake_lb.add_ssl_termination(**ssl_termination_api)\n\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n def test_ssl_termination_intermediateCertificate_None(self):\n ssl_termination_template = {\n 'securePort': 443,\n 'privatekey': 'afwefawe',\n 'certificate': ' \\nfawefwea\\n ',\n 'intermediateCertificate': None,\n 'secureTrafficOnly': 
# ---------------------------------------------------------------------
# Iskratel.ESCOM.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
class Script(BaseScript):
name = "Iskratel.ESCOM.get_version"
cache = True
interface = IGetVersion
rx_ver = re.compile(
r"^\s*SW version\s+(?P<version>\S+).*\n"
r"^\s*Boot version\s+(?P<bootprom>\S+).*\n"
r"^\s*HW version\s+(?P<hardware>\S+).*\n",
re.MULTILINE,
)
rx_ver1 = re.compile(
r"^\s+1\s+(?P<version>\S+)\s+(?P<bootprom>\S+)\s+(?P<hardware>\S+)", re.MULTILINE
)
rx_ver_escom_l = re.compile(
r"SI3000 ESCOM L Series Software,\s*Version\s(?P<version>\S+) Build (?P<version_build>\S+),",
re.MULTILINE,
)
rx_hw_escom_l = re.compile(
r"ROM:\s*System Bootstrap, Version\s*(?P<bootprom>\S+),\s*hardware version:\s*(?P<hardware>\S+)\n"
r"Serial num:(?P<serial>\S+), ID num:(?P<id_number>\S+)\n"
r"System image file is \"(?P<image>\S+)\"",
re.MULTILINE,
)
rx_platform = re.compile(r"^\s*System Description:\s+(?P<platform>.+)\n", re.MULTILINE)
rx_platform1 = re.compile(r"^\s+1\s+(?P<platform>\S+)\s*\n", re.MULTILINE)
rx_serial = re.compile(r"^\s*Serial number : (?P<serial>\S+)")
def execute_cli(self, **kwargs):
v = self.cli("show version", cached=True)
        # Try the most specific pattern first; the for/else construct only
        # raises when none of the patterns matched.
        for platform, ver in [
            ("ESCOM L", self.rx_ver_escom_l),
            ("ESCOM", self.rx_ver),
            ("ESCOM", self.rx_ver1),
        ]:
            match = ver.search(v)
            if match:
                break
        else:
            raise NotImplementedError
if platform == "ESCOM L":
hw_match = self.rx_hw_escom_l.search(v)
return {
"vendor": "Iskratel",
"version": match.group("version"),
"platform": platform,
"image": hw_match.group("image"),
"attributes": {
"Boot PROM": hw_match.group("bootprom"),
"HW version": hw_match.group("hardware"),
"Serial Number": hw_match.group("serial"),
},
}
r = {
"vendor": "Iskratel",
"version": match.group("version"),
"attributes": {
"Boot PROM": match.group("bootprom"),
"HW version": match.group("hardware"),
},
}
v = self.cli("show system", cached=True)
match = self.rx_platform.search(v)
if not match:
match = self.rx_platform1.search(v)
r["platform"] = match.group("platform")
v = self.cli("show system id", cached=True)
match = self.rx_serial.search(v)
if match:
r["attributes"]["Serial Number"] = match.group("serial")
return r
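
A quick way to sanity-check the ESCOM L version pattern above is to run it against sample output. A minimal sketch, with a made-up sample line (illustrative only, not captured from a real device):

import re

# Pattern copied from Script.rx_ver_escom_l above.
rx = re.compile(
    r"SI3000 ESCOM L Series Software,\s*Version\s(?P<version>\S+) Build (?P<version_build>\S+),",
    re.MULTILINE,
)
sample = "SI3000 ESCOM L Series Software, Version 1.2.3 Build 42,"
m = rx.search(sample)
assert m is not None
assert m.group("version") == "1.2.3"
assert m.group("version_build") == "42"
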
forbidden = ['Key.esc', 'Key.cmd', 'Key.cmd_r', 'Key.menu', 'Key.pause',
'Key.scroll_lock', 'Key.print_screen', 'Key.enter', 'Key.space',
'Key.backspace', 'Key.ctrl_l', 'Key.ctrl_r', 'Key.alt_l', 'Key.alt_gr',
'Key.caps_lock', 'Key.num_lock', 'Key.tab', 'Key.shift', 'Key.shift_r',
'Key.insert', 'Key.delete', 'Key.home', 'Key.end', 'Key.page_up',
'Key.page_down', '/']
dict_ = {' ': ' ', 'Key.f1': 'F1', 'Key.f2': 'F2', 'Key.f3': 'F3', 'Key.f4':
'F4', 'Key.f5': 'F5', 'Key.f6': 'F6', 'Key.f7': 'F7', 'Key.f8': 'F8',
'Key.f9': 'F9', 'Key.f10': 'F10', 'Key.f11': 'F11', 'Key.f12': 'F12',
'<96>': 'Num 0', '<97>': 'Num 1', '<98>': 'Num 2', '<99>': 'Num 3',
'<100>': 'Num 4', '<101>': 'Num 5', '<102>': 'Num 6', '<103>': 'Num 7',
'<104>': 'Num 8', '<105>': 'Num 9', '<110>': 'Num .', 'Key.up': 'Up',
'Key.down': 'Down', 'Key.left': 'Left', 'Key.right': 'Right', '\\\\': '\\'}
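# --- Illustrative usage sketch (assumption; `format_key` is not part of the
# --- original snippet): with a pynput listener, str(key) yields names like
# --- "Key.esc" for special keys and "'a'" for printable ones, which these
# --- tables filter and prettify.
def format_key(key_name):
    """Return a display name for a key, or None if it is filtered out."""
    if key_name in forbidden:
        return None
    return dict_.get(key_name, key_name.strip("'"))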
|
normal
|
{
"blob_id": "995dc34ea32de4566e2804b6797d9b551b733ff3",
"index": 3406,
"step-1": "<mask token>\n",
"step-2": "forbidden = ['Key.esc', 'Key.cmd', 'Key.cmd_r', 'Key.menu', 'Key.pause',\n 'Key.scroll_lock', 'Key.print_screen', 'Key.enter', 'Key.space',\n 'Key.backspace', 'Key.ctrl_l', 'Key.ctrl_r', 'Key.alt_l', 'Key.alt_gr',\n 'Key.caps_lock', 'Key.num_lock', 'Key.tab', 'Key.shift', 'Key.shift_r',\n 'Key.insert', 'Key.delete', 'Key.home', 'Key.end', 'Key.page_up',\n 'Key.page_down', '/']\ndict_ = {' ': ' ', 'Key.f1': 'F1', 'Key.f2': 'F2', 'Key.f3': 'F3', 'Key.f4':\n 'F4', 'Key.f5': 'F5', 'Key.f6': 'F6', 'Key.f7': 'F7', 'Key.f8': 'F8',\n 'Key.f9': 'F9', 'Key.f10': 'F10', 'Key.f11': 'F11', 'Key.f12': 'F12',\n '<96>': 'Num 0', '<97>': 'Num 1', '<98>': 'Num 2', '<99>': 'Num 3',\n '<100>': 'Num 4', '<101>': 'Num 5', '<102>': 'Num 6', '<103>': 'Num 7',\n '<104>': 'Num 8', '<105>': 'Num 9', '<110>': 'Num .', 'Key.up': 'Up',\n 'Key.down': 'Down', 'Key.left': 'Left', 'Key.right': 'Right', '\\\\\\\\': '\\\\'}\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
msg = "eduardo foi a feira"  # Portuguese: "eduardo went to the fair"
if 'feira' in msg:  # 'feira' means 'fair'
    print('Sim, foi a feira')  # "Yes, he went to the fair"
else:
    print('não ele não foi a feira')  # "no, he did not go to the fair"
|
normal
|
{
"blob_id": "2a83bc9157e2210da46e58c56fc0b7199856f4c0",
"index": 6287,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif 'feira' in msg:\n print('Sim, foi a feira')\nelse:\n print('não ele não foi a feira')\n",
"step-3": "msg = 'eduardo foi a feira'\nif 'feira' in msg:\n print('Sim, foi a feira')\nelse:\n print('não ele não foi a feira')\n",
"step-4": "msg = \"eduardo foi a feira\"\n\nif 'feira' in msg:\n print('Sim, foi a feira')\nelse:\n print('não ele não foi a feira')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import gzip
import pickle as pkl
import time
from datetime import datetime
import grpc
import numpy as np
from sklearn.utils import shuffle
import neural_nets_pb2 as nn_pb
import neural_nets_pb2_grpc as nn_pb_grpc
from mnist_loader import load_data
from activations import *
# pylint: disable=too-many-arguments
class Layer(nn_pb_grpc.LayerDataExchangeServicer):
"""
    Abstract base layer: collects the methods shared by all layer types.
"""
# pylint: disable=too-many-arguments
def __init__(self, layer_name, upper_layer, lower_layer,
lower_layer_nodes, current_layer_nodes,
nonlin, nonlin_prime):
"""
        layer_name: human-readable name of this layer
        upper_layer / lower_layer: gRPC addresses of the adjacent layers
nonlin: activation function
nonlin_prime: the derivative of activation function
"""
self.layer_name = layer_name
self.upper_layer_addr = upper_layer
self.lower_layer_addr = lower_layer
self.nonlin = nonlin
self.nonlin_prime = nonlin_prime
# lazy initialization
self.upper_layer_stub = None
self.lower_layer_stub = None
# weights dimension
self.weights_shape = (current_layer_nodes, lower_layer_nodes)
self.weights = None
self.biases = None
# record outputs from lower layer
# use batch id as key
# Purposes:
# 1) used for computing the weighted sum of current layer
# 2) used for computing the gradients for updating weights of current layer
self.lower_layer_outputs = {}
# computed from lower layer outputs for cache purpose
# cache for computing delta for current layer
# delta = partial_delta_rec * nonlin_prime(weighted_sum)
        # each batch has its own weighted sum, so entries are keyed by batch id
self.weighted_sum_inputs = {}
def forward_to_upper(self, batch_id, forward_matrix, forward_labels, istrain):
"""
forward output to upper layer
"""
if not self.upper_layer_stub:
self.create_upper_stub()
# convert numpy array to byte string
bytes_matrix = pkl.dumps(forward_matrix, 2)
bytes_labels = pkl.dumps(forward_labels, 2)
# send message to next layer
res = self.upper_layer_stub.UpdateInput(
nn_pb.ForwardMsg(batch_id=batch_id,
output_matrix=bytes_matrix,
labels=bytes_labels,
is_train=istrain))
# print("get response form upper layer", res.message)
def backward_to_lower(self, batch_id, partial_delta, labels):
"""
back propagate error partial_delta to lower layer
partial_delta = dot(self.weights.T, self.delta)
self.delta = delta_received_from_upper * nonlin_prime(z)
"""
# create stub for lower layer
if not self.lower_layer_stub:
self.create_lower_stub()
# convert partial_delta matrix to bytes string
bytes_delta = pkl.dumps(partial_delta)
bytes_labels = pkl.dumps(labels)
res = self.lower_layer_stub.UpdateDelta(
nn_pb.BackwardMsg(batch_id=batch_id,
partial_delta=bytes_delta,
labels=bytes_labels))
# print("get response from lower layer", res.message)
def create_upper_stub(self):
""" create upper_layer_stub for exchanging data between grpc"""
if self.upper_layer_addr:
channel = grpc.insecure_channel(self.upper_layer_addr)
self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
else:
print("no upper layer has been specified")
def create_lower_stub(self):
""" stub for lower layer communication"""
if self.lower_layer_addr:
channel = grpc.insecure_channel(self.lower_layer_addr)
self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
else:
print("no lower layer has been specified")
def init_weights(self, load_weights=None):
"""
        if load_weights is specified, load previously trained weights
"""
if load_weights:
# TODO
pass
else:
# x: lower layer nodes n
# y: current layer nodes n
x = self.weights_shape[1]
y = self.weights_shape[0]
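            # 1/sqrt(fan_in) scaling keeps initial pre-activations at unit scale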
self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member
self.biases = np.random.randn(y, 1) # pylint: disable=no-member
def check_weights(self):
if self.weights is None or self.biases is None:
print("Weights of {} have not initialized".format(self.layer_name))
import sys
sys.exit(-1)
def update_weights(self, lr, delta, outputs_of_lower):
"""
        outputs_of_lower: the lower layer's outputs, i.e. this layer's inputs
"""
delta_shape = delta.shape
inputs_shape = outputs_of_lower.shape
# update biases
avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)
self.biases = self.biases - lr * avg_delta
# compute gradients for weights
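        # shape sketch: delta (B, n, 1) * inputs (B, 1, m) broadcasts to
        # (B, n, m); averaging over the batch axis yields the (n, m) update
        # matching self.weights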
delta = delta.reshape(delta_shape[0], delta_shape[1], 1)
inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])
gradients = delta * inputs
gradients_avg = np.mean(gradients, axis=0)
self.weights = self.weights - lr * gradients_avg
def parse_forward_msg(self, req):
""" extract and transform data in forward message"""
batch_id = req.batch_id
bytes_outputs_of_lower = req.output_matrix
bytes_labels = req.labels
is_train = req.is_train
outputs_of_lower = pkl.loads(bytes_outputs_of_lower)
labels = pkl.loads(bytes_labels)
return batch_id, outputs_of_lower, labels, is_train
# implementing rpc services
def UpdateInput(self, request, context):
# implemented in Hidden Layer and Output Layer
pass
def UpdateDelta(self, request, context):
""" Invoked by upper layer
will be implemented by hidden layer
"""
pass
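# --- Illustrative serving sketch (assumption; `serve_layer` is not part of
# --- the original module). Each layer runs as its own gRPC server and is
# --- registered through the protoc-generated
# --- add_LayerDataExchangeServicer_to_server hook.
def serve_layer(layer, port):
    """Start a gRPC server exposing `layer` on the given port."""
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    nn_pb_grpc.add_LayerDataExchangeServicer_to_server(layer, server)
    server.add_insecure_port("[::]:%d" % port)
    server.start()
    return server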
class InputLayer(Layer):
""" for input data"""
def __init__(self, upper_layer, data_path, input_dim, layer_name="input"):
super().__init__(layer_name, upper_layer,
None, None, input_dim,
None, None)
self.train, self.val, self.test = load_data(data_path)
def start_feed_data(self, batch_size, epochs):
""""""
train_X = self.train[0]
train_y = self.train[1]
val_X = self.val[0]
val_y = self.val[1]
train_size = train_X.shape[0]
batch_id = 0
        test_batch_id = -1  # negative ids distinguish evaluation batches from training batches
for i in range(epochs):
print("Start feed {0} epoch data".format(i))
train_X, train_y = shuffle(train_X, train_y)
for j in range(0, train_size, batch_size):
minibatch_X = train_X[j:j+batch_size]
minibatch_y = train_y[j:j+batch_size]
self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)
batch_id += 1
# send test data for evaluation
self.forward_to_upper(test_batch_id, val_X, val_y, False)
test_batch_id -= 1
def UpdateInput(self, req, ctx):
""""""
print("Should not have lower layer")
return nn_pb.PlainResponse(message="Wrong invoke!")
def UpdateDelta(self, req, ctx):
""""""
batch_id = req.batch_id
if batch_id % 100 == 0:
print("Complete backpropagation for batch {} at {}".format(
batch_id,
datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return nn_pb.PlainResponse(message="Received at layer {}".format(
self.layer_name))
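# Message-flow summary (descriptive comment, added for clarity): ForwardMsg
# carries (batch_id, pickled activation matrix, pickled labels, is_train)
# upward via UpdateInput; BackwardMsg carries (batch_id, pickled partial
# delta, labels) downward via UpdateDelta. Negative batch ids mark
# evaluation passes.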
class HiddenLayer(Layer):
""" hidden layer"""
def __init__(self, layer_name,
upper_layer,
lower_layer,
lower_layer_size,
layer_size,
nonlin,
nonlin_prime,
learning_rate,
enable_synthetic_gradients,
sg_learning_rate
):
"""
        enable_synthetic_gradients: whether to use synthetic gradients
        to approximate the error signal
"""
super().__init__(layer_name, upper_layer,
lower_layer, lower_layer_size,
layer_size, nonlin,
nonlin_prime)
self.lr = learning_rate
self.enable_sg = enable_synthetic_gradients
self.sg_lr = sg_learning_rate
self.sg_weights = None
self.sg_deltas = {}
def init_sg_weights(self):
""" using linear synthetic gradients model
SG(h, y) = hA + yB + C
        see "Understanding Synthetic Gradients and Decoupled Neural Interfaces" (Czarnecki et al., 2017)
"""
n = self.weights_shape[0] # size of current layer
# pylint: disable=no-member
A = np.random.randn(n, n) / np.sqrt(n)
B = np.random.randn(10, n) / np.sqrt(n)
C = np.random.randn(1, n) / np.sqrt(n)
# pylint: enable=no-member
self.sg_weights = [A, B, C]
def check_sg_weights(self):
if self.sg_weights is None:
self.init_sg_weights()
def SG(self, h, y):
""" generate delta by weighted sum and label
h: outputs of this layer
y: labels for this batch
"""
self.check_sg_weights()
A = self.sg_weights[0] #(n, n)
B = self.sg_weights[1] #(10, n)
C = self.sg_weights[2] #(1, n)
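        # shapes: h (B, n) @ A (n, n) + y (B, 10) @ B (10, n) + C (1, n)
        # broadcast to a synthetic delta of shape (B, n)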
delta = np.matmul(h, A) + np.matmul(y, B) + C
return delta
def update_sg_weights(self, true_delta, batch_id):
""" name conventions refer paper :
Understanding synthetic gradients and decoupled neural interface
TODO: synthetic gradient estimates the partial delta instead true gradients
"""
sg_delta = self.sg_deltas[batch_id]
weighted_sum = self.weighted_sum_inputs[batch_id]
labels = self.lower_layer_outputs[batch_id]['labels']
y = labels
h = self.nonlin(weighted_sum)
Err = sg_delta - true_delta
A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err) / h.shape[0]
B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err) / y.shape[0]
C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)
self.sg_weights = [A, B, C]
# del stored delta
del self.sg_deltas[batch_id]
def UpdateInput(self, request, context):
""" Invoked by lower layer
Once inputs updated, start computing the weighted sum
then activation outputs,
then forward outputs to next layer
request: ForwardMsg
"""
self.check_weights()
# get values from message
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(request)
print("Get inputs id: {0}, matrix shape: {1}, labels shape: {2}".format(
batch_id, outputs_of_lower.shape, labels.shape))
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
        # cache inputs during training; they are needed later for the weight update
if is_train:
inputs = {'matrix': outputs_of_lower,
'labels': labels}
self.lower_layer_outputs[batch_id] = inputs
self.weighted_sum_inputs[batch_id] = weighted_sum
activations = self.nonlin(weighted_sum) # apply element wise
# update weights immediately with SG, if enabled SG
if self.enable_sg and is_train:
print("update weights based on SG delta")
sg_delta = self.SG(activations, labels)
            # TODO: use sg_delta to compute the gradient as sg_delta * self.nonlin_prime(z)
self.update_weights(self.lr, sg_delta, outputs_of_lower)
self.sg_deltas[batch_id] = sg_delta
# forward layer outputs
self.forward_to_upper(batch_id, activations, labels, is_train)
print("batch id: {0}, activations shape {1}".format(
batch_id, activations.shape))
# return received
return nn_pb.PlainResponse(message="Inputs received by layer {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
"""
delta shape: (batch_size, size_of_current_layer)
req: BackwardMsg
"""
batch_id = req.batch_id
bytes_partial_delta = req.partial_delta
partial_delta = pkl.loads(bytes_partial_delta)
        bytes_labels = req.labels  # labels are only relayed downstream, not used here
labels = pkl.loads(bytes_labels)
# compute delta for current layer
z = self.weighted_sum_inputs[batch_id]
z_nonlin_prime = self.nonlin_prime(z)
# shape of delta: (batch_size, size_of_layer)
delta = partial_delta * z_nonlin_prime
# compute partial delta for lower layer
partial_delta_for_lower = np.dot(delta, self.weights)
# send partial delta to lower layer
self.backward_to_lower(batch_id,
partial_delta_for_lower,
labels)
if self.enable_sg:
# train the SG
# TODO pass partial delta instead
self.update_sg_weights(delta, batch_id)
else:
# update weights regularly
inputs = self.lower_layer_outputs[batch_id]['matrix']
self.update_weights(self.lr, delta, inputs)
# delete stored for weighted sum
del self.weighted_sum_inputs[batch_id]
# delete stored for lower layer outputs
del self.lower_layer_outputs[batch_id]
return nn_pb.PlainResponse(
message="Partial delta received at {}".format(self.layer_name))
class OutputLayer(Layer):
""" output layer
computing the error based on labels and prediction
using softmax as output activations and cross entropy loss
"""
def __init__(self, layer_name, lower_layer, lower_layer_size,
num_classes, learning_rate ):
super().__init__(layer_name, None,
lower_layer,
lower_layer_size,
num_classes,
None,
None)
self.lr = learning_rate
def UpdateInput(self, req, ctx):
""" once received input from lower layer:
compute weighted sum -> softmax output -> loss -> back propagate
"""
self.check_weights()
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(req)
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
softmax_output = softmax(weighted_sum, axis=1)
# print("weighted sum", weighted_sum)
# print("outputs of lower", outputs_of_lower)
if is_train:
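            # for a softmax output with cross-entropy loss, the gradient
            # w.r.t. the weighted sum simplifies to (softmax_output - labels)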
delta = softmax_output - labels
# compute delta for lower layer first
# because current error is based on current weights
partial_delta_for_lower = np.dot(delta, self.weights)
# send to lower layer
self.backward_to_lower(batch_id, partial_delta_for_lower, labels)
# cross entropy loss
if batch_id % 100 == 0:
total_loss = np.log(softmax_output) * labels # pylint: disable=no-member
# print("total loss: ", np.sum(total_loss))
loss = -1 * np.sum(total_loss) / labels.shape[0]
print("For batch id {}, avg loss: {}".format(batch_id, loss))
# update weights
self.update_weights(self.lr, delta, outputs_of_lower)
else:
# test evaluation
pred_results = np.argmax(softmax_output, axis=1)
matched = sum(int(y == t) for (y, t) in zip(pred_results, labels))
print("Epoch {}, Performance test {} / {}".format(
-1*batch_id, matched, labels.shape[0]))
return nn_pb.PlainResponse(message="Inputs received at {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
""" No upper layer"""
print("Error: No upper layer for output layer")
return nn_pb.PlainResponse(message="Invalid Operation!!")
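# --- End-to-end wiring sketch (illustrative; the ports, dataset path and the
# --- sigmoid/sigmoid_prime activation names are assumptions, not part of the
# --- original module):
#
# input_layer = InputLayer(upper_layer="localhost:50052",
#                          data_path="mnist.pkl.gz", input_dim=784)
# hidden = HiddenLayer("hidden1", upper_layer="localhost:50053",
#                      lower_layer="localhost:50051", lower_layer_size=784,
#                      layer_size=30, nonlin=sigmoid, nonlin_prime=sigmoid_prime,
#                      learning_rate=3.0, enable_synthetic_gradients=False,
#                      sg_learning_rate=0.001)
# hidden.init_weights()
# output = OutputLayer("output", lower_layer="localhost:50052",
#                      lower_layer_size=30, num_classes=10, learning_rate=3.0)
# output.init_weights()
# input_layer.start_feed_data(batch_size=10, epochs=30)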
|
normal
|
{
"blob_id": "fa6f251f27b645fc6827285b5578fd9634c8bb30",
"index": 6361,
"step-1": "<mask token>\n\n\nclass Layer(nn_pb_grpc.LayerDataExchangeServicer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def init_weights(self, load_weights=None):\n \"\"\"\n if load_weights is specified load the trained weights\n \"\"\"\n if load_weights:\n pass\n else:\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x)\n self.biases = np.random.randn(y, 1)\n\n def check_weights(self):\n if self.weights is None or self.biases is None:\n print('Weights of {} have not initialized'.format(self.layer_name))\n import sys\n sys.exit(-1)\n <mask token>\n <mask token>\n <mask token>\n\n def UpdateDelta(self, request, context):\n \"\"\" Invoked by upper layer\n will be implemented by hidden layer\n \"\"\"\n pass\n\n\nclass InputLayer(Layer):\n \"\"\" for input data\"\"\"\n\n def __init__(self, upper_layer, data_path, input_dim, layer_name='input'):\n super().__init__(layer_name, upper_layer, None, None, input_dim,\n None, None)\n self.train, self.val, self.test = load_data(data_path)\n\n def start_feed_data(self, batch_size, epochs):\n \"\"\"\"\"\"\n train_X = self.train[0]\n train_y = self.train[1]\n val_X = self.val[0]\n val_y = self.val[1]\n train_size = train_X.shape[0]\n batch_id = 0\n test_batch_id = -1\n for i in range(epochs):\n print('Start feed {0} epoch data'.format(i))\n train_X, train_y = shuffle(train_X, train_y)\n for j in range(0, train_size, batch_size):\n minibatch_X = train_X[j:j + batch_size]\n minibatch_y = train_y[j:j + batch_size]\n self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)\n batch_id += 1\n self.forward_to_upper(test_batch_id, val_X, val_y, False)\n test_batch_id -= 1\n\n def UpdateInput(self, req, ctx):\n \"\"\"\"\"\"\n print('Should not have lower layer')\n return nn_pb.PlainResponse(message='Wrong invoke!')\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\"\"\"\n batch_id = req.batch_id\n if batch_id % 100 == 0:\n print('Complete backpropagation for batch {} at {}'.format(\n batch_id, datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n return nn_pb.PlainResponse(message='Received at layer {}'.format(\n self.layer_name))\n\n\nclass HiddenLayer(Layer):\n \"\"\" hidden layer\"\"\"\n\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime, learning_rate,\n enable_synthetic_gradients, sg_learning_rate):\n \"\"\"\n enable_synthetic_gradients: whether use synthetic gradients\n to do error approximating\n \"\"\"\n super().__init__(layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime)\n self.lr = learning_rate\n self.enable_sg = enable_synthetic_gradients\n self.sg_lr = sg_learning_rate\n self.sg_weights = None\n self.sg_deltas = {}\n\n def init_sg_weights(self):\n \"\"\" using linear synthetic gradients model\n SG(h, y) = hA + yB + C\n refer to paper, Understanding synthetic gradients and decoupled neural networks\n \"\"\"\n n = self.weights_shape[0]\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n self.sg_weights = [A, B, C]\n\n def check_sg_weights(self):\n if self.sg_weights is None:\n self.init_sg_weights()\n\n def SG(self, h, y):\n \"\"\" generate delta by weighted sum and label\n\n h: outputs of this layer\n y: labels for this batch\n \"\"\"\n self.check_sg_weights()\n A = self.sg_weights[0]\n B = self.sg_weights[1]\n C = self.sg_weights[2]\n delta = np.matmul(h, A) + np.matmul(y, 
B) + C\n return delta\n\n def update_sg_weights(self, true_delta, batch_id):\n \"\"\" name conventions refer paper :\n Understanding synthetic gradients and decoupled neural interface\n TODO: synthetic gradient estimates the partial delta instead true gradients\n \"\"\"\n sg_delta = self.sg_deltas[batch_id]\n weighted_sum = self.weighted_sum_inputs[batch_id]\n labels = self.lower_layer_outputs[batch_id]['labels']\n y = labels\n h = self.nonlin(weighted_sum)\n Err = sg_delta - true_delta\n A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err\n ) / h.shape[0]\n B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err\n ) / y.shape[0]\n C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)\n self.sg_weights = [A, B, C]\n del self.sg_deltas[batch_id]\n\n def UpdateInput(self, request, context):\n \"\"\" Invoked by lower layer\n Once inputs updated, start computing the weighted sum\n then activation outputs,\n then forward outputs to next layer\n request: ForwardMsg\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n request)\n print('Get inputs id: {0}, matrix shape: {1}, labels shape: {2}'.\n format(batch_id, outputs_of_lower.shape, labels.shape))\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n if is_train:\n inputs = {'matrix': outputs_of_lower, 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n activations = self.nonlin(weighted_sum)\n if self.enable_sg and is_train:\n print('update weights based on SG delta')\n sg_delta = self.SG(activations, labels)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print('batch id: {0}, activations shape {1}'.format(batch_id,\n activations.shape))\n return nn_pb.PlainResponse(message='Inputs received by layer {}'.\n format(self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\n delta shape: (batch_size, size_of_current_layer)\n req: BackwardMsg\n \"\"\"\n batch_id = req.batch_id\n bytes_partial_delta = req.partial_delta\n partial_delta = pkl.loads(bytes_partial_delta)\n bytes_labels = req.labels\n labels = pkl.loads(bytes_labels)\n z = self.weighted_sum_inputs[batch_id]\n z_nonlin_prime = self.nonlin_prime(z)\n delta = partial_delta * z_nonlin_prime\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if self.enable_sg:\n self.update_sg_weights(delta, batch_id)\n else:\n inputs = self.lower_layer_outputs[batch_id]['matrix']\n self.update_weights(self.lr, delta, inputs)\n del self.weighted_sum_inputs[batch_id]\n del self.lower_layer_outputs[batch_id]\n return nn_pb.PlainResponse(message='Partial delta received at {}'.\n format(self.layer_name))\n\n\nclass OutputLayer(Layer):\n \"\"\" output layer\n computing the error based on labels and prediction\n using softmax as output activations and cross entropy loss\n\n \"\"\"\n\n def __init__(self, layer_name, lower_layer, lower_layer_size,\n num_classes, learning_rate):\n super().__init__(layer_name, None, lower_layer, lower_layer_size,\n num_classes, None, None)\n self.lr = learning_rate\n\n def UpdateInput(self, req, ctx):\n \"\"\" once received input from lower layer:\n compute weighted sum -> softmax output -> loss -> back propagate\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train 
= self.parse_forward_msg(\n req)\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n softmax_output = softmax(weighted_sum, axis=1)\n if is_train:\n delta = softmax_output - labels\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if batch_id % 100 == 0:\n total_loss = np.log(softmax_output) * labels\n loss = -1 * np.sum(total_loss) / labels.shape[0]\n print('For batch id {}, avg loss: {}'.format(batch_id, loss))\n self.update_weights(self.lr, delta, outputs_of_lower)\n else:\n pred_results = np.argmax(softmax_output, axis=1)\n matched = sum(int(y == t) for y, t in zip(pred_results, labels))\n print('Epoch {}, Performance test {} / {}'.format(-1 * batch_id,\n matched, labels.shape[0]))\n return nn_pb.PlainResponse(message='Inputs received at {}'.format(\n self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\" No upper layer\"\"\"\n print('Error: No upper layer for output layer')\n return nn_pb.PlainResponse(message='Invalid Operation!!')\n",
"step-2": "<mask token>\n\n\nclass Layer(nn_pb_grpc.LayerDataExchangeServicer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def create_upper_stub(self):\n \"\"\" create upper_layer_stub for exchanging data between grpc\"\"\"\n if self.upper_layer_addr:\n channel = grpc.insecure_channel(self.upper_layer_addr)\n self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print('no upper layer has been specified')\n <mask token>\n\n def init_weights(self, load_weights=None):\n \"\"\"\n if load_weights is specified load the trained weights\n \"\"\"\n if load_weights:\n pass\n else:\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x)\n self.biases = np.random.randn(y, 1)\n\n def check_weights(self):\n if self.weights is None or self.biases is None:\n print('Weights of {} have not initialized'.format(self.layer_name))\n import sys\n sys.exit(-1)\n\n def update_weights(self, lr, delta, outputs_of_lower):\n \"\"\"\n outputs of lower: equals to inputs of this layer\n \"\"\"\n delta_shape = delta.shape\n inputs_shape = outputs_of_lower.shape\n avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)\n self.biases = self.biases - lr * avg_delta\n delta = delta.reshape(delta_shape[0], delta_shape[1], 1)\n inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])\n gradients = delta * inputs\n gradients_avg = np.mean(gradients, axis=0)\n self.weights = self.weights - lr * gradients_avg\n <mask token>\n\n def UpdateInput(self, request, context):\n pass\n\n def UpdateDelta(self, request, context):\n \"\"\" Invoked by upper layer\n will be implemented by hidden layer\n \"\"\"\n pass\n\n\nclass InputLayer(Layer):\n \"\"\" for input data\"\"\"\n\n def __init__(self, upper_layer, data_path, input_dim, layer_name='input'):\n super().__init__(layer_name, upper_layer, None, None, input_dim,\n None, None)\n self.train, self.val, self.test = load_data(data_path)\n\n def start_feed_data(self, batch_size, epochs):\n \"\"\"\"\"\"\n train_X = self.train[0]\n train_y = self.train[1]\n val_X = self.val[0]\n val_y = self.val[1]\n train_size = train_X.shape[0]\n batch_id = 0\n test_batch_id = -1\n for i in range(epochs):\n print('Start feed {0} epoch data'.format(i))\n train_X, train_y = shuffle(train_X, train_y)\n for j in range(0, train_size, batch_size):\n minibatch_X = train_X[j:j + batch_size]\n minibatch_y = train_y[j:j + batch_size]\n self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)\n batch_id += 1\n self.forward_to_upper(test_batch_id, val_X, val_y, False)\n test_batch_id -= 1\n\n def UpdateInput(self, req, ctx):\n \"\"\"\"\"\"\n print('Should not have lower layer')\n return nn_pb.PlainResponse(message='Wrong invoke!')\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\"\"\"\n batch_id = req.batch_id\n if batch_id % 100 == 0:\n print('Complete backpropagation for batch {} at {}'.format(\n batch_id, datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n return nn_pb.PlainResponse(message='Received at layer {}'.format(\n self.layer_name))\n\n\nclass HiddenLayer(Layer):\n \"\"\" hidden layer\"\"\"\n\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime, learning_rate,\n enable_synthetic_gradients, sg_learning_rate):\n \"\"\"\n enable_synthetic_gradients: whether use synthetic gradients\n to do error approximating\n \"\"\"\n super().__init__(layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime)\n self.lr = 
learning_rate\n self.enable_sg = enable_synthetic_gradients\n self.sg_lr = sg_learning_rate\n self.sg_weights = None\n self.sg_deltas = {}\n\n def init_sg_weights(self):\n \"\"\" using linear synthetic gradients model\n SG(h, y) = hA + yB + C\n refer to paper, Understanding synthetic gradients and decoupled neural networks\n \"\"\"\n n = self.weights_shape[0]\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n self.sg_weights = [A, B, C]\n\n def check_sg_weights(self):\n if self.sg_weights is None:\n self.init_sg_weights()\n\n def SG(self, h, y):\n \"\"\" generate delta by weighted sum and label\n\n h: outputs of this layer\n y: labels for this batch\n \"\"\"\n self.check_sg_weights()\n A = self.sg_weights[0]\n B = self.sg_weights[1]\n C = self.sg_weights[2]\n delta = np.matmul(h, A) + np.matmul(y, B) + C\n return delta\n\n def update_sg_weights(self, true_delta, batch_id):\n \"\"\" name conventions refer paper :\n Understanding synthetic gradients and decoupled neural interface\n TODO: synthetic gradient estimates the partial delta instead true gradients\n \"\"\"\n sg_delta = self.sg_deltas[batch_id]\n weighted_sum = self.weighted_sum_inputs[batch_id]\n labels = self.lower_layer_outputs[batch_id]['labels']\n y = labels\n h = self.nonlin(weighted_sum)\n Err = sg_delta - true_delta\n A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err\n ) / h.shape[0]\n B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err\n ) / y.shape[0]\n C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)\n self.sg_weights = [A, B, C]\n del self.sg_deltas[batch_id]\n\n def UpdateInput(self, request, context):\n \"\"\" Invoked by lower layer\n Once inputs updated, start computing the weighted sum\n then activation outputs,\n then forward outputs to next layer\n request: ForwardMsg\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n request)\n print('Get inputs id: {0}, matrix shape: {1}, labels shape: {2}'.\n format(batch_id, outputs_of_lower.shape, labels.shape))\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n if is_train:\n inputs = {'matrix': outputs_of_lower, 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n activations = self.nonlin(weighted_sum)\n if self.enable_sg and is_train:\n print('update weights based on SG delta')\n sg_delta = self.SG(activations, labels)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print('batch id: {0}, activations shape {1}'.format(batch_id,\n activations.shape))\n return nn_pb.PlainResponse(message='Inputs received by layer {}'.\n format(self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\n delta shape: (batch_size, size_of_current_layer)\n req: BackwardMsg\n \"\"\"\n batch_id = req.batch_id\n bytes_partial_delta = req.partial_delta\n partial_delta = pkl.loads(bytes_partial_delta)\n bytes_labels = req.labels\n labels = pkl.loads(bytes_labels)\n z = self.weighted_sum_inputs[batch_id]\n z_nonlin_prime = self.nonlin_prime(z)\n delta = partial_delta * z_nonlin_prime\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if self.enable_sg:\n self.update_sg_weights(delta, batch_id)\n else:\n inputs = 
self.lower_layer_outputs[batch_id]['matrix']\n self.update_weights(self.lr, delta, inputs)\n del self.weighted_sum_inputs[batch_id]\n del self.lower_layer_outputs[batch_id]\n return nn_pb.PlainResponse(message='Partial delta received at {}'.\n format(self.layer_name))\n\n\nclass OutputLayer(Layer):\n \"\"\" output layer\n computing the error based on labels and prediction\n using softmax as output activations and cross entropy loss\n\n \"\"\"\n\n def __init__(self, layer_name, lower_layer, lower_layer_size,\n num_classes, learning_rate):\n super().__init__(layer_name, None, lower_layer, lower_layer_size,\n num_classes, None, None)\n self.lr = learning_rate\n\n def UpdateInput(self, req, ctx):\n \"\"\" once received input from lower layer:\n compute weighted sum -> softmax output -> loss -> back propagate\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n req)\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n softmax_output = softmax(weighted_sum, axis=1)\n if is_train:\n delta = softmax_output - labels\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if batch_id % 100 == 0:\n total_loss = np.log(softmax_output) * labels\n loss = -1 * np.sum(total_loss) / labels.shape[0]\n print('For batch id {}, avg loss: {}'.format(batch_id, loss))\n self.update_weights(self.lr, delta, outputs_of_lower)\n else:\n pred_results = np.argmax(softmax_output, axis=1)\n matched = sum(int(y == t) for y, t in zip(pred_results, labels))\n print('Epoch {}, Performance test {} / {}'.format(-1 * batch_id,\n matched, labels.shape[0]))\n return nn_pb.PlainResponse(message='Inputs received at {}'.format(\n self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\" No upper layer\"\"\"\n print('Error: No upper layer for output layer')\n return nn_pb.PlainResponse(message='Invalid Operation!!')\n",
"step-3": "<mask token>\n\n\nclass Layer(nn_pb_grpc.LayerDataExchangeServicer):\n <mask token>\n <mask token>\n\n def forward_to_upper(self, batch_id, forward_matrix, forward_labels,\n istrain):\n \"\"\"\n forward output to upper layer\n \"\"\"\n if not self.upper_layer_stub:\n self.create_upper_stub()\n bytes_matrix = pkl.dumps(forward_matrix, 2)\n bytes_labels = pkl.dumps(forward_labels, 2)\n res = self.upper_layer_stub.UpdateInput(nn_pb.ForwardMsg(batch_id=\n batch_id, output_matrix=bytes_matrix, labels=bytes_labels,\n is_train=istrain))\n <mask token>\n\n def create_upper_stub(self):\n \"\"\" create upper_layer_stub for exchanging data between grpc\"\"\"\n if self.upper_layer_addr:\n channel = grpc.insecure_channel(self.upper_layer_addr)\n self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print('no upper layer has been specified')\n <mask token>\n\n def init_weights(self, load_weights=None):\n \"\"\"\n if load_weights is specified load the trained weights\n \"\"\"\n if load_weights:\n pass\n else:\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x)\n self.biases = np.random.randn(y, 1)\n\n def check_weights(self):\n if self.weights is None or self.biases is None:\n print('Weights of {} have not initialized'.format(self.layer_name))\n import sys\n sys.exit(-1)\n\n def update_weights(self, lr, delta, outputs_of_lower):\n \"\"\"\n outputs of lower: equals to inputs of this layer\n \"\"\"\n delta_shape = delta.shape\n inputs_shape = outputs_of_lower.shape\n avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)\n self.biases = self.biases - lr * avg_delta\n delta = delta.reshape(delta_shape[0], delta_shape[1], 1)\n inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])\n gradients = delta * inputs\n gradients_avg = np.mean(gradients, axis=0)\n self.weights = self.weights - lr * gradients_avg\n <mask token>\n\n def UpdateInput(self, request, context):\n pass\n\n def UpdateDelta(self, request, context):\n \"\"\" Invoked by upper layer\n will be implemented by hidden layer\n \"\"\"\n pass\n\n\nclass InputLayer(Layer):\n \"\"\" for input data\"\"\"\n\n def __init__(self, upper_layer, data_path, input_dim, layer_name='input'):\n super().__init__(layer_name, upper_layer, None, None, input_dim,\n None, None)\n self.train, self.val, self.test = load_data(data_path)\n\n def start_feed_data(self, batch_size, epochs):\n \"\"\"\"\"\"\n train_X = self.train[0]\n train_y = self.train[1]\n val_X = self.val[0]\n val_y = self.val[1]\n train_size = train_X.shape[0]\n batch_id = 0\n test_batch_id = -1\n for i in range(epochs):\n print('Start feed {0} epoch data'.format(i))\n train_X, train_y = shuffle(train_X, train_y)\n for j in range(0, train_size, batch_size):\n minibatch_X = train_X[j:j + batch_size]\n minibatch_y = train_y[j:j + batch_size]\n self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)\n batch_id += 1\n self.forward_to_upper(test_batch_id, val_X, val_y, False)\n test_batch_id -= 1\n\n def UpdateInput(self, req, ctx):\n \"\"\"\"\"\"\n print('Should not have lower layer')\n return nn_pb.PlainResponse(message='Wrong invoke!')\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\"\"\"\n batch_id = req.batch_id\n if batch_id % 100 == 0:\n print('Complete backpropagation for batch {} at {}'.format(\n batch_id, datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n return nn_pb.PlainResponse(message='Received at layer {}'.format(\n self.layer_name))\n\n\nclass HiddenLayer(Layer):\n \"\"\" hidden 
layer\"\"\"\n\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime, learning_rate,\n enable_synthetic_gradients, sg_learning_rate):\n \"\"\"\n enable_synthetic_gradients: whether use synthetic gradients\n to do error approximating\n \"\"\"\n super().__init__(layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime)\n self.lr = learning_rate\n self.enable_sg = enable_synthetic_gradients\n self.sg_lr = sg_learning_rate\n self.sg_weights = None\n self.sg_deltas = {}\n\n def init_sg_weights(self):\n \"\"\" using linear synthetic gradients model\n SG(h, y) = hA + yB + C\n refer to paper, Understanding synthetic gradients and decoupled neural networks\n \"\"\"\n n = self.weights_shape[0]\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n self.sg_weights = [A, B, C]\n\n def check_sg_weights(self):\n if self.sg_weights is None:\n self.init_sg_weights()\n\n def SG(self, h, y):\n \"\"\" generate delta by weighted sum and label\n\n h: outputs of this layer\n y: labels for this batch\n \"\"\"\n self.check_sg_weights()\n A = self.sg_weights[0]\n B = self.sg_weights[1]\n C = self.sg_weights[2]\n delta = np.matmul(h, A) + np.matmul(y, B) + C\n return delta\n\n def update_sg_weights(self, true_delta, batch_id):\n \"\"\" name conventions refer paper :\n Understanding synthetic gradients and decoupled neural interface\n TODO: synthetic gradient estimates the partial delta instead true gradients\n \"\"\"\n sg_delta = self.sg_deltas[batch_id]\n weighted_sum = self.weighted_sum_inputs[batch_id]\n labels = self.lower_layer_outputs[batch_id]['labels']\n y = labels\n h = self.nonlin(weighted_sum)\n Err = sg_delta - true_delta\n A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err\n ) / h.shape[0]\n B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err\n ) / y.shape[0]\n C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)\n self.sg_weights = [A, B, C]\n del self.sg_deltas[batch_id]\n\n def UpdateInput(self, request, context):\n \"\"\" Invoked by lower layer\n Once inputs updated, start computing the weighted sum\n then activation outputs,\n then forward outputs to next layer\n request: ForwardMsg\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n request)\n print('Get inputs id: {0}, matrix shape: {1}, labels shape: {2}'.\n format(batch_id, outputs_of_lower.shape, labels.shape))\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n if is_train:\n inputs = {'matrix': outputs_of_lower, 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n activations = self.nonlin(weighted_sum)\n if self.enable_sg and is_train:\n print('update weights based on SG delta')\n sg_delta = self.SG(activations, labels)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print('batch id: {0}, activations shape {1}'.format(batch_id,\n activations.shape))\n return nn_pb.PlainResponse(message='Inputs received by layer {}'.\n format(self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\n delta shape: (batch_size, size_of_current_layer)\n req: BackwardMsg\n \"\"\"\n batch_id = req.batch_id\n bytes_partial_delta = req.partial_delta\n partial_delta = 
pkl.loads(bytes_partial_delta)\n bytes_labels = req.labels\n labels = pkl.loads(bytes_labels)\n z = self.weighted_sum_inputs[batch_id]\n z_nonlin_prime = self.nonlin_prime(z)\n delta = partial_delta * z_nonlin_prime\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if self.enable_sg:\n self.update_sg_weights(delta, batch_id)\n else:\n inputs = self.lower_layer_outputs[batch_id]['matrix']\n self.update_weights(self.lr, delta, inputs)\n del self.weighted_sum_inputs[batch_id]\n del self.lower_layer_outputs[batch_id]\n return nn_pb.PlainResponse(message='Partial delta received at {}'.\n format(self.layer_name))\n\n\nclass OutputLayer(Layer):\n \"\"\" output layer\n computing the error based on labels and prediction\n using softmax as output activations and cross entropy loss\n\n \"\"\"\n\n def __init__(self, layer_name, lower_layer, lower_layer_size,\n num_classes, learning_rate):\n super().__init__(layer_name, None, lower_layer, lower_layer_size,\n num_classes, None, None)\n self.lr = learning_rate\n\n def UpdateInput(self, req, ctx):\n \"\"\" once received input from lower layer:\n compute weighted sum -> softmax output -> loss -> back propagate\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n req)\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n softmax_output = softmax(weighted_sum, axis=1)\n if is_train:\n delta = softmax_output - labels\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if batch_id % 100 == 0:\n total_loss = np.log(softmax_output) * labels\n loss = -1 * np.sum(total_loss) / labels.shape[0]\n print('For batch id {}, avg loss: {}'.format(batch_id, loss))\n self.update_weights(self.lr, delta, outputs_of_lower)\n else:\n pred_results = np.argmax(softmax_output, axis=1)\n matched = sum(int(y == t) for y, t in zip(pred_results, labels))\n print('Epoch {}, Performance test {} / {}'.format(-1 * batch_id,\n matched, labels.shape[0]))\n return nn_pb.PlainResponse(message='Inputs received at {}'.format(\n self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\" No upper layer\"\"\"\n print('Error: No upper layer for output layer')\n return nn_pb.PlainResponse(message='Invalid Operation!!')\n",
"step-4": "<mask token>\n\n\nclass Layer(nn_pb_grpc.LayerDataExchangeServicer):\n <mask token>\n\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_nodes, current_layer_nodes, nonlin, nonlin_prime):\n \"\"\"\n datasets : the path of mnist dataset\n nonlin: activation function\n nonlin_prime: the derivative of activation function\n \"\"\"\n self.layer_name = layer_name\n self.upper_layer_addr = upper_layer\n self.lower_layer_addr = lower_layer\n self.nonlin = nonlin\n self.nonlin_prime = nonlin_prime\n self.upper_layer_stub = None\n self.lower_layer_stub = None\n self.weights_shape = current_layer_nodes, lower_layer_nodes\n self.weights = None\n self.biases = None\n self.lower_layer_outputs = {}\n self.weighted_sum_inputs = {}\n\n def forward_to_upper(self, batch_id, forward_matrix, forward_labels,\n istrain):\n \"\"\"\n forward output to upper layer\n \"\"\"\n if not self.upper_layer_stub:\n self.create_upper_stub()\n bytes_matrix = pkl.dumps(forward_matrix, 2)\n bytes_labels = pkl.dumps(forward_labels, 2)\n res = self.upper_layer_stub.UpdateInput(nn_pb.ForwardMsg(batch_id=\n batch_id, output_matrix=bytes_matrix, labels=bytes_labels,\n is_train=istrain))\n\n def backward_to_lower(self, batch_id, partial_delta, labels):\n \"\"\"\n back propagate error partial_delta to lower layer\n partial_delta = dot(self.weights.T, self.delta)\n self.delta = delta_received_from_upper * nonlin_prime(z)\n \"\"\"\n if not self.lower_layer_stub:\n self.create_lower_stub()\n bytes_delta = pkl.dumps(partial_delta)\n bytes_labels = pkl.dumps(labels)\n res = self.lower_layer_stub.UpdateDelta(nn_pb.BackwardMsg(batch_id=\n batch_id, partial_delta=bytes_delta, labels=bytes_labels))\n\n def create_upper_stub(self):\n \"\"\" create upper_layer_stub for exchanging data between grpc\"\"\"\n if self.upper_layer_addr:\n channel = grpc.insecure_channel(self.upper_layer_addr)\n self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print('no upper layer has been specified')\n\n def create_lower_stub(self):\n \"\"\" stub for lower layer communication\"\"\"\n if self.lower_layer_addr:\n channel = grpc.insecure_channel(self.lower_layer_addr)\n self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print('no lower layer has been specified')\n\n def init_weights(self, load_weights=None):\n \"\"\"\n if load_weights is specified load the trained weights\n \"\"\"\n if load_weights:\n pass\n else:\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x)\n self.biases = np.random.randn(y, 1)\n\n def check_weights(self):\n if self.weights is None or self.biases is None:\n print('Weights of {} have not initialized'.format(self.layer_name))\n import sys\n sys.exit(-1)\n\n def update_weights(self, lr, delta, outputs_of_lower):\n \"\"\"\n outputs of lower: equals to inputs of this layer\n \"\"\"\n delta_shape = delta.shape\n inputs_shape = outputs_of_lower.shape\n avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)\n self.biases = self.biases - lr * avg_delta\n delta = delta.reshape(delta_shape[0], delta_shape[1], 1)\n inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])\n gradients = delta * inputs\n gradients_avg = np.mean(gradients, axis=0)\n self.weights = self.weights - lr * gradients_avg\n\n def parse_forward_msg(self, req):\n \"\"\" extract and transform data in forward message\"\"\"\n batch_id = req.batch_id\n bytes_outputs_of_lower = req.output_matrix\n bytes_labels = req.labels\n is_train 
= req.is_train\n outputs_of_lower = pkl.loads(bytes_outputs_of_lower)\n labels = pkl.loads(bytes_labels)\n return batch_id, outputs_of_lower, labels, is_train\n\n def UpdateInput(self, request, context):\n pass\n\n def UpdateDelta(self, request, context):\n \"\"\" Invoked by upper layer\n will be implemented by hidden layer\n \"\"\"\n pass\n\n\nclass InputLayer(Layer):\n \"\"\" for input data\"\"\"\n\n def __init__(self, upper_layer, data_path, input_dim, layer_name='input'):\n super().__init__(layer_name, upper_layer, None, None, input_dim,\n None, None)\n self.train, self.val, self.test = load_data(data_path)\n\n def start_feed_data(self, batch_size, epochs):\n \"\"\"\"\"\"\n train_X = self.train[0]\n train_y = self.train[1]\n val_X = self.val[0]\n val_y = self.val[1]\n train_size = train_X.shape[0]\n batch_id = 0\n test_batch_id = -1\n for i in range(epochs):\n print('Start feed {0} epoch data'.format(i))\n train_X, train_y = shuffle(train_X, train_y)\n for j in range(0, train_size, batch_size):\n minibatch_X = train_X[j:j + batch_size]\n minibatch_y = train_y[j:j + batch_size]\n self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)\n batch_id += 1\n self.forward_to_upper(test_batch_id, val_X, val_y, False)\n test_batch_id -= 1\n\n def UpdateInput(self, req, ctx):\n \"\"\"\"\"\"\n print('Should not have lower layer')\n return nn_pb.PlainResponse(message='Wrong invoke!')\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\"\"\"\n batch_id = req.batch_id\n if batch_id % 100 == 0:\n print('Complete backpropagation for batch {} at {}'.format(\n batch_id, datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n return nn_pb.PlainResponse(message='Received at layer {}'.format(\n self.layer_name))\n\n\nclass HiddenLayer(Layer):\n \"\"\" hidden layer\"\"\"\n\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime, learning_rate,\n enable_synthetic_gradients, sg_learning_rate):\n \"\"\"\n enable_synthetic_gradients: whether use synthetic gradients\n to do error approximating\n \"\"\"\n super().__init__(layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime)\n self.lr = learning_rate\n self.enable_sg = enable_synthetic_gradients\n self.sg_lr = sg_learning_rate\n self.sg_weights = None\n self.sg_deltas = {}\n\n def init_sg_weights(self):\n \"\"\" using linear synthetic gradients model\n SG(h, y) = hA + yB + C\n refer to paper, Understanding synthetic gradients and decoupled neural networks\n \"\"\"\n n = self.weights_shape[0]\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n self.sg_weights = [A, B, C]\n\n def check_sg_weights(self):\n if self.sg_weights is None:\n self.init_sg_weights()\n\n def SG(self, h, y):\n \"\"\" generate delta by weighted sum and label\n\n h: outputs of this layer\n y: labels for this batch\n \"\"\"\n self.check_sg_weights()\n A = self.sg_weights[0]\n B = self.sg_weights[1]\n C = self.sg_weights[2]\n delta = np.matmul(h, A) + np.matmul(y, B) + C\n return delta\n\n def update_sg_weights(self, true_delta, batch_id):\n \"\"\" name conventions refer paper :\n Understanding synthetic gradients and decoupled neural interface\n TODO: synthetic gradient estimates the partial delta instead true gradients\n \"\"\"\n sg_delta = self.sg_deltas[batch_id]\n weighted_sum = self.weighted_sum_inputs[batch_id]\n labels = self.lower_layer_outputs[batch_id]['labels']\n y = labels\n h = self.nonlin(weighted_sum)\n Err = sg_delta - 
true_delta\n A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err\n ) / h.shape[0]\n B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err\n ) / y.shape[0]\n C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)\n self.sg_weights = [A, B, C]\n del self.sg_deltas[batch_id]\n\n def UpdateInput(self, request, context):\n \"\"\" Invoked by lower layer\n Once inputs updated, start computing the weighted sum\n then activation outputs,\n then forward outputs to next layer\n request: ForwardMsg\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n request)\n print('Get inputs id: {0}, matrix shape: {1}, labels shape: {2}'.\n format(batch_id, outputs_of_lower.shape, labels.shape))\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n if is_train:\n inputs = {'matrix': outputs_of_lower, 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n activations = self.nonlin(weighted_sum)\n if self.enable_sg and is_train:\n print('update weights based on SG delta')\n sg_delta = self.SG(activations, labels)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print('batch id: {0}, activations shape {1}'.format(batch_id,\n activations.shape))\n return nn_pb.PlainResponse(message='Inputs received by layer {}'.\n format(self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\n delta shape: (batch_size, size_of_current_layer)\n req: BackwardMsg\n \"\"\"\n batch_id = req.batch_id\n bytes_partial_delta = req.partial_delta\n partial_delta = pkl.loads(bytes_partial_delta)\n bytes_labels = req.labels\n labels = pkl.loads(bytes_labels)\n z = self.weighted_sum_inputs[batch_id]\n z_nonlin_prime = self.nonlin_prime(z)\n delta = partial_delta * z_nonlin_prime\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if self.enable_sg:\n self.update_sg_weights(delta, batch_id)\n else:\n inputs = self.lower_layer_outputs[batch_id]['matrix']\n self.update_weights(self.lr, delta, inputs)\n del self.weighted_sum_inputs[batch_id]\n del self.lower_layer_outputs[batch_id]\n return nn_pb.PlainResponse(message='Partial delta received at {}'.\n format(self.layer_name))\n\n\nclass OutputLayer(Layer):\n \"\"\" output layer\n computing the error based on labels and prediction\n using softmax as output activations and cross entropy loss\n\n \"\"\"\n\n def __init__(self, layer_name, lower_layer, lower_layer_size,\n num_classes, learning_rate):\n super().__init__(layer_name, None, lower_layer, lower_layer_size,\n num_classes, None, None)\n self.lr = learning_rate\n\n def UpdateInput(self, req, ctx):\n \"\"\" once received input from lower layer:\n compute weighted sum -> softmax output -> loss -> back propagate\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n req)\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n softmax_output = softmax(weighted_sum, axis=1)\n if is_train:\n delta = softmax_output - labels\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if batch_id % 100 == 0:\n total_loss = np.log(softmax_output) * labels\n loss = -1 * np.sum(total_loss) / labels.shape[0]\n 
print('For batch id {}, avg loss: {}'.format(batch_id, loss))\n self.update_weights(self.lr, delta, outputs_of_lower)\n else:\n pred_results = np.argmax(softmax_output, axis=1)\n matched = sum(int(y == t) for y, t in zip(pred_results, labels))\n print('Epoch {}, Performance test {} / {}'.format(-1 * batch_id,\n matched, labels.shape[0]))\n return nn_pb.PlainResponse(message='Inputs received at {}'.format(\n self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\" No upper layer\"\"\"\n print('Error: No upper layer for output layer')\n return nn_pb.PlainResponse(message='Invalid Operation!!')\n",
"step-5": "import gzip\nimport pickle as pkl\nimport time\nfrom datetime import datetime\n\nimport grpc\nimport numpy as np\nfrom sklearn.utils import shuffle\n\nimport neural_nets_pb2 as nn_pb\nimport neural_nets_pb2_grpc as nn_pb_grpc\nfrom mnist_loader import load_data\nfrom activations import *\n\n\n# pylint: disable=too-many-arguments\n\n\nclass Layer(nn_pb_grpc.LayerDataExchangeServicer):\n \"\"\"\n abstract layer extract common methods\n \"\"\"\n # pylint: disable=too-many-arguments\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_nodes, current_layer_nodes,\n nonlin, nonlin_prime):\n \"\"\"\n datasets : the path of mnist dataset\n nonlin: activation function\n nonlin_prime: the derivative of activation function\n \"\"\"\n self.layer_name = layer_name\n self.upper_layer_addr = upper_layer\n self.lower_layer_addr = lower_layer\n self.nonlin = nonlin\n self.nonlin_prime = nonlin_prime\n\n # lazy initialization\n self.upper_layer_stub = None\n self.lower_layer_stub = None\n\n # weights dimension\n self.weights_shape = (current_layer_nodes, lower_layer_nodes)\n self.weights = None\n self.biases = None\n\n # record outputs from lower layer\n # use batch id as key\n # Purposes:\n # 1) used for computing the weighted sum of current layer\n # 2) used for computing the gradients for updating weights of current layer\n self.lower_layer_outputs = {}\n\n # computed from lower layer outputs for cache purpose\n # cache for computing delta for current layer\n # delta = partial_delta_rec * nonlin_prime(weighted_sum)\n # with different batch we have different weighted sum\n self.weighted_sum_inputs = {}\n\n\n def forward_to_upper(self, batch_id, forward_matrix, forward_labels, istrain):\n \"\"\"\n forward output to upper layer\n \"\"\"\n if not self.upper_layer_stub:\n self.create_upper_stub()\n\n # convert numpy array to byte string\n bytes_matrix = pkl.dumps(forward_matrix, 2)\n bytes_labels = pkl.dumps(forward_labels, 2)\n\n # send message to next layer\n res = self.upper_layer_stub.UpdateInput(\n nn_pb.ForwardMsg(batch_id=batch_id,\n output_matrix=bytes_matrix,\n labels=bytes_labels,\n is_train=istrain))\n # print(\"get response form upper layer\", res.message)\n\n\n def backward_to_lower(self, batch_id, partial_delta, labels):\n \"\"\"\n back propagate error partial_delta to lower layer\n partial_delta = dot(self.weights.T, self.delta)\n self.delta = delta_received_from_upper * nonlin_prime(z)\n \"\"\"\n # create stub for lower layer\n if not self.lower_layer_stub:\n self.create_lower_stub()\n\n # convert partial_delta matrix to bytes string\n bytes_delta = pkl.dumps(partial_delta)\n bytes_labels = pkl.dumps(labels)\n\n res = self.lower_layer_stub.UpdateDelta(\n nn_pb.BackwardMsg(batch_id=batch_id,\n partial_delta=bytes_delta,\n labels=bytes_labels))\n # print(\"get response from lower layer\", res.message)\n\n\n def create_upper_stub(self):\n \"\"\" create upper_layer_stub for exchanging data between grpc\"\"\"\n if self.upper_layer_addr:\n channel = grpc.insecure_channel(self.upper_layer_addr)\n self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print(\"no upper layer has been specified\")\n\n\n def create_lower_stub(self):\n \"\"\" stub for lower layer communication\"\"\"\n if self.lower_layer_addr:\n channel = grpc.insecure_channel(self.lower_layer_addr)\n self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print(\"no lower layer has been specified\")\n\n\n def init_weights(self, load_weights=None):\n \"\"\"\n if 
load_weights is specified load the trained weights\n \"\"\"\n if load_weights:\n # TODO\n pass\n else:\n # x: lower layer nodes n\n # y: current layer nodes n\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member\n self.biases = np.random.randn(y, 1) # pylint: disable=no-member\n\n\n def check_weights(self):\n if self.weights is None or self.biases is None:\n print(\"Weights of {} have not initialized\".format(self.layer_name))\n import sys\n sys.exit(-1)\n\n\n def update_weights(self, lr, delta, outputs_of_lower):\n \"\"\"\n outputs of lower: equals to inputs of this layer\n \"\"\"\n delta_shape = delta.shape\n inputs_shape = outputs_of_lower.shape\n\n # update biases\n avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)\n self.biases = self.biases - lr * avg_delta\n\n # compute gradients for weights\n delta = delta.reshape(delta_shape[0], delta_shape[1], 1)\n inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])\n gradients = delta * inputs\n gradients_avg = np.mean(gradients, axis=0)\n\n self.weights = self.weights - lr * gradients_avg\n\n\n def parse_forward_msg(self, req):\n \"\"\" extract and transform data in forward message\"\"\"\n batch_id = req.batch_id\n bytes_outputs_of_lower = req.output_matrix\n bytes_labels = req.labels\n is_train = req.is_train\n\n outputs_of_lower = pkl.loads(bytes_outputs_of_lower)\n labels = pkl.loads(bytes_labels)\n return batch_id, outputs_of_lower, labels, is_train\n\n\n # implementing rpc services\n def UpdateInput(self, request, context):\n # implemented in Hidden Layer and Output Layer\n pass\n\n\n def UpdateDelta(self, request, context):\n \"\"\" Invoked by upper layer\n will be implemented by hidden layer\n \"\"\"\n pass\n\n\n\nclass InputLayer(Layer):\n \"\"\" for input data\"\"\"\n\n def __init__(self, upper_layer, data_path, input_dim, layer_name=\"input\"):\n super().__init__(layer_name, upper_layer,\n None, None, input_dim,\n None, None)\n\n self.train, self.val, self.test = load_data(data_path)\n\n\n def start_feed_data(self, batch_size, epochs):\n \"\"\"\"\"\"\n train_X = self.train[0]\n train_y = self.train[1]\n val_X = self.val[0]\n val_y = self.val[1]\n train_size = train_X.shape[0]\n batch_id = 0\n test_batch_id = -1 # use negative number, diff with batch_id\n for i in range(epochs):\n print(\"Start feed {0} epoch data\".format(i))\n\n train_X, train_y = shuffle(train_X, train_y)\n for j in range(0, train_size, batch_size):\n minibatch_X = train_X[j:j+batch_size]\n minibatch_y = train_y[j:j+batch_size]\n self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)\n batch_id += 1\n\n # send test data for evaluation\n self.forward_to_upper(test_batch_id, val_X, val_y, False)\n test_batch_id -= 1\n\n\n def UpdateInput(self, req, ctx):\n \"\"\"\"\"\"\n print(\"Should not have lower layer\")\n return nn_pb.PlainResponse(message=\"Wrong invoke!\")\n\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\"\"\"\n batch_id = req.batch_id\n if batch_id % 100 == 0:\n print(\"Complete backpropagation for batch {} at {}\".format(\n batch_id,\n datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n return nn_pb.PlainResponse(message=\"Received at layer {}\".format(\n self.layer_name))\n\n\n\nclass HiddenLayer(Layer):\n \"\"\" hidden layer\"\"\"\n\n def __init__(self, layer_name,\n upper_layer,\n lower_layer,\n lower_layer_size,\n layer_size,\n nonlin,\n nonlin_prime,\n learning_rate,\n enable_synthetic_gradients,\n sg_learning_rate\n ):\n \"\"\"\n 
enable_synthetic_gradients: whether use synthetic gradients\n to do error approximating\n \"\"\"\n super().__init__(layer_name, upper_layer,\n lower_layer, lower_layer_size,\n layer_size, nonlin,\n nonlin_prime)\n self.lr = learning_rate\n self.enable_sg = enable_synthetic_gradients\n self.sg_lr = sg_learning_rate\n self.sg_weights = None\n self.sg_deltas = {}\n\n\n def init_sg_weights(self):\n \"\"\" using linear synthetic gradients model\n SG(h, y) = hA + yB + C\n refer to paper, Understanding synthetic gradients and decoupled neural networks\n \"\"\"\n n = self.weights_shape[0] # size of current layer\n # pylint: disable=no-member\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n # pylint: enable=no-member\n self.sg_weights = [A, B, C]\n\n def check_sg_weights(self):\n if self.sg_weights is None:\n self.init_sg_weights()\n\n\n def SG(self, h, y):\n \"\"\" generate delta by weighted sum and label\n\n h: outputs of this layer\n y: labels for this batch\n \"\"\"\n self.check_sg_weights()\n\n A = self.sg_weights[0] #(n, n)\n B = self.sg_weights[1] #(10, n)\n C = self.sg_weights[2] #(1, n)\n\n delta = np.matmul(h, A) + np.matmul(y, B) + C\n return delta\n\n def update_sg_weights(self, true_delta, batch_id):\n \"\"\" name conventions refer paper :\n Understanding synthetic gradients and decoupled neural interface\n TODO: synthetic gradient estimates the partial delta instead true gradients\n \"\"\"\n sg_delta = self.sg_deltas[batch_id]\n weighted_sum = self.weighted_sum_inputs[batch_id]\n labels = self.lower_layer_outputs[batch_id]['labels']\n y = labels\n\n h = self.nonlin(weighted_sum)\n\n Err = sg_delta - true_delta\n A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err) / h.shape[0]\n B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err) / y.shape[0]\n C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)\n\n self.sg_weights = [A, B, C]\n\n # del stored delta\n del self.sg_deltas[batch_id]\n\n\n def UpdateInput(self, request, context):\n \"\"\" Invoked by lower layer\n Once inputs updated, start computing the weighted sum\n then activation outputs,\n then forward outputs to next layer\n request: ForwardMsg\n \"\"\"\n self.check_weights()\n\n # get values from message\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(request)\n print(\"Get inputs id: {0}, matrix shape: {1}, labels shape: {2}\".format(\n batch_id, outputs_of_lower.shape, labels.shape))\n\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \\\n + self.biases.transpose()\n # saving inputs during training, because for weights updating\n if is_train:\n inputs = {'matrix': outputs_of_lower,\n 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n\n activations = self.nonlin(weighted_sum) # apply element wise\n\n # update weights immediately with SG, if enabled SG\n if self.enable_sg and is_train:\n print(\"update weights based on SG delta\")\n sg_delta = self.SG(activations, labels)\n # TODO use sg_delta to compute the gradients by sg_delta * self.nonline_prime(z)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n\n # forward layer outputs\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print(\"batch id: {0}, activations shape {1}\".format(\n batch_id, activations.shape))\n\n # return received\n return nn_pb.PlainResponse(message=\"Inputs received by 
layer {}\".format(\n self.layer_name))\n\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\n delta shape: (batch_size, size_of_current_layer)\n req: BackwardMsg\n \"\"\"\n batch_id = req.batch_id\n bytes_partial_delta = req.partial_delta\n partial_delta = pkl.loads(bytes_partial_delta)\n bytes_labels = req.labels # variable currently not useful\n labels = pkl.loads(bytes_labels)\n\n # compute delta for current layer\n z = self.weighted_sum_inputs[batch_id]\n z_nonlin_prime = self.nonlin_prime(z)\n\n # shape of delta: (batch_size, size_of_layer)\n delta = partial_delta * z_nonlin_prime\n\n # compute partial delta for lower layer\n partial_delta_for_lower = np.dot(delta, self.weights)\n # send partial delta to lower layer\n self.backward_to_lower(batch_id,\n partial_delta_for_lower,\n labels)\n\n if self.enable_sg:\n # train the SG\n # TODO pass partial delta instead\n self.update_sg_weights(delta, batch_id)\n else:\n # update weights regularly\n inputs = self.lower_layer_outputs[batch_id]['matrix']\n self.update_weights(self.lr, delta, inputs)\n\n\n # delete stored for weighted sum\n del self.weighted_sum_inputs[batch_id]\n # delete stored for lower layer outputs\n del self.lower_layer_outputs[batch_id]\n\n return nn_pb.PlainResponse(\n message=\"Partial delta received at {}\".format(self.layer_name))\n\n\n\nclass OutputLayer(Layer):\n \"\"\" output layer\n computing the error based on labels and prediction\n using softmax as output activations and cross entropy loss\n\n \"\"\"\n def __init__(self, layer_name, lower_layer, lower_layer_size,\n num_classes, learning_rate ):\n super().__init__(layer_name, None,\n lower_layer,\n lower_layer_size,\n num_classes,\n None,\n None)\n self.lr = learning_rate\n\n def UpdateInput(self, req, ctx):\n \"\"\" once received input from lower layer:\n compute weighted sum -> softmax output -> loss -> back propagate\n \"\"\"\n self.check_weights()\n\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(req)\n\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \\\n + self.biases.transpose()\n softmax_output = softmax(weighted_sum, axis=1)\n # print(\"weighted sum\", weighted_sum)\n # print(\"outputs of lower\", outputs_of_lower)\n\n if is_train:\n delta = softmax_output - labels\n # compute delta for lower layer first\n # because current error is based on current weights\n partial_delta_for_lower = np.dot(delta, self.weights)\n # send to lower layer\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n\n # cross entropy loss\n if batch_id % 100 == 0:\n total_loss = np.log(softmax_output) * labels # pylint: disable=no-member\n # print(\"total loss: \", np.sum(total_loss))\n loss = -1 * np.sum(total_loss) / labels.shape[0]\n print(\"For batch id {}, avg loss: {}\".format(batch_id, loss))\n\n # update weights\n self.update_weights(self.lr, delta, outputs_of_lower)\n\n else:\n # test evaluation\n pred_results = np.argmax(softmax_output, axis=1)\n matched = sum(int(y == t) for (y, t) in zip(pred_results, labels))\n print(\"Epoch {}, Performance test {} / {}\".format(\n -1*batch_id, matched, labels.shape[0]))\n\n\n return nn_pb.PlainResponse(message=\"Inputs received at {}\".format(\n self.layer_name))\n\n\n def UpdateDelta(self, req, ctx):\n \"\"\" No upper layer\"\"\"\n print(\"Error: No upper layer for output layer\")\n return nn_pb.PlainResponse(message=\"Invalid Operation!!\")\n",
"step-ids": [
24,
27,
28,
32,
35
]
}
|
[
24,
27,
28,
32,
35
] |
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
import logging
from org.o3project.odenos.core.util.remote_object_interface import RemoteObjectInterface
from org.o3project.odenos.remoteobject.message.request import Request
from org.o3project.odenos.remoteobject.manager.system.component_connection import (
ComponentConnection
)
from org.o3project.odenos.remoteobject.manager.system.\
component_connection_logic_and_network import (
ComponentConnectionLogicAndNetwork)
from org.o3project.odenos.remoteobject.object_property import ObjectProperty
# pylint: disable=R0923
class SystemManagerInterface(RemoteObjectInterface):
COMP_MNGS_PATH = "component_managers"
COMP_MNG_PATH = "component_managers/%s"
EVENT_MNG_PATH = "event_manager"
COMP_TYPES_PATH = "component_types"
COMP_TYPE_PATH = "component_types/%s"
COMPS_PATH = "components"
COMP_PATH = "components/%s"
CONNECTIONS_PATH = "connections"
CONNECTION_PATH = "connections/%s"
OBJECT_PATH = "objects/%s"
def __init__(self, dispatcher, source_object_id=None):
'''
NOTE: source_object_id is required for the ODENOS monitor tool.
'''
logging.debug("Create SystemManagerInterface ID:"
+ dispatcher.system_manager_id)
super(SystemManagerInterface, self).__init__(
dispatcher,
dispatcher.system_manager_id,
source_object_id)
@property
def system_manager_id(self):
return self.object_id
###################################
# Basic request
###################################
# GET Component Managers.
def get_component_managers(self):
logging.debug("GET ComponentManagers")
resp = self._get_object_to_remote_object(self.COMP_MNGS_PATH)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET Event Manager.
def get_event_manager(self):
logging.debug("GET EventManager")
resp = self._get_object_to_remote_object(self.EVENT_MNG_PATH)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET ComponentTypes.
def get_component_types(self):
logging.debug("GET ComponentTypes")
resp = self._get_object_to_remote_object(self.COMP_TYPES_PATH)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET Components.
def get_components(self):
logging.debug("GET Components")
resp = self._get_object_to_remote_object(self.COMPS_PATH)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET Connections.
def get_connections(self):
logging.debug("GET Connections")
resp = self._get_object_to_remote_object(self.CONNECTIONS_PATH)
if resp.is_error(Request.Method.GET):
return None
connections = {}
try:
for conn_id, connection in resp.body.items():
if connection[ComponentConnection.OBJECT_TYPE] ==\
ComponentConnectionLogicAndNetwork.TYPE:
connections[conn_id] =\
ComponentConnectionLogicAndNetwork.create_from_packed(
connection)
else:
connections[conn_id] =\
ComponentConnection.create_from_packed(connection)
        except KeyError as err:
logging.error("GET Connections Invalid Response Message"
+ " KeyError: " + str(err))
return None
return connections
# GET Component Manager.
def get_component_manager(self, comp_mgr_id):
logging.debug("GET ComponentManager ComponentMgrID:" + comp_mgr_id)
path = self.COMP_MNG_PATH % comp_mgr_id
resp = self._get_object_to_remote_object(path)
if resp.is_error(Request.Method.GET):
return None
return resp.body
def add_component_manager(self, compmgr):
logging.debug("object_property of ComponentManager %s is %s",
compmgr.object_id,
compmgr.object_property.packed_object)
path = "component_managers/%s" % compmgr.object_id
resp = self._put_object_to_remote_object(path, compmgr.object_property)
if resp.is_error(Request.Method.PUT):
logging.error("Failed registration to SystemManager.")
compmgr.set_state(ObjectProperty.State.ERROR)
return
logging.info("Complete ComponentManager registration to SystemManager.")
# GET ComponentType.
def get_component_type(self, comp_type):
logging.debug("GET ComponentType Type:" + comp_type)
path = self.COMP_TYPE_PATH % comp_type
resp = self._get_object_to_remote_object(path)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET Component.
def get_component(self, comp_id):
logging.debug("GET Component ComponentID:" + comp_id)
path = self.COMP_PATH % comp_id
resp = self._get_object_to_remote_object(path)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET Connection.
def get_connection(self, conn_id):
logging.debug("GET Connection ConnectionID:" + conn_id)
path = self.CONNECTION_PATH % conn_id
resp = self._get_object_to_remote_object(path)
if resp.is_error(Request.Method.GET):
return None
connection = None
try:
if resp.body[ComponentConnection.OBJECT_TYPE] ==\
ComponentConnectionLogicAndNetwork.TYPE:
connection =\
ComponentConnectionLogicAndNetwork.create_from_packed(
resp.body)
else:
connection =\
ComponentConnection.create_from_packed(resp.body)
        except KeyError as err:
logging.error("GET Connection Invalid Response Message"
+ " KeyError: " + str(err))
return None
return connection
# GET Object.
def get_object(self, object_id):
logging.debug("GET Object ObjectID:" + object_id)
path = self.OBJECT_PATH % object_id
resp = self._get_object_to_remote_object(path)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# PUT Connection.
def put_connection(self, connection):
logging.debug("PUT Connection ConnectionID:" + connection.id)
path = self.CONNECTION_PATH % connection.id
return self._put_object_to_remote_object(path,
connection)
# PUT ComponentManagers.
def put_component_managers(self, property_):
logging.debug("PUT ComponentManagers")
path = self.COMP_MNG_PATH % property_.object_id
return self._put_object_to_remote_object(path,
property_)
# PUT Components.
def put_components(self, property_):
logging.debug("PUT Components")
path = self.COMP_PATH % property_.object_id
return self._put_object_to_remote_object(path,
property_)
# POST Components.
def post_components(self, property_):
logging.debug("POST Components")
return self._post_object_to_remote_object(self.COMPS_PATH,
property_)
# POST Connections.
def post_connections(self, connection):
logging.debug("POST Connections")
return self._post_object_to_remote_object(self.CONNECTIONS_PATH,
connection)
# DELETE ComponentManagers.
def del_component_managers(self, comp_mgr_id):
logging.debug("DELETE ComponentManagers ComponentMgrID:" + comp_mgr_id)
path = self.COMP_MNG_PATH % comp_mgr_id
return self._del_object_to_remote_object(path)
# DELETE Components.
def del_components(self, comp_id):
logging.debug("DELETE Components ComponentID:" + comp_id)
path = self.COMP_PATH % comp_id
return self._del_object_to_remote_object(path)
    # DELETE Connections.
def del_connections(self, conn_id):
logging.debug("DELETE Connections ConnectionID:" + conn_id)
path = self.CONNECTION_PATH % conn_id
return self._del_object_to_remote_object(path)
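# A minimal usage sketch (hypothetical, not part of the original file; assumes
# a configured MessageDispatcher whose system_manager_id reaches a running
# ODENOS system manager):
#
#   sysmgr = SystemManagerInterface(dispatcher)
#   components = sysmgr.get_components() or {}
#   for comp_id in components:
#       print(comp_id)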
|
normal
|
{
"blob_id": "b220189d506737bf8cff9e600d1cfd4d7bc8435d",
"index": 1434,
"step-1": "# -*- coding:utf-8 -*-\n\n# Copyright 2015 NEC Corporation. #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n\nimport logging\n\nfrom org.o3project.odenos.core.util.remote_object_interface import RemoteObjectInterface\nfrom org.o3project.odenos.remoteobject.message.request import Request\nfrom org.o3project.odenos.remoteobject.manager.system.component_connection import (\n ComponentConnection\n)\nfrom org.o3project.odenos.remoteobject.manager.system.\\\n component_connection_logic_and_network import (\n ComponentConnectionLogicAndNetwork)\nfrom org.o3project.odenos.remoteobject.object_property import ObjectProperty\n\n\n# pylint: disable=R0923\nclass SystemManagerInterface(RemoteObjectInterface):\n COMP_MNGS_PATH = \"component_managers\"\n COMP_MNG_PATH = \"component_managers/%s\"\n EVENT_MNG_PATH = \"event_manager\"\n COMP_TYPES_PATH = \"component_types\"\n COMP_TYPE_PATH = \"component_types/%s\"\n COMPS_PATH = \"components\"\n COMP_PATH = \"components/%s\"\n CONNECTIONS_PATH = \"connections\"\n CONNECTION_PATH = \"connections/%s\"\n OBJECT_PATH = \"objects/%s\"\n\n def __init__(self, dispatcher, source_object_id=None):\n '''\n NOTE: source_object_id is required for the ODENOS monitor tool.\n '''\n logging.debug(\"Create SystemManagerInterface ID:\"\n + dispatcher.system_manager_id)\n super(SystemManagerInterface, self).__init__(\n dispatcher,\n dispatcher.system_manager_id,\n source_object_id)\n\n @property\n def system_manager_id(self):\n return self.object_id\n\n ###################################\n # Basic request\n ###################################\n # GET Component Managers.\n def get_component_managers(self):\n logging.debug(\"GET ComponentManagers\")\n resp = self._get_object_to_remote_object(self.COMP_MNGS_PATH)\n if resp.is_error(Request.Method.GET):\n return None\n\n return resp.body\n\n # GET Event Manager.\n def get_event_manager(self):\n logging.debug(\"GET EventManager\")\n resp = self._get_object_to_remote_object(self.EVENT_MNG_PATH)\n if resp.is_error(Request.Method.GET):\n return None\n\n return resp.body\n\n # GET ComponentTypes.\n def get_component_types(self):\n logging.debug(\"GET ComponentTypes\")\n resp = self._get_object_to_remote_object(self.COMP_TYPES_PATH)\n if resp.is_error(Request.Method.GET):\n return None\n\n return resp.body\n\n # GET Components.\n def get_components(self):\n logging.debug(\"GET Components\")\n resp = self._get_object_to_remote_object(self.COMPS_PATH)\n if resp.is_error(Request.Method.GET):\n return None\n\n return resp.body\n\n # GET Connections.\n def get_connections(self):\n logging.debug(\"GET Connections\")\n resp = self._get_object_to_remote_object(self.CONNECTIONS_PATH)\n if resp.is_error(Request.Method.GET):\n return None\n\n connections = {}\n try:\n for conn_id, connection in resp.body.items():\n if connection[ComponentConnection.OBJECT_TYPE] ==\\\n ComponentConnectionLogicAndNetwork.TYPE:\n connections[conn_id] =\\\n ComponentConnectionLogicAndNetwork.create_from_packed(\n connection)\n 
else:\n connections[conn_id] =\\\n ComponentConnection.create_from_packed(connection)\n except KeyError, err:\n logging.error(\"GET Connections Invalid Response Message\"\n + \" KeyError: \" + str(err))\n return None\n\n return connections\n\n # GET Component Manager.\n def get_component_manager(self, comp_mgr_id):\n logging.debug(\"GET ComponentManager ComponentMgrID:\" + comp_mgr_id)\n path = self.COMP_MNG_PATH % comp_mgr_id\n resp = self._get_object_to_remote_object(path)\n if resp.is_error(Request.Method.GET):\n return None\n\n return resp.body\n\n def add_component_manager(self, compmgr):\n logging.debug(\"object_property of ComponentManager %s is %s\",\n compmgr.object_id,\n compmgr.object_property.packed_object)\n\n path = \"component_managers/%s\" % compmgr.object_id\n resp = self._put_object_to_remote_object(path, compmgr.object_property)\n if resp.is_error(Request.Method.PUT):\n logging.error(\"Failed registration to SystemManager.\")\n compmgr.set_state(ObjectProperty.State.ERROR)\n return\n logging.info(\"Complete ComponentManager registration to SystemManager.\")\n\n # GET ComponentType.\n def get_component_type(self, comp_type):\n logging.debug(\"GET ComponentType Type:\" + comp_type)\n path = self.COMP_TYPE_PATH % comp_type\n resp = self._get_object_to_remote_object(path)\n if resp.is_error(Request.Method.GET):\n return None\n\n return resp.body\n\n # GET Component.\n def get_component(self, comp_id):\n logging.debug(\"GET Component ComponentID:\" + comp_id)\n path = self.COMP_PATH % comp_id\n resp = self._get_object_to_remote_object(path)\n if resp.is_error(Request.Method.GET):\n return None\n\n return resp.body\n\n # GET Connection.\n def get_connection(self, conn_id):\n logging.debug(\"GET Connection ConnectionID:\" + conn_id)\n path = self.CONNECTION_PATH % conn_id\n resp = self._get_object_to_remote_object(path)\n if resp.is_error(Request.Method.GET):\n return None\n\n connection = None\n try:\n if resp.body[ComponentConnection.OBJECT_TYPE] ==\\\n ComponentConnectionLogicAndNetwork.TYPE:\n connection =\\\n ComponentConnectionLogicAndNetwork.create_from_packed(\n resp.body)\n else:\n connection =\\\n ComponentConnection.create_from_packed(resp.body)\n except KeyError, err:\n logging.error(\"GET Connection Invalid Response Message\"\n + \" KeyError: \" + str(err))\n return None\n\n return connection\n\n # GET Object.\n def get_object(self, object_id):\n logging.debug(\"GET Object ObjectID:\" + object_id)\n path = self.OBJECT_PATH % object_id\n resp = self._get_object_to_remote_object(path)\n if resp.is_error(Request.Method.GET):\n return None\n\n return resp.body\n\n # PUT Connection.\n def put_connection(self, connection):\n logging.debug(\"PUT Connection ConnectionID:\" + connection.id)\n path = self.CONNECTION_PATH % connection.id\n return self._put_object_to_remote_object(path,\n connection)\n\n # PUT ComponentManagers.\n def put_component_managers(self, property_):\n logging.debug(\"PUT ComponentManagers\")\n path = self.COMP_MNG_PATH % property_.object_id\n return self._put_object_to_remote_object(path,\n property_)\n\n # PUT Components.\n def put_components(self, property_):\n logging.debug(\"PUT Components\")\n path = self.COMP_PATH % property_.object_id\n return self._put_object_to_remote_object(path,\n property_)\n\n # POST Components.\n def post_components(self, property_):\n logging.debug(\"POST Components\")\n return self._post_object_to_remote_object(self.COMPS_PATH,\n property_)\n\n # POST Connections.\n def post_connections(self, connection):\n 
logging.debug(\"POST Connections\")\n return self._post_object_to_remote_object(self.CONNECTIONS_PATH,\n connection)\n\n # DELETE ComponentManagers.\n def del_component_managers(self, comp_mgr_id):\n logging.debug(\"DELETE ComponentManagers ComponentMgrID:\" + comp_mgr_id)\n path = self.COMP_MNG_PATH % comp_mgr_id\n return self._del_object_to_remote_object(path)\n\n # DELETE Components.\n def del_components(self, comp_id):\n logging.debug(\"DELETE Components ComponentID:\" + comp_id)\n path = self.COMP_PATH % comp_id\n return self._del_object_to_remote_object(path)\n\n # DELETE Components.\n def del_connections(self, conn_id):\n logging.debug(\"DELETE Connections ConnectionID:\" + conn_id)\n path = self.CONNECTION_PATH % conn_id\n return self._del_object_to_remote_object(path)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding:utf-8 -*-
# Normalize the format of the Tongyici Cilin (extended synonym thesaurus)
with open('C:\\Users\\lenovo\\Desktop\\哈工大社会计算与信息检索研究中心同义词词林扩展版.txt', encoding='utf-8') as f:
    with open('convert.txt', 'a', encoding='utf-8') as w:
for line in f:
data = line[8:-1].split()
for item in data:
tmp = data.copy()
tmp.remove(item)
tmp.insert(0,item)
w.writelines('\t'.join(tmp)+'\n')
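# Illustrative input/output (a sketch; assumes the usual Cilin line layout of
# an 8-character category code followed by space-separated synonyms):
#   input : "Aa01A02= 人 士 人物"
#   output: "人\t士\t人物", "士\t人\t人物", "人物\t人\t士"  (one line per rotation,
#           each word moved to the front in turn)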
|
normal
|
{
"blob_id": "9109e649a90730df022df898a7760140275ad724",
"index": 4854,
"step-1": "<mask token>\n",
"step-2": "with open('C:\\\\Users\\\\lenovo\\\\Desktop\\\\哈工大社会计算与信息检索研究中心同义词词林扩展版.txt') as f:\n with open('convert.txt', 'a') as w:\n for line in f:\n data = line[8:-1].split()\n for item in data:\n tmp = data.copy()\n tmp.remove(item)\n tmp.insert(0, item)\n w.writelines('\\t'.join(tmp) + '\\n')\n",
"step-3": "# -*- coding:utf-8 -*- \r\n#实现同义词词林的规格化\r\n\r\n\r\nwith open('C:\\\\Users\\\\lenovo\\\\Desktop\\\\哈工大社会计算与信息检索研究中心同义词词林扩展版.txt') as f:\r\n with open('convert.txt','a') as w:\r\n for line in f:\r\n \r\n data = line[8:-1].split()\r\n for item in data:\r\n tmp = data.copy()\r\n tmp.remove(item)\r\n tmp.insert(0,item)\r\n w.writelines('\\t'.join(tmp)+'\\n')",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#allisnone 20200403
#https://github.com/urllib3/urllib3/issues/1434
#https://github.com/dopstar/requests-ntlm2
#https://github.com/requests/requests-ntlm
# based on Python 3
# if you request an https website, you need to add the ASWG CA to the following file:
# /root/.pyenv/versions/3.5.5/lib/python3.5/site-packages/certifi/cacert.pem
# ulimit -n 2000
# pip install requests_ntlm
import argparse
import re
import os
import csv
import string,sys,time,datetime
import requests
from requests_toolbelt.adapters import source
#from requests_ntlm import HttpNtlmAuth
import random
import subprocess
#import zthreads
def get_random_ip_or_user(start,end,prefix='172.16.90.',type='ip'):
if type=='ip' and max(start,end)>255:
end = 255
i = random.randint(start,end)
return prefix + str(i)
def get_random_ips_users(start,end,num,prefix='172.16.90.',type='ip'):
if type=='ip' and max(start,end)>255:
end = 255
sequences = []
for i in range(start,end+1):
sequences.append(prefix+str(i))
if num> len(sequences):
num = len(sequences)
choices = random.sample(sequences,num)
return choices
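# Example (a sketch): get_random_ips_users(1, 5, 3) returns three distinct
# addresses sampled from '172.16.90.1' .. '172.16.90.5', e.g.
# ['172.16.90.4', '172.16.90.1', '172.16.90.5'].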
def popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'):
curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format(
cert,eth,user,proxy,url)
subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8")
try:
        subp.wait(2)  # wait up to 2 seconds for the subprocess to finish
except Exception as e:
print('curl_request_timeout, error: ',e)
return
if subp.poll() == 0:
print(subp.communicate()[1])
else:
print("curl_request-失败: ",curl_cmd)
return
def system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False):
"""
-I: header request
-k: skip ssl
--no-keepalive, keepalive=close
"""
    curl_cmd = ''
if is_http:
basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'
if debug:
pass
else:
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
curl_cmd = basic_cmd.format(eth,user,proxy,url)
else:
basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'
if debug:
pass
else:
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
curl_cmd = basic_cmd.format(cert,eth,user,proxy,url)
try:
os_p = os.system(curl_cmd)
print('curl_cmd=',curl_cmd)
except Exception as e:
print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user))
return
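# Illustrative expansion (a sketch using the defaults above):
#   system_curl_request('http://172.16.0.1', 'userg300', 'eth0:0', is_http=True)
# builds and backgrounds roughly this command:
#   curl -I --no-keepalive --interface eth0:0 --proxy-user userg300:Firewall1 \
#        --proxy-ntlm -x 172.17.33.23:8080 http://172.16.0.1 > /dev/null 2>&1 &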
def get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'):
"""
    Used for URL categorization tests; the source file holds a large list of URLs.
    :param from_file: str
    :return: list of URL strings
"""
txtfile = open(from_file, 'r',encoding='utf-8')
url_list = txtfile.readlines()
for i in range(0,len(url_list)):
url_list[i] = url_list[i].replace('\n','')
# print(url_list[i])
if url_index>=0:
url_var = url_list[i].split(spliter)[url_index].replace(' ','')
#print('url_var=',url_var)
protocol_header = url_var[:9].lower()
if pre_www not in url_var and not ("http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header):
url_var = pre_www + url_var
url_list[i] = url_var
protocol_header = url_list[i][:9].lower()
#print('protocol_header=',protocol_header)
if "http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header:
pass
        else:  # no protocol prefix; default to https
url_list[i] = "https://" + url_list[i]
return url_list
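# Illustrative transformation (a sketch): with url_index=0 and spliter=',',
#   "baidu.com"            -> "https://www.baidu.com"
#   "http://example.com/x" -> unchanged, protocol prefix already present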
def get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,eth_num=254):
"""
inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0
inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253
sequence: start with 0
eth_num: eth sequence start with 0
"""
user_index = sequence % user_num + user_start
eth_index = sequence % eth_num + eth_start
"""
    user_index = sequence
    if sequence > user_num:  # wrap around and reuse via modulo
        user_index = sequence % user_num + user_start
    eth_index = sequence
    if eth_index > eth_num:  # wrap around and reuse via modulo
        eth_index = eth_index % eth_num + eth_start
"""
return user_index,eth_index
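# Example (a sketch) with the defaults user_start=30, user_num=10,
# eth_start=0, eth_num=254:
#   get_eth_user_index(0)  -> (30, 0)
#   get_eth_user_index(12) -> (32, 12)   # 12 % 10 + 30, 12 % 254 + 0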
def callback():
return
def urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253,
ip_prefix = '172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False):
"""
    one ip/eth <--> one user
"""
i = 0
#count = max(len(urls),user_num,eth_num)
#for url in urls:
for i in range(max(user_num,eth_num)):
url = ''
if is_same_url:
if is_http:
url = 'http://172.16.0.1' #use the same url for request test
else:
url = 'https://www.baidu.com'
user_index = i % user_num + user_start
eth_index = i % eth_num + sub_eth_start
#ip = get_random_ip_or_user(start=2,end=254)
#ip = ip_prefix + str(eth_index + 1)
#user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user')
user = 'userg'+str(user_index)
#eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user')
eth = 'eth0:'+str(eth_index)
""" For debug
print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))
print('ip_{0}={1}'.format(i,ip))
print('eth=',eth)
print('user=',user)
print("-" * 50)
"""
#thread_pool.put(system_curl_request, (url,user,eth,), callback)
#popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
#system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug)
#i = i + 1
return
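# Example pairing (a sketch): with user_start=300 and sub_eth_start=0,
# request i=5 is sent as user 'userg305' through interface 'eth0:5'.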
#"""
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='This Python 3 script runs concurrent NTLM authentication tests against ASWG.\n 1. Usage example:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080')
    parser.add_argument('-r','--round', type=int, default=1,help='number of concurrent-auth test rounds; stops after 1 round by default')
    parser.add_argument('-s','--starttime', type=str, default='',help='start time of the first test round, e.g. 16:20:50')
    parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='auth cache timeout in seconds, default 600')
    parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy')
    parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='client IP prefix; only /24 (class C) prefixes are supported by default, adapt for other layouts')
    parser.add_argument('-u','--is-same-url', type=bool, default=True,help='whether to use the same URL for every request')
    parser.add_argument('-u1','--is-http', type=bool, default=True,help='when using the same URL, send http instead of https requests')
    parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='source file of URLs')
    parser.add_argument('-f1','--url-index', type=int, default=0,help='field index within the URL source file, 0-based')
    parser.add_argument('-a0','--start-user-index', type=int, default=0,help='starting auth user index, 0-based')
    parser.add_argument('-a1','--user-num', type=int, default=1275,help='number of auth users')
    parser.add_argument('-e0','--start-eth0-index', type=int, default=0,help='starting sub-interface index, 0-based')
    parser.add_argument('-e1','--sub-eth0-num', type=int, default=1275,help='number of sub-interfaces, one IP address per interface')
    parser.add_argument('-d','--is-debug', type=bool, default=False,help='whether to enable curl debug output')
args = parser.parse_args()
max_round = args.round
first_schedule_time = args.starttime
now = datetime.datetime.now()
now_str = now.strftime("%H:%M:%S")
if first_schedule_time:
if len(first_schedule_time)==8 and len(first_schedule_time.split(':'))==3 and first_schedule_time > now_str:
pass
else:
            print('-s/--starttime format is invalid; enter a time string later than the current time, e.g. 16:20:50')
sys.exit()
else:
nexttime = now + datetime.timedelta(seconds=60)
first_schedule_time = nexttime.strftime("%H:%M:%S")
auth_cache_timeout = args.auth_cache_timeout
proxy = args.aswg_proxy
ip_prefix = args.ip_prefix
    is_same_url = args.is_same_url
url_file = args.url_file
url_index = args.url_index
start_user_index = args.start_user_index
user_num = args.user_num
start_eth0_index = args.start_eth0_index
sub_eth0_num = args.sub_eth0_num
is_debug = args.is_debug
urls = get_urls_from_file(from_file=url_file,url_index=url_index,spliter=',',pre_www='www.')
#print('urls=',urls)
#url = 'https://www.baidu.com'
print('urls_len=',len(urls))
#urls = urls[:300]
print('urls_len=',len(urls))
#from zthreads.threadpools.threadpools import Threadpools
#thread_pool = Threadpools(5)
i = 0
#unique_users = 1275
user_start = start_user_index
user_num = user_num
sub_eth_start = start_eth0_index
eth_num = sub_eth0_num
cert = 'rootCA.cer'
    is_http = args.is_http
#first_schedule_time = "16:45:00"
#auth_cache_timeout = 60
#max_round = 2
print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.format(max_round,first_schedule_time,auth_cache_timeout))
round_num = 0
while True:
#time_now = time.strftime("%H:%M:%S", time.localtime())
now = datetime.datetime.now()
time_now = now.strftime("%H:%M:%S")
if time_now == first_schedule_time:
print('This_schedule_time={0}, round={1}'.format(first_schedule_time,round_num))
start_time = time.time()
urls_resquests(urls, proxy=proxy,user_start=user_start,user_num=user_num,sub_eth_start=sub_eth_start, eth_num=eth_num,
ip_prefix=ip_prefix, cert=cert,is_same_url=is_same_url, is_http=is_http,debug=is_debug)
total_sending_time_seconds = time.time() - start_time
print('total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'.format(total_sending_time_seconds,round_num))
round_num = round_num + 1
if round_num >= max_round:
print("-" * 50)
print('Finished all test with {0} rounds!!!'.format(max_round))
break
else:
print("-" * 50)
                print('Please make sure to clear the auth cache before the next scheduled time!!!')
#now = datetime.datetime.now()
#date_str = now.strftime("%Y-%m-%d ")
#last_schedule_time_str = date_str + first_schedule_time
last_schedule_time = datetime.datetime.strptime(now.strftime("%Y-%m-%d ") + first_schedule_time,'%Y-%m-%d %H:%M:%S')
nexttime = last_schedule_time + datetime.timedelta(seconds=auth_cache_timeout+60) # delay 60 seconds
first_schedule_time = nexttime.strftime("%H:%M:%S")
print('Next_schedule_time={0}...'.format(first_schedule_time))
#time.sleep(sleep_time)
else:
#print('time_now=',time_now)
pass
#thread_pool.close()
#initial_requests_session(ip=ip,user=ntlm_user)
|
normal
|
{
"blob_id": "a7fae2da8abba6e05b4fc90dec8826194d189853",
"index": 2758,
"step-1": "<mask token>\n\n\ndef get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n i = random.randint(start, end)\n return prefix + str(i)\n\n\ndef get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n sequences = []\n for i in range(start, end + 1):\n sequences.append(prefix + str(i))\n if num > len(sequences):\n num = len(sequences)\n choices = random.sample(sequences, num)\n return choices\n\n\ndef popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer'):\n curl_cmd = (\n 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n .format(cert, eth, user, proxy, url))\n subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n try:\n subp.wait(2)\n except Exception as e:\n print('curl_request_timeout, error: ', e)\n return\n if subp.poll() == 0:\n print(subp.communicate()[1])\n else:\n print('curl_request-失败: ', curl_cmd)\n return\n\n\ndef system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer', is_http=False, debug=False):\n \"\"\"\n -I: header request\n -k: skip ssl\n --no-keepalive, keepalive=close\n \"\"\"\n curl_cmd = ''\n debug = False\n if is_http:\n basic_cmd = (\n 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(eth, user, proxy, url)\n else:\n basic_cmd = (\n 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)\n try:\n os_p = os.system(curl_cmd)\n print('curl_cmd=', curl_cmd)\n except Exception as e:\n print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.\n format(curl_cmd, e, url, user))\n return\n\n\ndef get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',\n pre_www='www.'):\n \"\"\"\n 用于url分类测试,测试文件中存放大量的url地址\n :param from_file: str \n :return: list, URL_list(Generator)\n \"\"\"\n txtfile = open(from_file, 'r', encoding='utf-8')\n url_list = txtfile.readlines()\n for i in range(0, len(url_list)):\n url_list[i] = url_list[i].replace('\\n', '')\n if url_index >= 0:\n url_var = url_list[i].split(spliter)[url_index].replace(' ', '')\n protocol_header = url_var[:9].lower()\n if pre_www not in url_var and not ('http://' in protocol_header or\n 'https://' in protocol_header or 'ftp://' in protocol_header):\n url_var = pre_www + url_var\n url_list[i] = url_var\n protocol_header = url_list[i][:9].lower()\n if ('http://' in protocol_header or 'https://' in protocol_header or\n 'ftp://' in protocol_header):\n pass\n else:\n url_list[i] = 'https://' + url_list[i]\n return url_list\n\n\ndef get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,\n eth_num=254):\n \"\"\"\n inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0\n inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253\n sequence: start with 0\n eth_num: eth sequence start with 0\n \"\"\"\n user_index = sequence % user_num + user_start\n eth_index = sequence % eth_num + eth_start\n \"\"\"\n user_index = sequence\n if sequence>user_num: #循环,复用,取余\n user_index = sequence % user_num + user_start\n eth_index = sequence\n if 
eth_index>eth_num: #循环,复用,取余\n eth_index = eth_index % eth_num + eth_start\n \"\"\"\n return user_index, eth_index\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n i = random.randint(start, end)\n return prefix + str(i)\n\n\ndef get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n sequences = []\n for i in range(start, end + 1):\n sequences.append(prefix + str(i))\n if num > len(sequences):\n num = len(sequences)\n choices = random.sample(sequences, num)\n return choices\n\n\ndef popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer'):\n curl_cmd = (\n 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n .format(cert, eth, user, proxy, url))\n subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n try:\n subp.wait(2)\n except Exception as e:\n print('curl_request_timeout, error: ', e)\n return\n if subp.poll() == 0:\n print(subp.communicate()[1])\n else:\n print('curl_request-失败: ', curl_cmd)\n return\n\n\ndef system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer', is_http=False, debug=False):\n \"\"\"\n -I: header request\n -k: skip ssl\n --no-keepalive, keepalive=close\n \"\"\"\n curl_cmd = ''\n debug = False\n if is_http:\n basic_cmd = (\n 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(eth, user, proxy, url)\n else:\n basic_cmd = (\n 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)\n try:\n os_p = os.system(curl_cmd)\n print('curl_cmd=', curl_cmd)\n except Exception as e:\n print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.\n format(curl_cmd, e, url, user))\n return\n\n\ndef get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',\n pre_www='www.'):\n \"\"\"\n 用于url分类测试,测试文件中存放大量的url地址\n :param from_file: str \n :return: list, URL_list(Generator)\n \"\"\"\n txtfile = open(from_file, 'r', encoding='utf-8')\n url_list = txtfile.readlines()\n for i in range(0, len(url_list)):\n url_list[i] = url_list[i].replace('\\n', '')\n if url_index >= 0:\n url_var = url_list[i].split(spliter)[url_index].replace(' ', '')\n protocol_header = url_var[:9].lower()\n if pre_www not in url_var and not ('http://' in protocol_header or\n 'https://' in protocol_header or 'ftp://' in protocol_header):\n url_var = pre_www + url_var\n url_list[i] = url_var\n protocol_header = url_list[i][:9].lower()\n if ('http://' in protocol_header or 'https://' in protocol_header or\n 'ftp://' in protocol_header):\n pass\n else:\n url_list[i] = 'https://' + url_list[i]\n return url_list\n\n\ndef get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,\n eth_num=254):\n \"\"\"\n inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0\n inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253\n sequence: start with 0\n eth_num: eth sequence start with 0\n \"\"\"\n user_index = sequence % user_num + user_start\n eth_index = sequence % eth_num + eth_start\n \"\"\"\n user_index = sequence\n if sequence>user_num: #循环,复用,取余\n user_index = sequence % user_num + user_start\n eth_index = sequence\n if 
eth_index>eth_num: #循环,复用,取余\n eth_index = eth_index % eth_num + eth_start\n \"\"\"\n return user_index, eth_index\n\n\ndef callback():\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n i = random.randint(start, end)\n return prefix + str(i)\n\n\ndef get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n sequences = []\n for i in range(start, end + 1):\n sequences.append(prefix + str(i))\n if num > len(sequences):\n num = len(sequences)\n choices = random.sample(sequences, num)\n return choices\n\n\ndef popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer'):\n curl_cmd = (\n 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n .format(cert, eth, user, proxy, url))\n subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n try:\n subp.wait(2)\n except Exception as e:\n print('curl_request_timeout, error: ', e)\n return\n if subp.poll() == 0:\n print(subp.communicate()[1])\n else:\n print('curl_request-失败: ', curl_cmd)\n return\n\n\ndef system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer', is_http=False, debug=False):\n \"\"\"\n -I: header request\n -k: skip ssl\n --no-keepalive, keepalive=close\n \"\"\"\n curl_cmd = ''\n debug = False\n if is_http:\n basic_cmd = (\n 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(eth, user, proxy, url)\n else:\n basic_cmd = (\n 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)\n try:\n os_p = os.system(curl_cmd)\n print('curl_cmd=', curl_cmd)\n except Exception as e:\n print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.\n format(curl_cmd, e, url, user))\n return\n\n\ndef get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',\n pre_www='www.'):\n \"\"\"\n 用于url分类测试,测试文件中存放大量的url地址\n :param from_file: str \n :return: list, URL_list(Generator)\n \"\"\"\n txtfile = open(from_file, 'r', encoding='utf-8')\n url_list = txtfile.readlines()\n for i in range(0, len(url_list)):\n url_list[i] = url_list[i].replace('\\n', '')\n if url_index >= 0:\n url_var = url_list[i].split(spliter)[url_index].replace(' ', '')\n protocol_header = url_var[:9].lower()\n if pre_www not in url_var and not ('http://' in protocol_header or\n 'https://' in protocol_header or 'ftp://' in protocol_header):\n url_var = pre_www + url_var\n url_list[i] = url_var\n protocol_header = url_list[i][:9].lower()\n if ('http://' in protocol_header or 'https://' in protocol_header or\n 'ftp://' in protocol_header):\n pass\n else:\n url_list[i] = 'https://' + url_list[i]\n return url_list\n\n\ndef get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,\n eth_num=254):\n \"\"\"\n inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0\n inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253\n sequence: start with 0\n eth_num: eth sequence start with 0\n \"\"\"\n user_index = sequence % user_num + user_start\n eth_index = sequence % eth_num + eth_start\n \"\"\"\n user_index = sequence\n if sequence>user_num: #循环,复用,取余\n user_index = sequence % user_num + user_start\n eth_index = sequence\n if 
eth_index>eth_num: #循环,复用,取余\n eth_index = eth_index % eth_num + eth_start\n \"\"\"\n return user_index, eth_index\n\n\ndef callback():\n return\n\n\ndef urls_resquests(urls, proxy='172.17.33.23:8080', user_start=300,\n user_num=253, sub_eth_start=0, eth_num=253, ip_prefix='172.18.1.', cert\n ='rootCA.cer', is_same_url=False, is_http=False, debug=False):\n \"\"\"\n one ip/eth<--> one user\n \"\"\"\n i = 0\n for i in range(max(user_num, eth_num)):\n url = ''\n if is_same_url:\n if is_http:\n url = 'http://172.16.0.1'\n else:\n url = 'https://www.baidu.com'\n user_index = i % user_num + user_start\n eth_index = i % eth_num + sub_eth_start\n user = 'userg' + str(user_index)\n eth = 'eth0:' + str(eth_index)\n \"\"\" For debug\n print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))\n print('ip_{0}={1}'.format(i,ip))\n print('eth=',eth)\n print('user=',user)\n print(\"-\" * 50)\n \"\"\"\n system_curl_request(url, user, eth, proxy=proxy, cert=cert, is_http\n =is_http, debug=debug)\n return\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n i = random.randint(start, end)\n return prefix + str(i)\n\n\ndef get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n sequences = []\n for i in range(start, end + 1):\n sequences.append(prefix + str(i))\n if num > len(sequences):\n num = len(sequences)\n choices = random.sample(sequences, num)\n return choices\n\n\ndef popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer'):\n curl_cmd = (\n 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n .format(cert, eth, user, proxy, url))\n subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n try:\n subp.wait(2)\n except Exception as e:\n print('curl_request_timeout, error: ', e)\n return\n if subp.poll() == 0:\n print(subp.communicate()[1])\n else:\n print('curl_request-失败: ', curl_cmd)\n return\n\n\ndef system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer', is_http=False, debug=False):\n \"\"\"\n -I: header request\n -k: skip ssl\n --no-keepalive, keepalive=close\n \"\"\"\n curl_cmd = ''\n debug = False\n if is_http:\n basic_cmd = (\n 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(eth, user, proxy, url)\n else:\n basic_cmd = (\n 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)\n try:\n os_p = os.system(curl_cmd)\n print('curl_cmd=', curl_cmd)\n except Exception as e:\n print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.\n format(curl_cmd, e, url, user))\n return\n\n\ndef get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',\n pre_www='www.'):\n \"\"\"\n 用于url分类测试,测试文件中存放大量的url地址\n :param from_file: str \n :return: list, URL_list(Generator)\n \"\"\"\n txtfile = open(from_file, 'r', encoding='utf-8')\n url_list = txtfile.readlines()\n for i in range(0, len(url_list)):\n url_list[i] = url_list[i].replace('\\n', '')\n if url_index >= 0:\n url_var = url_list[i].split(spliter)[url_index].replace(' ', '')\n protocol_header = url_var[:9].lower()\n if pre_www not in url_var and not ('http://' in protocol_header or\n 'https://' in protocol_header or 'ftp://' in protocol_header):\n url_var = pre_www + url_var\n url_list[i] = url_var\n protocol_header = url_list[i][:9].lower()\n if ('http://' in protocol_header or 'https://' in protocol_header or\n 'ftp://' in protocol_header):\n pass\n else:\n url_list[i] = 'https://' + url_list[i]\n return url_list\n\n\ndef get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,\n eth_num=254):\n \"\"\"\n inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0\n inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253\n sequence: start with 0\n eth_num: eth sequence start with 0\n \"\"\"\n user_index = sequence % user_num + user_start\n eth_index = sequence % eth_num + eth_start\n \"\"\"\n user_index = sequence\n if sequence>user_num: #循环,复用,取余\n user_index = sequence % user_num + user_start\n eth_index = sequence\n if 
eth_index>eth_num: #循环,复用,取余\n eth_index = eth_index % eth_num + eth_start\n \"\"\"\n return user_index, eth_index\n\n\ndef callback():\n return\n\n\ndef urls_resquests(urls, proxy='172.17.33.23:8080', user_start=300,\n user_num=253, sub_eth_start=0, eth_num=253, ip_prefix='172.18.1.', cert\n ='rootCA.cer', is_same_url=False, is_http=False, debug=False):\n \"\"\"\n one ip/eth<--> one user\n \"\"\"\n i = 0\n for i in range(max(user_num, eth_num)):\n url = ''\n if is_same_url:\n if is_http:\n url = 'http://172.16.0.1'\n else:\n url = 'https://www.baidu.com'\n user_index = i % user_num + user_start\n eth_index = i % eth_num + sub_eth_start\n user = 'userg' + str(user_index)\n eth = 'eth0:' + str(eth_index)\n \"\"\" For debug\n print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))\n print('ip_{0}={1}'.format(i,ip))\n print('eth=',eth)\n print('user=',user)\n print(\"-\" * 50)\n \"\"\"\n system_curl_request(url, user, eth, proxy=proxy, cert=cert, is_http\n =is_http, debug=debug)\n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n \"\"\"该Python3脚本用于ASWG做并发认证测试。\n 1、使用方法示例:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080\"\"\"\n )\n parser.add_argument('-r', '--round', type=int, default=1, help=\n '认证并发测试的测试次数,默认1轮测试即停止')\n parser.add_argument('-s', '--starttime', type=str, default='', help=\n '首次认证并发测试的时间,如 16:20:60')\n parser.add_argument('-t', '--auth-cache-timeout', type=int, default=600,\n help='认证缓存过期时间,默认600秒')\n parser.add_argument('-p', '--aswg-proxy', type=str, default=\n '172.17.33.23:8080', help='ASWG proxy')\n parser.add_argument('-i', '--ip-prefix', type=str, default='172.18.1.',\n help='客户端IP前缀,默认只支持C段;其他方式自行适配')\n parser.add_argument('-u', '--is-same-url', type=bool, default=True,\n help='是否使用相同URL测试')\n parser.add_argument('-u1', '--is-http', type=bool, default=True, help=\n '当指定使用相同URL时,指定是http还是https请求')\n parser.add_argument('-f', '--url-file', type=str, default=\n 'hwurls_top10w.txt', help='urls来源文件')\n parser.add_argument('-f1', '--url-index', type=int, default=0, help=\n 'urls来源文件中字段序号,默认从0开始')\n parser.add_argument('-a0', '--start-user-index', type=int, default=0,\n help='auth 用户的序号,默认从0开始')\n parser.add_argument('-a1', '--user-num', type=int, default=1275, help=\n 'auth 用户数量')\n parser.add_argument('-e0', '--start-eth0-index', type=int, default=0,\n help='开始的子网卡序号,默认从0开始')\n parser.add_argument('-e1', '--sub-eth0-num', type=int, default=1275,\n help='子网卡接口数量,每个接口一个IP地址')\n parser.add_argument('-d', '--is-debug', type=bool, default=False, help=\n '是否开启curl的打印日志')\n args = parser.parse_args()\n max_round = args.round\n first_schedule_time = args.starttime\n now = datetime.datetime.now()\n now_str = now.strftime('%H:%M:%S')\n if first_schedule_time:\n if len(first_schedule_time) == 8 and len(first_schedule_time.split(':')\n ) == 3 and first_schedule_time > now_str:\n pass\n else:\n print('-s或者--starttime 格式不对,请输入大于当前时间字符串,如:16:20:60 ')\n sys.exit()\n else:\n nexttime = now + datetime.timedelta(seconds=60)\n first_schedule_time = nexttime.strftime('%H:%M:%S')\n auth_cache_timeout = args.auth_cache_timeout\n proxy = args.aswg_proxy\n ip_prefix = args.ip_prefix\n is_same_url = args.is_same_url\n is_same_url = True\n url_file = args.url_file\n url_index = args.url_index\n start_user_index = args.start_user_index\n user_num = args.user_num\n start_eth0_index = args.start_eth0_index\n sub_eth0_num = args.sub_eth0_num\n is_debug = args.is_debug\n urls = 
get_urls_from_file(from_file=url_file, url_index=url_index,\n spliter=',', pre_www='www.')\n print('urls_len=', len(urls))\n print('urls_len=', len(urls))\n i = 0\n user_start = start_user_index\n user_num = user_num\n sub_eth_start = start_eth0_index\n eth_num = sub_eth0_num\n cert = 'rootCA.cer'\n is_http = True\n print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.\n format(max_round, first_schedule_time, auth_cache_timeout))\n round_num = 0\n while True:\n now = datetime.datetime.now()\n time_now = now.strftime('%H:%M:%S')\n if time_now == first_schedule_time:\n print('This_schedule_time={0}, round={1}'.format(\n first_schedule_time, round_num))\n start_time = time.time()\n urls_resquests(urls, proxy=proxy, user_start=user_start,\n user_num=user_num, sub_eth_start=sub_eth_start, eth_num=\n eth_num, ip_prefix=ip_prefix, cert=cert, is_same_url=\n is_same_url, is_http=is_http, debug=is_debug)\n total_sending_time_seconds = time.time() - start_time\n print(\n 'total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'\n .format(total_sending_time_seconds, round_num))\n round_num = round_num + 1\n if round_num >= max_round:\n print('-' * 50)\n print('Finished all test with {0} rounds!!!'.format(max_round))\n break\n else:\n print('-' * 50)\n print(\n 'Please make sure clear cache before the next schedule time!!!'\n )\n last_schedule_time = datetime.datetime.strptime(now.\n strftime('%Y-%m-%d ') + first_schedule_time,\n '%Y-%m-%d %H:%M:%S')\n nexttime = last_schedule_time + datetime.timedelta(seconds=\n auth_cache_timeout + 60)\n first_schedule_time = nexttime.strftime('%H:%M:%S')\n print('Next_schedule_time={0}...'.format(first_schedule_time))\n else:\n pass\n",
"step-5": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#allisnone 20200403\r\n#https://github.com/urllib3/urllib3/issues/1434\r\n#https://github.com/dopstar/requests-ntlm2\r\n#https://github.com/requests/requests-ntlm\r\n\r\n#base on python3\r\n#if you request https website, you need to add ASWG CA to following file:\r\n#/root/.pyenv/versions/3.5.5/lib/python3.5/site-packages/certifi/cacert.pem\r\n#ulimit –n 2000\r\n#pip install requests_ntlm\r\nimport argparse\r\nimport re\r\nimport os\r\nimport csv\r\nimport string,sys,time,datetime\r\nimport requests\r\nfrom requests_toolbelt.adapters import source\r\n#from requests_ntlm import HttpNtlmAuth\r\nimport random\r\nimport subprocess\r\n#import zthreads\r\n\r\ndef get_random_ip_or_user(start,end,prefix='172.16.90.',type='ip'):\r\n if type=='ip' and max(start,end)>255:\r\n end = 255\r\n i = random.randint(start,end)\r\n return prefix + str(i)\r\n\r\ndef get_random_ips_users(start,end,num,prefix='172.16.90.',type='ip'):\r\n if type=='ip' and max(start,end)>255:\r\n end = 255\r\n sequences = []\r\n for i in range(start,end+1):\r\n sequences.append(prefix+str(i))\r\n if num> len(sequences):\r\n num = len(sequences)\r\n choices = random.sample(sequences,num)\r\n return choices\r\n\r\n\r\n\r\ndef popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'):\r\n curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format(\r\n cert,eth,user,proxy,url)\r\n subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding=\"utf-8\")\r\n try:\r\n subp.wait(2) #等待超时\r\n except Exception as e:\r\n print('curl_request_timeout, error: ',e)\r\n return\r\n if subp.poll() == 0:\r\n print(subp.communicate()[1])\r\n else:\r\n print(\"curl_request-失败: \",curl_cmd)\r\n return\r\n\r\ndef system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False):\r\n \"\"\"\r\n -I: header request\r\n -k: skip ssl\r\n --no-keepalive, keepalive=close\r\n \"\"\"\r\n curl_cmd = ''\r\n debug = False\r\n if is_http:\r\n basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'\r\n if debug:\r\n pass\r\n else:\r\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\r\n curl_cmd = basic_cmd.format(eth,user,proxy,url)\r\n else:\r\n basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\r\n if debug:\r\n pass\r\n else:\r\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\r\n curl_cmd = basic_cmd.format(cert,eth,user,proxy,url)\r\n try:\r\n os_p = os.system(curl_cmd)\r\n print('curl_cmd=',curl_cmd)\r\n except Exception as e:\r\n print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user))\r\n return\r\n\r\ndef get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'):\r\n \"\"\"\r\n 用于url分类测试,测试文件中存放大量的url地址\r\n :param from_file: str \r\n :return: list, URL_list(Generator)\r\n \"\"\"\r\n txtfile = open(from_file, 'r',encoding='utf-8')\r\n url_list = txtfile.readlines()\r\n for i in range(0,len(url_list)):\r\n url_list[i] = url_list[i].replace('\\n','')\r\n # print(url_list[i])\r\n if url_index>=0:\r\n url_var = url_list[i].split(spliter)[url_index].replace(' ','')\r\n #print('url_var=',url_var)\r\n protocol_header = url_var[:9].lower()\r\n if pre_www not in url_var and not (\"http://\" in protocol_header or \"https://\" in protocol_header or \"ftp://\" in protocol_header):\r\n 
url_var = pre_www + url_var\r\n url_list[i] = url_var\r\n protocol_header = url_list[i][:9].lower()\r\n #print('protocol_header=',protocol_header)\r\n if \"http://\" in protocol_header or \"https://\" in protocol_header or \"ftp://\" in protocol_header:\r\n pass \r\n else: #无协议头部,默认加http协议\r\n url_list[i] = \"https://\" + url_list[i]\r\n return url_list \r\n\r\n\r\ndef get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,eth_num=254):\r\n \"\"\"\r\n inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0\r\n inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253\r\n sequence: start with 0\r\n eth_num: eth sequence start with 0\r\n \"\"\"\r\n user_index = sequence % user_num + user_start\r\n eth_index = sequence % eth_num + eth_start\r\n \"\"\"\r\n user_index = sequence\r\n if sequence>user_num: #循环,复用,取余\r\n user_index = sequence % user_num + user_start\r\n eth_index = sequence\r\n if eth_index>eth_num: #循环,复用,取余\r\n eth_index = eth_index % eth_num + eth_start\r\n \"\"\"\r\n return user_index,eth_index\r\n\r\ndef callback():\r\n return\r\n\r\n\r\ndef urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253, \r\n ip_prefix = '172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False):\r\n \"\"\"\r\n one ip/eth<--> one user\r\n \"\"\"\r\n i = 0\r\n #count = max(len(urls),user_num,eth_num)\r\n #for url in urls:\r\n for i in range(max(user_num,eth_num)):\r\n url = ''\r\n if is_same_url:\r\n if is_http:\r\n url = 'http://172.16.0.1' #use the same url for request test\r\n else:\r\n url = 'https://www.baidu.com'\r\n user_index = i % user_num + user_start\r\n eth_index = i % eth_num + sub_eth_start\r\n \r\n #ip = get_random_ip_or_user(start=2,end=254)\r\n \r\n #ip = ip_prefix + str(eth_index + 1)\r\n \r\n #user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user')\r\n user = 'userg'+str(user_index)\r\n #eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user')\r\n eth = 'eth0:'+str(eth_index)\r\n \"\"\" For debug\r\n print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))\r\n print('ip_{0}={1}'.format(i,ip))\r\n print('eth=',eth)\r\n print('user=',user)\r\n print(\"-\" * 50)\r\n \"\"\"\r\n #thread_pool.put(system_curl_request, (url,user,eth,), callback)\r\n #popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')\r\n #system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')\r\n system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug)\r\n #i = i + 1\r\n return\r\n \r\n \r\n#\"\"\"\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='该Python3脚本用于ASWG做并发认证测试。\\n 1、使用方法示例:\\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080') \r\n parser.add_argument('-r','--round', type=int, default=1,help='认证并发测试的测试次数,默认1轮测试即停止')\r\n parser.add_argument('-s','--starttime', type=str, default='',help='首次认证并发测试的时间,如 16:20:60')\r\n parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='认证缓存过期时间,默认600秒')\r\n parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy')\r\n parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='客户端IP前缀,默认只支持C段;其他方式自行适配')\r\n parser.add_argument('-u','--is-same-url', type=bool, default=True,help='是否使用相同URL测试')\r\n parser.add_argument('-u1','--is-http', type=bool, default=True,help='当指定使用相同URL时,指定是http还是https请求')\r\n 
parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='urls来源文件')\r\n parser.add_argument('-f1','--url-index', type=int, default=0,help='urls来源文件中字段序号,默认从0开始')\r\n parser.add_argument('-a0','--start-user-index', type=int, default=0,help='auth 用户的序号,默认从0开始')\r\n parser.add_argument('-a1','--user-num', type=int, default=1275,help='auth 用户数量')\r\n parser.add_argument('-e0','--start-eth0-index', type=int, default=0,help='开始的子网卡序号,默认从0开始')\r\n parser.add_argument('-e1','--sub-eth0-num', type=int, default=1275,help='子网卡接口数量,每个接口一个IP地址')\r\n parser.add_argument('-d','--is-debug', type=bool, default=False,help='是否开启curl的打印日志')\r\n args = parser.parse_args()\r\n max_round = args.round\r\n first_schedule_time = args.starttime\r\n now = datetime.datetime.now()\r\n now_str = now.strftime(\"%H:%M:%S\")\r\n if first_schedule_time:\r\n if len(first_schedule_time)==8 and len(first_schedule_time.split(':'))==3 and first_schedule_time > now_str:\r\n pass\r\n else:\r\n print('-s或者--starttime 格式不对,请输入大于当前时间字符串,如:16:20:60 ')\r\n sys.exit()\r\n else:\r\n nexttime = now + datetime.timedelta(seconds=60)\r\n first_schedule_time = nexttime.strftime(\"%H:%M:%S\")\r\n \r\n auth_cache_timeout = args.auth_cache_timeout\r\n proxy = args.aswg_proxy\r\n ip_prefix = args.ip_prefix\r\n is_same_url = args.is_same_url\r\n is_same_url = True\r\n url_file = args.url_file\r\n url_index = args.url_index\r\n start_user_index = args.start_user_index\r\n user_num = args.user_num\r\n start_eth0_index = args.start_eth0_index\r\n sub_eth0_num = args.sub_eth0_num\r\n is_debug = args.is_debug\r\n urls = get_urls_from_file(from_file=url_file,url_index=url_index,spliter=',',pre_www='www.')\r\n #print('urls=',urls)\r\n #url = 'https://www.baidu.com'\r\n print('urls_len=',len(urls))\r\n \r\n #urls = urls[:300]\r\n print('urls_len=',len(urls))\r\n #from zthreads.threadpools.threadpools import Threadpools\r\n #thread_pool = Threadpools(5)\r\n i = 0\r\n #unique_users = 1275\r\n user_start = start_user_index\r\n user_num = user_num\r\n sub_eth_start = start_eth0_index\r\n eth_num = sub_eth0_num\r\n cert = 'rootCA.cer'\r\n is_http = True\r\n #first_schedule_time = \"16:45:00\"\r\n #auth_cache_timeout = 60\r\n #max_round = 2\r\n print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.format(max_round,first_schedule_time,auth_cache_timeout))\r\n round_num = 0\r\n while True:\r\n #time_now = time.strftime(\"%H:%M:%S\", time.localtime())\r\n now = datetime.datetime.now()\r\n time_now = now.strftime(\"%H:%M:%S\")\r\n if time_now == first_schedule_time: \r\n print('This_schedule_time={0}, round={1}'.format(first_schedule_time,round_num))\r\n start_time = time.time()\r\n urls_resquests(urls, proxy=proxy,user_start=user_start,user_num=user_num,sub_eth_start=sub_eth_start, eth_num=eth_num, \r\n ip_prefix=ip_prefix, cert=cert,is_same_url=is_same_url, is_http=is_http,debug=is_debug)\r\n total_sending_time_seconds = time.time() - start_time \r\n print('total_sending_time_seconds={0}. 
Finished all url requests for round_{1}!!!'.format(total_sending_time_seconds,round_num))\r\n round_num = round_num + 1\r\n if round_num >= max_round:\r\n print(\"-\" * 50)\r\n print('Finished all test with {0} rounds!!!'.format(max_round))\r\n break\r\n else:\r\n print(\"-\" * 50)\r\n print('Please make sure clear cache before the next schedule time!!!')\r\n #now = datetime.datetime.now()\r\n #date_str = now.strftime(\"%Y-%m-%d \")\r\n #last_schedule_time_str = date_str + first_schedule_time\r\n last_schedule_time = datetime.datetime.strptime(now.strftime(\"%Y-%m-%d \") + first_schedule_time,'%Y-%m-%d %H:%M:%S')\r\n nexttime = last_schedule_time + datetime.timedelta(seconds=auth_cache_timeout+60) # delay 60 seconds\r\n first_schedule_time = nexttime.strftime(\"%H:%M:%S\")\r\n print('Next_schedule_time={0}...'.format(first_schedule_time))\r\n #time.sleep(sleep_time)\r\n else:\r\n #print('time_now=',time_now)\r\n pass\r\n \r\n \r\n #thread_pool.close() \r\n #initial_requests_session(ip=ip,user=ntlm_user)\r\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
# the main program of this project
import log
import logging
import os
from ast_modifier import AstModifier
from analyzer import Analyzer
class Demo():
def __init__(self):
self.log = logging.getLogger(self.__class__.__name__)
def start(self, filename: str):
self.log.debug('analyse file: ' + filename)
astmodif = AstModifier(filename)
# get origin AST
originTree = astmodif.origin()
self.log.info('origin: ' + astmodif.dump(originTree))
# simplify the AST
astmodif.simplify()
self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))
# analyse
analyzer = Analyzer()
analyzer.analyze(astmodif.simpast)
def main(args):
demo = Demo()
defaultfile = './test/apple.py'
if len(args) > 1:
defaultfile = args[1]
demo.start(os.path.abspath(defaultfile))
if __name__ == "__main__":
import sys
main(sys.argv)
|
normal
|
{
"blob_id": "e989f73011559080f96802dba4db30361d5626f9",
"index": 4002,
"step-1": "<mask token>\n\n\nclass Demo:\n\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n\n def start(self, filename: str):\n self.log.debug('analyse file: ' + filename)\n astmodif = AstModifier(filename)\n originTree = astmodif.origin()\n self.log.info('origin: ' + astmodif.dump(originTree))\n astmodif.simplify()\n self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))\n analyzer = Analyzer()\n analyzer.analyze(astmodif.simpast)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Demo:\n\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n\n def start(self, filename: str):\n self.log.debug('analyse file: ' + filename)\n astmodif = AstModifier(filename)\n originTree = astmodif.origin()\n self.log.info('origin: ' + astmodif.dump(originTree))\n astmodif.simplify()\n self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))\n analyzer = Analyzer()\n analyzer.analyze(astmodif.simpast)\n\n\ndef main(args):\n demo = Demo()\n defaultfile = './test/apple.py'\n if len(args) > 1:\n defaultfile = args[1]\n demo.start(os.path.abspath(defaultfile))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Demo:\n\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n\n def start(self, filename: str):\n self.log.debug('analyse file: ' + filename)\n astmodif = AstModifier(filename)\n originTree = astmodif.origin()\n self.log.info('origin: ' + astmodif.dump(originTree))\n astmodif.simplify()\n self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))\n analyzer = Analyzer()\n analyzer.analyze(astmodif.simpast)\n\n\ndef main(args):\n demo = Demo()\n defaultfile = './test/apple.py'\n if len(args) > 1:\n defaultfile = args[1]\n demo.start(os.path.abspath(defaultfile))\n\n\nif __name__ == '__main__':\n import sys\n main(sys.argv)\n",
"step-4": "import log\nimport logging\nimport os\nfrom ast_modifier import AstModifier\nfrom analyzer import Analyzer\n\n\nclass Demo:\n\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n\n def start(self, filename: str):\n self.log.debug('analyse file: ' + filename)\n astmodif = AstModifier(filename)\n originTree = astmodif.origin()\n self.log.info('origin: ' + astmodif.dump(originTree))\n astmodif.simplify()\n self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))\n analyzer = Analyzer()\n analyzer.analyze(astmodif.simpast)\n\n\ndef main(args):\n demo = Demo()\n defaultfile = './test/apple.py'\n if len(args) > 1:\n defaultfile = args[1]\n demo.start(os.path.abspath(defaultfile))\n\n\nif __name__ == '__main__':\n import sys\n main(sys.argv)\n",
"step-5": "# the main program of this project\nimport log\nimport logging\nimport os\nfrom ast_modifier import AstModifier\nfrom analyzer import Analyzer\n\nclass Demo():\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n def start(self, filename: str):\n self.log.debug('analyse file: ' + filename)\n astmodif = AstModifier(filename)\n # get origin AST\n originTree = astmodif.origin()\n self.log.info('origin: ' + astmodif.dump(originTree))\n # simplify the AST\n astmodif.simplify()\n self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))\n\n # analyse\n analyzer = Analyzer()\n analyzer.analyze(astmodif.simpast)\n\ndef main(args):\n demo = Demo()\n defaultfile = './test/apple.py'\n if len(args) > 1:\n defaultfile = args[1]\n demo.start(os.path.abspath(defaultfile))\n\nif __name__ == \"__main__\":\n import sys\n main(sys.argv)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from LinkedList import LinkedList
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
h1 = l1
v1 = 0
while h1:
v1 = v1 * 10 + h1.val
h1 = h1.next
h2 = l2
v2 = 0
while h2:
v2 = v2 * 10 + h2.val
h2 = h2.next
val = str(v1 + v2)
dummy = curr = ListNode(0)
for i in val:
curr.next = ListNode(int(i))
curr = curr.next
return dummy.next
l11, l22 = [7, 2, 4, 3], [5, 6, 4]
l1 = LinkedList(l11).getHead()
l2 = LinkedList(l22).getHead()
sl = Solution()
head = sl.addTwoNumbers(l1, l2)
LinkedList([1]).printLinkedList(head)
|
normal
|
{
"blob_id": "0f3ecd0a7189f57fdbda2360f6e39bd6101e2fdb",
"index": 7435,
"step-1": "<mask token>\n\n\nclass ListNode(object):\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n h1 = l1\n v1 = 0\n while h1:\n v1 = v1 * 10 + h1.val\n h1 = h1.next\n h2 = l2\n v2 = 0\n while h2:\n v2 = v2 * 10 + h2.val\n h2 = h2.next\n val = str(v1 + v2)\n dummy = curr = ListNode(0)\n for i in val:\n curr.next = ListNode(int(i))\n curr = curr.next\n return dummy.next\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ListNode(object):\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n h1 = l1\n v1 = 0\n while h1:\n v1 = v1 * 10 + h1.val\n h1 = h1.next\n h2 = l2\n v2 = 0\n while h2:\n v2 = v2 * 10 + h2.val\n h2 = h2.next\n val = str(v1 + v2)\n dummy = curr = ListNode(0)\n for i in val:\n curr.next = ListNode(int(i))\n curr = curr.next\n return dummy.next\n\n\n<mask token>\nLinkedList([1]).printLinkedList(head)\n",
"step-3": "<mask token>\n\n\nclass ListNode(object):\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n h1 = l1\n v1 = 0\n while h1:\n v1 = v1 * 10 + h1.val\n h1 = h1.next\n h2 = l2\n v2 = 0\n while h2:\n v2 = v2 * 10 + h2.val\n h2 = h2.next\n val = str(v1 + v2)\n dummy = curr = ListNode(0)\n for i in val:\n curr.next = ListNode(int(i))\n curr = curr.next\n return dummy.next\n\n\nl11, l22 = [7, 2, 4, 3], [5, 6, 4]\nl1 = LinkedList(l11).getHead()\nl2 = LinkedList(l22).getHead()\nsl = Solution()\nhead = sl.addTwoNumbers(l1, l2)\nLinkedList([1]).printLinkedList(head)\n",
"step-4": "from LinkedList import LinkedList\n\n\nclass ListNode(object):\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n h1 = l1\n v1 = 0\n while h1:\n v1 = v1 * 10 + h1.val\n h1 = h1.next\n h2 = l2\n v2 = 0\n while h2:\n v2 = v2 * 10 + h2.val\n h2 = h2.next\n val = str(v1 + v2)\n dummy = curr = ListNode(0)\n for i in val:\n curr.next = ListNode(int(i))\n curr = curr.next\n return dummy.next\n\n\nl11, l22 = [7, 2, 4, 3], [5, 6, 4]\nl1 = LinkedList(l11).getHead()\nl2 = LinkedList(l22).getHead()\nsl = Solution()\nhead = sl.addTwoNumbers(l1, l2)\nLinkedList([1]).printLinkedList(head)\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
# 2019/10/08 (October 8, 2019)
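# Reads a date entered as "year/month/day", splits it, and prints the same
# date ten years later. The prompt and output strings below are in Korean.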
ss = input('날짜: 년/월/일 입력-> ')
sslist = ss.split('/')
print(sslist)
print('입력하신 날짜의 10년 후 -> ', end='')
year = int(sslist[0]) + 10
print(str(year) + "년", end='')
print(sslist[1] + "월", end='')
print(sslist[2] + "일")
|
normal
|
{
"blob_id": "fb2ef5a90b6e2582450726905868dd1b78e36166",
"index": 5008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(sslist)\nprint('입력하신 날짜의 10년 후 -> ', end='')\n<mask token>\nprint(str(year) + '년', end='')\nprint(sslist[1] + '월', end='')\nprint(sslist[2] + '일')\n",
"step-3": "ss = input('날짜: 년/월/일 입력-> ')\nsslist = ss.split('/')\nprint(sslist)\nprint('입력하신 날짜의 10년 후 -> ', end='')\nyear = int(sslist[0]) + 10\nprint(str(year) + '년', end='')\nprint(sslist[1] + '월', end='')\nprint(sslist[2] + '일')\n",
"step-4": "# 2019/10/08 2019년10월8일\r\n\r\nss = input('날짜: 년/월/일 입력-> ')\r\n\r\nsslist = ss.split('/')\r\nprint(sslist)\r\n\r\nprint('입력하신 날짜의 10년 후 -> ', end='')\r\nyear = int(sslist[0]) + 10\r\nprint(str(year) + \"년\", end='')\r\nprint(sslist[1] + \"월\", end='')\r\nprint(sslist[2] + \"일\")\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/C:\Program Files (x86)\Python35-32
# import the required libraries
from urllib.request import urlopen
from bs4 import BeautifulSoup
|
normal
|
{
"blob_id": "7a59c8c883a9aaa723175783e01aa62e23503fde",
"index": 376,
"step-1": "<mask token>\n",
"step-2": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n",
"step-3": "#!/C:\\Program Files (x86)\\Python35-32\n\n#importar librarias necesarias\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import ttk
import Tkinter as tk
from rwb.runner.log import RobotLogTree, RobotLogMessages
from rwb.lib import AbstractRwbGui
from rwb.widgets import Statusbar
from rwb.runner.listener import RemoteRobotListener
NAME = "monitor"
HELP_URL="https://github.com/boakley/robotframework-workbench/wiki/rwb.monitor-User-Guide"
DEFAULT_SETTINGS = {
NAME: {
"port": 8910,
"host": "localhost",
}
}
class MonitorApp(AbstractRwbGui):
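    # GUI application that receives Robot Framework execution events through
    # a RemoteRobotListener and shows them in a details tree, a message log,
    # and a statusbar with a progress indicator.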
def __init__(self):
AbstractRwbGui.__init__(self, NAME, DEFAULT_SETTINGS)
self.wm_geometry("900x500")
port = self.get_setting("monitor.port")
print "using port", port
self.listener = RemoteRobotListener(self, port=port, callback=self._listen)
self.wm_title("rwb.monitor port: %s" % self.listener.port)
self._create_menubar()
self._create_statusbar()
self._create_notebook()
self.stack = []
self.event_id = 0
# self.status_label.configure(text="port: %s" % self.listener.port)
def _create_menubar(self):
self.menubar = tk.Menu(self)
self.configure(menu=self.menubar)
self.file_menu = tk.Menu(self.menubar, tearoff=False)
self.file_menu.add_command(label="Exit", command=self._on_exit)
self.help_menu = tk.Menu(self, tearoff=False)
self.help_menu.add_command(label="View help on the web", command=self._on_view_help)
self.help_menu.add_separator()
self.help_menu.add_command(label="About the robotframework workbench", command=self._on_about)
self.menubar.add_cascade(menu=self.file_menu, label="File", underline=0)
self.menubar.add_cascade(menu=self.help_menu, label="Help", underline=0)
def _on_view_help(self):
import webbrowser
webbrowser.open(HELP_URL)
def _on_exit(self):
self.destroy()
def _create_statusbar(self):
self.statusbar = Statusbar(self)
self.statusbar.pack(side="bottom", fill="x")
self.statusbar.add_section("port",12, "port %s" % self.listener.port)
self.statusbar.add_progress(mode="indeterminate")
# grip = ttk.Sizegrip(self.statusbar)
# grip.pack(side="right")
# self.status_label = ttk.Label(self.statusbar, text="", anchor="w")
# self.status_label.pack(side="left", fill="both", expand="true", padx=8)
# self.statusbar.pack(side="bottom", fill="x")
def _create_notebook(self):
self.notebook = ttk.Notebook(self)
self.notebook.pack(side="top", fill="both", expand=True)
self.log_tree = RobotLogTree(self.notebook, auto_open=("failed","suite","test","keyword"))
self.log_messages = RobotLogMessages(self.notebook)
self.notebook.add(self.log_tree, text="Details")
self.notebook.add(self.log_messages, text="Messages")
self.notebook.pack(side="top", fill="both", expand=True)
self.listeners = (self.log_tree, self.log_messages)
def _listen(self, cmd, *args):
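        # Forward every remote listener event to the log widgets, and keep a
        # stack of the currently open suite/test/keyword for the statusbar.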
self.event_id += 1
for listener in self.listeners:
listener.listen(self.event_id, cmd, args)
if cmd in ("start_test", "start_suite", "start_keyword"):
name = args[0]
cmd_type = cmd.split("_")[1]
self.stack.append((cmd_type, name))
self.update_display()
elif cmd in ("end_test", "end_suite", "end_keyword"):
cmd_type = cmd.split("_")[1]
self.stack.pop()
self.update_display()
def update_display(self):
if len(self.stack) == 1:
self.statusbar.progress_start()
elif len(self.stack) == 0:
self.statusbar.progress_stop()
s = ".".join([x[1] for x in self.stack]).strip()
self.statusbar.message(s, clear=True, lifespan=0)
if __name__ == "__main__":
app = MonitorApp()
app.mainloop()
|
normal
|
{
"blob_id": "572d58eec652207e6ec5a5e1d4c2f4310f2a70f3",
"index": 1665,
"step-1": "import ttk\nimport Tkinter as tk\nfrom rwb.runner.log import RobotLogTree, RobotLogMessages\nfrom rwb.lib import AbstractRwbGui\nfrom rwb.widgets import Statusbar\n\nfrom rwb.runner.listener import RemoteRobotListener\n\nNAME = \"monitor\"\nHELP_URL=\"https://github.com/boakley/robotframework-workbench/wiki/rwb.monitor-User-Guide\"\nDEFAULT_SETTINGS = {\n NAME: {\n \"port\": 8910,\n \"host\": \"localhost\",\n }\n }\n\nclass MonitorApp(AbstractRwbGui):\n def __init__(self):\n AbstractRwbGui.__init__(self, NAME, DEFAULT_SETTINGS)\n self.wm_geometry(\"900x500\")\n port = self.get_setting(\"monitor.port\")\n print \"using port\", port\n self.listener = RemoteRobotListener(self, port=port, callback=self._listen)\n self.wm_title(\"rwb.monitor port: %s\" % self.listener.port)\n self._create_menubar()\n self._create_statusbar()\n self._create_notebook()\n self.stack = []\n self.event_id = 0\n# self.status_label.configure(text=\"port: %s\" % self.listener.port)\n\n def _create_menubar(self):\n self.menubar = tk.Menu(self)\n self.configure(menu=self.menubar)\n\n self.file_menu = tk.Menu(self.menubar, tearoff=False)\n self.file_menu.add_command(label=\"Exit\", command=self._on_exit)\n\n self.help_menu = tk.Menu(self, tearoff=False)\n self.help_menu.add_command(label=\"View help on the web\", command=self._on_view_help)\n self.help_menu.add_separator()\n self.help_menu.add_command(label=\"About the robotframework workbench\", command=self._on_about)\n\n self.menubar.add_cascade(menu=self.file_menu, label=\"File\", underline=0)\n self.menubar.add_cascade(menu=self.help_menu, label=\"Help\", underline=0)\n\n def _on_view_help(self):\n import webbrowser\n webbrowser.open(HELP_URL)\n\n def _on_exit(self):\n self.destroy()\n\n def _create_statusbar(self):\n self.statusbar = Statusbar(self)\n self.statusbar.pack(side=\"bottom\", fill=\"x\")\n self.statusbar.add_section(\"port\",12, \"port %s\" % self.listener.port)\n self.statusbar.add_progress(mode=\"indeterminate\")\n # grip = ttk.Sizegrip(self.statusbar)\n # grip.pack(side=\"right\")\n # self.status_label = ttk.Label(self.statusbar, text=\"\", anchor=\"w\")\n # self.status_label.pack(side=\"left\", fill=\"both\", expand=\"true\", padx=8)\n # self.statusbar.pack(side=\"bottom\", fill=\"x\")\n\n def _create_notebook(self):\n self.notebook = ttk.Notebook(self)\n self.notebook.pack(side=\"top\", fill=\"both\", expand=True)\n self.log_tree = RobotLogTree(self.notebook, auto_open=(\"failed\",\"suite\",\"test\",\"keyword\"))\n self.log_messages = RobotLogMessages(self.notebook)\n self.notebook.add(self.log_tree, text=\"Details\")\n self.notebook.add(self.log_messages, text=\"Messages\")\n self.notebook.pack(side=\"top\", fill=\"both\", expand=True)\n self.listeners = (self.log_tree, self.log_messages)\n\n def _listen(self, cmd, *args):\n self.event_id += 1\n for listener in self.listeners:\n listener.listen(self.event_id, cmd, args)\n\n if cmd in (\"start_test\", \"start_suite\", \"start_keyword\"):\n name = args[0]\n cmd_type = cmd.split(\"_\")[1]\n self.stack.append((cmd_type, name))\n self.update_display()\n elif cmd in (\"end_test\", \"end_suite\", \"end_keyword\"):\n cmd_type = cmd.split(\"_\")[1]\n self.stack.pop()\n self.update_display()\n\n def update_display(self):\n if len(self.stack) == 1:\n self.statusbar.progress_start()\n elif len(self.stack) == 0:\n self.statusbar.progress_stop()\n\n s = \".\".join([x[1] for x in self.stack]).strip()\n self.statusbar.message(s, clear=True, lifespan=0)\n\nif __name__ == \"__main__\":\n app = 
MonitorApp()\n app.mainloop()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
__author__ = 'Vicio'
from Conexion.conexion import Conexion
class ConexionList():
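    # Holds a shared Conexion instance; the select* query methods below are
    # placeholders that have not been implemented yet.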
def __init__(self):
self.conexion = Conexion()
def selectClientes(self):
pass
def selectProveedores(self):
pass
|
normal
|
{
"blob_id": "6b4af452778bdf13ac18e8d260cf1c9176ca95e0",
"index": 8414,
"step-1": "<mask token>\n\n\nclass ConexionList:\n <mask token>\n\n def selectClientes(self):\n pass\n\n def selectProveedores(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass ConexionList:\n\n def __init__(self):\n self.conexion = Conexion()\n\n def selectClientes(self):\n pass\n\n def selectProveedores(self):\n pass\n",
"step-3": "__author__ = 'Vicio'\n<mask token>\n\n\nclass ConexionList:\n\n def __init__(self):\n self.conexion = Conexion()\n\n def selectClientes(self):\n pass\n\n def selectProveedores(self):\n pass\n",
"step-4": "__author__ = 'Vicio'\nfrom Conexion.conexion import Conexion\n\n\nclass ConexionList:\n\n def __init__(self):\n self.conexion = Conexion()\n\n def selectClientes(self):\n pass\n\n def selectProveedores(self):\n pass\n",
"step-5": "__author__ = 'Vicio'\nfrom Conexion.conexion import Conexion\n\n\nclass ConexionList():\n\n def __init__(self):\n self.conexion = Conexion()\n\n\n\n def selectClientes(self):\n pass\n\n\n def selectProveedores(self):\n pass\n\n\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import matplotlib.pyplot as plt
from partisan_symmetry_noplot import partisan_symmetry
for k in range(1, 100):
    a = []
    for i in range(1, 100):
        a.append([])
        for j in range(1, 100):
            # Assumption: the middle vote share sweeps with k on the same
            # 1/20 scale as the i and j axes; with a constant middle value,
            # every saved figure would be identical despite the varying title.
            a[i-1].append(partisan_symmetry([5*i/100, 5*k/100, 5*j/100], 1000, False))
    plt.imshow(a)
    plt.colorbar()
    plt.xticks(range(99), [x/20 for x in range(1, 100)])
    plt.yticks(range(99), [x/20 for x in range(1, 100)])
    plt.title("Partisan Symmetry Difference for (x," + str(5*k/100) + ",y)")
    plt.savefig("./ps" + str(k) + ".png")
    plt.close()
    print("figure", k, "done")
|
normal
|
{
"blob_id": "cfa0937f1c49b52283c562d9ab1cb0542e71b990",
"index": 5970,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor k in range(1, 100):\n a = []\n for i in range(1, 100):\n a.append([])\n for j in range(1, 100):\n a[i - 1].append(partisan_symmetry([5 * i / 100, 0.2, 5 * j / \n 100], 1000, False))\n plt.imshow(a)\n plt.colorbar()\n plt.xticks(range(99), [(x / 20) for x in range(1, 100)])\n plt.yticks(range(99), [(x / 20) for x in range(1, 100)])\n plt.title('Partisan Symmetry Difference for (x,' + str(k) + ',y)')\n plt.savefig('./ps' + str(k) + '.png')\n plt.close()\n print('figure', k, 'done')\n",
"step-3": "import matplotlib.pyplot as plt\nfrom partisan_symmetry_noplot import partisan_symmetry\nfor k in range(1, 100):\n a = []\n for i in range(1, 100):\n a.append([])\n for j in range(1, 100):\n a[i - 1].append(partisan_symmetry([5 * i / 100, 0.2, 5 * j / \n 100], 1000, False))\n plt.imshow(a)\n plt.colorbar()\n plt.xticks(range(99), [(x / 20) for x in range(1, 100)])\n plt.yticks(range(99), [(x / 20) for x in range(1, 100)])\n plt.title('Partisan Symmetry Difference for (x,' + str(k) + ',y)')\n plt.savefig('./ps' + str(k) + '.png')\n plt.close()\n print('figure', k, 'done')\n",
"step-4": "import matplotlib.pyplot as plt\nfrom partisan_symmetry_noplot import partisan_symmetry\nfor k in range(1,100):\n a=[]\n for i in range(1,100):\n a.append([])\n for j in range(1,100):\n a[i-1].append(partisan_symmetry([5*i/100,.20,5*j/100],1000,False))\n\n plt.imshow(a)\n plt.colorbar()\n plt.xticks(range(99),[x/20 for x in range(1,100)])\n plt.yticks(range(99),[x/20 for x in range(1,100)])\n plt.title(\"Partisan Symmetry Difference for (x,\"+str(k)+\",y)\")\n plt.savefig(\"./ps\"+str(k)+\".png\")\n plt.close()\n print(\"figure\",k,\"done\")\n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from collections import defaultdict as dd
def grouping(w):
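    # Sort the words case-insensitively, key each word by how many uppercase
    # letters it contains, and return the groups as a dict ordered by key.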
d = dd(list)
for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,
key=str.casefold)):
d[k].append(v)
return dict(sorted(d.items()))
|
normal
|
{
"blob_id": "545794cf4f0b2ab63b6a90951a78f8bdaca3c9e6",
"index": 390,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef grouping(w):\n d = dd(list)\n for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,\n key=str.casefold)):\n d[k].append(v)\n return dict(sorted(d.items()))\n",
"step-3": "from collections import defaultdict as dd\n\n\ndef grouping(w):\n d = dd(list)\n for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,\n key=str.casefold)):\n d[k].append(v)\n return dict(sorted(d.items()))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python3
import sys, os
import random
import numpy as np
import matplotlib as mpl
if os.environ.get('DISPLAY','') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import shapely.geometry as geometry
from shapely.ops import cascaded_union, polygonize
import math
from matplotlib.pyplot import arrow
import dubins
this_script_path = os.path.dirname(__file__)
path_to_utils = os.path.join(this_script_path, "utils")
sys.path.append(path_to_utils)
import figure_utils
import orienteering_utils
from orienteering_utils import ProblemType
legend_font_size = 24
tick_font_size = 20
NUM_POINTS_TO_GEN = 16
SCATTER_SIZE = 80
FIG_HEIGHT = 7.5
SHOW_FIGURE = True
RESULT_FILE = "../sources/results/results.log"
RESULT_FILE = os.path.join(this_script_path, RESULT_FILE)
#use nice latex fonts if latex is installed
#figure_utils.configure_latex_fonts_latex()
data_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)
print("using the last results")
record = data_vns_sop[-1]
print("record", record)
problem_type = ProblemType.UNKNOWN
PROBLEM_FILE = record['PROBLEM_FILE']
PROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)
if "datasets/sop/" in PROBLEM_FILE:
print("showing SOP")
problem_type = ProblemType.SOP
SAVE_TO_FIGURE = "solution_sop.png"
elif "datasets/dop_sop_dataset/" in PROBLEM_FILE:
print("showing DOP")
problem_type = ProblemType.DOP
SAVE_TO_FIGURE = "solution_dop.png"
elif "datasets/opn_sop_dataset/" in PROBLEM_FILE:
print("showing OPN")
problem_type = ProblemType.OPN
SAVE_TO_FIGURE = "solution_opn.png"
else:
error("can not decide problem type based on problem file location")
problem_type = ProblemType.UNKNOWN
op = orienteering_utils.SetOrienteeringProblemDefinition()
op.load_problem_file(PROBLEM_FILE)
nodes = op.nodes
sets_prices = op.get_sets_prices()
sets = op.get_sets()
original_nodes = op.get_set_centers()
result_target_ids = record['RESULT_TARGET_IDS']
result_cluster_ids = record['RESULT_CLUSTER_IDS']
result_rewards = record['REWARDS']
print("problem loaded")
print("result_target_ids:", result_target_ids)
print("result_cluster_ids:", result_cluster_ids)
print("result_rewards", result_rewards)
print("sets_prices", sets_prices)
print("sets", sets)
print("nodes", nodes)
# for the DOP only
result_head_angs = []
sampling_heading = len(sets[0])
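# For the DOP, each cluster contains one node per sampled heading, so a
# node's offset within its cluster encodes the heading angle at that target.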
calc_reward = 0
for clust_idx in range(len(result_cluster_ids)):
clust = result_cluster_ids[clust_idx]
node = result_target_ids[clust_idx]
if problem_type == ProblemType.DOP:
node_inside_cluster = node - sets[clust][0]
# result_node_inside_cluster.append(node_inside_cluster)
head_ang = math.pi + (2 * math.pi * node_inside_cluster) / sampling_heading
result_head_angs.append(head_ang)
calc_reward += sets_prices[clust]
if node not in sets[clust]:
print("what the hell, it is not good")
print("calc_reward", calc_reward)
mycmap = plt.cm.get_cmap('RdYlBu_r')
maxx, maxy = -sys.float_info.max, -sys.float_info.max
minx, miny = sys.float_info.max, sys.float_info.max
circle_radiuses = np.ones([len(nodes), 1])
circle_radiuses1 = np.multiply(2.0, circle_radiuses)
nodes_w_rewards = np.zeros((len(nodes), 3))
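# Collect (x, y, reward) per target: the DOP uses the cluster centers, the
# other variants use the raw nodes, each priced by the set it belongs to.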
if problem_type == ProblemType.DOP:
xses = [i[0] for i in original_nodes]
yses = [i[1] for i in original_nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(original_nodes), 3))
for nidx in range(len(original_nodes)):
nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]
nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]
nodes_w_rewards[nidx, 2] = sets_prices[nidx]
elif problem_type == ProblemType.OPN :
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
else:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
minrew = min(nodes_w_rewards[:, 2])
maxrew = max(nodes_w_rewards[:, 2])
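# Map rewards to colors; vmax is stretched slightly above the maximum reward
# so the highest-priced set is not drawn fully saturated.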
cNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew))
mycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)
fig_width = FIG_HEIGHT*(maxx-minx)/(maxy-miny)
figsize = (fig_width*0.9,FIG_HEIGHT)
print(figsize)
fig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
circles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], circle_radiuses1, c=nodes_w_rewards[:, 2] , alpha=0.05, edgecolor='black', linewidth=0.9, linestyle=':')
sc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=nodes_w_rewards[:, 2], cmap=mycmap , alpha=1.0, s=1, facecolor='black', lw=0.5)
plt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)
# print(nodes_w_rewards[:, 2])
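# Draw each set as a buffered hull colored by its reward; for the DOP also
# draw the sampled heading arrows at every target center.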
if problem_type == ProblemType.DOP:
for nidx1 in range(len(nodes_w_rewards)):
points = []
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for hind in range(sampling_heading):
head_ang = math.pi + (2 * math.pi * hind) / sampling_heading
arrow_len = 30
arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), arrow_len * math.sin(head_ang))
set_rew = nodes_w_rewards[nidx1, 2]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)
elif problem_type == ProblemType.OPN:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
else:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
for node_idx in range(1, len(result_target_ids)):
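    # Draw the tour leg from the previous target to the current one: a
    # sampled Dubins curve for the DOP, a straight segment otherwise.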
if problem_type == ProblemType.DOP:
step_size = 20
turning_radius = op.dubins_radius
node = result_cluster_ids[node_idx]
node_prew = result_cluster_ids[node_idx - 1]
q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1], result_head_angs[node_idx]]
q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][1], result_head_angs[node_idx - 1]]
path = dubins.shortest_path(q_start, q_end, turning_radius)
qs, _ = path.sample_many(step_size)
# length_dub += math.ceil(path.path_length())
xses = [item[0] for item in qs]
yses = [item[1] for item in qs]
print(node_prew, '->', node, ",", q_start, '->', q_end)
plt.plot(xses, yses, '-g', lw=1.6)
elif problem_type == ProblemType.OPN:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ",", node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)
else:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ",", node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)
ax = plt.gca()
ax.axis('equal')
figure_utils.no_axis(ax)
cbar_position = [0.20, 0.05, 0.6, 0.03]
cbar_ax = fig.add_axes(cbar_position)
cb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')
cb.ax.tick_params(labelsize=tick_font_size)
cb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)
# offset = 0.08
fig.subplots_adjust(left=-0.035, right=1.035 , top=1.07 , bottom=0.0)
plt.savefig(SAVE_TO_FIGURE, dpi=300)
if SHOW_FIGURE:
plt.show()
|
normal
|
{
"blob_id": "b4454d92ab8380e0eded2f7aed737378e1710c72",
"index": 9413,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif os.environ.get('DISPLAY', '') == '':\n print('no display found. Using non-interactive Agg backend')\n mpl.use('Agg')\n<mask token>\nsys.path.append(path_to_utils)\n<mask token>\nprint('using the last results')\n<mask token>\nprint('record', record)\n<mask token>\nif 'datasets/sop/' in PROBLEM_FILE:\n print('showing SOP')\n problem_type = ProblemType.SOP\n SAVE_TO_FIGURE = 'solution_sop.png'\nelif 'datasets/dop_sop_dataset/' in PROBLEM_FILE:\n print('showing DOP')\n problem_type = ProblemType.DOP\n SAVE_TO_FIGURE = 'solution_dop.png'\nelif 'datasets/opn_sop_dataset/' in PROBLEM_FILE:\n print('showing OPN')\n problem_type = ProblemType.OPN\n SAVE_TO_FIGURE = 'solution_opn.png'\nelse:\n error('can not decide problem type based on problem file location')\n problem_type = ProblemType.UNKNOWN\n<mask token>\nop.load_problem_file(PROBLEM_FILE)\n<mask token>\nprint('problem loaded')\nprint('result_target_ids:', result_target_ids)\nprint('result_cluster_ids:', result_cluster_ids)\nprint('result_rewards', result_rewards)\nprint('sets_prices', sets_prices)\nprint('sets', sets)\nprint('nodes', nodes)\n<mask token>\nfor clust_idx in range(len(result_cluster_ids)):\n clust = result_cluster_ids[clust_idx]\n node = result_target_ids[clust_idx]\n if problem_type == ProblemType.DOP:\n node_inside_cluster = node - sets[clust][0]\n head_ang = (math.pi + 2 * math.pi * node_inside_cluster /\n sampling_heading)\n result_head_angs.append(head_ang)\n calc_reward += sets_prices[clust]\n if node not in sets[clust]:\n print('what the hell, it is not good')\nprint('calc_reward', calc_reward)\n<mask token>\nif problem_type == ProblemType.DOP:\n xses = [i[0] for i in original_nodes]\n yses = [i[1] for i in original_nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(original_nodes), 3))\n for nidx in range(len(original_nodes)):\n nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]\n nodes_w_rewards[nidx, 2] = sets_prices[nidx]\nelif problem_type == ProblemType.OPN:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nelse:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\n<mask token>\nprint(figsize)\n<mask token>\nplt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)\nif problem_type == ProblemType.DOP:\n for nidx1 in range(len(nodes_w_rewards)):\n points = []\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for hind in range(sampling_heading):\n head_ang = math.pi + 2 * math.pi * hind / sampling_heading\n arrow_len = 30\n arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), \n arrow_len * math.sin(head_ang))\n set_rew = nodes_w_rewards[nidx1, 2]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, 
alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)\nelif problem_type == ProblemType.OPN:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nelse:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nfor node_idx in range(1, len(result_target_ids)):\n if problem_type == ProblemType.DOP:\n step_size = 20\n turning_radius = op.dubins_radius\n node = result_cluster_ids[node_idx]\n node_prew = result_cluster_ids[node_idx - 1]\n q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1],\n result_head_angs[node_idx]]\n q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][\n 1], result_head_angs[node_idx - 1]]\n path = dubins.shortest_path(q_start, q_end, turning_radius)\n qs, _ = path.sample_many(step_size)\n xses = [item[0] for item in qs]\n yses = [item[1] for item in qs]\n print(node_prew, '->', node, ',', q_start, '->', q_end)\n plt.plot(xses, yses, '-g', lw=1.6)\n elif problem_type == ProblemType.OPN:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\n else:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\n<mask token>\nax.axis('equal')\nfigure_utils.no_axis(ax)\n<mask token>\ncb.ax.tick_params(labelsize=tick_font_size)\ncb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)\nfig.subplots_adjust(left=-0.035, right=1.035, top=1.07, bottom=0.0)\nplt.savefig(SAVE_TO_FIGURE, dpi=300)\nif SHOW_FIGURE:\n plt.show()\n",
"step-3": "<mask token>\nif os.environ.get('DISPLAY', '') == '':\n print('no display found. Using non-interactive Agg backend')\n mpl.use('Agg')\n<mask token>\nthis_script_path = os.path.dirname(__file__)\npath_to_utils = os.path.join(this_script_path, 'utils')\nsys.path.append(path_to_utils)\n<mask token>\nlegend_font_size = 24\ntick_font_size = 20\nNUM_POINTS_TO_GEN = 16\nSCATTER_SIZE = 80\nFIG_HEIGHT = 7.5\nSHOW_FIGURE = True\nRESULT_FILE = '../sources/results/results.log'\nRESULT_FILE = os.path.join(this_script_path, RESULT_FILE)\ndata_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)\nprint('using the last results')\nrecord = data_vns_sop[-1]\nprint('record', record)\nproblem_type = ProblemType.UNKNOWN\nPROBLEM_FILE = record['PROBLEM_FILE']\nPROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)\nif 'datasets/sop/' in PROBLEM_FILE:\n print('showing SOP')\n problem_type = ProblemType.SOP\n SAVE_TO_FIGURE = 'solution_sop.png'\nelif 'datasets/dop_sop_dataset/' in PROBLEM_FILE:\n print('showing DOP')\n problem_type = ProblemType.DOP\n SAVE_TO_FIGURE = 'solution_dop.png'\nelif 'datasets/opn_sop_dataset/' in PROBLEM_FILE:\n print('showing OPN')\n problem_type = ProblemType.OPN\n SAVE_TO_FIGURE = 'solution_opn.png'\nelse:\n error('can not decide problem type based on problem file location')\n problem_type = ProblemType.UNKNOWN\nop = orienteering_utils.SetOrienteeringProblemDefinition()\nop.load_problem_file(PROBLEM_FILE)\nnodes = op.nodes\nsets_prices = op.get_sets_prices()\nsets = op.get_sets()\noriginal_nodes = op.get_set_centers()\nresult_target_ids = record['RESULT_TARGET_IDS']\nresult_cluster_ids = record['RESULT_CLUSTER_IDS']\nresult_rewards = record['REWARDS']\nprint('problem loaded')\nprint('result_target_ids:', result_target_ids)\nprint('result_cluster_ids:', result_cluster_ids)\nprint('result_rewards', result_rewards)\nprint('sets_prices', sets_prices)\nprint('sets', sets)\nprint('nodes', nodes)\nresult_head_angs = []\nsampling_heading = len(sets[0])\ncalc_reward = 0\nfor clust_idx in range(len(result_cluster_ids)):\n clust = result_cluster_ids[clust_idx]\n node = result_target_ids[clust_idx]\n if problem_type == ProblemType.DOP:\n node_inside_cluster = node - sets[clust][0]\n head_ang = (math.pi + 2 * math.pi * node_inside_cluster /\n sampling_heading)\n result_head_angs.append(head_ang)\n calc_reward += sets_prices[clust]\n if node not in sets[clust]:\n print('what the hell, it is not good')\nprint('calc_reward', calc_reward)\nmycmap = plt.cm.get_cmap('RdYlBu_r')\nmaxx, maxy = -sys.float_info.max, -sys.float_info.max\nminx, miny = sys.float_info.max, sys.float_info.max\ncircle_radiuses = np.ones([len(nodes), 1])\ncircle_radiuses1 = np.multiply(2.0, circle_radiuses)\nnodes_w_rewards = np.zeros((len(nodes), 3))\nif problem_type == ProblemType.DOP:\n xses = [i[0] for i in original_nodes]\n yses = [i[1] for i in original_nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(original_nodes), 3))\n for nidx in range(len(original_nodes)):\n nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]\n nodes_w_rewards[nidx, 2] = sets_prices[nidx]\nelif problem_type == ProblemType.OPN:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = 
nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nelse:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nminrew = min(nodes_w_rewards[:, 2])\nmaxrew = max(nodes_w_rewards[:, 2])\ncNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew)\n )\nmycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)\nfig_width = FIG_HEIGHT * (maxx - minx) / (maxy - miny)\nfigsize = fig_width * 0.9, FIG_HEIGHT\nprint(figsize)\nfig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w',\n edgecolor='k')\ncircles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1],\n circle_radiuses1, c=nodes_w_rewards[:, 2], alpha=0.05, edgecolor=\n 'black', linewidth=0.9, linestyle=':')\nsc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=\n nodes_w_rewards[:, 2], cmap=mycmap, alpha=1.0, s=1, facecolor='black',\n lw=0.5)\nplt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)\nif problem_type == ProblemType.DOP:\n for nidx1 in range(len(nodes_w_rewards)):\n points = []\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for hind in range(sampling_heading):\n head_ang = math.pi + 2 * math.pi * hind / sampling_heading\n arrow_len = 30\n arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), \n arrow_len * math.sin(head_ang))\n set_rew = nodes_w_rewards[nidx1, 2]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)\nelif problem_type == ProblemType.OPN:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nelse:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nfor node_idx in range(1, len(result_target_ids)):\n if problem_type == ProblemType.DOP:\n step_size = 20\n turning_radius = op.dubins_radius\n node = result_cluster_ids[node_idx]\n node_prew = result_cluster_ids[node_idx - 1]\n q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1],\n result_head_angs[node_idx]]\n q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][\n 1], result_head_angs[node_idx - 1]]\n path = dubins.shortest_path(q_start, q_end, turning_radius)\n qs, _ = path.sample_many(step_size)\n xses = [item[0] for item in qs]\n yses = [item[1] for item in 
qs]\n print(node_prew, '->', node, ',', q_start, '->', q_end)\n plt.plot(xses, yses, '-g', lw=1.6)\n elif problem_type == ProblemType.OPN:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\n else:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\nax = plt.gca()\nax.axis('equal')\nfigure_utils.no_axis(ax)\ncbar_position = [0.2, 0.05, 0.6, 0.03]\ncbar_ax = fig.add_axes(cbar_position)\ncb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')\ncb.ax.tick_params(labelsize=tick_font_size)\ncb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)\nfig.subplots_adjust(left=-0.035, right=1.035, top=1.07, bottom=0.0)\nplt.savefig(SAVE_TO_FIGURE, dpi=300)\nif SHOW_FIGURE:\n plt.show()\n",
"step-4": "import sys, os\nimport random\nimport numpy as np\nimport matplotlib as mpl\nif os.environ.get('DISPLAY', '') == '':\n print('no display found. Using non-interactive Agg backend')\n mpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport shapely.geometry as geometry\nfrom shapely.ops import cascaded_union, polygonize\nimport math\nfrom matplotlib.pyplot import arrow\nimport dubins\nthis_script_path = os.path.dirname(__file__)\npath_to_utils = os.path.join(this_script_path, 'utils')\nsys.path.append(path_to_utils)\nimport figure_utils\nimport orienteering_utils\nfrom orienteering_utils import ProblemType\nlegend_font_size = 24\ntick_font_size = 20\nNUM_POINTS_TO_GEN = 16\nSCATTER_SIZE = 80\nFIG_HEIGHT = 7.5\nSHOW_FIGURE = True\nRESULT_FILE = '../sources/results/results.log'\nRESULT_FILE = os.path.join(this_script_path, RESULT_FILE)\ndata_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)\nprint('using the last results')\nrecord = data_vns_sop[-1]\nprint('record', record)\nproblem_type = ProblemType.UNKNOWN\nPROBLEM_FILE = record['PROBLEM_FILE']\nPROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)\nif 'datasets/sop/' in PROBLEM_FILE:\n print('showing SOP')\n problem_type = ProblemType.SOP\n SAVE_TO_FIGURE = 'solution_sop.png'\nelif 'datasets/dop_sop_dataset/' in PROBLEM_FILE:\n print('showing DOP')\n problem_type = ProblemType.DOP\n SAVE_TO_FIGURE = 'solution_dop.png'\nelif 'datasets/opn_sop_dataset/' in PROBLEM_FILE:\n print('showing OPN')\n problem_type = ProblemType.OPN\n SAVE_TO_FIGURE = 'solution_opn.png'\nelse:\n error('can not decide problem type based on problem file location')\n problem_type = ProblemType.UNKNOWN\nop = orienteering_utils.SetOrienteeringProblemDefinition()\nop.load_problem_file(PROBLEM_FILE)\nnodes = op.nodes\nsets_prices = op.get_sets_prices()\nsets = op.get_sets()\noriginal_nodes = op.get_set_centers()\nresult_target_ids = record['RESULT_TARGET_IDS']\nresult_cluster_ids = record['RESULT_CLUSTER_IDS']\nresult_rewards = record['REWARDS']\nprint('problem loaded')\nprint('result_target_ids:', result_target_ids)\nprint('result_cluster_ids:', result_cluster_ids)\nprint('result_rewards', result_rewards)\nprint('sets_prices', sets_prices)\nprint('sets', sets)\nprint('nodes', nodes)\nresult_head_angs = []\nsampling_heading = len(sets[0])\ncalc_reward = 0\nfor clust_idx in range(len(result_cluster_ids)):\n clust = result_cluster_ids[clust_idx]\n node = result_target_ids[clust_idx]\n if problem_type == ProblemType.DOP:\n node_inside_cluster = node - sets[clust][0]\n head_ang = (math.pi + 2 * math.pi * node_inside_cluster /\n sampling_heading)\n result_head_angs.append(head_ang)\n calc_reward += sets_prices[clust]\n if node not in sets[clust]:\n print('what the hell, it is not good')\nprint('calc_reward', calc_reward)\nmycmap = plt.cm.get_cmap('RdYlBu_r')\nmaxx, maxy = -sys.float_info.max, -sys.float_info.max\nminx, miny = sys.float_info.max, sys.float_info.max\ncircle_radiuses = np.ones([len(nodes), 1])\ncircle_radiuses1 = np.multiply(2.0, circle_radiuses)\nnodes_w_rewards = np.zeros((len(nodes), 3))\nif problem_type == ProblemType.DOP:\n xses = [i[0] for i in original_nodes]\n yses = [i[1] for i in original_nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(original_nodes), 3))\n for nidx in range(len(original_nodes)):\n nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]\n 
nodes_w_rewards[nidx, 2] = sets_prices[nidx]\nelif problem_type == ProblemType.OPN:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nelse:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nminrew = min(nodes_w_rewards[:, 2])\nmaxrew = max(nodes_w_rewards[:, 2])\ncNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew)\n )\nmycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)\nfig_width = FIG_HEIGHT * (maxx - minx) / (maxy - miny)\nfigsize = fig_width * 0.9, FIG_HEIGHT\nprint(figsize)\nfig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w',\n edgecolor='k')\ncircles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1],\n circle_radiuses1, c=nodes_w_rewards[:, 2], alpha=0.05, edgecolor=\n 'black', linewidth=0.9, linestyle=':')\nsc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=\n nodes_w_rewards[:, 2], cmap=mycmap, alpha=1.0, s=1, facecolor='black',\n lw=0.5)\nplt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)\nif problem_type == ProblemType.DOP:\n for nidx1 in range(len(nodes_w_rewards)):\n points = []\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for hind in range(sampling_heading):\n head_ang = math.pi + 2 * math.pi * hind / sampling_heading\n arrow_len = 30\n arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), \n arrow_len * math.sin(head_ang))\n set_rew = nodes_w_rewards[nidx1, 2]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)\nelif problem_type == ProblemType.OPN:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nelse:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nfor node_idx in range(1, len(result_target_ids)):\n if problem_type == ProblemType.DOP:\n step_size = 20\n turning_radius = op.dubins_radius\n node = result_cluster_ids[node_idx]\n node_prew = 
result_cluster_ids[node_idx - 1]\n q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1],\n result_head_angs[node_idx]]\n q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][\n 1], result_head_angs[node_idx - 1]]\n path = dubins.shortest_path(q_start, q_end, turning_radius)\n qs, _ = path.sample_many(step_size)\n xses = [item[0] for item in qs]\n yses = [item[1] for item in qs]\n print(node_prew, '->', node, ',', q_start, '->', q_end)\n plt.plot(xses, yses, '-g', lw=1.6)\n elif problem_type == ProblemType.OPN:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\n else:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\nax = plt.gca()\nax.axis('equal')\nfigure_utils.no_axis(ax)\ncbar_position = [0.2, 0.05, 0.6, 0.03]\ncbar_ax = fig.add_axes(cbar_position)\ncb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')\ncb.ax.tick_params(labelsize=tick_font_size)\ncb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)\nfig.subplots_adjust(left=-0.035, right=1.035, top=1.07, bottom=0.0)\nplt.savefig(SAVE_TO_FIGURE, dpi=300)\nif SHOW_FIGURE:\n plt.show()\n",
"step-5": "#!/usr/bin/env python3\n\nimport sys, os\nimport random\nimport numpy as np\n\nimport matplotlib as mpl\nif os.environ.get('DISPLAY','') == '':\n print('no display found. Using non-interactive Agg backend')\n mpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport shapely.geometry as geometry\nfrom shapely.ops import cascaded_union, polygonize\nimport math\nfrom matplotlib.pyplot import arrow\nimport dubins\nthis_script_path = os.path.dirname(__file__) \npath_to_utils = os.path.join(this_script_path, \"utils\") \nsys.path.append(path_to_utils)\nimport figure_utils\nimport orienteering_utils\nfrom orienteering_utils import ProblemType\n\n\nlegend_font_size = 24\ntick_font_size = 20\nNUM_POINTS_TO_GEN = 16\nSCATTER_SIZE = 80\nFIG_HEIGHT = 7.5\nSHOW_FIGURE = True\n\nRESULT_FILE = \"../sources/results/results.log\"\nRESULT_FILE = os.path.join(this_script_path, RESULT_FILE)\n \n#use nice latex fonts if latex is installed\n#figure_utils.configure_latex_fonts_latex()\n\ndata_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)\n\nprint(\"using the last results\")\nrecord = data_vns_sop[-1]\nprint(\"record\", record)\n\nproblem_type = ProblemType.UNKNOWN\n\nPROBLEM_FILE = record['PROBLEM_FILE']\nPROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)\n\nif \"datasets/sop/\" in PROBLEM_FILE:\n print(\"showing SOP\")\n problem_type = ProblemType.SOP\n SAVE_TO_FIGURE = \"solution_sop.png\"\n\nelif \"datasets/dop_sop_dataset/\" in PROBLEM_FILE:\n print(\"showing DOP\")\n problem_type = ProblemType.DOP\n SAVE_TO_FIGURE = \"solution_dop.png\"\n\nelif \"datasets/opn_sop_dataset/\" in PROBLEM_FILE:\n print(\"showing OPN\")\n problem_type = ProblemType.OPN\n SAVE_TO_FIGURE = \"solution_opn.png\"\n \nelse:\n error(\"can not decide problem type based on problem file location\")\n problem_type = ProblemType.UNKNOWN\n\nop = orienteering_utils.SetOrienteeringProblemDefinition()\nop.load_problem_file(PROBLEM_FILE)\nnodes = op.nodes\nsets_prices = op.get_sets_prices()\nsets = op.get_sets()\noriginal_nodes = op.get_set_centers()\n\nresult_target_ids = record['RESULT_TARGET_IDS']\nresult_cluster_ids = record['RESULT_CLUSTER_IDS']\nresult_rewards = record['REWARDS']\nprint(\"problem loaded\")\nprint(\"result_target_ids:\", result_target_ids)\nprint(\"result_cluster_ids:\", result_cluster_ids)\nprint(\"result_rewards\", result_rewards)\nprint(\"sets_prices\", sets_prices)\nprint(\"sets\", sets)\nprint(\"nodes\", nodes)\n\n# for the DOP only\nresult_head_angs = []\nsampling_heading = len(sets[0])\n\ncalc_reward = 0\nfor clust_idx in range(len(result_cluster_ids)):\n clust = result_cluster_ids[clust_idx]\n node = result_target_ids[clust_idx]\n\n if problem_type == ProblemType.DOP:\n node_inside_cluster = node - sets[clust][0]\n # result_node_inside_cluster.append(node_inside_cluster)\n head_ang = math.pi + (2 * math.pi * node_inside_cluster) / sampling_heading\n result_head_angs.append(head_ang)\n\n calc_reward += sets_prices[clust]\n if node not in sets[clust]:\n print(\"what the hell, it is not good\")\n\nprint(\"calc_reward\", calc_reward)\n\nmycmap = plt.cm.get_cmap('RdYlBu_r')\n\nmaxx, maxy = -sys.float_info.max,-sys.float_info.max\nminx, miny = sys.float_info.max,sys.float_info.max\n\ncircle_radiuses = np.ones([len(nodes), 1])\ncircle_radiuses1 = np.multiply(2.0, circle_radiuses)\n\nnodes_w_rewards = np.zeros((len(nodes), 3))\nif problem_type == ProblemType.DOP:\n xses = [i[0] for i in original_nodes]\n yses = [i[1] for i in 
original_nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n \n nodes_w_rewards = np.zeros((len(original_nodes), 3))\n for nidx in range(len(original_nodes)):\n nodes_w_rewards[nidx, 0] = original_nodes[nidx][0] \n nodes_w_rewards[nidx, 1] = original_nodes[nidx][1] \n nodes_w_rewards[nidx, 2] = sets_prices[nidx]\nelif problem_type == ProblemType.OPN :\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n \n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n \n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nelse:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n \n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\n\nminrew = min(nodes_w_rewards[:, 2])\nmaxrew = max(nodes_w_rewards[:, 2])\n\n\ncNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew)) \nmycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)\n\nfig_width = FIG_HEIGHT*(maxx-minx)/(maxy-miny)\nfigsize = (fig_width*0.9,FIG_HEIGHT)\nprint(figsize)\n\nfig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')\ncircles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], circle_radiuses1, c=nodes_w_rewards[:, 2] , alpha=0.05, edgecolor='black', linewidth=0.9, linestyle=':')\nsc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=nodes_w_rewards[:, 2], cmap=mycmap , alpha=1.0, s=1, facecolor='black', lw=0.5)\nplt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)\n\n# print(nodes_w_rewards[:, 2])\n\nif problem_type == ProblemType.DOP:\n for nidx1 in range(len(nodes_w_rewards)): \n points = []\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n \n for hind in range(sampling_heading):\n head_ang = math.pi + (2 * math.pi * hind) / sampling_heading\n arrow_len = 30\n arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), arrow_len * math.sin(head_ang))\n \n set_rew = nodes_w_rewards[nidx1, 2] \n \n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha) \n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)\nelif problem_type == ProblemType.OPN:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]: \n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]: \n if(nidx1 != nidx2):\n node2 = nodes_w_rewards[nidx2, :]\n # plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)\n \n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n \n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\n\nelse: \n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]: \n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]: \n if(nidx1 != 
nidx2):\n node2 = nodes_w_rewards[nidx2, :]\n # plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)\n \n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n \n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\n \n\nfor node_idx in range(1, len(result_target_ids)):\n \n if problem_type == ProblemType.DOP:\n step_size = 20\n turning_radius = op.dubins_radius\n node = result_cluster_ids[node_idx]\n node_prew = result_cluster_ids[node_idx - 1]\n q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1], result_head_angs[node_idx]]\n q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][1], result_head_angs[node_idx - 1]]\n path = dubins.shortest_path(q_start, q_end, turning_radius)\n qs, _ = path.sample_many(step_size)\n # length_dub += math.ceil(path.path_length())\n xses = [item[0] for item in qs]\n yses = [item[1] for item in qs]\n print(node_prew, '->', node, \",\", q_start, '->', q_end)\n plt.plot(xses, yses, '-g', lw=1.6)\n \n elif problem_type == ProblemType.OPN:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, \",\", node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)\n\n else:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, \",\", node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)\n\nax = plt.gca()\nax.axis('equal')\nfigure_utils.no_axis(ax)\n\ncbar_position = [0.20, 0.05, 0.6, 0.03]\ncbar_ax = fig.add_axes(cbar_position)\ncb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')\ncb.ax.tick_params(labelsize=tick_font_size)\ncb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)\n\n# offset = 0.08\nfig.subplots_adjust(left=-0.035, right=1.035 , top=1.07 , bottom=0.0)\n\nplt.savefig(SAVE_TO_FIGURE, dpi=300)\nif SHOW_FIGURE:\n plt.show() \n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# terminal based game in Python
from random import randint
print('Terminal based number guessing game')
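# keep asking until a valid number of games (1-9) is entered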
while True:
try:
numberOfGames = int(input('Please choose how many games you want to play ---> '))
    except ValueError:
        print('Only numbers accepted')
continue
    if 0 < numberOfGames < 10:
        break
randomNumbers = []
for i in range(numberOfGames):
randomNumbers.append(randint(1, 10))
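# play each game: loop until the secret number is guessed, counting attempts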
for index, number in enumerate(randomNumbers):
print('Game %i' %(index + 1))
guess = 0
attempts = 0
    while guess != number:
try:
guess = int(input('Guess the number ---> '))
except Exception as e:
print('Only numbers accepted')
continue
        if guess > number:
            print('Your number is bigger!')
        elif guess < number:
            print('Your number is smaller!')
attempts += 1
print('Great you guessed it! Attempts %i' %attempts)
attempts = 0
|
normal
|
{
"blob_id": "20c081dc47f541a988bccef89b8e51f446c80f58",
"index": 5471,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Terminal based number guessing game')\nwhile True:\n try:\n numberOfGames = int(input(\n 'Please choose how many games you want to play ---> '))\n except:\n print('Only numbes accepted')\n continue\n if numberOfGames > 0 and numberOfGames < 10:\n break\n<mask token>\nfor i in range(numberOfGames):\n randomNumbers.append(randint(1, 10))\nfor index, number in enumerate(randomNumbers):\n print('Game %i' % (index + 1))\n guess = 0\n attempts = 0\n while guess != number:\n try:\n guess = int(input('Guess the number ---> '))\n except Exception as e:\n print('Only numbers accepted')\n continue\n if guess > number:\n print('Your number is bigger!')\n else:\n print('Your number is smaller!')\n attempts += 1\n print('Great you guessed it! Attempts %i' % attempts)\n attempts = 0\n",
"step-3": "<mask token>\nprint('Terminal based number guessing game')\nwhile True:\n try:\n numberOfGames = int(input(\n 'Please choose how many games you want to play ---> '))\n except:\n print('Only numbes accepted')\n continue\n if numberOfGames > 0 and numberOfGames < 10:\n break\nrandomNumbers = []\nfor i in range(numberOfGames):\n randomNumbers.append(randint(1, 10))\nfor index, number in enumerate(randomNumbers):\n print('Game %i' % (index + 1))\n guess = 0\n attempts = 0\n while guess != number:\n try:\n guess = int(input('Guess the number ---> '))\n except Exception as e:\n print('Only numbers accepted')\n continue\n if guess > number:\n print('Your number is bigger!')\n else:\n print('Your number is smaller!')\n attempts += 1\n print('Great you guessed it! Attempts %i' % attempts)\n attempts = 0\n",
"step-4": "from random import randint\nprint('Terminal based number guessing game')\nwhile True:\n try:\n numberOfGames = int(input(\n 'Please choose how many games you want to play ---> '))\n except:\n print('Only numbes accepted')\n continue\n if numberOfGames > 0 and numberOfGames < 10:\n break\nrandomNumbers = []\nfor i in range(numberOfGames):\n randomNumbers.append(randint(1, 10))\nfor index, number in enumerate(randomNumbers):\n print('Game %i' % (index + 1))\n guess = 0\n attempts = 0\n while guess != number:\n try:\n guess = int(input('Guess the number ---> '))\n except Exception as e:\n print('Only numbers accepted')\n continue\n if guess > number:\n print('Your number is bigger!')\n else:\n print('Your number is smaller!')\n attempts += 1\n print('Great you guessed it! Attempts %i' % attempts)\n attempts = 0\n",
"step-5": "# terminal based game in Python\nfrom random import randint\n\nprint('Terminal based number guessing game')\nwhile True:\n try:\n numberOfGames = int(input('Please choose how many games you want to play ---> '))\n except:\n print('Only numbes accepted')\n continue\n if (numberOfGames > 0 and numberOfGames < 10):\n break;\n\nrandomNumbers = []\n\nfor i in range(numberOfGames):\n randomNumbers.append(randint(1, 10))\n\nfor index, number in enumerate(randomNumbers):\n print('Game %i' %(index + 1))\n guess = 0\n attempts = 0\n while (guess != number):\n try:\n guess = int(input('Guess the number ---> '))\n except Exception as e:\n print('Only numbers accepted')\n continue\n if (guess > number):\n print('Your number is bigger!')\n else:\n print('Your number is smaller!')\n attempts += 1\n print('Great you guessed it! Attempts %i' %attempts)\n attempts = 0\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 16:38:22 2017
@author: secoder
"""
import io
import random
import nltk
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from collections import OrderedDict
from collections import Counter
from sklearn.metrics import pairwise_distances
import numpy as np
import scipy
import json
import codecs
from dateutil import parser
import time
import datetime
import operator
#import cPickle as pickle
#
#import traceback
from skimage import filters
import unicodedata as ud
from config import project_name
class recommendationsys:
def __init__(self, nyear):
        # by default we filter out authors who have no publications in the most recent 10 years
self.activityyear = 10
self.debug = 0
self.nremd = 3
#----------------------
PROJECT_DIRECTORY = 'output/project/' + project_name
self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'
self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'
self.f_years = PROJECT_DIRECTORY + '/years_target.txt'
self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'
self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'
self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'
self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'
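        # activity filter: an author needs at least 'npaper' publications
        # and the most recent one must be no older than 'nyear'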
self.npaper = 10
self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())
self.keywordthreshold = 10
#----------------------
self.debugmsg('start init', 0)
self.docluster()
self.initNLTKConditionalFreqDist()
self.filterN = len(self.authors)
self.debugmsg('end init\n', 0)
"""
"""
def debugmsg(self, msg, lvl):
if self.debug <= lvl:
print(msg)
"""
"""
def resentpublicationsidx(self,authoridx):
#print 'start recentpublications\n'
resentpub = []
idx = self.authortitlesidx[authoridx]
# sort by years
years = [self.years[i] for i in idx]
years = np.array(years)
years = years.argsort()
idx = np.array(idx)[years]
idx = idx.tolist()
idx.reverse()
        # skip authors whose most recent publication is older than 'nyear'
        # or who have fewer than 'npaper' publications
if (int(self.years[idx[0]]) < self.nyear) or (len(idx) < self.npaper):
return resentpub
# ----
for i in idx:
authorsjson = []
for author in self.coathors[i]:
authorsjson.append(OrderedDict([("name",author)]))
date = datetime.datetime.fromtimestamp(self.years[i]).strftime("%Y-%m-%d %H:%M:%S")
resentpub.append(OrderedDict([("title",self.rawtitles[i]),("authors",authorsjson), ("year",date),("publicationVenue",self.booktitle[i])]))
#print 'end recentpublications\n'
return resentpub
"""
"""
def resentpublications(self,name):
#print 'start recentpublications\n'
resentpub = []
#if isinstance(name, unicode): for python 2.7
if isinstance(name, str):
#idx = self.authors.index(name)
idx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
idx = self.authordict.get(name.decode('utf-8'))
idx = self.authortitlesidx[idx]
# sort by years
years = [self.years[i] for i in idx]
years = np.array(years)
years = years.argsort()
idx = np.array(idx)[years]
idx = idx.tolist()
idx.reverse()
        # skip authors whose most recent publication is older than 'nyear'
        # or who have fewer than 'npaper' publications
if (int(self.years[idx[0]]) < self.nyear) or (len(idx) < self.npaper):
return resentpub
# ----
for i in idx:
authorsjson = []
for author in self.coathors[i]:
authorsjson.append(OrderedDict([("name",author)]))
date = datetime.datetime.fromtimestamp(self.years[i]).strftime("%Y-%m-%d %H:%M:%S")
resentpub.append(OrderedDict([("title",self.rawtitles[i]),("authors",authorsjson), ("year",date),("publicationVenue",self.booktitle[i])]))
#print 'end recentpublications\n'
return resentpub
def initNLTKConditionalFreqDist(self):
self.debugmsg('start initNLTK CFD\n', 0)
pairs=[]
# for title in self.titles:
# pairs = pairs + list(nltk.bigrams(title.split()))
pairs = nltk.bigrams(self.allcorp)
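        # the ConditionalFreqDist maps each word to the frequency
        # distribution of the words that immediately follow it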
self.cfd = nltk.ConditionalFreqDist(pairs)
self.debugmsg('end initNLTK CFD\n', 0)
def keyword(self,name):
#print 'start keyword\n'
if isinstance(name, str):
#idx = self.authors.index(name)
idx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
idx = self.authordict.get(name.decode('utf-8'))
# content = self.authorcontents[idx].lower()
#
# # get the unique words from the content
# content = set(content.split())
#
# i = []
# for c in content:
# count = self.vectorizer.vocabulary_.get(c, 0)
# i.append(count)
#
# i = np.array(i)
# i = i.argsort()
# content = np.array(list(content))
# content = content[i]
# content = content[-3:]
# keywords = list(reversed(content))
#
contentjson = []
# for topic in keywords:
# contentjson.append(OrderedDict([("topic", topic)]))
# bigram keywords -------------
content = self.authorcontents[idx].lower().split()
finalkeywords = self.bigramkeywords(content)
# #print 'start bigram\n'
#
# userpairs = list(nltk.bigrams(content))
#
#
# # do the same on raw titles
#
# keywordsraw=[]
# for p in userpairs:
# pairsdic=self.cfd[p[0]]
# n=pairsdic[p[1]]
# if n>=2:
# keywordsraw.append((p,n))
#
# uniqkeywords=set(keywordsraw)
# keywords=sorted(uniqkeywords, key=lambda keywords: keywords[1])
#
# finalkeywords=[]
# for p in keywords:
# #c=wn.synsets(p[0][1])[0].pos()
# if (p[1]>=2):
# finalkeywords.append((' '.join(p[0]),p[1],keywordsraw.count(p)))
#
# finalkeywords.reverse()
for topic in finalkeywords:
#print topic[0]
contentjson.append(OrderedDict([("topic", topic[0])]))
#print 'end bigram\n'
#print 'end keyword\n'
return contentjson
"""
"""
def keywordbyidx(self,idx):
contentjson = []
# bigram keywords -------------
content = self.authorcontents[idx].lower().split()
finalkeywords = self.bigramkeywords(content)
for topic in finalkeywords:
#print topic[0]
contentjson.append(OrderedDict([("topic", topic[0])]))
return contentjson
"""
"""
def bigramkeywords(self, text):
#print 'start bigramkeyword\n'
# bigram keywords -------------
#content = text.lower().split()
content = text
#print 'start bigram\n'
userpairs = list(nltk.bigrams(content))
        # in case there are no valid keywords under our threshold, the pair
        # with the highest occurrence is picked from the backup list
keywordsbackup = []
# the valid keywords
keywords=[]
for p in userpairs:
pairsdic=self.cfd[p[0]]
n=pairsdic[p[1]]
if n>=self.keywordthreshold:
keywords.append((p,n))
keywordsbackup.append((p,n))
finalkeywords=[]
uniqkeywords=set(keywords)
keywords=sorted(uniqkeywords, key=lambda keywords: keywords[1])
for p in keywords:
if (p[1]>=25) or (userpairs.count(p[0])>1):
finalkeywords.append([' '.join(p[0]),p[1],userpairs.count(p[0])])
finalkeywords.reverse()
        if not finalkeywords:
            # no valid keywords found: fall back to the most frequent bigram
            uniqkeywords = set(keywordsbackup)
            keywordsbackup = sorted(uniqkeywords, key=lambda keywordsbackup: keywordsbackup[1])
            finalkeywords.append([' '.join(keywordsbackup[-1][0]), keywordsbackup[-1][1], userpairs.count(keywordsbackup[-1][0])])
else:
# deal with plural
pluralidx = self.findpluralbigram(finalkeywords)
self.removepluralbigram(finalkeywords,pluralidx)
#print 'end bigramkeyword\n'
return finalkeywords
"""
"""
def removepluralbigram(self, bigram, pluralidx):
        # if pluralidx is empty, just return
if not pluralidx:
print('empty')
return
delcount = 0
pren = 0
for i in pluralidx:
#delcount = 0
for n in i[1:]:
if n > pren:
n = n - delcount
bigram[i[0]][1] = bigram[i[0]][1] + bigram[n][1]
bigram.remove(bigram[n])
delcount = delcount + 1
pren = n
"""
"""
def findpluralbigram(self, keywordsinfo):
c = []
for i in keywordsinfo:
t = i[0].split()
t1 = ''
for n in t:
if n[-1] == 's':
n = n[:-1]
t1 = t1 + n
c.append(t1)
uniqbigram = list(set(c))
pluralidx = []
for i in uniqbigram:
count = c.count(i)
if count > 1:
cc = []
for n in range(len(c)):
if i == c[n]:
cc.append(n)
pluralidx.append(cc)
return pluralidx
"""
"""
def mycoauthorsV2(self, name):
if isinstance(name, str):
#idx = self.authors.index(name)
idx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
idx = self.authordict.get(name.decode('utf-8'))
coauthorship = self.coauthornetV2[idx]
uniqcoauthors = np.array(list(set(coauthorship)))
coauthorcount = []
for i in uniqcoauthors:
coauthorcount.append(coauthorship.count(i))
countidx = np.argsort(coauthorcount)
# reverse it to descend order
countidx = countidx[::-1]
coauthorcount = np.array(coauthorcount)
result = []
for i in countidx:
result.append(OrderedDict([("name",self.authors[uniqcoauthors[i]]),("cooperationCount",coauthorcount[i])]))
return (result,list(uniqcoauthors[countidx]),list(coauthorcount[countidx]))
"""
"""
def mycoauthorsV3(self, name):
if isinstance(name, str):
#idx = self.authors.index(name)
idx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
# remove itself
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([("name",self.authors[unicoauthors[-(i+1)]]),("cooperationCount",coauthorcount[-(i+1)])]))
return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))
"""
"""
def mycoauthorsV4(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
# remove itself
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([("name",self.authors[unicoauthors[-(i+1)]]),("cooperationCount",coauthorcount[-(i+1)])]))
return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))
"""
"""
def mycoauthorsV4byidx(self, idx):
coauthors = []
for i in self.coauthorsidx:
if idx in i:
# remove itself
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([("name",self.authors[unicoauthors[-(i+1)]]),("cooperationCount",coauthorcount[-(i+1)])]))
return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))
"""
"""
def mycoauthorsV4bymentionlist(self, name):
if name in self.mentionnetwork.keys():
mentiondict = self.mentionnetwork[name]
else:
mentiondict ={'None':0}
result = []
# sort by mention counts
sorted_mentiondict = sorted(mentiondict.items(), key=operator.itemgetter(1), reverse=True)
for i in sorted_mentiondict:
result.append(OrderedDict([("name",i[0]),("cooperationCount",i[1])]))
return result
"""
"""
def mycoauthorsbyyear(self, idx, year):
years = np.array(self.years)
yearidx = np.where(years <= year)[0]
coauthorsidx = [ self.coauthorsidx[i] for i in yearidx]
coauthors = []
for i in coauthorsidx:
if idx in i:
# remove itself
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
return (list(unicoauthors[::-1]),list(coauthorcount[::-1]))
"""
find the new coauthors for a user in current year against previous year
    example: mynewcoauthors(23, 2014, 2015) will return the new coauthors
in 2015 regarding the year 2014 for user 23. 23 is the index of a user
"""
def mynewcoauthors(self, userIdx, yearPre, yearCur):
coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)
coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)
newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)
return newCoauthors
"""
Call the weakties after mynewcoauthors() to find the common nodes
    between a user and his/her upcoming new coauthors in the year before
their coauthorship
"""
def weakties(self, userX, userY, year):
coauthornetX, cx = self.mycoauthorsbyyear(userX, year)
        # if userX and userY already have a strong tie, just return []
if userY in coauthornetX:
return ([], [], [])
coauthornetY, cy = self.mycoauthorsbyyear(userY, year)
# find the common nodes
weaktienodes = list(set(coauthornetX).intersection(coauthornetY))
nodescountX = []
nodescountY = []
if weaktienodes:
for i in weaktienodes:
nodescountX.append(cx[coauthornetX.index(i)])
nodescountY.append(cy[coauthornetY.index(i)])
return (weaktienodes, nodescountX, nodescountY)
"""
2nd hoop connection
"""
def secondhoopties(self, userX, userY, year):
result = []
coauthors1, count1 = self.mycoauthorsbyyear(userX, 2016)
for i in coauthors1:
coauthors2, count2 = self.mycoauthorsbyyear(i, 2016)
for n in coauthors2:
coauthors3, count3 = self.mycoauthorsbyyear(n, 2016)
if userY in coauthors3:
result.append([[i,n],[count1[coauthors1.index(i)],count2[coauthors2.index(n)], count3[coauthors3.index(userY)]]])
"""
    Get all the content (paper titles) of the userIdx up to
    and including the given 'year'
"""
def getcontentbyyear(self, userIdx, year):
titleIdx = self.authortitlesidx[userIdx]
titleIdx = np.array(titleIdx)
years = [self.years[i] for i in titleIdx]
years = np.array(years)
        # sort by year, most recent first, keeping titleIdx aligned with years
        order = years.argsort()[::-1]
        titleIdx = titleIdx[order]
        years = years[order]
        yearIdx = np.where(years <= year)[0]
        content = [self.titles[i] for i in titleIdx[yearIdx]]
return content
"""
return the most frequent participated venue of a user
"""
def getVenue(self, userIdx):
venues = self.authorbooktitleidx[userIdx]
c = Counter(venues)
frqvenues = c.most_common()
return frqvenues[0][0]
"""
only consider the recent 10 papers
"""
def contentsimilarity(self, userX, userY, year):
contentX = self.getcontentbyyear(userX, year)
if not contentX:
return -1
contentX = contentX[0:10]
contentY = self.getcontentbyyear(userY, year)
if not contentY:
return -1
contentY = contentY[0:10]
# build the corpus of all the content
contents = []
for i in contentX:
contents.extend(i.split(' '))
lenx = len(contents)
for i in contentY:
contents.extend(i.split(' '))
# normalize the different forms of words
stemmer = nltk.stem.PorterStemmer()
stems = [stemmer.stem(t) for t in contents]
# reconstruct content for userX and userY use the normalized words
newcontentX = stems[0:lenx]
newcontentY = stems[lenx:]
vectorizer = CountVectorizer()
v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(newcontentY)])
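        # NOTE: metric='cosine' gives the cosine *distance* (1 - similarity),
        # so smaller values mean more similar content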
cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0]
return cosinesimilarity
"""
network similarity
"""
def networksimilarity(self, userX, userY, year):
# first calculate FG(userX) according to paper
# User similarities on social networks
coauthors, c = self.mycoauthorsbyyear(userX, year)
edgesFG = len(coauthors)
n = 0
for i in coauthors:
subcoauthors, c = self.mycoauthorsbyyear(i, year)
con = list(set(subcoauthors).intersection(coauthors[n:]))
edgesFG = edgesFG + len(con)
n = n + 1
# second, calculate MFG(userX, userY)
weakties, cx, cy = self.weakties(userX, userY, year)
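        # each weak-tie node is linked to both userX and userY in the
        # mutual-friend graph, hence 2 edges per common node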
edgesMFG = 2 * len(weakties)
n = 0
for i in weakties:
subcoauthors, c = self.mycoauthorsbyyear(i, year)
con = list(set(subcoauthors).intersection(weakties[n:]))
edgesMFG = edgesMFG + len(con)
n = n + 1
# last calculate the network similarity
if edgesFG * edgesMFG:
ns = np.log(edgesMFG)/np.log(2 * edgesFG)
else:
ns = -1
return (ns, edgesFG, edgesMFG, cx, cy)
"""
    text processing: normalize words to their base form, e.g.
    plural forms, progressive tense, etc.
"""
def textnormalizing(self, text):
#l = len(text)
c = 0
for i in text:
# network - networks
if i[-1] == 's':
ii = i[:-1]
if ii in text:
text[c] = ii
c = c + 1
continue
# bus - buses
if i[-2:] == 'es':
ii = i[:-2]
if ii in text:
text[c] = ii
c = c + 1
continue
# study - studies
if i[-3:] == 'ies':
ii = i[:-3] + 'y'
if ii in text:
text[c] = ii
c = c + 1
continue
# network - networking
# get - getting
# explore - exploring
if i[-3:] == 'ing':
ii = i[:-3]
if ii in text:
text[c] = ii
c = c + 1
continue
ii = i[:-4]
if ii in text:
text[c] = ii
c = c + 1
continue
                ii = i[:-3] + 'e'
                if ii in text:
                    text[c] = ii
                    c = c + 1
                    continue
c = c + 1
return text
"""
"""
"""
radius of the cluster
"""
def radiusofcluster(self, labels, nth, dismatrix):
idx = np.where(labels == nth)[0]
dis = dismatrix[idx,nth]
self.mindis = min(dis)
self.maxdis = max(dis)
self.radius = self.maxdis
# return [mindis, maxdis, radius]
"""
show contents in the same cluster
"""
def showcontents(self,labels, nth, allcontents):
contents = []
idx = np.where(labels == nth)
idx = np.array(idx)
idx = idx.flatten()
for i in idx:
contents.append(allcontents[i])
return contents
"""
    check if there is a digit in the string
"""
def digstring(self,s):
for i in s:
if i.isdigit():
return True
return False
"""
compute the distance between two points a and b
"""
def distance(self,a,b):
if scipy.sparse.issparse(a):
a = a.toarray()
a = a[0]
if scipy.sparse.issparse(b):
b = b.toarray()
b = b[0]
        a = np.array(a)
        b = np.array(b)
return np.sqrt(sum(np.square(a - b)))
"""
"""
def updatecoauthornetworkV2(self,net,authors,namelist):
nameidx = []
for name in namelist:
nameidx.append(authors.index(name))
for i in nameidx:
tmpidx = nameidx[:]
tmpidx.remove(i)
# if net is empty
if not net:
net.append(tmpidx)
else:
if i>len(net)-1:
net.append(tmpidx)
else:
net[i].extend(tmpidx)
"""
load the person or organization label
"""
def per_org_label(self):
f = codecs.open(self.f_perorglabel,'r','utf-8')
labels = {}
for line in f:
items = line.split()
labels[items[0]] = items[1]
f.close()
self.labels = labels
"""
"""
    def mention_network(self):
        f = codecs.open(self.f_mentionnetwork, 'r', 'utf-8')
        source = ''
        network = {}
        target = {}
        for line in f:
            items = line.split('"')
            if source == '':
                source = items[0]
            if source == items[0]:
                target[items[1]] = int(items[2])
            else:
                # a new source begins: store the finished one, then start
                # collecting the current line under the new source
                network[source] = target
                source = items[0]
                target = {items[1]: int(items[2])}
        # store the final source once the file is exhausted
        if source != '':
            network[source] = target
        f.close()
        return network
"""
"""
def docluster(self):
tokenizer = RegexpTokenizer(r'\w+')
self.rawtitles = []
self.titles = []
self.allcorp = []
sw = set(nltk.corpus.stopwords.words('english'))
self.debugmsg('start titles \n', 0)
f = codecs.open(self.f_titles,'r','utf-8')
for line in f:
# remove the '\n' at the end
if line[-1] == '\n':
line = line[:-1]
self.rawtitles.append(line)
line = line.lower()
tokenlist = tokenizer.tokenize(line)
self.allcorp += tokenlist
#for corp in newline:
# self.allcorp.append(corp)
            # collect all the words except digits and stopwords
            tokenlist = ' '.join([w for w in tokenlist if (w.lower() not in sw) and not self.digstring(w)])
self.titles.append(tokenlist)
f.close()
# end use codecs
# filename = './CHI/CHI_authors.txt'
self.authordict = {}
self.authors = []
self.authorcontents = []
self.authorrawcontents = []
self.authortitlesidx = []
self.authorbooktitleidx = []
self.coathors = []
        self.coauthorsidx = [] # undirected links, e.g. the dblp coauthorship network
        self.mentionnetwork = {} # directed links, e.g. a tweet mention network
self.id_name = {}
self.coauthornetV2 = []
        # read in the mention network
self.mentionnetwork = self.mention_network()
# read years
self.debugmsg('start year \n', 0)
self.years = []
f = codecs.open(self.f_years,'r','utf-8')
for line in f:
            # remove \n
if line[-1] == '\n':
line = line[:-1]
            if line == '':
                # missing year: record timestamp 0
                self.years.append(0)
                continue
#line = line.split()
#year = line[-1]
timestamp = time.mktime(parser.parse(line).timetuple())
self.years.append(int(timestamp))
f.close()
# read conference
self.debugmsg('start booktitle \n', 0)
self.booktitle = []
f = codecs.open(self.f_booktitle,'r','utf-8')
for line in f:
# remove the \n at the end
line = line[:-1]
self.booktitle.append(line)
f.close()
# read authors
self.debugmsg('start authors \n', 0)
i = 0
m = 0
f = codecs.open(self.f_authors,'r','utf-8')
for line in f:
# remove the last '\n'
line = line[:-1]
# split the authors by ','
newline = line.split(",")
namelist = newline
self.coathors.append(namelist)
authoridx = []
for name in newline:
                # dictionary version
idx = self.authordict.get(name)
if idx is not None:
self.authortitlesidx[idx].append(i)
self.authorbooktitleidx[idx].append(i)
self.authorcontents[idx] = self.authorcontents[idx] + ' ' + self.titles[i]
self.authorrawcontents[idx] = self.authorrawcontents[idx] + ' ' + self.rawtitles[i]
else:
self.authors.append(name)
self.authordict[name] = m
self.authorcontents.append(self.titles[i])
self.authorrawcontents.append(self.rawtitles[i])
self.authortitlesidx.append([i])
self.authorbooktitleidx.append([i])
idx = m
m = m + 1
authoridx.append(idx)
# end dict version
self.coauthorsidx.append(authoridx)
i = i + 1
f.close()
f = codecs.open(self.f_authors_id,'r','utf-8')
i = 0
preline = ''
for line in f:
if preline != line:
#print(i)
#print('preline: {}, line: {}'.format(preline, line))
                if line[-1] == '\n':
                    newline = line[:-1]
                else:
                    newline = line
                self.id_name[self.authors[i]] = newline
preline = line
i = i + 1
else:
continue
#print(i)
f.close()
# load the per and org classification result
self.per_org_label()
self.vectorizer = CountVectorizer(max_df=0.95, min_df=1,stop_words='english')
X = self.vectorizer.fit_transform(self.authorcontents)
#Xarray = X.toarray()
Xarray = X
#plt.plot(hist)
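        # tf-idf weighting down-weights terms shared by many authors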
transformer = TfidfTransformer()
self.tfidf = transformer.fit_transform(Xarray)
#self.tfidfarray = self.tfidf.toarray()
self.tfidfarray = self.tfidf
self.featurenames = self.vectorizer.get_feature_names()
"""
"""
def recommendationV3(self, name, n):
self.nremd = n
self.debugmsg('Will generate recommendations in 3 groups and ' + str(n) + ' for each group', 1)
self.debugmsg('find the idx', 0)
if isinstance(name, str):
#idx = self.authors.index(name)
name = ud.normalize('NFC',name)
authorIdx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
name = name.decode('utf-8')
name = ud.normalize('NFC',name)
authorIdx = self.authordict.get(name)
#content=[]
self.myidx = authorIdx
self.debugmsg('get the feature vector', 0)
featuretfidf = self.tfidfarray[authorIdx]
self.debugmsg('start distance computing \n', 0)
(self.closeauthors, self.closeauthordis) = self.nNNlinesearch(self.tfidfarray,featuretfidf,0)
self.debugmsg('end distance computing \n', 0)
# here we can define the range to apply the otsu for recommendations
        # for example self.closeauthordis[0:1000] or all of them
        self.debugmsg('start otsufilter\n', 0)
splitidx = self.otsufilter(self.closeauthordis)
self.debugmsg('end otsufilter\n', 0)
# splitidx contains the first index of three groups, close, medium, far
# now generate three recommendations in each group
recommendations = []
# save the valid remdidx
remdidx = []
for i in splitidx:
n = 0
backwardcount = 1
while n != self.nremd:
if self.closeauthors[i] != self.myidx:
# skip myself go to next one
remdinfo = self.getremdinfo(i)
                    if remdinfo and i not in remdidx:  # avoid duplicate picks
#print remdinfo
recommendations.append(remdinfo)
n = n + 1
remdidx.append(i)
#self.debugmsg(str(n) + ' ' + str(i), 0)
i = i + 1
                # didn't find the required number of valid recommendations until the end
# start backwards search
if (i == len(self.closeauthordis)) or (backwardcount > 1):
if backwardcount == 1:
backwardstart = i - self.nremd
i = backwardstart - backwardcount
backwardcount = backwardcount + 1
#self.debugmsg('search backward ' + str(i), 0)
        # randomize the order of the recommendations
random.shuffle(recommendations)
self.result=OrderedDict([("name",name),("recommendations",recommendations)])
self.debugmsg('end recommendationV3 \n', 0)
return self.result
"""
"""
def recommendationV4(self, name, n):
self.nremd = n
self.debugmsg('Will generate recommendations in 3 groups and ' + str(n) + ' for each group', 1)
self.debugmsg('find the idx', 0)
if isinstance(name, str):
#idx = self.authors.index(name)
name = ud.normalize('NFC',name)
authorIdx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
name = name.decode('utf-8')
name = ud.normalize('NFC',name)
authorIdx = self.authordict.get(name)
#content=[]
self.myidx = authorIdx
self.debugmsg('get the feature vector', 0)
featuretfidf = self.tfidfarray[authorIdx]
self.debugmsg('start distance computing \n', 0)
(self.closeauthors, self.closeauthordis) = self.nNNlinesearch(self.tfidfarray,featuretfidf,0)
self.debugmsg('end distance computing \n', 0)
# here we can define the range to apply the otsu for recommendations
        # for example self.closeauthordis[0:1000] or all of them
        self.debugmsg('start otsufilter\n', 0)
splitidx = self.otsufilter(self.closeauthordis)
self.debugmsg('end otsufilter\n', 0)
# splitidx contains the first index of three groups, close, medium, far
# now generate three recommendations in each group
recommendations = []
# save the valid remdidx
remdidx = []
for i in splitidx:
n = 0
backwardcount = 1
while n != self.nremd:
if self.closeauthors[i] != self.myidx:
# skip myself go to next one
remdinfo = self.getremdinfoV2(i)
                    if remdinfo and i not in remdidx:  # avoid duplicate picks
#print remdinfo
recommendations.append(remdinfo)
n = n + 1
remdidx.append(i)
#self.debugmsg(str(n) + ' ' + str(i), 0)
i = i + 1
                # didn't find the required number of valid recommendations until the end
# start backwards search
if (i == len(self.closeauthordis)) or (backwardcount > 1):
if backwardcount == 1:
backwardstart = i - self.nremd
i = backwardstart - backwardcount
backwardcount = backwardcount + 1
#self.debugmsg('search backward ' + str(i), 0)
        # randomize the order of the recommendations
random.shuffle(recommendations)
self.result=OrderedDict([("name",name),("recommendations",recommendations)])
self.debugmsg('end recommendationV4 \n', 0)
return self.result
"""
    find n nearest neighbors of point p in the given space using linear search
if n == 0, sort all the points in space
"""
def nNNlinesearch(self, space, p, n):
closeauthordis = []
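        # cosine distance from p to every row of space; 0 means most similar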
closeauthordis = pairwise_distances(space, p, metric='cosine')
closeauthordis = closeauthordis.flatten()
closeauthors = closeauthordis.argsort()
closeauthordis.sort()
if n > 0 :
closeauthors = closeauthors[0:n]
closeauthordis = closeauthordis[0:n]
        # delete myself, because the distance is always 0
idx = np.where(closeauthors == self.myidx)[0][0]
closeauthors = np.delete(closeauthors, idx)
closeauthordis = np.delete(closeauthordis, idx)
return (closeauthors, closeauthordis)
"""
split the distance in to 3 groups using otsu filtering
return the first index of each group
"""
def otsufilter(self, tdis):
trd = np.zeros(3, int)
#tdis = self.filteredcloseauthordis()
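        # two Otsu passes split the sorted distances into close/medium/far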
t1 = filters.threshold_otsu(tdis)
t2 = filters.threshold_otsu(tdis[tdis>t1])
# the first index of each group
# trd[1] = len(tdis[tdis<t1])
# trd[2] = len(tdis) - len(tdis[tdis>t2])
        # pick the middle entries of the medium group
        # and the last entries of the far group
trd[1] = len(tdis[tdis<t1]) + int((len(tdis[tdis<t2]) - len(tdis[tdis<t1]))/2)-1
        trd[2] = len(tdis) - self.nremd
return trd
"""
    extract the detailed information of the recommendation by its index in
    the closeauthors
    ignore unqualified ones which have few papers or are not active
    recently, and also remove my co-authors
"""
def getremdinfo(self, clsidx):
# get the author index from closeauthors
remdidx = self.closeauthors[clsidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
[coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)
if idx.count(self.myidx):
# remove the coauthor
return []
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([("name",name), ("relevancy",self.closeauthordis[clsidx]),("coAuthors",coauthors),("researchTopics",researchtopic), ("recentPublications",recentpub)])
else:
return []
"""
    extract the detailed information of the recommendation by its index in
    the closeauthors
    ignore unqualified ones which have few papers or are not active
    recently, and also remove known people in the mention network
"""
def getremdinfoV2(self, clsidx):
# get the author index from closeauthors
remdidx = self.closeauthors[clsidx]
username = self.authors[self.myidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
#[coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)
mentionlist = self.mentionnetwork[username]
if name in mentionlist:
# skip the coauthor
return []
#
remdid = self.id_name[name]
if self.labels[remdid] == 'org':
return []
# get the recommendation's mention list
coauthors = self.mycoauthorsV4bymentionlist(name)
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([("name",name), ("relevancy",self.closeauthordis[clsidx]),("coAuthors", coauthors),("researchTopics",researchtopic), ("recentPublications",recentpub)])
else:
return []
"""
"""
def updatedistance(self):
# 1st degree connection in coauthorship
deg1con=self.coauthornet[self.myidx,self.closeauthors]
deg1conidx = np.where(deg1con>0)[0]
#deg1con = deg1con[deg1con>0]
# 2nd degree connection in coauthorship
deg2conidx = np.where(deg1con==0)[0]
deg2con = np.zeros(deg2conidx.size)
for i in self.closeauthors[deg1conidx]:
deg2con = deg2con + self.coauthornet[i,self.closeauthors[deg2conidx]]
deg1con = deg1con[deg1con>0]
deg1con = deg1con/max(deg1con)
return (deg1conidx, deg1con,deg2conidx,deg2con)
"""
return the top N recommendations:
    recommendations, coauthors, researchtopics, recentpub (at least 3 and no
    more than 5 years)
"""
def filteredrecommendations(self, n):
recommendations = []
self.filteridx = []
self.filteredauthors = []
i = 0
for name in self.recommendauthor:
#coauthors = []
#researchtopic = []
#recentpub = []
#coauthorsjson = []
#[coauthors, idx, c] = self.mycoauthors(name)
#[coauthors, idx, c] = self.mycoauthorsV2(name)
#[coauthors, idx, c] = self.mycoauthorsV3(name)
[coauthors, idx, c] = self.mycoauthorsV4(name)
# remove the coauthors
if idx.count(self.myidx):
i = i+1
continue
recentpub = self.resentpublications(name)
            # skip authors with no recent publications (no longer active)
if not recentpub:
i = i+1
continue
# --
self.filteredauthors.append(name)
            # computing keywords takes too much time, so it is skipped in tests
# researchtopic = self.keyword(name)
researchtopic = []
researchtopic.append(OrderedDict([("topic", "TBD")]))
#recommendations.append({'name':name, 'coAuthors':coauthors, 'researchTopcs':researchtopic, 'recentPublications':recentpub} )
recommendations.append(OrderedDict([("name",name), ("relevancy",self.closeauthordis[i]),("coAuthors",coauthors),("researchTopics",researchtopic), ("recentPublications",recentpub)]))
#result={'name':user, 'recommendations':recommendations};
# save the picked idx
self.filteridx.append(i)
i = i+1
# only need top n recommendations
if len(self.filteridx) == n:
break
return recommendations
"""
"""
def thresholdrecommendations(self, remds,n):
thredremd = []
self.trd = np.zeros(3)
tdis = self.filteredcloseauthordis()
t1 = filters.threshold_otsu(tdis)
t2 = filters.threshold_otsu(tdis[tdis>t1])
# get the top 3 in each group
self.trd[1] = len(tdis[tdis<t1])
self.trd[2] = len(tdis) - len(tdis[tdis>t2])
# get the top 3 in first group, median 3 in second group,
# last 3 in third group
# self.trd[1] = int((len(tdis[tdis<t2]) - len(tdis[tdis<t1]))/2)-1
# self.trd[2] = len(tdis) - 3
for i in range(3):
for j in range(int(n/3)):
k = int(self.trd[i]+j)
name = remds[k]['name']
researchtopic = self.keyword(name)
remds[k]['researchTopics'] = researchtopic
thredremd.append(remds[k])
return thredremd
"""
"""
def filteredcloseauthordis(self):
return self.closeauthordis[self.filteridx]
"""
"""
def save_json(self,filename):
PROJECT_DIRECTORY = 'output/project/' + project_name + '/'
with io.open(PROJECT_DIRECTORY + filename +'.json','w',encoding="utf-8") as outfile:
outfile.write((json.dumps((self.result), ensure_ascii=False)))
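
# A minimal usage sketch (an illustration only; it assumes the input files
# listed in __init__ exist under output/project/<project_name>/ and that
# 'Alice Example' is a hypothetical author present in authors_target.txt):
#
#   rsys = recommendationsys(2012)                      # activity cutoff year
#   result = rsys.recommendationV4('Alice Example', 3)  # 3 picks per distance group
#   rsys.save_json('recommendations')                   # writes recommendations.json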
|
normal
|
{
"blob_id": "4a8a733a965e25ad7ef53600fad6dd47343655b0",
"index": 8677,
"step-1": "<mask token>\n\n\nclass recommendationsys:\n\n def __init__(self, nyear):\n self.activityyear = 10\n self.debug = 0\n self.nremd = 3\n PROJECT_DIRECTORY = 'output/project/' + project_name\n self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'\n self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'\n self.f_years = PROJECT_DIRECTORY + '/years_target.txt'\n self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'\n self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'\n self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'\n self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'\n self.npaper = 10\n self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())\n self.keywordthreshold = 10\n self.debugmsg('start init', 0)\n self.docluster()\n self.initNLTKConditionalFreqDist()\n self.filterN = len(self.authors)\n self.debugmsg('end init\\n', 0)\n <mask token>\n <mask token>\n <mask token>\n\n def resentpublicationsidx(self, authoridx):\n resentpub = []\n idx = self.authortitlesidx[authoridx]\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:\n return resentpub\n for i in idx:\n authorsjson = []\n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([('name', author)]))\n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\n '%Y-%m-%d %H:%M:%S')\n resentpub.append(OrderedDict([('title', self.rawtitles[i]), (\n 'authors', authorsjson), ('year', date), (\n 'publicationVenue', self.booktitle[i])]))\n return resentpub\n <mask token>\n <mask token>\n\n def initNLTKConditionalFreqDist(self):\n self.debugmsg('start initNLTK CFD\\n', 0)\n pairs = []\n pairs = nltk.bigrams(self.allcorp)\n self.cfd = nltk.ConditionalFreqDist(pairs)\n self.debugmsg('end initNLTK CFD\\n', 0)\n\n def keyword(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n contentjson = []\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n for topic in finalkeywords:\n contentjson.append(OrderedDict([('topic', topic[0])]))\n return contentjson\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def findpluralbigram(self, keywordsinfo):\n c = []\n for i in keywordsinfo:\n t = i[0].split()\n t1 = ''\n for n in t:\n if n[-1] == 's':\n n = n[:-1]\n t1 = t1 + n\n c.append(t1)\n uniqbigram = list(set(c))\n pluralidx = []\n for i in uniqbigram:\n count = c.count(i)\n if count > 1:\n cc = []\n for n in range(len(c)):\n if i == c[n]:\n cc.append(n)\n pluralidx.append(cc)\n return pluralidx\n <mask token>\n <mask token>\n <mask token>\n\n def mycoauthorsV3(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), 
list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4byidx(self, idx):\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def mynewcoauthors(self, userIdx, yearPre, yearCur):\n coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)\n coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)\n newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)\n return newCoauthors\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def contentsimilarity(self, userX, userY, year):\n contentX = self.getcontentbyyear(userX, year)\n if not contentX:\n return -1\n contentX = contentX[0:10]\n contentY = self.getcontentbyyear(userY, year)\n if not contentY:\n return -1\n contentY = contentY[0:10]\n contents = []\n for i in contentX:\n contents.extend(i.split(' '))\n lenx = len(contents)\n for i in contentY:\n contents.extend(i.split(' '))\n stemmer = nltk.stem.PorterStemmer()\n stems = [stemmer.stem(t) for t in contents]\n newcontentX = stems[0:lenx]\n newcontentY = stems[lenx:]\n vectorizer = CountVectorizer()\n v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(\n newcontentY)])\n cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0\n ]\n return cosinesimilarity\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def radiusofcluster(self, labels, nth, dismatrix):\n idx = np.where(labels == nth)[0]\n dis = dismatrix[idx, nth]\n self.mindis = min(dis)\n self.maxdis = max(dis)\n self.radius = self.maxdis\n <mask token>\n\n def showcontents(self, labels, nth, allcontents):\n contents = []\n idx = np.where(labels == nth)\n idx = np.array(idx)\n idx = idx.flatten()\n for i in idx:\n contents.append(allcontents[i])\n return contents\n <mask token>\n\n def digstring(self, s):\n for i in s:\n if i.isdigit():\n return True\n return False\n <mask token>\n\n def distance(self, a, b):\n if scipy.sparse.issparse(a):\n a = a.toarray()\n a = a[0]\n if scipy.sparse.issparse(b):\n b = b.toarray()\n b = b[0]\n a = np.array(a)\n b = np.array(b)\n return np.sqrt(sum(np.square(a - b)))\n <mask token>\n <mask token>\n <mask token>\n\n def per_org_label(self):\n f = 
codecs.open(self.f_perorglabel, 'r', 'utf-8')\n labels = {}\n for line in f:\n items = line.split()\n labels[items[0]] = items[1]\n f.close()\n self.labels = labels\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def recommendationV4(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' +\n str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n else:\n name = name.decode('utf-8')\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n self.myidx = authorIdx\n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n self.debugmsg('start distance computing \\n', 0)\n self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.\n tfidfarray, featuretfidf, 0)\n self.debugmsg('end distance computing \\n', 0)\n self.debugmsg('start otsuifilter\\n', 0)\n splitidx = self.otsufilter(self.closeauthordis)\n self.debugmsg('end otsufilter\\n', 0)\n recommendations = []\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n remdinfo = self.getremdinfoV2(i)\n if remdinfo and not remdidx.count(i):\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n i = i + 1\n if i == len(self.closeauthordis) or backwardcount > 1:\n if backwardcount == 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n random.shuffle(recommendations)\n self.result = OrderedDict([('name', name), ('recommendations',\n recommendations)])\n self.debugmsg('end recommendationV4 \\n', 0)\n return self.result\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getremdinfo(self, clsidx):\n remdidx = self.closeauthors[clsidx]\n recentpub = self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n [coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)\n if idx.count(self.myidx):\n return []\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), ('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def getremdinfoV2(self, clsidx):\n remdidx = self.closeauthors[clsidx]\n username = self.authors[self.myidx]\n recentpub = self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n mentionlist = self.mentionnetwork[username]\n if name in mentionlist:\n return []\n remdid = self.id_name[name]\n if self.labels[remdid] == 'org':\n return []\n coauthors = self.mycoauthorsV4bymentionlist(name)\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), ('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n <mask token>\n <mask token>\n\n def filteredrecommendations(self, n):\n recommendations = []\n self.filteridx = []\n self.filteredauthors = []\n i = 0\n for name in self.recommendauthor:\n [coauthors, idx, c] = self.mycoauthorsV4(name)\n if idx.count(self.myidx):\n i = i + 1\n continue\n recentpub = self.resentpublications(name)\n if not recentpub:\n i = i + 1\n continue\n self.filteredauthors.append(name)\n researchtopic = []\n 
researchtopic.append(OrderedDict([('topic', 'TBD')]))\n recommendations.append(OrderedDict([('name', name), (\n 'relevancy', self.closeauthordis[i]), ('coAuthors',\n coauthors), ('researchTopics', researchtopic), (\n 'recentPublications', recentpub)]))\n self.filteridx.append(i)\n i = i + 1\n if len(self.filteridx) == n:\n break\n return recommendations\n <mask token>\n\n def thresholdrecommendations(self, remds, n):\n thredremd = []\n self.trd = np.zeros(3)\n tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis > t1])\n self.trd[1] = len(tdis[tdis < t1])\n self.trd[2] = len(tdis) - len(tdis[tdis > t2])\n for i in range(3):\n for j in range(int(n / 3)):\n k = int(self.trd[i] + j)\n name = remds[k]['name']\n researchtopic = self.keyword(name)\n remds[k]['researchTopics'] = researchtopic\n thredremd.append(remds[k])\n return thredremd\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
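The step-1 code above already shows the heart of the keyword extractor: initNLTKConditionalFreqDist() builds an nltk.ConditionalFreqDist over corpus bigrams, and the (still partly masked) bigram lookup reads cfd[w1][w2] as "how often w2 follows w1". A minimal, self-contained sketch of that idea; the toy corpus and threshold value here are illustrative assumptions, not the project's data:

import nltk

# Toy corpus standing in for self.allcorp (assumption, not the project's data).
corpus = ('social network analysis meets social media '
          'social network mining').split()

# cfd[w1][w2] = number of times w2 follows w1, as in initNLTKConditionalFreqDist().
cfd = nltk.ConditionalFreqDist(nltk.bigrams(corpus))

threshold = 2  # stands in for self.keywordthreshold
for w1, w2 in set(nltk.bigrams(corpus)):
    if cfd[w1][w2] >= threshold:
        print(w1, w2, cfd[w1][w2])  # -> social network 2

Bigrams that recur above the threshold ("social network" here) are exactly the kind of pairs keyword()/bigramkeywords() promote to research topics.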
"step-2": "<mask token>\n\n\nclass recommendationsys:\n\n def __init__(self, nyear):\n self.activityyear = 10\n self.debug = 0\n self.nremd = 3\n PROJECT_DIRECTORY = 'output/project/' + project_name\n self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'\n self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'\n self.f_years = PROJECT_DIRECTORY + '/years_target.txt'\n self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'\n self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'\n self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'\n self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'\n self.npaper = 10\n self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())\n self.keywordthreshold = 10\n self.debugmsg('start init', 0)\n self.docluster()\n self.initNLTKConditionalFreqDist()\n self.filterN = len(self.authors)\n self.debugmsg('end init\\n', 0)\n <mask token>\n\n def debugmsg(self, msg, lvl):\n if self.debug <= lvl:\n print(msg)\n <mask token>\n\n def resentpublicationsidx(self, authoridx):\n resentpub = []\n idx = self.authortitlesidx[authoridx]\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:\n return resentpub\n for i in idx:\n authorsjson = []\n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([('name', author)]))\n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\n '%Y-%m-%d %H:%M:%S')\n resentpub.append(OrderedDict([('title', self.rawtitles[i]), (\n 'authors', authorsjson), ('year', date), (\n 'publicationVenue', self.booktitle[i])]))\n return resentpub\n <mask token>\n <mask token>\n\n def initNLTKConditionalFreqDist(self):\n self.debugmsg('start initNLTK CFD\\n', 0)\n pairs = []\n pairs = nltk.bigrams(self.allcorp)\n self.cfd = nltk.ConditionalFreqDist(pairs)\n self.debugmsg('end initNLTK CFD\\n', 0)\n\n def keyword(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n contentjson = []\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n for topic in finalkeywords:\n contentjson.append(OrderedDict([('topic', topic[0])]))\n return contentjson\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def findpluralbigram(self, keywordsinfo):\n c = []\n for i in keywordsinfo:\n t = i[0].split()\n t1 = ''\n for n in t:\n if n[-1] == 's':\n n = n[:-1]\n t1 = t1 + n\n c.append(t1)\n uniqbigram = list(set(c))\n pluralidx = []\n for i in uniqbigram:\n count = c.count(i)\n if count > 1:\n cc = []\n for n in range(len(c)):\n if i == c[n]:\n cc.append(n)\n pluralidx.append(cc)\n return pluralidx\n <mask token>\n <mask token>\n <mask token>\n\n def mycoauthorsV3(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n 
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4byidx(self, idx):\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def mynewcoauthors(self, userIdx, yearPre, yearCur):\n coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)\n coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)\n newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)\n return newCoauthors\n <mask token>\n <mask token>\n <mask token>\n\n def secondhoopties(self, userX, userY, year):\n result = []\n coauthors1, count1 = self.mycoauthorsbyyear(userX, 2016)\n for i in coauthors1:\n coauthors2, count2 = self.mycoauthorsbyyear(i, 2016)\n for n in coauthors2:\n coauthors3, count3 = self.mycoauthorsbyyear(n, 2016)\n if userY in coauthors3:\n result.append([[i, n], [count1[coauthors1.index(i)],\n count2[coauthors2.index(n)], count3[coauthors3.\n index(userY)]]])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def contentsimilarity(self, userX, userY, year):\n contentX = self.getcontentbyyear(userX, year)\n if not contentX:\n return -1\n contentX = contentX[0:10]\n contentY = self.getcontentbyyear(userY, year)\n if not contentY:\n return -1\n contentY = contentY[0:10]\n contents = []\n for i in contentX:\n contents.extend(i.split(' '))\n lenx = len(contents)\n for i in contentY:\n contents.extend(i.split(' '))\n stemmer = nltk.stem.PorterStemmer()\n stems = [stemmer.stem(t) for t in contents]\n newcontentX = stems[0:lenx]\n newcontentY = stems[lenx:]\n vectorizer = CountVectorizer()\n v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(\n newcontentY)])\n cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0\n ]\n return cosinesimilarity\n <mask token>\n <mask token>\n <mask token>\n\n def textnormalizing(self, text):\n c = 0\n for i in text:\n if i[-1] == 's':\n ii = i[:-1]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-2:] == 'es':\n ii = i[:-2]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-3:] == 'ies':\n ii = i[:-3] + 'y'\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-3:] == 'ing':\n ii = i[:-3]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n ii = i[:-4]\n if ii in 
text:\n text[c] = ii\n c = c + 1\n continue\n ii = i[:-3] + 'e'\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n c = c + 1\n return text\n <mask token>\n <mask token>\n\n def radiusofcluster(self, labels, nth, dismatrix):\n idx = np.where(labels == nth)[0]\n dis = dismatrix[idx, nth]\n self.mindis = min(dis)\n self.maxdis = max(dis)\n self.radius = self.maxdis\n <mask token>\n\n def showcontents(self, labels, nth, allcontents):\n contents = []\n idx = np.where(labels == nth)\n idx = np.array(idx)\n idx = idx.flatten()\n for i in idx:\n contents.append(allcontents[i])\n return contents\n <mask token>\n\n def digstring(self, s):\n for i in s:\n if i.isdigit():\n return True\n return False\n <mask token>\n\n def distance(self, a, b):\n if scipy.sparse.issparse(a):\n a = a.toarray()\n a = a[0]\n if scipy.sparse.issparse(b):\n b = b.toarray()\n b = b[0]\n a = np.array(a)\n b = np.array(b)\n return np.sqrt(sum(np.square(a - b)))\n <mask token>\n <mask token>\n <mask token>\n\n def per_org_label(self):\n f = codecs.open(self.f_perorglabel, 'r', 'utf-8')\n labels = {}\n for line in f:\n items = line.split()\n labels[items[0]] = items[1]\n f.close()\n self.labels = labels\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def recommendationV4(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' +\n str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n else:\n name = name.decode('utf-8')\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n self.myidx = authorIdx\n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n self.debugmsg('start distance computing \\n', 0)\n self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.\n tfidfarray, featuretfidf, 0)\n self.debugmsg('end distance computing \\n', 0)\n self.debugmsg('start otsuifilter\\n', 0)\n splitidx = self.otsufilter(self.closeauthordis)\n self.debugmsg('end otsufilter\\n', 0)\n recommendations = []\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n remdinfo = self.getremdinfoV2(i)\n if remdinfo and not remdidx.count(i):\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n i = i + 1\n if i == len(self.closeauthordis) or backwardcount > 1:\n if backwardcount 
= self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n mentionlist = self.mentionnetwork[username]\n if name in mentionlist:\n return []\n remdid = self.id_name[name]\n if self.labels[remdid] == 'org':\n return []\n coauthors = self.mycoauthorsV4bymentionlist(name)\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), ('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def updatedistance(self):\n deg1con = self.coauthornet[self.myidx, self.closeauthors]\n deg1conidx = np.where(deg1con > 0)[0]\n deg2conidx = np.where(deg1con == 0)[0]\n deg2con = np.zeros(deg2conidx.size)\n for i in self.closeauthors[deg1conidx]:\n deg2con = deg2con + self.coauthornet[i, self.closeauthors[\n deg2conidx]]\n deg1con = deg1con[deg1con > 0]\n deg1con = deg1con / max(deg1con)\n return deg1conidx, deg1con, deg2conidx, deg2con\n <mask token>\n\n def filteredrecommendations(self, n):\n recommendations = []\n self.filteridx = []\n self.filteredauthors = []\n i = 0\n for name in self.recommendauthor:\n [coauthors, idx, c] = self.mycoauthorsV4(name)\n if idx.count(self.myidx):\n i = i + 1\n continue\n recentpub = self.resentpublications(name)\n if not recentpub:\n i = i + 1\n continue\n self.filteredauthors.append(name)\n researchtopic = []\n researchtopic.append(OrderedDict([('topic', 'TBD')]))\n recommendations.append(OrderedDict([('name', name), (\n 'relevancy', self.closeauthordis[i]), ('coAuthors',\n coauthors), ('researchTopics', researchtopic), (\n 'recentPublications', recentpub)]))\n self.filteridx.append(i)\n i = i + 1\n if len(self.filteridx) == n:\n break\n return recommendations\n <mask token>\n\n def thresholdrecommendations(self, remds, n):\n thredremd = []\n self.trd = np.zeros(3)\n tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis > t1])\n self.trd[1] = len(tdis[tdis < t1])\n self.trd[2] = len(tdis) - len(tdis[tdis > t2])\n for i in range(3):\n for j in range(int(n / 3)):\n k = int(self.trd[i] + j)\n name = remds[k]['name']\n researchtopic = self.keyword(name)\n remds[k]['researchTopics'] = researchtopic\n thredremd.append(remds[k])\n return thredremd\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
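Step-2 leaves the grouping machinery central: thresholdrecommendations() (and the otsufilter call inside recommendationV4) applies skimage's Otsu threshold twice to the 1-D array of cosine distances, carving the ranked candidates into three relevancy bands. A hedged sketch of that double split; the random distance array is an assumption standing in for self.closeauthordis:

import numpy as np
from skimage import filters

rng = np.random.default_rng(0)
dis = np.sort(rng.random(200))  # synthetic sorted cosine distances (assumption)

t1 = filters.threshold_otsu(dis)            # separates the "near" band
t2 = filters.threshold_otsu(dis[dis > t1])  # separates "mid" from "far" in the rest

near = dis[dis < t1]
mid = dis[(dis >= t1) & (dis <= t2)]
far = dis[dis > t2]
print(len(near), len(mid), len(far))  # three groups, one per recommendation band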
"step-3": "<mask token>\n\n\nclass recommendationsys:\n\n def __init__(self, nyear):\n self.activityyear = 10\n self.debug = 0\n self.nremd = 3\n PROJECT_DIRECTORY = 'output/project/' + project_name\n self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'\n self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'\n self.f_years = PROJECT_DIRECTORY + '/years_target.txt'\n self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'\n self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'\n self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'\n self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'\n self.npaper = 10\n self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())\n self.keywordthreshold = 10\n self.debugmsg('start init', 0)\n self.docluster()\n self.initNLTKConditionalFreqDist()\n self.filterN = len(self.authors)\n self.debugmsg('end init\\n', 0)\n <mask token>\n\n def debugmsg(self, msg, lvl):\n if self.debug <= lvl:\n print(msg)\n <mask token>\n\n def resentpublicationsidx(self, authoridx):\n resentpub = []\n idx = self.authortitlesidx[authoridx]\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:\n return resentpub\n for i in idx:\n authorsjson = []\n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([('name', author)]))\n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\n '%Y-%m-%d %H:%M:%S')\n resentpub.append(OrderedDict([('title', self.rawtitles[i]), (\n 'authors', authorsjson), ('year', date), (\n 'publicationVenue', self.booktitle[i])]))\n return resentpub\n <mask token>\n\n def resentpublications(self, name):\n resentpub = []\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n idx = self.authortitlesidx[idx]\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:\n return resentpub\n for i in idx:\n authorsjson = []\n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([('name', author)]))\n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\n '%Y-%m-%d %H:%M:%S')\n resentpub.append(OrderedDict([('title', self.rawtitles[i]), (\n 'authors', authorsjson), ('year', date), (\n 'publicationVenue', self.booktitle[i])]))\n return resentpub\n\n def initNLTKConditionalFreqDist(self):\n self.debugmsg('start initNLTK CFD\\n', 0)\n pairs = []\n pairs = nltk.bigrams(self.allcorp)\n self.cfd = nltk.ConditionalFreqDist(pairs)\n self.debugmsg('end initNLTK CFD\\n', 0)\n\n def keyword(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n contentjson = []\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n for topic in finalkeywords:\n contentjson.append(OrderedDict([('topic', topic[0])]))\n return contentjson\n <mask token>\n\n def keywordbyidx(self, idx):\n contentjson = []\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n for topic in finalkeywords:\n contentjson.append(OrderedDict([('topic', topic[0])]))\n return contentjson\n <mask token>\n\n def bigramkeywords(self, text):\n content = text\n userpairs = 
list(nltk.bigrams(content))\n keywordsbackup = []\n keywords = []\n for p in userpairs:\n pairsdic = self.cfd[p[0]]\n n = pairsdic[p[1]]\n if n >= self.keywordthreshold:\n keywords.append((p, n))\n keywordsbackup.append((p, n))\n finalkeywords = []\n uniqkeywords = set(keywords)\n keywords = sorted(uniqkeywords, key=lambda keywords: keywords[1])\n for p in keywords:\n if p[1] >= 25 or userpairs.count(p[0]) > 1:\n finalkeywords.append([' '.join(p[0]), p[1], userpairs.count\n (p[0])])\n finalkeywords.reverse()\n if not finalkeywords:\n uniqkeywords = set(keywordsbackup)\n keywordsbackup = sorted(uniqkeywords, key=lambda keywordsbackup:\n keywordsbackup[1])\n finalkeywords.append([' '.join(keywordsbackup[-1][0]),\n keywordsbackup[-1][1], userpairs.count(keywordsbackup[0])])\n else:\n pluralidx = self.findpluralbigram(finalkeywords)\n self.removepluralbigram(finalkeywords, pluralidx)\n return finalkeywords\n <mask token>\n <mask token>\n <mask token>\n\n def findpluralbigram(self, keywordsinfo):\n c = []\n for i in keywordsinfo:\n t = i[0].split()\n t1 = ''\n for n in t:\n if n[-1] == 's':\n n = n[:-1]\n t1 = t1 + n\n c.append(t1)\n uniqbigram = list(set(c))\n pluralidx = []\n for i in uniqbigram:\n count = c.count(i)\n if count > 1:\n cc = []\n for n in range(len(c)):\n if i == c[n]:\n cc.append(n)\n pluralidx.append(cc)\n return pluralidx\n <mask token>\n\n def mycoauthorsV2(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthorship = self.coauthornetV2[idx]\n uniqcoauthors = np.array(list(set(coauthorship)))\n coauthorcount = []\n for i in uniqcoauthors:\n coauthorcount.append(coauthorship.count(i))\n countidx = np.argsort(coauthorcount)\n countidx = countidx[::-1]\n coauthorcount = np.array(coauthorcount)\n result = []\n for i in countidx:\n result.append(OrderedDict([('name', self.authors[uniqcoauthors[\n i]]), ('cooperationCount', coauthorcount[i])]))\n return result, list(uniqcoauthors[countidx]), list(coauthorcount[\n countidx])\n <mask token>\n\n def mycoauthorsV3(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4byidx(self, idx):\n coauthors 
= []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4bymentionlist(self, name):\n if name in self.mentionnetwork.keys():\n mentiondict = self.mentionnetwork[name]\n else:\n mentiondict = {'None': 0}\n result = []\n sorted_mentiondict = sorted(mentiondict.items(), key=operator.\n itemgetter(1), reverse=True)\n for i in sorted_mentiondict:\n result.append(OrderedDict([('name', i[0]), ('cooperationCount',\n i[1])]))\n return result\n <mask token>\n <mask token>\n <mask token>\n\n def mynewcoauthors(self, userIdx, yearPre, yearCur):\n coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)\n coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)\n newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)\n return newCoauthors\n <mask token>\n <mask token>\n <mask token>\n\n def secondhoopties(self, userX, userY, year):\n result = []\n coauthors1, count1 = self.mycoauthorsbyyear(userX, 2016)\n for i in coauthors1:\n coauthors2, count2 = self.mycoauthorsbyyear(i, 2016)\n for n in coauthors2:\n coauthors3, count3 = self.mycoauthorsbyyear(n, 2016)\n if userY in coauthors3:\n result.append([[i, n], [count1[coauthors1.index(i)],\n count2[coauthors2.index(n)], count3[coauthors3.\n index(userY)]]])\n <mask token>\n <mask token>\n <mask token>\n\n def getVenue(self, userIdx):\n venues = self.authorbooktitleidx[userIdx]\n c = Counter(venues)\n frqvenues = c.most_common()\n return frqvenues[0][0]\n <mask token>\n\n def contentsimilarity(self, userX, userY, year):\n contentX = self.getcontentbyyear(userX, year)\n if not contentX:\n return -1\n contentX = contentX[0:10]\n contentY = self.getcontentbyyear(userY, year)\n if not contentY:\n return -1\n contentY = contentY[0:10]\n contents = []\n for i in contentX:\n contents.extend(i.split(' '))\n lenx = len(contents)\n for i in contentY:\n contents.extend(i.split(' '))\n stemmer = nltk.stem.PorterStemmer()\n stems = [stemmer.stem(t) for t in contents]\n newcontentX = stems[0:lenx]\n newcontentY = stems[lenx:]\n vectorizer = CountVectorizer()\n v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(\n newcontentY)])\n cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0\n ]\n return cosinesimilarity\n <mask token>\n\n def networksimilarity(self, userX, userY, year):\n coauthors, c = self.mycoauthorsbyyear(userX, year)\n edgesFG = len(coauthors)\n n = 0\n for i in coauthors:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(coauthors[n:]))\n edgesFG = edgesFG + len(con)\n n = n + 1\n weakties, cx, cy = self.weakties(userX, userY, year)\n edgesMFG = 2 * len(weakties)\n n = 0\n for i in weakties:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(weakties[n:]))\n edgesMFG = edgesMFG + len(con)\n n = n + 1\n if edgesFG * edgesMFG:\n ns = np.log(edgesMFG) / np.log(2 * edgesFG)\n else:\n ns = -1\n return ns, edgesFG, edgesMFG, cx, cy\n <mask token>\n\n def textnormalizing(self, text):\n c = 0\n for i in text:\n if i[-1] == 
's':\n ii = i[:-1]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-2:] == 'es':\n ii = i[:-2]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-3:] == 'ies':\n ii = i[:-3] + 'y'\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-3:] == 'ing':\n ii = i[:-3]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n ii = i[:-4]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n ii = i[:-3] + 'e'\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n c = c + 1\n return text\n <mask token>\n <mask token>\n\n def radiusofcluster(self, labels, nth, dismatrix):\n idx = np.where(labels == nth)[0]\n dis = dismatrix[idx, nth]\n self.mindis = min(dis)\n self.maxdis = max(dis)\n self.radius = self.maxdis\n <mask token>\n\n def showcontents(self, labels, nth, allcontents):\n contents = []\n idx = np.where(labels == nth)\n idx = np.array(idx)\n idx = idx.flatten()\n for i in idx:\n contents.append(allcontents[i])\n return contents\n <mask token>\n\n def digstring(self, s):\n for i in s:\n if i.isdigit():\n return True\n return False\n <mask token>\n\n def distance(self, a, b):\n if scipy.sparse.issparse(a):\n a = a.toarray()\n a = a[0]\n if scipy.sparse.issparse(b):\n b = b.toarray()\n b = b[0]\n a = np.array(a)\n b = np.array(b)\n return np.sqrt(sum(np.square(a - b)))\n <mask token>\n\n def updatecoauthornetworkV2(self, net, authors, namelist):\n nameidx = []\n for name in namelist:\n nameidx.append(authors.index(name))\n for i in nameidx:\n tmpidx = nameidx[:]\n tmpidx.remove(i)\n if not net:\n net.append(tmpidx)\n elif i > len(net) - 1:\n net.append(tmpidx)\n else:\n net[i].extend(tmpidx)\n <mask token>\n\n def per_org_label(self):\n f = codecs.open(self.f_perorglabel, 'r', 'utf-8')\n labels = {}\n for line in f:\n items = line.split()\n labels[items[0]] = items[1]\n f.close()\n self.labels = labels\n <mask token>\n\n def mention_network(self):\n f = codecs.open(self.f_mentionnetwork, 'r', 'utf-8')\n source = ''\n network = {}\n for line in f:\n items = line.split('\"')\n if source == '':\n source = items[0]\n target = {}\n if source == items[0]:\n target[items[1]] = int(items[2])\n else:\n network[source] = target\n source = items[0]\n target = {items[1]: int(items[2])}\n if source != '':\n network[source] = target\n f.close()\n return network\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def recommendationV4(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' +\n str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n else:\n name = name.decode('utf-8')\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n self.myidx = authorIdx\n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n self.debugmsg('start distance computing \\n', 0)\n self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.\n tfidfarray, featuretfidf, 0)\n self.debugmsg('end distance computing \\n', 0)\n self.debugmsg('start otsuifilter\\n', 0)\n splitidx = self.otsufilter(self.closeauthordis)\n self.debugmsg('end otsufilter\\n', 0)\n recommendations = []\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n remdinfo = self.getremdinfoV2(i)\n if remdinfo and not remdidx.count(i):\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n i = i + 1\n if i == len(self.closeauthordis) or backwardcount > 1:\n if backwardcount 
== 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n random.shuffle(recommendations)\n self.result = OrderedDict([('name', name), ('recommendations',\n recommendations)])\n self.debugmsg('end recommendationV4 \\n', 0)\n return self.result\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getremdinfo(self, clsidx):\n remdidx = self.closeauthors[clsidx]\n recentpub = self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n [coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)\n if idx.count(self.myidx):\n return []\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), ('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def getremdinfoV2(self, clsidx):\n remdidx = self.closeauthors[clsidx]\n username = self.authors[self.myidx]\n recentpub = self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n mentionlist = self.mentionnetwork[username]\n if name in mentionlist:\n return []\n remdid = self.id_name[name]\n if self.labels[remdid] == 'org':\n return []\n coauthors = self.mycoauthorsV4bymentionlist(name)\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), ('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def updatedistance(self):\n deg1con = self.coauthornet[self.myidx, self.closeauthors]\n deg1conidx = np.where(deg1con > 0)[0]\n deg2conidx = np.where(deg1con == 0)[0]\n deg2con = np.zeros(deg2conidx.size)\n for i in self.closeauthors[deg1conidx]:\n deg2con = deg2con + self.coauthornet[i, self.closeauthors[\n deg2conidx]]\n deg1con = deg1con[deg1con > 0]\n deg1con = deg1con / max(deg1con)\n return deg1conidx, deg1con, deg2conidx, deg2con\n <mask token>\n\n def filteredrecommendations(self, n):\n recommendations = []\n self.filteridx = []\n self.filteredauthors = []\n i = 0\n for name in self.recommendauthor:\n [coauthors, idx, c] = self.mycoauthorsV4(name)\n if idx.count(self.myidx):\n i = i + 1\n continue\n recentpub = self.resentpublications(name)\n if not recentpub:\n i = i + 1\n continue\n self.filteredauthors.append(name)\n researchtopic = []\n researchtopic.append(OrderedDict([('topic', 'TBD')]))\n recommendations.append(OrderedDict([('name', name), (\n 'relevancy', self.closeauthordis[i]), ('coAuthors',\n coauthors), ('researchTopics', researchtopic), (\n 'recentPublications', recentpub)]))\n self.filteridx.append(i)\n i = i + 1\n if len(self.filteridx) == n:\n break\n return recommendations\n <mask token>\n\n def thresholdrecommendations(self, remds, n):\n thredremd = []\n self.trd = np.zeros(3)\n tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis > t1])\n self.trd[1] = len(tdis[tdis < t1])\n self.trd[2] = len(tdis) - len(tdis[tdis > t2])\n for i in range(3):\n for j in range(int(n / 3)):\n k = int(self.trd[i] + j)\n name = remds[k]['name']\n researchtopic = self.keyword(name)\n remds[k]['researchTopics'] = researchtopic\n thredremd.append(remds[k])\n return thredremd\n <mask token>\n\n def filteredcloseauthordis(self):\n return self.closeauthordis[self.filteridx]\n <mask token>\n <mask token>\n",
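Step-3 unmasks contentsimilarity(), which Porter-stems up to ten recent titles per author and compares the two stemmed bags with CountVectorizer. One reading aid: sklearn's pairwise_distances(..., metric='cosine') returns a cosine distance (1 minus the similarity), so despite the variable name cosinesimilarity, 0.0 means identical content and larger means less similar. A minimal sketch of the same pipeline; the two title strings are made-up examples:

import nltk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import pairwise_distances

stemmer = nltk.stem.PorterStemmer()
x = [stemmer.stem(w) for w in 'mining social networks'.split()]
y = [stemmer.stem(w) for w in 'social network mining'.split()]

v = CountVectorizer().fit_transform([' '.join(x), ' '.join(y)])
d = pairwise_distances(v[0], v[1], metric='cosine')[0][0]
print(d)  # ~0.0: a distance, so identical stemmed bags score zero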
"step-4": "<mask token>\n\n\nclass recommendationsys:\n\n def __init__(self, nyear):\n self.activityyear = 10\n self.debug = 0\n self.nremd = 3\n PROJECT_DIRECTORY = 'output/project/' + project_name\n self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'\n self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'\n self.f_years = PROJECT_DIRECTORY + '/years_target.txt'\n self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'\n self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'\n self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'\n self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'\n self.npaper = 10\n self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())\n self.keywordthreshold = 10\n self.debugmsg('start init', 0)\n self.docluster()\n self.initNLTKConditionalFreqDist()\n self.filterN = len(self.authors)\n self.debugmsg('end init\\n', 0)\n <mask token>\n\n def debugmsg(self, msg, lvl):\n if self.debug <= lvl:\n print(msg)\n <mask token>\n\n def resentpublicationsidx(self, authoridx):\n resentpub = []\n idx = self.authortitlesidx[authoridx]\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:\n return resentpub\n for i in idx:\n authorsjson = []\n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([('name', author)]))\n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\n '%Y-%m-%d %H:%M:%S')\n resentpub.append(OrderedDict([('title', self.rawtitles[i]), (\n 'authors', authorsjson), ('year', date), (\n 'publicationVenue', self.booktitle[i])]))\n return resentpub\n <mask token>\n\n def resentpublications(self, name):\n resentpub = []\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n idx = self.authortitlesidx[idx]\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:\n return resentpub\n for i in idx:\n authorsjson = []\n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([('name', author)]))\n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\n '%Y-%m-%d %H:%M:%S')\n resentpub.append(OrderedDict([('title', self.rawtitles[i]), (\n 'authors', authorsjson), ('year', date), (\n 'publicationVenue', self.booktitle[i])]))\n return resentpub\n\n def initNLTKConditionalFreqDist(self):\n self.debugmsg('start initNLTK CFD\\n', 0)\n pairs = []\n pairs = nltk.bigrams(self.allcorp)\n self.cfd = nltk.ConditionalFreqDist(pairs)\n self.debugmsg('end initNLTK CFD\\n', 0)\n\n def keyword(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n contentjson = []\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n for topic in finalkeywords:\n contentjson.append(OrderedDict([('topic', topic[0])]))\n return contentjson\n <mask token>\n\n def keywordbyidx(self, idx):\n contentjson = []\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n for topic in finalkeywords:\n contentjson.append(OrderedDict([('topic', topic[0])]))\n return contentjson\n <mask token>\n\n def bigramkeywords(self, text):\n content = text\n userpairs = 
list(nltk.bigrams(content))\n keywordsbackup = []\n keywords = []\n for p in userpairs:\n pairsdic = self.cfd[p[0]]\n n = pairsdic[p[1]]\n if n >= self.keywordthreshold:\n keywords.append((p, n))\n keywordsbackup.append((p, n))\n finalkeywords = []\n uniqkeywords = set(keywords)\n keywords = sorted(uniqkeywords, key=lambda keywords: keywords[1])\n for p in keywords:\n if p[1] >= 25 or userpairs.count(p[0]) > 1:\n finalkeywords.append([' '.join(p[0]), p[1], userpairs.count\n (p[0])])\n finalkeywords.reverse()\n if not finalkeywords:\n uniqkeywords = set(keywordsbackup)\n keywordsbackup = sorted(uniqkeywords, key=lambda keywordsbackup:\n keywordsbackup[1])\n finalkeywords.append([' '.join(keywordsbackup[-1][0]),\n keywordsbackup[-1][1], userpairs.count(keywordsbackup[0])])\n else:\n pluralidx = self.findpluralbigram(finalkeywords)\n self.removepluralbigram(finalkeywords, pluralidx)\n return finalkeywords\n <mask token>\n\n def removepluralbigram(self, bigram, pluralidx):\n if not pluralidx:\n print('empty')\n return\n delcount = 0\n pren = 0\n for i in pluralidx:\n for n in i[1:]:\n if n > pren:\n n = n - delcount\n bigram[i[0]][1] = bigram[i[0]][1] + bigram[n][1]\n bigram.remove(bigram[n])\n delcount = delcount + 1\n pren = n\n <mask token>\n\n def findpluralbigram(self, keywordsinfo):\n c = []\n for i in keywordsinfo:\n t = i[0].split()\n t1 = ''\n for n in t:\n if n[-1] == 's':\n n = n[:-1]\n t1 = t1 + n\n c.append(t1)\n uniqbigram = list(set(c))\n pluralidx = []\n for i in uniqbigram:\n count = c.count(i)\n if count > 1:\n cc = []\n for n in range(len(c)):\n if i == c[n]:\n cc.append(n)\n pluralidx.append(cc)\n return pluralidx\n <mask token>\n\n def mycoauthorsV2(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthorship = self.coauthornetV2[idx]\n uniqcoauthors = np.array(list(set(coauthorship)))\n coauthorcount = []\n for i in uniqcoauthors:\n coauthorcount.append(coauthorship.count(i))\n countidx = np.argsort(coauthorcount)\n countidx = countidx[::-1]\n coauthorcount = np.array(coauthorcount)\n result = []\n for i in countidx:\n result.append(OrderedDict([('name', self.authors[uniqcoauthors[\n i]]), ('cooperationCount', coauthorcount[i])]))\n return result, list(uniqcoauthors[countidx]), list(coauthorcount[\n countidx])\n <mask token>\n\n def mycoauthorsV3(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n 
for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4byidx(self, idx):\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4bymentionlist(self, name):\n if name in self.mentionnetwork.keys():\n mentiondict = self.mentionnetwork[name]\n else:\n mentiondict = {'None': 0}\n result = []\n sorted_mentiondict = sorted(mentiondict.items(), key=operator.\n itemgetter(1), reverse=True)\n for i in sorted_mentiondict:\n result.append(OrderedDict([('name', i[0]), ('cooperationCount',\n i[1])]))\n return result\n <mask token>\n\n def mycoauthorsbyyear(self, idx, year):\n years = np.array(self.years)\n yearidx = np.where(years <= year)[0]\n coauthorsidx = [self.coauthorsidx[i] for i in yearidx]\n coauthors = []\n for i in coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n return list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mynewcoauthors(self, userIdx, yearPre, yearCur):\n coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)\n coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)\n newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)\n return newCoauthors\n <mask token>\n <mask token>\n <mask token>\n\n def secondhoopties(self, userX, userY, year):\n result = []\n coauthors1, count1 = self.mycoauthorsbyyear(userX, 2016)\n for i in coauthors1:\n coauthors2, count2 = self.mycoauthorsbyyear(i, 2016)\n for n in coauthors2:\n coauthors3, count3 = self.mycoauthorsbyyear(n, 2016)\n if userY in coauthors3:\n result.append([[i, n], [count1[coauthors1.index(i)],\n count2[coauthors2.index(n)], count3[coauthors3.\n index(userY)]]])\n <mask token>\n\n def getcontentbyyear(self, userIdx, year):\n titleIdx = self.authortitlesidx[userIdx]\n titleIdx = np.array(titleIdx)\n years = [self.years[i] for i in titleIdx]\n years = np.array(years)\n years.sort()\n years = years[::-1]\n yearIdx = np.where(years <= year)[0]\n content = [self.titles[i] for i in titleIdx[yearIdx]]\n return content\n <mask token>\n\n def getVenue(self, userIdx):\n venues = self.authorbooktitleidx[userIdx]\n c = Counter(venues)\n frqvenues = c.most_common()\n return frqvenues[0][0]\n <mask token>\n\n def contentsimilarity(self, userX, userY, year):\n contentX = self.getcontentbyyear(userX, year)\n if not contentX:\n return -1\n contentX = contentX[0:10]\n contentY = self.getcontentbyyear(userY, year)\n if not contentY:\n return -1\n contentY = contentY[0:10]\n contents = []\n for i in contentX:\n contents.extend(i.split(' '))\n lenx = len(contents)\n for i in contentY:\n contents.extend(i.split(' '))\n stemmer = nltk.stem.PorterStemmer()\n stems = 
[stemmer.stem(t) for t in contents]\n newcontentX = stems[0:lenx]\n newcontentY = stems[lenx:]\n vectorizer = CountVectorizer()\n v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(\n newcontentY)])\n cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0\n ]\n return cosinesimilarity\n <mask token>\n\n def networksimilarity(self, userX, userY, year):\n coauthors, c = self.mycoauthorsbyyear(userX, year)\n edgesFG = len(coauthors)\n n = 0\n for i in coauthors:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(coauthors[n:]))\n edgesFG = edgesFG + len(con)\n n = n + 1\n weakties, cx, cy = self.weakties(userX, userY, year)\n edgesMFG = 2 * len(weakties)\n n = 0\n for i in weakties:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(weakties[n:]))\n edgesMFG = edgesMFG + len(con)\n n = n + 1\n if edgesFG * edgesMFG:\n ns = np.log(edgesMFG) / np.log(2 * edgesFG)\n else:\n ns = -1\n return ns, edgesFG, edgesMFG, cx, cy\n <mask token>\n\n def textnormalizing(self, text):\n c = 0\n for i in text:\n if i[-1] == 's':\n ii = i[:-1]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-2:] == 'es':\n ii = i[:-2]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-3:] == 'ies':\n ii = i[:-3] + 'y'\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-3:] == 'ing':\n ii = i[:-3]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n ii = i[:-4]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n ii = i[:-3] + 'e'\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n c = c + 1\n return text\n <mask token>\n <mask token>\n\n def radiusofcluster(self, labels, nth, dismatrix):\n idx = np.where(labels == nth)[0]\n dis = dismatrix[idx, nth]\n self.mindis = min(dis)\n self.maxdis = max(dis)\n self.radius = self.maxdis\n <mask token>\n\n def showcontents(self, labels, nth, allcontents):\n contents = []\n idx = np.where(labels == nth)\n idx = np.array(idx)\n idx = idx.flatten()\n for i in idx:\n contents.append(allcontents[i])\n return contents\n <mask token>\n\n def digstring(self, s):\n for i in s:\n if i.isdigit():\n return True\n return False\n <mask token>\n\n def distance(self, a, b):\n if scipy.sparse.issparse(a):\n a = a.toarray()\n a = a[0]\n if scipy.sparse.issparse(b):\n b = b.toarray()\n b = b[0]\n a = np.array(a)\n b = np.array(b)\n return np.sqrt(sum(np.square(a - b)))\n <mask token>\n\n def updatecoauthornetworkV2(self, net, authors, namelist):\n nameidx = []\n for name in namelist:\n nameidx.append(authors.index(name))\n for i in nameidx:\n tmpidx = nameidx[:]\n tmpidx.remove(i)\n if not net:\n net.append(tmpidx)\n elif i > len(net) - 1:\n net.append(tmpidx)\n else:\n net[i].extend(tmpidx)\n <mask token>\n\n def per_org_label(self):\n f = codecs.open(self.f_perorglabel, 'r', 'utf-8')\n labels = {}\n for line in f:\n items = line.split()\n labels[items[0]] = items[1]\n f.close()\n self.labels = labels\n <mask token>\n\n def mention_network(self):\n f = codecs.open(self.f_mentionnetwork, 'r', 'utf-8')\n source = ''\n network = {}\n for line in f:\n items = line.split('\"')\n if source == '':\n source = items[0]\n target = {}\n if source == items[0]:\n target[items[1]] = int(items[2])\n else:\n network[source] = target\n source = items[0]\n target = {items[1]: int(items[2])}\n if source != '':\n network[source] = target\n f.close()\n return network\n <mask token>\n\n def docluster(self):\n tokenizer = RegexpTokenizer('\\\\w+')\n self.rawtitles = []\n self.titles = []\n self.allcorp = []\n sw = 
set(nltk.corpus.stopwords.words('english'))\n self.debugmsg('start titles \\n', 0)\n f = codecs.open(self.f_titles, 'r', 'utf-8')\n for line in f:\n if line[-1] == '\\n':\n line = line[:-1]\n self.rawtitles.append(line)\n line = line.lower()\n tokenlist = tokenizer.tokenize(line)\n self.allcorp += tokenlist\n tokenlist = ' '.join([w for w in tokenlist if (w.lower() not in\n sw) & ~self.digstring(w)])\n self.titles.append(tokenlist)\n f.close()\n self.authordict = {}\n self.authors = []\n self.authorcontents = []\n self.authorrawcontents = []\n self.authortitlesidx = []\n self.authorbooktitleidx = []\n self.coathors = []\n self.coauthorsidx = []\n self.mentionnetwork = {}\n self.id_name = {}\n self.coauthornetV2 = []\n self.mentionnetwork = self.mention_network()\n self.debugmsg('start year \\n', 0)\n self.years = []\n f = codecs.open(self.f_years, 'r', 'utf-8')\n for line in f:\n if line[-1] == '\\n':\n line = line[:-1]\n if line == '':\n line = 0\n timestamp = time.mktime(parser.parse(line).timetuple())\n self.years.append(int(timestamp))\n f.close()\n self.debugmsg('start booktitle \\n', 0)\n self.booktitle = []\n f = codecs.open(self.f_booktitle, 'r', 'utf-8')\n for line in f:\n line = line[:-1]\n self.booktitle.append(line)\n f.close()\n self.debugmsg('start authors \\n', 0)\n i = 0\n m = 0\n f = codecs.open(self.f_authors, 'r', 'utf-8')\n for line in f:\n line = line[:-1]\n newline = line.split(',')\n namelist = newline\n self.coathors.append(namelist)\n authoridx = []\n for name in newline:\n idx = self.authordict.get(name)\n if idx is not None:\n self.authortitlesidx[idx].append(i)\n self.authorbooktitleidx[idx].append(i)\n self.authorcontents[idx] = self.authorcontents[idx\n ] + ' ' + self.titles[i]\n self.authorrawcontents[idx] = self.authorrawcontents[idx\n ] + ' ' + self.rawtitles[i]\n else:\n self.authors.append(name)\n self.authordict[name] = m\n self.authorcontents.append(self.titles[i])\n self.authorrawcontents.append(self.rawtitles[i])\n self.authortitlesidx.append([i])\n self.authorbooktitleidx.append([i])\n idx = m\n m = m + 1\n authoridx.append(idx)\n self.coauthorsidx.append(authoridx)\n i = i + 1\n f.close()\n f = codecs.open(self.f_authors_id, 'r', 'utf-8')\n i = 0\n preline = ''\n for line in f:\n if preline != line:\n if line[-1] == '\\n':\n newline = line[:-1]\n self.id_name[self.authors[i]] = newline\n preline = line\n i = i + 1\n else:\n continue\n f.close()\n self.per_org_label()\n self.vectorizer = CountVectorizer(max_df=0.95, min_df=1, stop_words\n ='english')\n X = self.vectorizer.fit_transform(self.authorcontents)\n Xarray = X\n transformer = TfidfTransformer()\n self.tfidf = transformer.fit_transform(Xarray)\n self.tfidfarray = self.tfidf\n self.featurenames = self.vectorizer.get_feature_names()\n <mask token>\n\n def recommendationV3(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' +\n str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n else:\n name = name.decode('utf-8')\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n self.myidx = authorIdx\n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n self.debugmsg('start distance computing \\n', 0)\n self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.\n tfidfarray, featuretfidf, 0)\n self.debugmsg('end distance computing \\n', 0)\n self.debugmsg('start otsuifilter\\n', 
0)\n splitidx = self.otsufilter(self.closeauthordis)\n self.debugmsg('end otsufilter\\n', 0)\n recommendations = []\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n remdinfo = self.getremdinfo(i)\n if remdinfo and not remdidx.count(i):\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n i = i + 1\n if i == len(self.closeauthordis) or backwardcount > 1:\n if backwardcount == 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n random.shuffle(recommendations)\n self.result = OrderedDict([('name', name), ('recommendations',\n recommendations)])\n self.debugmsg('end recommendationV3 \\n', 0)\n return self.result\n <mask token>\n\n def recommendationV4(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' +\n str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n else:\n name = name.decode('utf-8')\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n self.myidx = authorIdx\n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n self.debugmsg('start distance computing \\n', 0)\n self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.\n tfidfarray, featuretfidf, 0)\n self.debugmsg('end distance computing \\n', 0)\n self.debugmsg('start otsuifilter\\n', 0)\n splitidx = self.otsufilter(self.closeauthordis)\n self.debugmsg('end otsufilter\\n', 0)\n recommendations = []\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n remdinfo = self.getremdinfoV2(i)\n if remdinfo and not remdidx.count(i):\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n i = i + 1\n if i == len(self.closeauthordis) or backwardcount > 1:\n if backwardcount == 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n random.shuffle(recommendations)\n self.result = OrderedDict([('name', name), ('recommendations',\n recommendations)])\n self.debugmsg('end recommendationV4 \\n', 0)\n return self.result\n <mask token>\n\n def nNNlinesearch(self, space, p, n):\n closeauthordis = []\n closeauthordis = pairwise_distances(space, p, metric='cosine')\n closeauthordis = closeauthordis.flatten()\n closeauthors = closeauthordis.argsort()\n closeauthordis.sort()\n if n > 0:\n closeauthors = closeauthors[0:n]\n closeauthordis = closeauthordis[0:n]\n idx = np.where(closeauthors == self.myidx)[0][0]\n closeauthors = np.delete(closeauthors, idx)\n closeauthordis = np.delete(closeauthordis, idx)\n return closeauthors, closeauthordis\n <mask token>\n\n def otsufilter(self, tdis):\n trd = np.zeros(3, int)\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis > t1])\n trd[1] = len(tdis[tdis < t1]) + int((len(tdis[tdis < t2]) - len(\n tdis[tdis < t1])) / 2) - 1\n trd[2] = len(tdis) - 3\n return trd\n <mask token>\n\n def getremdinfo(self, clsidx):\n remdidx = self.closeauthors[clsidx]\n recentpub = self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n [coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)\n if idx.count(self.myidx):\n return []\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), 
('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def getremdinfoV2(self, clsidx):\n remdidx = self.closeauthors[clsidx]\n username = self.authors[self.myidx]\n recentpub = self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n mentionlist = self.mentionnetwork[username]\n if name in mentionlist:\n return []\n remdid = self.id_name[name]\n if self.labels[remdid] == 'org':\n return []\n coauthors = self.mycoauthorsV4bymentionlist(name)\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), ('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def updatedistance(self):\n deg1con = self.coauthornet[self.myidx, self.closeauthors]\n deg1conidx = np.where(deg1con > 0)[0]\n deg2conidx = np.where(deg1con == 0)[0]\n deg2con = np.zeros(deg2conidx.size)\n for i in self.closeauthors[deg1conidx]:\n deg2con = deg2con + self.coauthornet[i, self.closeauthors[\n deg2conidx]]\n deg1con = deg1con[deg1con > 0]\n deg1con = deg1con / max(deg1con)\n return deg1conidx, deg1con, deg2conidx, deg2con\n <mask token>\n\n def filteredrecommendations(self, n):\n recommendations = []\n self.filteridx = []\n self.filteredauthors = []\n i = 0\n for name in self.recommendauthor:\n [coauthors, idx, c] = self.mycoauthorsV4(name)\n if idx.count(self.myidx):\n i = i + 1\n continue\n recentpub = self.resentpublications(name)\n if not recentpub:\n i = i + 1\n continue\n self.filteredauthors.append(name)\n researchtopic = []\n researchtopic.append(OrderedDict([('topic', 'TBD')]))\n recommendations.append(OrderedDict([('name', name), (\n 'relevancy', self.closeauthordis[i]), ('coAuthors',\n coauthors), ('researchTopics', researchtopic), (\n 'recentPublications', recentpub)]))\n self.filteridx.append(i)\n i = i + 1\n if len(self.filteridx) == n:\n break\n return recommendations\n <mask token>\n\n def thresholdrecommendations(self, remds, n):\n thredremd = []\n self.trd = np.zeros(3)\n tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis > t1])\n self.trd[1] = len(tdis[tdis < t1])\n self.trd[2] = len(tdis) - len(tdis[tdis > t2])\n for i in range(3):\n for j in range(int(n / 3)):\n k = int(self.trd[i] + j)\n name = remds[k]['name']\n researchtopic = self.keyword(name)\n remds[k]['researchTopics'] = researchtopic\n thredremd.append(remds[k])\n return thredremd\n <mask token>\n\n def filteredcloseauthordis(self):\n return self.closeauthordis[self.filteridx]\n <mask token>\n\n def save_json(self, filename):\n PROJECT_DIRECTORY = 'output/project/' + project_name + '/'\n with io.open(PROJECT_DIRECTORY + filename + '.json', 'w', encoding=\n 'utf-8') as outfile:\n outfile.write(json.dumps(self.result, ensure_ascii=False))\n",
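Step-4 finally exposes the retrieval core: docluster() concatenates each author's titles into one document and converts the corpus to TF-IDF (CountVectorizer feeding TfidfTransformer), while nNNlinesearch() ranks every author by cosine distance to the query vector and removes the query author's own row with np.delete. A compact sketch of that flow on toy profiles; the three strings and the query index are assumptions:

import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import pairwise_distances

profiles = ['deep learning vision', 'graph mining networks',
            'neural networks learning']  # toy author profiles (assumption)
counts = CountVectorizer(stop_words='english').fit_transform(profiles)
tfidf = TfidfTransformer().fit_transform(counts)

me = 0  # query author index (assumption)
d = pairwise_distances(tfidf, tfidf[me], metric='cosine').flatten()
order = d.argsort()
order = order[order != me]  # drop self, as nNNlinesearch() does via np.delete
print(order, d[order])      # candidate authors, nearest first

The Otsu split from otsufilter() then picks starting offsets inside this ranked list, so recommendations are drawn from near, mid, and far relevancy bands rather than only the top of the ranking.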
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 12 16:38:22 2017\n\n@author: secoder\n\"\"\"\nimport io\nimport random\nimport nltk\nfrom nltk.tokenize import RegexpTokenizer\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\n\nfrom collections import OrderedDict\nfrom collections import Counter\n\nfrom sklearn.metrics import pairwise_distances\n\nimport numpy as np\nimport scipy\n\nimport json\nimport codecs\n\nfrom dateutil import parser\nimport time\nimport datetime\n\nimport operator\n\n#import cPickle as pickle\n#\n#import traceback\n\nfrom skimage import filters\n\nimport unicodedata as ud\n\nfrom config import project_name\n\n\nclass recommendationsys:\n \n def __init__(self, nyear):\n \n # by default we will filter out those don't have publications in recent 10 years\n self.activityyear = 10\n\n self.debug = 0 \n self.nremd = 3\n \n #----------------------\n PROJECT_DIRECTORY = 'output/project/' + project_name\n\n self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'\n self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'\n self.f_years = PROJECT_DIRECTORY + '/years_target.txt'\n self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'\n self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'\n self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'\n self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'\n\n \n self.npaper = 10\n self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())\n self.keywordthreshold = 10\n #---------------------- \n \n self.debugmsg('start init', 0)\n self.docluster()\n self.initNLTKConditionalFreqDist()\n \n self.filterN = len(self.authors)\n self.debugmsg('end init\\n', 0)\n \n \n \"\"\"\n \"\"\"\n def debugmsg(self, msg, lvl):\n if self.debug <= lvl:\n print(msg)\n \n \"\"\"\n \"\"\" \n def resentpublicationsidx(self,authoridx):\n #print 'start recentpublications\\n'\n resentpub = []\n \n idx = self.authortitlesidx[authoridx]\n \n # sort by years\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n \n # if the most recent publication is before the 'nyears' \n # remove this one from the list\n if (int(self.years[idx[0]]) < self.nyear) or (len(idx) < self.npaper):\n return resentpub\n # ---- \n \n for i in idx:\n authorsjson = [] \n \n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([(\"name\",author)]))\n \n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\"%Y-%m-%d %H:%M:%S\")\n resentpub.append(OrderedDict([(\"title\",self.rawtitles[i]),(\"authors\",authorsjson), (\"year\",date),(\"publicationVenue\",self.booktitle[i])]))\n\n #print 'end recentpublications\\n'\n return resentpub\n \n \n \"\"\"\n \"\"\"\n def resentpublications(self,name):\n #print 'start recentpublications\\n'\n resentpub = []\n\n #if isinstance(name, unicode): for python 2.7\n if isinstance(name, str):\n #idx = self.authors.index(name)\n idx = self.authordict.get(name)\n else:\n #idx = self.authors.index(name.decode('utf-8')) \n idx = self.authordict.get(name.decode('utf-8'))\n \n idx = self.authortitlesidx[idx]\n \n # sort by years\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n \n # if the most recent publication is before the 'nyears' \n # remove this one from the list\n if (int(self.years[idx[0]]) < 
self.nyear) or (len(idx) < self.npaper):\n return resentpub\n # ---- \n \n for i in idx:\n authorsjson = [] \n \n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([(\"name\",author)]))\n \n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\"%Y-%m-%d %H:%M:%S\")\n resentpub.append(OrderedDict([(\"title\",self.rawtitles[i]),(\"authors\",authorsjson), (\"year\",date),(\"publicationVenue\",self.booktitle[i])]))\n\n #print 'end recentpublications\\n'\n return resentpub\n \n def initNLTKConditionalFreqDist(self):\n self.debugmsg('start initNLTK CFD\\n', 0)\n pairs=[]\n\n# for title in self.titles:\n# pairs = pairs + list(nltk.bigrams(title.split()))\n\n pairs = nltk.bigrams(self.allcorp)\n \n self.cfd = nltk.ConditionalFreqDist(pairs)\n self.debugmsg('end initNLTK CFD\\n', 0)\n \n def keyword(self,name):\n #print 'start keyword\\n'\n if isinstance(name, str):\n #idx = self.authors.index(name)\n idx = self.authordict.get(name)\n else:\n #idx = self.authors.index(name.decode('utf-8')) \n idx = self.authordict.get(name.decode('utf-8'))\n \n# content = self.authorcontents[idx].lower()\n# \n# # get the unique words from the content\n# content = set(content.split())\n# \n# i = []\n# for c in content:\n# count = self.vectorizer.vocabulary_.get(c, 0) \n# i.append(count)\n# \n# i = np.array(i)\n# i = i.argsort()\n# content = np.array(list(content))\n# content = content[i]\n# content = content[-3:]\n# keywords = list(reversed(content)) \n# \n contentjson = []\n# for topic in keywords:\n# contentjson.append(OrderedDict([(\"topic\", topic)]))\n \n # bigram keywords -------------\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n# #print 'start bigram\\n'\n# \n# userpairs = list(nltk.bigrams(content))\n# \n# \n# # do the same on raw titles \n# \n# keywordsraw=[]\n# for p in userpairs:\n# pairsdic=self.cfd[p[0]]\n# n=pairsdic[p[1]]\n# if n>=2:\n# keywordsraw.append((p,n))\n# \n# uniqkeywords=set(keywordsraw)\n# keywords=sorted(uniqkeywords, key=lambda keywords: keywords[1])\n# \n# finalkeywords=[]\n# for p in keywords:\n# #c=wn.synsets(p[0][1])[0].pos()\n# if (p[1]>=2):\n# finalkeywords.append((' '.join(p[0]),p[1],keywordsraw.count(p)))\n# \n# finalkeywords.reverse()\n \n for topic in finalkeywords:\n #print topic[0]\n contentjson.append(OrderedDict([(\"topic\", topic[0])]))\n \n #print 'end bigram\\n'\n #print 'end keyword\\n'\n return contentjson\n \n \"\"\"\n \"\"\"\n def keywordbyidx(self,idx):\n \n contentjson = []\n \n # bigram keywords -------------\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n \n for topic in finalkeywords:\n #print topic[0]\n contentjson.append(OrderedDict([(\"topic\", topic[0])]))\n \n return contentjson\n\n \n \"\"\"\n \"\"\" \n def bigramkeywords(self, text):\n #print 'start bigramkeyword\\n'\n # bigram keywords -------------\n #content = text.lower().split()\n content = text\n #print 'start bigram\\n'\n \n userpairs = list(nltk.bigrams(content))\n \n \n # in case there is no valid keywords due to our requirement\n # the one with highest occurrence will be pick from the backup plan \n keywordsbackup = []\n # the valid keywords\n keywords=[]\n for p in userpairs:\n pairsdic=self.cfd[p[0]]\n n=pairsdic[p[1]]\n if n>=self.keywordthreshold:\n keywords.append((p,n))\n keywordsbackup.append((p,n))\n\n finalkeywords=[]\n \n uniqkeywords=set(keywords)\n keywords=sorted(uniqkeywords, key=lambda keywords: keywords[1])\n for p in keywords:\n if (p[1]>=25) 
or (userpairs.count(p[0])>1):\n finalkeywords.append([' '.join(p[0]),p[1],userpairs.count(p[0])])\n \n finalkeywords.reverse() \n \n \n \n if not finalkeywords:\n # found valid keywords\n uniqkeywords=set(keywordsbackup)\n keywordsbackup=sorted(uniqkeywords, key=lambda keywordsbackup: keywordsbackup[1])\n finalkeywords.append([' '.join(keywordsbackup[-1][0]), keywordsbackup[-1][1],userpairs.count(keywordsbackup[0])])\n else: \n # deal with plural\n pluralidx = self.findpluralbigram(finalkeywords)\n \n self.removepluralbigram(finalkeywords,pluralidx)\n \n \n #print 'end bigramkeyword\\n'\n return finalkeywords\n \n \"\"\"\n \"\"\"\n def removepluralbigram(self, bigram, pluralidx):\n # if pluralidx is emtpy, just return\n if not pluralidx:\n print('empty')\n return \n \n delcount = 0\n pren = 0 \n\n for i in pluralidx:\n #delcount = 0\n for n in i[1:]:\n if n > pren:\n n = n - delcount\n\n bigram[i[0]][1] = bigram[i[0]][1] + bigram[n][1]\n bigram.remove(bigram[n])\n delcount = delcount + 1\n pren = n\n \n \n \"\"\"\n \"\"\"\n def findpluralbigram(self, keywordsinfo):\n c = []\n for i in keywordsinfo:\n t = i[0].split()\n t1 = ''\n for n in t:\n if n[-1] == 's':\n n = n[:-1]\n t1 = t1 + n\n\n c.append(t1)\n \n uniqbigram = list(set(c))\n pluralidx = []\n \n for i in uniqbigram:\n count = c.count(i)\n if count > 1:\n cc = []\n for n in range(len(c)):\n if i == c[n]:\n cc.append(n)\n pluralidx.append(cc)\n \n return pluralidx\n \"\"\"\n \"\"\"\n def mycoauthorsV2(self, name):\n if isinstance(name, str):\n #idx = self.authors.index(name)\n idx = self.authordict.get(name)\n else:\n #idx = self.authors.index(name.decode('utf-8')) \n idx = self.authordict.get(name.decode('utf-8'))\n \n coauthorship = self.coauthornetV2[idx]\n uniqcoauthors = np.array(list(set(coauthorship)))\n coauthorcount = []\n for i in uniqcoauthors:\n coauthorcount.append(coauthorship.count(i))\n \n countidx = np.argsort(coauthorcount)\n # reverse it to descend order\n countidx = countidx[::-1]\n \n coauthorcount = np.array(coauthorcount)\n \n result = []\n for i in countidx:\n result.append(OrderedDict([(\"name\",self.authors[uniqcoauthors[i]]),(\"cooperationCount\",coauthorcount[i])]))\n return (result,list(uniqcoauthors[countidx]),list(coauthorcount[countidx]))\n \n \n \n \"\"\"\n \"\"\"\n def mycoauthorsV3(self, name):\n if isinstance(name, str):\n #idx = self.authors.index(name)\n idx = self.authordict.get(name)\n else:\n #idx = self.authors.index(name.decode('utf-8')) \n idx = self.authordict.get(name.decode('utf-8'))\n \n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n # remove itself\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n \n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n \n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n \n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([(\"name\",self.authors[unicoauthors[-(i+1)]]),(\"cooperationCount\",coauthorcount[-(i+1)])]))\n return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))\n\n \"\"\"\n \"\"\"\n def mycoauthorsV4(self, name):\n \n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n \n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n # remove itself\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n \n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n \n unicoauthors = 
unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n \n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([(\"name\",self.authors[unicoauthors[-(i+1)]]),(\"cooperationCount\",coauthorcount[-(i+1)])]))\n \n \n return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))\n \n \"\"\"\n \"\"\"\n def mycoauthorsV4byidx(self, idx):\n \n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n # remove itself\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n \n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n \n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n \n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([(\"name\",self.authors[unicoauthors[-(i+1)]]),(\"cooperationCount\",coauthorcount[-(i+1)])]))\n \n \n return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))\n \n \"\"\"\n \"\"\"\n def mycoauthorsV4bymentionlist(self, name):\n \n if name in self.mentionnetwork.keys():\n mentiondict = self.mentionnetwork[name]\n else:\n mentiondict ={'None':0}\n \n \n result = []\n # sort by mention counts\n sorted_mentiondict = sorted(mentiondict.items(), key=operator.itemgetter(1), reverse=True)\n \n for i in sorted_mentiondict:\n result.append(OrderedDict([(\"name\",i[0]),(\"cooperationCount\",i[1])]))\n \n return result\n \"\"\"\n \"\"\"\n def mycoauthorsbyyear(self, idx, year):\n \n years = np.array(self.years)\n\n yearidx = np.where(years <= year)[0]\n coauthorsidx = [ self.coauthorsidx[i] for i in yearidx]\n \n coauthors = []\n for i in coauthorsidx:\n if idx in i:\n # remove itself\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n \n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n \n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n \n return (list(unicoauthors[::-1]),list(coauthorcount[::-1]))\n \n \"\"\"\n find the new coauthors for a user in current year against previous year\n example: mynewcoauthors(23, 2014, 2015) will returen the new coauthors\n in 2015 regarding the year 2014 for user 23. 
23 is the index of a user\n \"\"\"\n def mynewcoauthors(self, userIdx, yearPre, yearCur):\n coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)\n\n coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)\n\n newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)\n \n return newCoauthors\n\n \"\"\"\n Call the weakties after mynewcoauthors() to find the common nodes \n between a user and his/her coming new coauthors in the year before\n their coauthorship\n \"\"\"\n def weakties(self, userX, userY, year):\n \n coauthornetX, cx = self.mycoauthorsbyyear(userX, year)\n \n # if userX and userY already have a strong ties, just return []\n if userY in coauthornetX:\n return ([], [], [])\n \n coauthornetY, cy = self.mycoauthorsbyyear(userY, year)\n \n # find the common nodes \n weaktienodes = list(set(coauthornetX).intersection(coauthornetY))\n \n nodescountX = []\n nodescountY = []\n \n if weaktienodes:\n for i in weaktienodes:\n nodescountX.append(cx[coauthornetX.index(i)])\n nodescountY.append(cy[coauthornetY.index(i)])\n \n \n return (weaktienodes, nodescountX, nodescountY)\n \n \"\"\"\n 2nd hoop connection\n \"\"\"\n def secondhoopties(self, userX, userY, year):\n result = []\n coauthors1, count1 = self.mycoauthorsbyyear(userX, 2016)\n\n for i in coauthors1:\n coauthors2, count2 = self.mycoauthorsbyyear(i, 2016)\n for n in coauthors2:\n coauthors3, count3 = self.mycoauthorsbyyear(n, 2016)\n if userY in coauthors3:\n result.append([[i,n],[count1[coauthors1.index(i)],count2[coauthors2.index(n)], count3[coauthors3.index(userY)]]])\n\n\n \"\"\"\n Get all the content(paper titles) of the userIdx before \n the 'year'(include the year) \n \"\"\"\n def getcontentbyyear(self, userIdx, year):\n titleIdx = self.authortitlesidx[userIdx]\n\n titleIdx = np.array(titleIdx)\n\n years = [self.years[i] for i in titleIdx]\n\n years = np.array(years)\n \n # sort the years and put the latest year first\n # then the content will also be sorted by recent paper first\n years.sort()\n years = years[::-1]\n\n yearIdx = np.where(years<=year)[0]\n \n content = [self.titles[i] for i in titleIdx[yearIdx]]\n \n return content\n\n \"\"\"\n return the most frequent participated venue of a user\n \"\"\"\n def getVenue(self, userIdx):\n venues = self.authorbooktitleidx[userIdx]\n c = Counter(venues)\n frqvenues = c.most_common()\n \n return frqvenues[0][0]\n\n \"\"\"\n only consider the recent 10 papers\n \"\"\"\n def contentsimilarity(self, userX, userY, year):\n contentX = self.getcontentbyyear(userX, year)\n if not contentX:\n return -1\n contentX = contentX[0:10]\n \n contentY = self.getcontentbyyear(userY, year)\n if not contentY:\n return -1\n contentY = contentY[0:10]\n \n # build the corpus of all the content\n contents = []\n \n \n for i in contentX:\n contents.extend(i.split(' '))\n \n lenx = len(contents)\n \n for i in contentY:\n contents.extend(i.split(' '))\n \n # normalize the different forms of words \n stemmer = nltk.stem.PorterStemmer()\n stems = [stemmer.stem(t) for t in contents] \n \n # reconstruct content for userX and userY use the normalized words\n newcontentX = stems[0:lenx]\n newcontentY = stems[lenx:]\n\n\n \n vectorizer = CountVectorizer()\n v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(newcontentY)])\n \n cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0]\n \n return cosinesimilarity\n\n \"\"\"\n network similarity\n \"\"\"\n def networksimilarity(self, userX, userY, year):\n \n # first calculate FG(userX) according to paper\n # User 
similarities on social networks\n coauthors, c = self.mycoauthorsbyyear(userX, year)\n \n edgesFG = len(coauthors)\n \n n = 0\n for i in coauthors:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(coauthors[n:]))\n edgesFG = edgesFG + len(con)\n n = n + 1\n \n # second, calculate MFG(userX, userY)\n weakties, cx, cy = self.weakties(userX, userY, year)\n \n edgesMFG = 2 * len(weakties)\n \n n = 0\n for i in weakties:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(weakties[n:]))\n edgesMFG = edgesMFG + len(con)\n n = n + 1\n \n # last calculate the network similarity\n \n if edgesFG * edgesMFG:\n ns = np.log(edgesMFG)/np.log(2 * edgesFG)\n else:\n ns = -1\n \n return (ns, edgesFG, edgesMFG, cx, cy)\n\n \"\"\"\n text processing, normalize the words to their prototype, such as \n plural form, progressive, etc\n \"\"\"\n def textnormalizing(self, text):\n #l = len(text)\n c = 0\n for i in text:\n # network - networks\n if i[-1] == 's':\n ii = i[:-1]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n \n # bus - buses\n if i[-2:] == 'es':\n ii = i[:-2]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n \n # study - studies \n if i[-3:] == 'ies':\n ii = i[:-3] + 'y'\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n \n # network - networking\n # get - getting\n # explore - exploring \n if i[-3:] == 'ing':\n ii = i[:-3]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n \n ii = i[:-4]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n \n ii = i[:-3] + 'e'\n if ii in text:\n text[c] = c + 1\n continue\n \n c = c + 1\n \n return text\n \n \"\"\"\n \"\"\"\n\n \n \"\"\"\n radius of the cluster\n \"\"\"\n def radiusofcluster(self, labels, nth, dismatrix):\n idx = np.where(labels == nth)[0]\n \n dis = dismatrix[idx,nth]\n \n self.mindis = min(dis)\n self.maxdis = max(dis)\n \n self.radius = self.maxdis\n \n \n \n # return [mindis, maxdis, radius]\n \n \n \"\"\"\n show contents in the same cluster\n \"\"\"\n def showcontents(self,labels, nth, allcontents):\n contents = []\n idx = np.where(labels == nth)\n idx = np.array(idx)\n idx = idx.flatten()\n for i in idx:\n contents.append(allcontents[i])\n \n return contents\n \n \"\"\"\n check if there is digtial in the string\n \"\"\"\n def digstring(self,s):\n for i in s:\n if i.isdigit():\n return True\n return False\n \n \"\"\"\n compute the distance between two points a and b\n \"\"\"\n def distance(self,a,b):\n\n if scipy.sparse.issparse(a):\n a = a.toarray()\n a = a[0]\n \n if scipy.sparse.issparse(b):\n b = b.toarray()\n b = b[0]\n \n a = np.array(a);\n b = np.array(b);\n return np.sqrt(sum(np.square(a - b)))\n \n \"\"\"\n \"\"\"\n def updatecoauthornetworkV2(self,net,authors,namelist):\n nameidx = []\n for name in namelist:\n nameidx.append(authors.index(name))\n \n for i in nameidx:\n tmpidx = nameidx[:]\n tmpidx.remove(i)\n # if net is empty\n if not net:\n net.append(tmpidx)\n else:\n if i>len(net)-1:\n net.append(tmpidx)\n else:\n net[i].extend(tmpidx)\n\n \"\"\"\n load the person or organization label\n \"\"\"\n def per_org_label(self):\n f = codecs.open(self.f_perorglabel,'r','utf-8')\n labels = {}\n for line in f:\n items = line.split()\n labels[items[0]] = items[1] \n f.close()\n self.labels = labels\n\n \"\"\"\n \"\"\"\n def mention_network(self):\n f = codecs.open(self.f_mentionnetwork,'r','utf-8')\n source=''\n network = {}\n for line in f:\n items = line.split('\"')\n if source == '':\n source = items[0]\n target = 
{}\n\t\t\n if source == items[0]:\n target[items[1]] = int(items[2])\n else:\n network[items[0]] = target\n source = items[0]\n target = {}\n \n f.close()\n return network\n \n \n \"\"\"\n \"\"\"\n def docluster(self):\n tokenizer = RegexpTokenizer(r'\\w+')\n\n\n self.rawtitles = []\n self.titles = []\n self.allcorp = []\n\n sw = set(nltk.corpus.stopwords.words('english'))\n \n \n self.debugmsg('start titles \\n', 0)\n f = codecs.open(self.f_titles,'r','utf-8')\n for line in f: \n # remove the '\\n' at the end\n if line[-1] == '\\n':\n line = line[:-1]\n self.rawtitles.append(line)\n line = line.lower()\n tokenlist = tokenizer.tokenize(line)\n \n self.allcorp += tokenlist\n #for corp in newline:\n # self.allcorp.append(corp)\n \n # collect all the words except digtals and stopwords\n tokenlist = ' '.join([w for w in tokenlist if (w.lower() not in sw) & ~(self.digstring(w))])\n self.titles.append(tokenlist)\n f.close()\n # end use codecs\n \n # filename = './CHI/CHI_authors.txt'\n self.authordict = {}\n self.authors = []\n self.authorcontents = []\n self.authorrawcontents = []\n self.authortitlesidx = []\n self.authorbooktitleidx = []\n self.coathors = []\n self.coauthorsidx = [] # undirect link, etc, dblp coauthorship network\n self.mentionnetwork = {} # direct link, etc,tweet mention network\n self.id_name = {}\n \n\n self.coauthornetV2 = []\n \n # readin the mention network\n self.mentionnetwork = self.mention_network()\n \n # read years\n self.debugmsg('start year \\n', 0)\n self.years = []\n \n f = codecs.open(self.f_years,'r','utf-8')\n for line in f:\n # remive \\n\n if line[-1] == '\\n':\n line = line[:-1]\n if line == '':\n line = 0\n #line = line.split()\n #year = line[-1]\n timestamp = time.mktime(parser.parse(line).timetuple())\n self.years.append(int(timestamp))\n f.close()\n \n # read conference \n self.debugmsg('start booktitle \\n', 0)\n self.booktitle = []\n \n f = codecs.open(self.f_booktitle,'r','utf-8')\n for line in f:\n # remove the \\n at the end\n line = line[:-1]\n self.booktitle.append(line)\n f.close()\n \n # read authors\n self.debugmsg('start authors \\n', 0)\n i = 0\n m = 0\n f = codecs.open(self.f_authors,'r','utf-8')\n\n for line in f:\n # remove the last '\\n' \n line = line[:-1]\n # split the authors by ','\n newline = line.split(\",\")\n namelist = newline\n self.coathors.append(namelist) \n \n \n authoridx = []\n \n for name in newline: \n \n # dictonary version \n idx = self.authordict.get(name)\n if idx is not None:\n self.authortitlesidx[idx].append(i)\n self.authorbooktitleidx[idx].append(i)\n\n self.authorcontents[idx] = self.authorcontents[idx] + ' ' + self.titles[i]\n self.authorrawcontents[idx] = self.authorrawcontents[idx] + ' ' + self.rawtitles[i]\n else:\n self.authors.append(name)\n\n self.authordict[name] = m\n\n self.authorcontents.append(self.titles[i])\n self.authorrawcontents.append(self.rawtitles[i])\n \n self.authortitlesidx.append([i])\n self.authorbooktitleidx.append([i])\n\n idx = m\n m = m + 1\n authoridx.append(idx)\n # end dict version\n \n self.coauthorsidx.append(authoridx)\n i = i + 1\n\n f.close()\n\n \n f = codecs.open(self.f_authors_id,'r','utf-8')\n i = 0\n preline = ''\n for line in f:\n if preline != line:\n #print(i)\n #print('preline: {}, line: {}'.format(preline, line))\n if line[-1] == '\\n':\n newline = line[:-1]\n self.id_name[self.authors[i]] = newline\n preline = line\n i = i + 1\n \n else:\n continue\n \n #print(i)\n f.close()\n \n \n # load the per and org classification result\n self.per_org_label()\n \n 
self.vectorizer = CountVectorizer(max_df=0.95, min_df=1,stop_words='english')\n \n X = self.vectorizer.fit_transform(self.authorcontents)\n \n #Xarray = X.toarray()\n Xarray = X\n \n #plt.plot(hist)\n \n transformer = TfidfTransformer()\n \n self.tfidf = transformer.fit_transform(Xarray)\n #self.tfidfarray = self.tfidf.toarray()\n self.tfidfarray = self.tfidf\n \n self.featurenames = self.vectorizer.get_feature_names()\n\n \n \n \n \n \n \"\"\" \n \"\"\"\n def recommendationV3(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' + str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n #idx = self.authors.index(name)\n name = ud.normalize('NFC',name)\n authorIdx = self.authordict.get(name)\n else:\n #idx = self.authors.index(name.decode('utf-8')) \n name = name.decode('utf-8')\n name = ud.normalize('NFC',name)\n authorIdx = self.authordict.get(name)\n \n #content=[]\n \n \n \n self.myidx = authorIdx \n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n \n self.debugmsg('start distance computing \\n', 0)\n (self.closeauthors, self.closeauthordis) = self.nNNlinesearch(self.tfidfarray,featuretfidf,0)\n self.debugmsg('end distance computing \\n', 0)\n\n # here we can define the range to apply the otsu for recommendations\n # for example self.closeauthordis[0:1000] or all them\n self.debugmsg('start otsuifilter\\n', 0)\n splitidx = self.otsufilter(self.closeauthordis) \n self.debugmsg('end otsufilter\\n', 0) \n \n # splitidx contains the first index of three groups, close, medium, far\n # now generate three recommendations in each group\n recommendations = []\n \n # save the valid remdidx\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n # skip myself go to next one\n remdinfo = self.getremdinfo(i)\n if remdinfo and ~remdidx.count(i):\n #print remdinfo\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n #self.debugmsg(str(n) + ' ' + str(i), 0)\n \n i = i + 1\n \n # didn't find required number of valid remd untill the end\n # start backwards search\n if (i == len(self.closeauthordis)) or (backwardcount > 1):\n if backwardcount == 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n #self.debugmsg('search backward ' + str(i), 0)\n \n\n # randomlize the order of the recommendations\n random.shuffle(recommendations)\n \n self.result=OrderedDict([(\"name\",name),(\"recommendations\",recommendations)]) \n self.debugmsg('end recommendationV3 \\n', 0)\n return self.result \n \n \"\"\" \n \"\"\"\n def recommendationV4(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' + str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n #idx = self.authors.index(name)\n name = ud.normalize('NFC',name)\n authorIdx = self.authordict.get(name)\n else:\n #idx = self.authors.index(name.decode('utf-8')) \n name = name.decode('utf-8')\n name = ud.normalize('NFC',name)\n authorIdx = self.authordict.get(name)\n \n #content=[]\n \n \n \n self.myidx = authorIdx \n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n \n self.debugmsg('start distance computing \\n', 0)\n (self.closeauthors, self.closeauthordis) = self.nNNlinesearch(self.tfidfarray,featuretfidf,0)\n self.debugmsg('end distance computing \\n', 0)\n\n # here we can 
define the range to apply the otsu for recommendations\n # for example self.closeauthordis[0:1000] or all them\n self.debugmsg('start otsuifilter\\n', 0)\n splitidx = self.otsufilter(self.closeauthordis) \n self.debugmsg('end otsufilter\\n', 0) \n \n # splitidx contains the first index of three groups, close, medium, far\n # now generate three recommendations in each group\n recommendations = []\n \n # save the valid remdidx\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n # skip myself go to next one\n remdinfo = self.getremdinfoV2(i)\n if remdinfo and ~remdidx.count(i):\n #print remdinfo\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n #self.debugmsg(str(n) + ' ' + str(i), 0)\n \n i = i + 1\n \n # didn't find required number of valid remd untill the end\n # start backwards search\n if (i == len(self.closeauthordis)) or (backwardcount > 1):\n if backwardcount == 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n #self.debugmsg('search backward ' + str(i), 0)\n \n\n # randomlize the order of the recommendations\n random.shuffle(recommendations)\n \n self.result=OrderedDict([(\"name\",name),(\"recommendations\",recommendations)]) \n self.debugmsg('end recommendationV4 \\n', 0)\n return self.result \n \n \"\"\"\n find n nearset neighbors of point p in given space using linear search\n if n == 0, sort all the points in space\n \"\"\"\n def nNNlinesearch(self, space, p, n):\n closeauthordis = []\n \n\n closeauthordis = pairwise_distances(space, p, metric='cosine')\n closeauthordis = closeauthordis.flatten()\n \n closeauthors = closeauthordis.argsort()\n closeauthordis.sort()\n \n if n > 0 :\n closeauthors = closeauthors[0:n]\n closeauthordis = closeauthordis[0:n]\n \n # delete myself, cuz the distance is always 0\n idx = np.where(closeauthors == self.myidx)[0][0]\n \n closeauthors = np.delete(closeauthors, idx)\n closeauthordis = np.delete(closeauthordis, idx)\n \n return (closeauthors, closeauthordis)\n \n\n\n \"\"\"\n split the distance in to 3 groups using otsu filtering\n return the first index of each group\n \"\"\"\n def otsufilter(self, tdis):\n trd = np.zeros(3, int)\n \n #tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis>t1])\n \n # the first index of each group\n# trd[1] = len(tdis[tdis<t1])\n# trd[2] = len(tdis) - len(tdis[tdis>t2])\n \n # get the medium 3 in the medium group\n # get the last 3 in the far group\n trd[1] = len(tdis[tdis<t1]) + int((len(tdis[tdis<t2]) - len(tdis[tdis<t1]))/2)-1\n trd[2] = len(tdis) - 3 \n \n return trd\n\n \"\"\"\n extract the detail inforamtion of the recommendation by its indx in\n the closeauthors\n ignor those unqualified ones which has few papers or not active \n recently, and also remove my co-authors\n \"\"\"\n def getremdinfo(self, clsidx):\n # get the author index from closeauthors\n remdidx = self.closeauthors[clsidx]\n \n recentpub = self.resentpublicationsidx(remdidx)\n \n if recentpub:\n name = self.authors[remdidx]\n [coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)\n \n if idx.count(self.myidx):\n # remove the coauthor\n return []\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([(\"name\",name), (\"relevancy\",self.closeauthordis[clsidx]),(\"coAuthors\",coauthors),(\"researchTopics\",researchtopic), (\"recentPublications\",recentpub)])\n else:\n return []\n\n \"\"\"\n extract the detail 
inforamtion of the recommendation by its indx in\n the closeauthors\n ignor those unqualified ones which has few papers or not active \n recently, and also remove known people in the mention network\n \"\"\"\n def getremdinfoV2(self, clsidx):\n # get the author index from closeauthors\n remdidx = self.closeauthors[clsidx]\n \n username = self.authors[self.myidx]\n \n recentpub = self.resentpublicationsidx(remdidx)\n \n if recentpub:\n name = self.authors[remdidx]\n #[coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)\n mentionlist = self.mentionnetwork[username]\n \n if name in mentionlist:\n # skip the coauthor\n return []\n \n #\n remdid = self.id_name[name]\n \n if self.labels[remdid] == 'org':\n return []\n \n # get the recommendation's mention list\n coauthors = self.mycoauthorsV4bymentionlist(name)\n \n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([(\"name\",name), (\"relevancy\",self.closeauthordis[clsidx]),(\"coAuthors\", coauthors),(\"researchTopics\",researchtopic), (\"recentPublications\",recentpub)])\n else:\n return []\n\n \"\"\"\n \"\"\"\n def updatedistance(self):\n # 1st degree connection in coauthorship\n deg1con=self.coauthornet[self.myidx,self.closeauthors]\n deg1conidx = np.where(deg1con>0)[0]\n #deg1con = deg1con[deg1con>0]\n \n # 2nd degree connection in coauthorship\n deg2conidx = np.where(deg1con==0)[0]\n deg2con = np.zeros(deg2conidx.size)\n \n for i in self.closeauthors[deg1conidx]:\n deg2con = deg2con + self.coauthornet[i,self.closeauthors[deg2conidx]]\n \n deg1con = deg1con[deg1con>0]\n \n deg1con = deg1con/max(deg1con)\n return (deg1conidx, deg1con,deg2conidx,deg2con)\n \n \"\"\"\n return the top N recommendations:\n recommendations, coauthors, researchtopics, recentpub(at least 3 and no \n morethan 5 years) \n \"\"\"\n def filteredrecommendations(self, n):\n \n recommendations = []\n self.filteridx = []\n self.filteredauthors = []\n \n i = 0\n for name in self.recommendauthor:\n #coauthors = []\n #researchtopic = []\n #recentpub = []\n #coauthorsjson = []\n #[coauthors, idx, c] = self.mycoauthors(name)\n #[coauthors, idx, c] = self.mycoauthorsV2(name)\n #[coauthors, idx, c] = self.mycoauthorsV3(name)\n [coauthors, idx, c] = self.mycoauthorsV4(name)\n\n # remove the coauthors \n if idx.count(self.myidx):\n i = i+1\n continue\n \n recentpub = self.resentpublications(name)\n\n # check if the recentpub is empty which is not active anymore\n if not recentpub:\n i = i+1\n continue\n # -- \n\n self.filteredauthors.append(name) \n \n # take too much time skip in test\n # researchtopic = self.keyword(name)\n researchtopic = []\n researchtopic.append(OrderedDict([(\"topic\", \"TBD\")]))\n \n \n #recommendations.append({'name':name, 'coAuthors':coauthors, 'researchTopcs':researchtopic, 'recentPublications':recentpub} )\n recommendations.append(OrderedDict([(\"name\",name), (\"relevancy\",self.closeauthordis[i]),(\"coAuthors\",coauthors),(\"researchTopics\",researchtopic), (\"recentPublications\",recentpub)])) \n #result={'name':user, 'recommendations':recommendations};\n \n # save the picked idx\n self.filteridx.append(i) \n i = i+1\n \n # only need top n recommendations\n \n if len(self.filteridx) == n:\n break\n \n return recommendations\n \n \n \"\"\"\n \"\"\"\n def thresholdrecommendations(self, remds,n):\n \n thredremd = []\n self.trd = np.zeros(3)\n \n tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis>t1])\n \n # get the top 3 in each group\n self.trd[1] = len(tdis[tdis<t1])\n 
self.trd[2] = len(tdis) - len(tdis[tdis>t2])\n \n # get the top 3 in first group, median 3 in second group, \n # last 3 in third group\n# self.trd[1] = int((len(tdis[tdis<t2]) - len(tdis[tdis<t1]))/2)-1\n# self.trd[2] = len(tdis) - 3\n \n \n for i in range(3):\n for j in range(int(n/3)):\n k = int(self.trd[i]+j)\n name = remds[k]['name']\n researchtopic = self.keyword(name)\n remds[k]['researchTopics'] = researchtopic\n thredremd.append(remds[k])\n \n return thredremd\n \n\n \n \"\"\"\n \"\"\"\n def filteredcloseauthordis(self):\n return self.closeauthordis[self.filteridx]\n \n \n \"\"\"\n \"\"\"\n def save_json(self,filename): \n PROJECT_DIRECTORY = 'output/project/' + project_name + '/'\n with io.open(PROJECT_DIRECTORY + filename +'.json','w',encoding=\"utf-8\") as outfile:\n outfile.write((json.dumps((self.result), ensure_ascii=False)))\n \n \n",
"step-ids": [
21,
25,
35,
43,
47
]
}
|
[
21,
25,
35,
43,
47
] |
# Cookies Keys
class Cookies:
USER_TOKEN = "utoken"
# Session Keys
class Session:
USER_ROOT_ID = "x-root-id"
class APIStatisticsCollection:
API_ACTION = "x-stats-api-action"
DICT_PARAMS = "x-stats-param-dict"
DICT_RESPONSE = "x-stats-resp-dict"
SUCCESS = "x-stats-success"
COLLECT = "x-stats-collect"
# Param Dict Prefix
class ParamDictPrefix:
PostKey = "x-" # Used in http POST params from HTML forms
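
# Hypothetical usage with a Django-style request object (illustrative only,
# not part of the original module):
#   token = request.COOKIES.get(Cookies.USER_TOKEN)
#   root_id = request.session.get(Session.USER_ROOT_ID)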
|
normal
|
{
"blob_id": "d0e5a3a6db0e27ecf157294850a48a19750a5ac2",
"index": 1667,
"step-1": "<mask token>\n\n\nclass Session:\n <mask token>\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-2": "<mask token>\n\n\nclass Session:\n USER_ROOT_ID = 'x-root-id'\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-3": "class Cookies:\n <mask token>\n\n\nclass Session:\n USER_ROOT_ID = 'x-root-id'\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-4": "class Cookies:\n USER_TOKEN = 'utoken'\n\n\nclass Session:\n USER_ROOT_ID = 'x-root-id'\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-5": "# Cookies Keys\nclass Cookies:\n USER_TOKEN = \"utoken\"\n\n\n# Session Keys\nclass Session:\n USER_ROOT_ID = \"x-root-id\"\n\n class APIStatisticsCollection:\n API_ACTION = \"x-stats-api-action\"\n DICT_PARAMS = \"x-stats-param-dict\"\n DICT_RESPONSE = \"x-stats-resp-dict\"\n SUCCESS = \"x-stats-success\"\n\n COLLECT = \"x-stats-collect\"\n\n\n# Param Dict Prefix\nclass ParamDictPrefix:\n PostKey = \"x-\" # Used in http POST params from HTML forms\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/python
#encoding=utf8
import sys
import tushare as ts
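
# Fetch a stock's historical quotes with tushare and save them to an Excel file.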
def local_main():
if len(sys.argv) != 2:
print sys.argv[0], " [stock id]"
return
stock_id = sys.argv[1]
df = ts.get_hist_data(stock_id)
    df.to_excel(stock_id + '_his.xlsx', sheet_name=stock_id)
if __name__ == '__main__':
local_main()
|
normal
|
{
"blob_id": "81a53d08ab36e85dd49cf1f3d9c22c1f18605149",
"index": 6233,
"step-1": "#!/usr/bin/python\n#encoding=utf8\n\nimport sys\nimport tushare as ts\n\ndef local_main():\n if len(sys.argv) != 2:\n print sys.argv[0], \" [stock id]\"\n return\n\n stock_id = sys.argv[1]\n df = ts.get_hist_data(stock_id)\n df.to_excel(stock_id + '_his.xlsx', sheet_name = stock_id)\n\nif __name__ == '__main__':\n local_main()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# from magicbot import AutonomousStateMachine, timed_state, state
# from components.drivetrain import Drivetrain, DrivetrainState
# from components.intake import Intake
# from fieldMeasurements import FieldMeasurements
# class PushBotAuto(AutonomousStateMachine):
# # this auto is intended to push other robots off their lines
# MODE_NAME = "PushBot Auto"
# DEFAULT = False
# drivetrain: Drivetrain
# intake: Intake
# @state(first=True)
# def drive_towards_stations(self, initial_call):
# if initial_call:
# self.drivetrain.drive_to_position(FieldMeasurements.PushBotAuto.initial_drive_distance)
# self.intake.reset()
# self.intake.intake_lift.set_match_start()
# elif self.drivetrain.pid_manager.get_on_target():
# self.drivetrain.stop()
# self.next_state('turn_towards_robot')
# @state()
# def turn_towards_robot(self, initial_call):
# if initial_call:
# self.drivetrain.turn_to_angle(-90)
# elif self.drivetrain.pid_manager.get_on_target():
# self.drivetrain.stop()
# self.next_state('drive_towards_robot')
# @state()
# def drive_towards_robot(self, initial_call):
# if initial_call:
# self.drivetrain.drive_to_position(FieldMeasurements.PushBotAuto.distance_to_bot)
# elif self.drivetrain.pid_manager.get_on_target():
# self.drivetrain.stop()
# self.next_state('turn_pre_push_bot')
# @state()
# def turn_pre_push_bot(self, initial_call):
# if initial_call:
# self.drivetrain.turn_to_angle(-90)
# elif self.drivetrain.pid_manager.get_on_target():
# self.drivetrain.stop()
# self.next_state('push_bot')
# @state()
# def push_bot(self, initial_call):
# if initial_call:
# self.drivetrain.drive_to_position(
# FieldMeasurements.PushBotAuto.distance_to_bot
# + FieldMeasurements.PushBotAuto.extra_distance
# )
# elif self.drivetrain.pid_manager.get_on_target():
# self.drivetrain.stop()
# self.done()
|
normal
|
{
"blob_id": "fdef3e94bbeb29c25bf14e17cd1d013cf848bedc",
"index": 9456,
"step-1": "# from magicbot import AutonomousStateMachine, timed_state, state\n\n# from components.drivetrain import Drivetrain, DrivetrainState\n# from components.intake import Intake\n\n# from fieldMeasurements import FieldMeasurements\n\n# class PushBotAuto(AutonomousStateMachine):\n# # this auto is intended to push other robots off their lines\n# MODE_NAME = \"PushBot Auto\"\n# DEFAULT = False\n\n# drivetrain: Drivetrain\n# intake: Intake\n\n# @state(first=True)\n# def drive_towards_stations(self, initial_call):\n# if initial_call:\n# self.drivetrain.drive_to_position(FieldMeasurements.PushBotAuto.initial_drive_distance)\n# self.intake.reset()\n# self.intake.intake_lift.set_match_start()\n# elif self.drivetrain.pid_manager.get_on_target():\n# self.drivetrain.stop()\n# self.next_state('turn_towards_robot')\n\n# @state()\n# def turn_towards_robot(self, initial_call):\n# if initial_call:\n# self.drivetrain.turn_to_angle(-90)\n# elif self.drivetrain.pid_manager.get_on_target():\n# self.drivetrain.stop()\n# self.next_state('drive_towards_robot')\n \n# @state()\n# def drive_towards_robot(self, initial_call):\n# if initial_call:\n# self.drivetrain.drive_to_position(FieldMeasurements.PushBotAuto.distance_to_bot)\n# elif self.drivetrain.pid_manager.get_on_target():\n# self.drivetrain.stop()\n# self.next_state('turn_pre_push_bot')\n \n# @state()\n# def turn_pre_push_bot(self, initial_call):\n# if initial_call:\n# self.drivetrain.turn_to_angle(-90)\n# elif self.drivetrain.pid_manager.get_on_target():\n# self.drivetrain.stop()\n# self.next_state('push_bot')\n\n# @state()\n# def push_bot(self, initial_call):\n# if initial_call:\n# self.drivetrain.drive_to_position(\n# FieldMeasurements.PushBotAuto.distance_to_bot\n# + FieldMeasurements.PushBotAuto.extra_distance\n# )\n# elif self.drivetrain.pid_manager.get_on_target():\n# self.drivetrain.stop()\n# self.done()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
import re
from collections import OrderedDict
OPENING_TAG = '<{}>'
CLOSING_TAG= '</{}>'
U_LIST = '<ul>{}</ul>'
LIST_ITEM = '<li>{}</li>'
STRONG = '<strong>{}</strong>'
ITALIC = '<em>{}</em>'
PARAGRAPH = '<p>{}</p>'
HEADERS = OrderedDict({'######': 'h6',
'#####': 'h5',
'####': 'h4',
                       '###': 'h3',
'##': 'h2',
'#': 'h1'})
def replace_header_tags(l=''):
for k,v in HEADERS.items():
line_with_header = re.match(f'{k} (.*)', l)
if line_with_header:
rest_string = line_with_header.group(1)
return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)
return l
def replace_bold_tags(l=''):
line_with_bold = re.match('(.*)__(.*)__(.*)', l)
if line_with_bold:
return line_with_bold.group(1) + \
STRONG.format(line_with_bold.group(2)) + line_with_bold.group(3)
return l
def replace_italic_tags(l=''):
line_with_ital = re.match('(.*)_(.*)_(.*)', l)
if line_with_ital:
return line_with_ital.group(1) + \
ITALIC.format(line_with_ital.group(2)) + line_with_ital.group(3)
return l
def apply_p_tag_if_no_tag(l=''):
return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)
def check_if_list_item(l=''):
list_item = re.match(r'\* (.*)', l)
if list_item:
return LIST_ITEM.format(list_item.group(1))
return False
def is_last_line(i, _list):
    # Compare by position rather than by value so duplicate lines are handled.
    return i == len(_list) - 1
def parse(markdown):
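    # Convert markdown line by line; consecutive list items are buffered in
    # current_list and wrapped in a single <ul> once the list ends.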
lines = markdown.split('\n')
res = ''
current_list = ''
    for n, i in enumerate(lines):
line = replace_header_tags(i)
line = replace_bold_tags(line)
line = replace_italic_tags(line)
list_item = check_if_list_item(line)
if list_item:
current_list += list_item
            res += U_LIST.format(current_list) if is_last_line(n, lines) else ''
else:
res += U_LIST.format(current_list) if current_list else ''
current_list = ''
res += apply_p_tag_if_no_tag(line)
return res
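
# Illustrative example (hypothetical call, not part of the original module):
#   parse('# Title\n* item one\n* item two')
#   -> '<h1>Title</h1><ul><li>item one</li><li>item two</li></ul>'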
|
normal
|
{
"blob_id": "6b0b60ec571cf026d0f0cff3d9517362c16b459b",
"index": 6092,
"step-1": "<mask token>\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\n<mask token>\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-2": "<mask token>\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + ITALIC.format(line_with_ital.group(2)\n ) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-3": "<mask token>\nOPENING_TAG = '<{}>'\nCLOSING_TAG = '</{}>'\nU_LIST = '<ul>{}</ul>'\nLIST_ITEM = '<li>{}</li>'\nSTRONG = '<strong>{}</strong>'\nITALIC = '<em>{}</em>'\nPARAGRAPH = '<p>{}</p>'\nHEADERS = OrderedDict({'######': 'h6', '#####': 'h5', '####': 'h4', '###:':\n 'h3', '##': 'h2', '#': 'h1'})\n\n\ndef replace_header_tags(l=''):\n for k, v in HEADERS.items():\n line_with_header = re.match(f'{k} (.*)', l)\n if line_with_header:\n rest_string = line_with_header.group(1)\n return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)\n return l\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + ITALIC.format(line_with_ital.group(2)\n ) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-4": "import re\nfrom collections import OrderedDict\nOPENING_TAG = '<{}>'\nCLOSING_TAG = '</{}>'\nU_LIST = '<ul>{}</ul>'\nLIST_ITEM = '<li>{}</li>'\nSTRONG = '<strong>{}</strong>'\nITALIC = '<em>{}</em>'\nPARAGRAPH = '<p>{}</p>'\nHEADERS = OrderedDict({'######': 'h6', '#####': 'h5', '####': 'h4', '###:':\n 'h3', '##': 'h2', '#': 'h1'})\n\n\ndef replace_header_tags(l=''):\n for k, v in HEADERS.items():\n line_with_header = re.match(f'{k} (.*)', l)\n if line_with_header:\n rest_string = line_with_header.group(1)\n return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)\n return l\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + ITALIC.format(line_with_ital.group(2)\n ) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-5": "import re\nfrom collections import OrderedDict\n\nOPENING_TAG = '<{}>'\nCLOSING_TAG= '</{}>'\nU_LIST = '<ul>{}</ul>'\nLIST_ITEM = '<li>{}</li>'\nSTRONG = '<strong>{}</strong>'\nITALIC = '<em>{}</em>'\nPARAGRAPH = '<p>{}</p>'\nHEADERS = OrderedDict({'######': 'h6',\n '#####': 'h5',\n '####': 'h4',\n '###:': 'h3',\n '##': 'h2',\n '#': 'h1'})\n\n\ndef replace_header_tags(l=''):\n for k,v in HEADERS.items():\n line_with_header = re.match(f'{k} (.*)', l)\n if line_with_header:\n rest_string = line_with_header.group(1)\n return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)\n return l\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + \\\n STRONG.format(line_with_bold.group(2)) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + \\\n ITALIC.format(line_with_ital.group(2)) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match(r'\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
from django.urls import path,include
from . import views
urlpatterns = [
path('',views.home,name='home'),
path('category/',include('api.category.urls')),
path('product/',include('api.product.urls')),
path('user/',include('api.user.urls')),
path('order/',include('api.order.urls')),
path('payment/',include('api.payment.urls')),
]
|
normal
|
{
"blob_id": "fe12f6d3408ab115c5c440c5b45a9014cfee6539",
"index": 564,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.home, name='home'), path('category/', include\n ('api.category.urls')), path('product/', include('api.product.urls')),\n path('user/', include('api.user.urls')), path('order/', include(\n 'api.order.urls')), path('payment/', include('api.payment.urls'))]\n",
"step-3": "from django.urls import path, include\nfrom . import views\nurlpatterns = [path('', views.home, name='home'), path('category/', include\n ('api.category.urls')), path('product/', include('api.product.urls')),\n path('user/', include('api.user.urls')), path('order/', include(\n 'api.order.urls')), path('payment/', include('api.payment.urls'))]\n",
"step-4": "from django.urls import path,include\nfrom .import views\n\nurlpatterns = [\n path('',views.home,name='home'),\n path('category/',include('api.category.urls')),\n path('product/',include('api.product.urls')),\n path('user/',include('api.user.urls')),\n path('order/',include('api.order.urls')),\n path('payment/',include('api.payment.urls')),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# Created by: Khang Le
# Created on: Dec 2019
# This program uses lists and rotation
def rotation(list_of_number, rotating_time):
    # Shift every element left by rotating_time positions, wrapping around.
    numbers = [list_of_number[(i + rotating_time) % len(list_of_number)]
               for i in range(len(list_of_number))]
    return numbers
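
# Example: rotation([1, 2, 3, 4], 1) returns [2, 3, 4, 1].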
def main():
lst = []
# number of elemetns as input
user_input = int(input("Enter number of elements : "))
rotating_time = int(input("Enter how many times you want to rotate: "))
print("The numbers are:")
for i in range(0, user_input):
ele = int(input())
lst.append(ele) # adding the element
numbers = rotation(lst, rotating_time)
print("Rotated by {0}: {1}".format(rotating_time, numbers))
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "74de0da708c7eb792dea15afb23713d9d71af520",
"index": 5491,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n lst = []\n user_input = int(input('Enter number of elements : '))\n rotating_time = int(input('Enter how many times you want to rotate: '))\n print('The numbers are:')\n for i in range(0, user_input):\n ele = int(input())\n lst.append(ele)\n numbers = rotation(lst, rotating_time)\n print('Rotated by {0}: {1}'.format(rotating_time, numbers))\n\n\n<mask token>\n",
"step-3": "def rotation(list_of_number, ratating_time):\n numbers = list_of_number[0]\n numbers = [list_of_number[(i + ratating_time) % len(list_of_number)] for\n i, x in enumerate(list_of_number)]\n return numbers\n\n\ndef main():\n lst = []\n user_input = int(input('Enter number of elements : '))\n rotating_time = int(input('Enter how many times you want to rotate: '))\n print('The numbers are:')\n for i in range(0, user_input):\n ele = int(input())\n lst.append(ele)\n numbers = rotation(lst, rotating_time)\n print('Rotated by {0}: {1}'.format(rotating_time, numbers))\n\n\n<mask token>\n",
"step-4": "def rotation(list_of_number, ratating_time):\n numbers = list_of_number[0]\n numbers = [list_of_number[(i + ratating_time) % len(list_of_number)] for\n i, x in enumerate(list_of_number)]\n return numbers\n\n\ndef main():\n lst = []\n user_input = int(input('Enter number of elements : '))\n rotating_time = int(input('Enter how many times you want to rotate: '))\n print('The numbers are:')\n for i in range(0, user_input):\n ele = int(input())\n lst.append(ele)\n numbers = rotation(lst, rotating_time)\n print('Rotated by {0}: {1}'.format(rotating_time, numbers))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n\n# Created by: Khang Le\n# Created on: Dec 2019\n# This program uses lists and rotation\n\n\ndef rotation(list_of_number, ratating_time):\n\n numbers = list_of_number[0]\n numbers = [list_of_number[(i + ratating_time) % len(list_of_number)]\n for i, x in enumerate(list_of_number)]\n return numbers\n\n\ndef main():\n\n lst = []\n # number of elemetns as input\n user_input = int(input(\"Enter number of elements : \"))\n rotating_time = int(input(\"Enter how many times you want to rotate: \"))\n print(\"The numbers are:\")\n for i in range(0, user_input):\n ele = int(input())\n lst.append(ele) # adding the element\n numbers = rotation(lst, rotating_time)\n print(\"Rotated by {0}: {1}\".format(rotating_time, numbers))\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys


def main(stream=sys.stdin):
    """
    Input, output, and parsing, etc. Yeah.
    """
    num_cases = int(stream.readline().strip())
    for i in range(num_cases):
        rows, cols = map(int, stream.readline().strip().split())
        board = []
        for r in range(rows):
            board.append(list(map(int, stream.readline().strip().split())))
        if is_board_valid(board, rows, cols):
            print("Case #%d: YES" % (i + 1,))
        else:
            print("Case #%d: NO" % (i + 1,))


def is_board_valid(board, rows, cols):
    """
    >>> is_board_valid([[1,2,1]], 1, 3)
    True
    """
    return all(all(is_cell_valid(board, r, c) for c in range(cols)) for r in range(rows))


def is_cell_valid(board, r, c):
    """
    >>> is_cell_valid([ [2, 2, 2, 2, 2], [2, 1, 1, 1, 2], [2, 1, 2, 1, 2], [2, 1, 1, 1, 2], [2, 2, 2, 2, 2] ], 0, 0)
    True
    >>> is_cell_valid([ [2, 2, 2, 2, 2], [2, 1, 1, 1, 2], [2, 1, 2, 1, 2], [2, 1, 1, 1, 2], [2, 2, 2, 2, 2] ], 1, 1)
    False
    """
    return is_cell_row_valid(board, r, c) or is_cell_col_valid(board, r, c)


def is_cell_row_valid(board, r, c):
    """
    >>> is_cell_row_valid([[2,1,2],[1,1,1],[2,1,2]], 1, 1)
    True
    >>> is_cell_row_valid([[2,1,2],[1,1,1],[2,1,2]], 0, 1)
    False
    """
    return all(board[r][i] <= board[r][c] for i in range(len(board[r])))


def is_cell_col_valid(board, r, c):
    """
    >>> is_cell_col_valid([[1,2,1]], 0, 1)
    True
    """
    return all(board[i][c] <= board[r][c] for i in range(len(board)))


if __name__ == '__main__':
    import doctest
    # doctest.testmod() returns a (failed, attempted) named tuple, which is
    # always truthy; check the failure count so main() only runs when the
    # doctests actually pass.
    if doctest.testmod().failed == 0:
        main()
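

# A worked example under the rules this solution appears to implement (every
# cell must be the maximum of its row or of its column, as in Code Jam's
# "Lawnmower" problem): the board
#   2 1 2
#   1 1 1
#   2 1 2
# is valid, while replacing the centre 1 with 0 would make it invalid,
# because that 0 would be dominated in both its row and its column.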
|
normal
|
{
"blob_id": "5bcfb0d4fd371a0882dd47814935700eed7885ec",
"index": 6925,
"step-1": "import sys\n\ndef main(stream=sys.stdin):\n \"\"\"\n Input, output, and parsing, etc. Yeah.\n \"\"\"\n num_cases = int(stream.readline().strip())\n for i in xrange(num_cases):\n rows, cols = map(int, stream.readline().strip().split())\n board = []\n for r in xrange(rows):\n board = board + [map(int, stream.readline().strip().split())]\n if is_board_valid(board, rows, cols):\n print \"Case #%d: YES\" % (i+1,)\n else:\n print \"Case #%d: NO\" % (i+1,)\n\ndef is_board_valid(board, rows, cols):\n \"\"\"\n >>> is_board_valid([[1,2,1]], 1, 3)\n True\n \"\"\"\n return all(all(is_cell_valid(board, r, c) for c in xrange(cols)) for r in xrange(rows))\n\ndef is_cell_valid(board, r, c):\n \"\"\"\n >>> is_cell_valid([ [2, 2, 2, 2, 2], [2, 1, 1, 1, 2], [2, 1, 2, 1, 2], [2, 1, 1, 1, 2], [2, 2, 2, 2, 2] ], 0, 0)\n True\n >>> is_cell_valid([ [2, 2, 2, 2, 2], [2, 1, 1, 1, 2], [2, 1, 2, 1, 2], [2, 1, 1, 1, 2], [2, 2, 2, 2, 2] ], 1, 1)\n False\n \"\"\"\n return is_cell_row_valid(board, r, c) or is_cell_col_valid(board, r, c)\n\ndef is_cell_row_valid(board, r, c):\n \"\"\"\n >>> is_cell_row_valid([[2,1,2],[1,1,1],[2,1,2]], 1, 1)\n True\n >>> is_cell_row_valid([[2,1,2],[1,1,1],[2,1,2]], 0, 1)\n False\n \"\"\"\n return all(board[r][i] <= board[r][c] for i in xrange(len(board[r])))\n\ndef is_cell_col_valid(board, r, c):\n \"\"\"\n >>> is_cell_col_valid([[1,2,1]], 0, 1)\n True\n \"\"\"\n return all(board[i][c] <= board[r][c] for i in xrange(len(board)))\n\nif __name__ == '__main__':\n import doctest\n if doctest.testmod():\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from telegram.ext import Dispatcher,CommandHandler,CallbackQueryHandler
import random
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
gifGOODBOAT = 'https://media3.giphy.com/media/3oz8xRQiRlaS1XwnPW/giphy.gif'
gifBADBOAT = 'https://media1.giphy.com/media/l2Je3n9VXC8z3baTe/giphy.gif'
gifGOODMAN = 'https://media4.giphy.com/media/dsKALVnvGKgn4bLu2a/giphy.gif'
gifBADMAN = 'https://media4.giphy.com/media/e37RbTLYjfc1q/giphy.gif'
gifFAST = 'https://media.tenor.com/images/e3cef91b522243efb296f3f5a9b750a6/tenor.gif'
gifSLOW = 'https://media4.giphy.com/media/5qVezULI35guQ/200.gif'
gifYW = 'https://i.imgur.com/L32gUzm.gif'
gifNW = 'https://media1.giphy.com/media/JRbW288WAWvECVyYwx/giphy.gif'
gifFIGHT = 'https://www.spiritshunters.com/wp-content/uploads/2018/09/KRAKEN_v2.gif'
gifRUN = 'https://media3.giphy.com/media/IdTBkruzkGeq1HEnlP/source.gif'
gifDRINK = 'https://media4.giphy.com/media/5zjdD5R7crDK37nqTX/giphy-downsized-medium.gif'
gifDANCE = 'https://media2.giphy.com/media/cm6bHfo16WBokQuFqa/giphy.gif'
gifEARLY = 'https://i.pinimg.com/originals/c6/fb/94/c6fb94ee41fb968e27ad009047f0a4cb.gif'
gifLATE = 'https://media3.giphy.com/media/U2S7MdmsC0O5WDReuE/giphy.gif'
gifFACE = 'https://i.gifer.com/9pd0.gif'
gifAWAY = 'https://media0.giphy.com/media/l0Hedc94pmNdUA2Ji/source.gif'
gifCHECK = 'https://media4.giphy.com/media/3owzW5c1tPq63MPmWk/giphy.gif'
gifHAPPY = 'https://media0.giphy.com/media/3o6Mbl0kpk4i9G2GCQ/source.gif'
gifYEW = 'https://media3.giphy.com/media/3o6Ztn4vuACOmS0Mla/source.gif'
gifNOW = 'https://media1.giphy.com/media/4EF5LwiRfpBKQUOsoF/giphy.gif'
gifISLAND = 'https://thumbs.gfycat.com/SmallIncredibleAttwatersprairiechicken-small.gif'
gifSHIP = 'https://media0.giphy.com/media/3og0ILtBPzSsVnrTry/source.gif'
starttreasureButton = InlineKeyboardButton('开始寻宝吧!', callback_data='treasure:treasure')
goodboatButton = InlineKeyboardButton('好船', callback_data='treasure:goodboat')
badboatButton = InlineKeyboardButton('坏船', callback_data='treasure:badboat')
goodmanButton = InlineKeyboardButton('好水手', callback_data='treasure:goodman')
badmanButton = InlineKeyboardButton('坏水手', callback_data='treasure:badman')
fastButton = InlineKeyboardButton('快一点', callback_data='treasure:fast')
slowButton = InlineKeyboardButton('慢一点', callback_data='treasure:slow')
ywButton = InlineKeyboardButton('去散步', callback_data='treasure:yw')
nwButton = InlineKeyboardButton('不去散步', callback_data='treasure:nw')
fightButton = InlineKeyboardButton('战斗', callback_data='treasure:fight')
runButton = InlineKeyboardButton('逃跑', callback_data='treasure:run')
drinkButton = InlineKeyboardButton('喝酒',callback_data='treasure:drink')
danceButton = InlineKeyboardButton('跳舞',callback_data='treasure:dance')
earlyButton = InlineKeyboardButton('早起',callback_data='treasure:early')
lateButton = InlineKeyboardButton('晚起',callback_data='treasure:late')
awayButton = InlineKeyboardButton('绕开',callback_data='treasure:away')
faceButton = InlineKeyboardButton('迎战',callback_data='treasure:face')
happyButton = InlineKeyboardButton('狂欢',callback_data='treasure:happy')
checkButton = InlineKeyboardButton('查看方位',callback_data='treasure:check')
yewButton = InlineKeyboardButton('放哨',callback_data='treasure:yew')
nowButton = InlineKeyboardButton('不要放哨',callback_data='treasure:now')
islandButton = InlineKeyboardButton('岛上过夜',callback_data='treasure:island')
shipButton = InlineKeyboardButton('回船过夜',callback_data='treasure:ship')
kb2 = InlineKeyboardMarkup([[starttreasureButton]])
kb3 = InlineKeyboardMarkup([[goodboatButton], [badboatButton]])
kb4 = InlineKeyboardMarkup([[goodmanButton], [badmanButton]])
kb5 = InlineKeyboardMarkup([[fastButton], [slowButton]])
kb6 = InlineKeyboardMarkup([[ywButton],[nwButton]])
kb7 = InlineKeyboardMarkup([[fightButton],[runButton]])
kb8 = InlineKeyboardMarkup([[drinkButton],[danceButton]])
kb9 = InlineKeyboardMarkup([[earlyButton],[lateButton]])
kb10 = InlineKeyboardMarkup([[awayButton],[faceButton]])
kb11 = InlineKeyboardMarkup([[happyButton],[checkButton]])
kb12 = InlineKeyboardMarkup([[yewButton],[nowButton]])
kb13 = InlineKeyboardMarkup([[islandButton],[shipButton]])
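
# Each InlineKeyboardMarkup above is one node of a simple decision tree:
# a node offers the choices for one story beat, and buttonCallback below
# routes on the callback_data value "treasure:<choice>" to either advance
# the story with the next keyboard or end it with a final caption.
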
def buttonCallback(update, context):
query = update.callback_query
if query.data == 'treasure:treasure':
query.answer("开始寻宝")
query.edit_message_text("""
    在一个风平浪静的早晨,你突然萌生了一个想法,出海寻宝!首先,你需要一艘船.选哪艘船好呢?
""", reply_markup=kb3)
elif query.data == 'treasure:goodboat':
query.answer("好船")
query.edit_message_animation(gifGOODBOAT,caption='船已经准备好了!现在选好水手还是坏水手?',reply_markup=kb4)
elif query.data == 'treasure:badboat':
query.answer("破船")
query.edit_message_animation(gifBADBOAT,caption='为了省钱,你的船还是太破了。。。你眼睁睁的看着你的积蓄沉入海底,还是回家撸猫吧。。')
elif query.data == 'treasure:goodman':
query.answer("好水手")
query.edit_message_animation(gifGOODMAN,caption='大家和乐融融就像是一家人,接下来选择一个速度吧!',reply_markup=kb5)
elif query.data == 'treasure:badman':
query.answer("坏水手")
query.edit_message_animation(gifBADMAN,caption='你的船员发生了暴动,他们把你扔下了海并开船逃走了。')
elif query.data == 'treasure:fast':
query.answer("快一点")
query.edit_message_animation(gifFAST,caption='你的船开的太快了!彻底散架了!')
elif query.data == 'treasure:slow':
query.answer("慢一点")
query.edit_message_animation(gifSLOW,caption='船员都好兴奋!你想带你都船员们去甲板上散步吗?',reply_markup=kb6)
elif query.data == 'treasure:yw':
query.answer("去散步")
query.edit_message_animation(gifYW,caption='海风吹过面颊,兴奋的船员们安静了下来。哦不!你们遇到了巨乌贼海怪克拉肯!战斗还是逃跑?',reply_markup=kb7)
elif query.data == 'treasure:nw':
query.answer("不去散步")
query.edit_message_animation(gifNW,caption='你的船员都呆在船舱里喝酒而没有看到海怪克拉肯,所有人都被吃掉了!')
elif query.data == 'treasure:fight':
query.answer("战斗")
query.edit_message_animation(gifFIGHT,caption='不愧是久经沙场的老水手,大家齐心协力的把海怪打跑了!接下来做什么呢?',reply_markup=kb8)
elif query.data == "treasure:run":
query.answer("逃跑")
query.edit_message_animation(gifRUN,caption='只见海怪用力一劈,整艘船断成了两截,所有人直接归西...')
elif query.data == 'treasure:drink':
query.answer("喝酒")
query.edit_message_animation(gifDRINK,caption='所有人喝的烂醉如泥。明天是早起还是晚起呢?',reply_markup=kb9)
elif query.data == 'treasure:dance':
query.answer("跳舞")
query.edit_message_animation(gifDANCE,caption='大家载歌载舞,别提多开心了!明天是早起还是晚起呢?',reply_markup=kb9)
elif query.data == 'treasure:early':
        query.answer("早起")
query.edit_message_animation(gifEARLY,caption='船员们因为休息太少而无精打采,懒洋洋的。仔细一看,远处有个小黑点,哦不!那是海盗船!幸好他们还没发现你们!现在要绕开还是战斗?',reply_markup=kb10)
elif query.data == 'treasure:late':
query.answer("晚起")
query.edit_message_animation(gifLATE,caption='哦不!你们睡觉的时候海盗偷偷登船并杀死了所有人并带着所有的东西溜之大吉了!')
elif query.data == 'treasure:away':
query.answer("绕开")
query.edit_message_animation(gifAWAY,caption='海盗才不吃你这一套,用大炮把你的船打沉了!')
elif query.data == 'treasure:face':
query.answer("迎战")
query.edit_message_animation(gifFACE,caption='战斗的号角已吹响,你在最前面猛砍猛杀,船员们气势高涨!海盗们还是逃走了几个,不过你也没多想!你犯了一个致命的错误...接下来怎么办?',reply_markup=kb11)
    elif query.data == 'treasure:happy':
query.answer("狂欢")
query.edit_message_animation(gifHAPPY,caption='所有人尽情狂欢,船漂到了百慕大三角,所有人葬身海底')
elif query.data == 'treasure:check':
query.answer("查看方位")
        query.edit_message_animation(gifCHECK,caption='你们发现自己离小岛不远了!需要有人放哨吗?',reply_markup=kb12)
elif query.data == 'treasure:yew':
query.answer("放哨")
query.edit_message_animation(gifYEW,caption='幸亏有人放哨,你们及时发现了海盗,把海盗打跑了以后你们取出了埋在沙子里的宝箱!现在要在岛上过夜还是回船?',reply_markup=kb13)
elif query.data == 'treasure:now':
query.answer("不要放哨")
query.edit_message_animation(gifNOW,caption='逃跑的海盗冲出来把所有人都绑架了!')
elif query.data == 'treasure:island':
query.answer("岛上过夜")
query.edit_message_animation(gifISLAND,caption='你和你的船员们一起在岛上的树林里露宿去了,结果被岛上的野人发现并杀死了!')
elif query.data == 'treasure:ship':
query.answer("回船过夜")
query.edit_message_animation(gifSHIP,caption='你和你的船员们把财宝带上了船并回到了港口!你成功了!现在你是独一无二的优秀船长!太厉害啦!!!这 15000XP 50AP 200GP 是从宝箱里取出来的奖励!好好使用它吧!寻宝游戏圆满结束啦!使用 /start 来看看玩点别的什么吧!')


def treasure(update, context):
msg1 = """
欢迎来到寻宝游戏!这是一场惊悚又危险追逐战,智慧,运气和勇气都是成功的关键!记得要避开海盗!读一读规则吧!
1. 系统会自动给你分配即将要发生的事,做好心理准备!
2. 好的外表不一定有好的结果...
3. 点击按钮开始寻宝!
-----------------------
请开始你的航海之旅吧!祝你玩的开心!
"""
    update.message.reply_text(msg1, reply_markup=kb2)


def add_handler(dp: Dispatcher):
treasure_handler = CommandHandler('treasure', treasure)
dp.add_handler(treasure_handler)
dp.add_handler(CallbackQueryHandler(buttonCallback,pattern="^treasure:[A-Za-z0-9_]*"))
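
# Hypothetical wiring for this module (python-telegram-bot v13-style API,
# which the Dispatcher/CommandHandler imports above suggest); the token
# string below is a placeholder, not a real credential:
#
#   from telegram.ext import Updater
#   updater = Updater("<BOT_TOKEN>", use_context=True)
#   add_handler(updater.dispatcher)
#   updater.start_polling()
#   updater.idle()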
|
normal
|
{
"blob_id": "bc3e94c3fb8e563f62fcf0ca628d4aa73c668612",
"index": 7097,
"step-1": "<mask token>\n\n\ndef treasure(update, context):\n msg1 = \"\"\"\n 欢迎来到寻宝游戏!这是一场惊悚又危险追逐战,智慧,运气和勇气都是成功的关键!记得要避开海盗!读一读规则吧!\n 1. 系统会自动给你分配即将要发生的事,做好心理准备!\n 2. 好的外表不一定有好的结果...\n 3. 点击按钮开始寻宝!\n -----------------------\n 请开始你的航海之旅吧!祝你玩的开心! \n \"\"\"\n update.message.reply_text(msg1, reply_markup=kb2)\n\n\ndef add_handler(dp: Dispatcher):\n treasure_handler = CommandHandler('treasure', treasure)\n dp.add_handler(treasure_handler)\n dp.add_handler(CallbackQueryHandler(buttonCallback, pattern=\n '^treasure:[A-Za-z0-9_]*'))\n",
"step-2": "<mask token>\n\n\ndef buttonCallback(update, context):\n query = update.callback_query\n if query.data == 'treasure:treasure':\n query.answer('开始寻宝')\n query.edit_message_text(\n \"\"\"\n 在一个风平浪静的早晨,你突然萌生了一个想法,出海寻宝!首先,你需要一搜船.选哪艘船好呢?\n \"\"\"\n , reply_markup=kb3)\n elif query.data == 'treasure:goodboat':\n query.answer('好船')\n query.edit_message_animation(gifGOODBOAT, caption=\n '船已经准备好了!现在选好水手还是坏水手?', reply_markup=kb4)\n elif query.data == 'treasure:badboat':\n query.answer('破船')\n query.edit_message_animation(gifBADBOAT, caption=\n '为了省钱,你的船还是太破了。。。你眼睁睁的看着你的积蓄沉入海底,还是回家撸猫吧。。')\n elif query.data == 'treasure:goodman':\n query.answer('好水手')\n query.edit_message_animation(gifGOODMAN, caption=\n '大家和乐融融就像是一家人,接下来选择一个速度吧!', reply_markup=kb5)\n elif query.data == 'treasure:badman':\n query.answer('坏水手')\n query.edit_message_animation(gifBADMAN, caption=\n '你的船员发生了暴动,他们把你扔下了海并开船逃走了。')\n elif query.data == 'treasure:fast':\n query.answer('快一点')\n query.edit_message_animation(gifFAST, caption='你的船开的太快了!彻底散架了!')\n elif query.data == 'treasure:slow':\n query.answer('慢一点')\n query.edit_message_animation(gifSLOW, caption=\n '船员都好兴奋!你想带你都船员们去甲板上散步吗?', reply_markup=kb6)\n elif query.data == 'treasure:yw':\n query.answer('去散步')\n query.edit_message_animation(gifYW, caption=\n '海风吹过面颊,兴奋的船员们安静了下来。哦不!你们遇到了巨乌贼海怪克拉肯!战斗还是逃跑?', reply_markup=kb7)\n elif query.data == 'treasure:nw':\n query.answer('不去散步')\n query.edit_message_animation(gifNW, caption=\n '你的船员都呆在船舱里喝酒而没有看到海怪克拉肯,所有人都被吃掉了!')\n elif query.data == 'treasure:fight':\n query.answer('战斗')\n query.edit_message_animation(gifFIGHT, caption=\n '不愧是久经沙场的老水手,大家齐心协力的把海怪打跑了!接下来做什么呢?', reply_markup=kb8)\n elif query.data == 'treasure:run':\n query.answer('逃跑')\n query.edit_message_animation(gifRUN, caption=\n '只见海怪用力一劈,整艘船断成了两截,所有人直接归西...')\n elif query.data == 'treasure:drink':\n query.answer('喝酒')\n query.edit_message_animation(gifDRINK, caption=\n '所有人喝的烂醉如泥。明天是早起还是晚起呢?', reply_markup=kb9)\n elif query.data == 'treasure:dance':\n query.answer('跳舞')\n query.edit_message_animation(gifDANCE, caption=\n '大家载歌载舞,别提多开心了!明天是早起还是晚起呢?', reply_markup=kb9)\n elif query.data == 'treasure:early':\n query.asnwer('早起')\n query.edit_message_animation(gifEARLY, caption=\n '船员们因为休息太少而无精打采,懒洋洋的。仔细一看,远处有个小黑点,哦不!那是海盗船!幸好他们还没发现你们!现在要绕开还是战斗?',\n reply_markup=kb10)\n elif query.data == 'treasure:late':\n query.answer('晚起')\n query.edit_message_animation(gifLATE, caption=\n '哦不!你们睡觉的时候海盗偷偷登船并杀死了所有人并带着所有的东西溜之大吉了!')\n elif query.data == 'treasure:away':\n query.answer('绕开')\n query.edit_message_animation(gifAWAY, caption='海盗才不吃你这一套,用大炮把你的船打沉了!')\n elif query.data == 'treasure:face':\n query.answer('迎战')\n query.edit_message_animation(gifFACE, caption=\n '战斗的号角已吹响,你在最前面猛砍猛杀,船员们气势高涨!海盗们还是逃走了几个,不过你也没多想!你犯了一个致命的错误...接下来怎么办?'\n , reply_markup=kb11)\n elif query.data == 'treasuer:happy':\n query.answer('狂欢')\n query.edit_message_animation(gifHAPPY, caption=\n '所有人尽情狂欢,船漂到了百慕大三角,所有人葬身海底')\n elif query.data == 'treasure:check':\n query.answer('查看方位')\n query.edit_message_animation(gifCHECK, capyion=\n '你们发现自己离小岛不远了!需要有人放哨吗?', reply_markup=kb12)\n elif query.data == 'treasure:yew':\n query.answer('放哨')\n query.edit_message_animation(gifYEW, caption=\n '幸亏有人放哨,你们及时发现了海盗,把海盗打跑了以后你们取出了埋在沙子里的宝箱!现在要在岛上过夜还是回船?',\n reply_markup=kb13)\n elif query.data == 'treasure:now':\n query.answer('不要放哨')\n query.edit_message_animation(gifNOW, caption='逃跑的海盗冲出来把所有人都绑架了!')\n elif query.data == 'treasure:island':\n query.answer('岛上过夜')\n query.edit_message_animation(gifISLAND, caption=\n 
'你和你的船员们一起在岛上的树林里露宿去了,结果被岛上的野人发现并杀死了!')\n elif query.data == 'treasure:ship':\n query.answer('回船过夜')\n query.edit_message_animation(gifSHIP, caption=\n '你和你的船员们把财宝带上了船并回到了港口!你成功了!现在你是独一无二的优秀船长!太厉害啦!!!这 15000XP 50AP 200GP 是从宝箱里取出来的奖励!好好使用它吧!寻宝游戏圆满结束啦!使用 /start 来看看玩点别的什么吧!'\n )\n\n\ndef treasure(update, context):\n msg1 = \"\"\"\n 欢迎来到寻宝游戏!这是一场惊悚又危险追逐战,智慧,运气和勇气都是成功的关键!记得要避开海盗!读一读规则吧!\n 1. 系统会自动给你分配即将要发生的事,做好心理准备!\n 2. 好的外表不一定有好的结果...\n 3. 点击按钮开始寻宝!\n -----------------------\n 请开始你的航海之旅吧!祝你玩的开心! \n \"\"\"\n update.message.reply_text(msg1, reply_markup=kb2)\n\n\ndef add_handler(dp: Dispatcher):\n treasure_handler = CommandHandler('treasure', treasure)\n dp.add_handler(treasure_handler)\n dp.add_handler(CallbackQueryHandler(buttonCallback, pattern=\n '^treasure:[A-Za-z0-9_]*'))\n",
"step-3": "<mask token>\ngifGOODBOAT = 'https://media3.giphy.com/media/3oz8xRQiRlaS1XwnPW/giphy.gif'\ngifBADBOAT = 'https://media1.giphy.com/media/l2Je3n9VXC8z3baTe/giphy.gif'\ngifGOODMAN = 'https://media4.giphy.com/media/dsKALVnvGKgn4bLu2a/giphy.gif'\ngifBADMAN = 'https://media4.giphy.com/media/e37RbTLYjfc1q/giphy.gif'\ngifFAST = (\n 'https://media.tenor.com/images/e3cef91b522243efb296f3f5a9b750a6/tenor.gif'\n )\ngifSLOW = 'https://media4.giphy.com/media/5qVezULI35guQ/200.gif'\ngifYW = 'https://i.imgur.com/L32gUzm.gif'\ngifNW = 'https://media1.giphy.com/media/JRbW288WAWvECVyYwx/giphy.gif'\ngifFIGHT = (\n 'https://www.spiritshunters.com/wp-content/uploads/2018/09/KRAKEN_v2.gif')\ngifRUN = 'https://media3.giphy.com/media/IdTBkruzkGeq1HEnlP/source.gif'\ngifDRINK = (\n 'https://media4.giphy.com/media/5zjdD5R7crDK37nqTX/giphy-downsized-medium.gif'\n )\ngifDANCE = 'https://media2.giphy.com/media/cm6bHfo16WBokQuFqa/giphy.gif'\ngifEARLY = (\n 'https://i.pinimg.com/originals/c6/fb/94/c6fb94ee41fb968e27ad009047f0a4cb.gif'\n )\ngifLATE = 'https://media3.giphy.com/media/U2S7MdmsC0O5WDReuE/giphy.gif'\ngifFACE = 'https://i.gifer.com/9pd0.gif'\ngifAWAY = 'https://media0.giphy.com/media/l0Hedc94pmNdUA2Ji/source.gif'\ngifCHECK = 'https://media4.giphy.com/media/3owzW5c1tPq63MPmWk/giphy.gif'\ngifHAPPY = 'https://media0.giphy.com/media/3o6Mbl0kpk4i9G2GCQ/source.gif'\ngifYEW = 'https://media3.giphy.com/media/3o6Ztn4vuACOmS0Mla/source.gif'\ngifNOW = 'https://media1.giphy.com/media/4EF5LwiRfpBKQUOsoF/giphy.gif'\ngifISLAND = (\n 'https://thumbs.gfycat.com/SmallIncredibleAttwatersprairiechicken-small.gif'\n )\ngifSHIP = 'https://media0.giphy.com/media/3og0ILtBPzSsVnrTry/source.gif'\nstarttreasureButton = InlineKeyboardButton('开始寻宝吧!', callback_data=\n 'treasure:treasure')\ngoodboatButton = InlineKeyboardButton('好船', callback_data='treasure:goodboat')\nbadboatButton = InlineKeyboardButton('坏船', callback_data='treasure:badboat')\ngoodmanButton = InlineKeyboardButton('好水手', callback_data='treasure:goodman')\nbadmanButton = InlineKeyboardButton('坏水手', callback_data='treasure:badman')\nfastButton = InlineKeyboardButton('快一点', callback_data='treasure:fast')\nslowButton = InlineKeyboardButton('慢一点', callback_data='treasure:slow')\nywButton = InlineKeyboardButton('去散步', callback_data='treasure:yw')\nnwButton = InlineKeyboardButton('不去散步', callback_data='treasure:nw')\nfightButton = InlineKeyboardButton('战斗', callback_data='treasure:fight')\nrunButton = InlineKeyboardButton('逃跑', callback_data='treasure:run')\ndrinkButton = InlineKeyboardButton('喝酒', callback_data='treasure:drink')\ndanceButton = InlineKeyboardButton('跳舞', callback_data='treasure:dance')\nearlyButton = InlineKeyboardButton('早起', callback_data='treasure:early')\nlateButton = InlineKeyboardButton('晚起', callback_data='treasure:late')\nawayButton = InlineKeyboardButton('绕开', callback_data='treasure:away')\nfaceButton = InlineKeyboardButton('迎战', callback_data='treasure:face')\nhappyButton = InlineKeyboardButton('狂欢', callback_data='treasure:happy')\ncheckButton = InlineKeyboardButton('查看方位', callback_data='treasure:check')\nyewButton = InlineKeyboardButton('放哨', callback_data='treasure:yew')\nnowButton = InlineKeyboardButton('不要放哨', callback_data='treasure:now')\nislandButton = InlineKeyboardButton('岛上过夜', callback_data='treasure:island')\nshipButton = InlineKeyboardButton('回船过夜', callback_data='treasure:ship')\nkb2 = InlineKeyboardMarkup([[starttreasureButton]])\nkb3 = InlineKeyboardMarkup([[goodboatButton], [badboatButton]])\nkb4 = 
InlineKeyboardMarkup([[goodmanButton], [badmanButton]])\nkb5 = InlineKeyboardMarkup([[fastButton], [slowButton]])\nkb6 = InlineKeyboardMarkup([[ywButton], [nwButton]])\nkb7 = InlineKeyboardMarkup([[fightButton], [runButton]])\nkb8 = InlineKeyboardMarkup([[drinkButton], [danceButton]])\nkb9 = InlineKeyboardMarkup([[earlyButton], [lateButton]])\nkb10 = InlineKeyboardMarkup([[awayButton], [faceButton]])\nkb11 = InlineKeyboardMarkup([[happyButton], [checkButton]])\nkb12 = InlineKeyboardMarkup([[yewButton], [nowButton]])\nkb13 = InlineKeyboardMarkup([[islandButton], [shipButton]])\n\n\ndef buttonCallback(update, context):\n query = update.callback_query\n if query.data == 'treasure:treasure':\n query.answer('开始寻宝')\n query.edit_message_text(\n \"\"\"\n 在一个风平浪静的早晨,你突然萌生了一个想法,出海寻宝!首先,你需要一搜船.选哪艘船好呢?\n \"\"\"\n , reply_markup=kb3)\n elif query.data == 'treasure:goodboat':\n query.answer('好船')\n query.edit_message_animation(gifGOODBOAT, caption=\n '船已经准备好了!现在选好水手还是坏水手?', reply_markup=kb4)\n elif query.data == 'treasure:badboat':\n query.answer('破船')\n query.edit_message_animation(gifBADBOAT, caption=\n '为了省钱,你的船还是太破了。。。你眼睁睁的看着你的积蓄沉入海底,还是回家撸猫吧。。')\n elif query.data == 'treasure:goodman':\n query.answer('好水手')\n query.edit_message_animation(gifGOODMAN, caption=\n '大家和乐融融就像是一家人,接下来选择一个速度吧!', reply_markup=kb5)\n elif query.data == 'treasure:badman':\n query.answer('坏水手')\n query.edit_message_animation(gifBADMAN, caption=\n '你的船员发生了暴动,他们把你扔下了海并开船逃走了。')\n elif query.data == 'treasure:fast':\n query.answer('快一点')\n query.edit_message_animation(gifFAST, caption='你的船开的太快了!彻底散架了!')\n elif query.data == 'treasure:slow':\n query.answer('慢一点')\n query.edit_message_animation(gifSLOW, caption=\n '船员都好兴奋!你想带你都船员们去甲板上散步吗?', reply_markup=kb6)\n elif query.data == 'treasure:yw':\n query.answer('去散步')\n query.edit_message_animation(gifYW, caption=\n '海风吹过面颊,兴奋的船员们安静了下来。哦不!你们遇到了巨乌贼海怪克拉肯!战斗还是逃跑?', reply_markup=kb7)\n elif query.data == 'treasure:nw':\n query.answer('不去散步')\n query.edit_message_animation(gifNW, caption=\n '你的船员都呆在船舱里喝酒而没有看到海怪克拉肯,所有人都被吃掉了!')\n elif query.data == 'treasure:fight':\n query.answer('战斗')\n query.edit_message_animation(gifFIGHT, caption=\n '不愧是久经沙场的老水手,大家齐心协力的把海怪打跑了!接下来做什么呢?', reply_markup=kb8)\n elif query.data == 'treasure:run':\n query.answer('逃跑')\n query.edit_message_animation(gifRUN, caption=\n '只见海怪用力一劈,整艘船断成了两截,所有人直接归西...')\n elif query.data == 'treasure:drink':\n query.answer('喝酒')\n query.edit_message_animation(gifDRINK, caption=\n '所有人喝的烂醉如泥。明天是早起还是晚起呢?', reply_markup=kb9)\n elif query.data == 'treasure:dance':\n query.answer('跳舞')\n query.edit_message_animation(gifDANCE, caption=\n '大家载歌载舞,别提多开心了!明天是早起还是晚起呢?', reply_markup=kb9)\n elif query.data == 'treasure:early':\n query.asnwer('早起')\n query.edit_message_animation(gifEARLY, caption=\n '船员们因为休息太少而无精打采,懒洋洋的。仔细一看,远处有个小黑点,哦不!那是海盗船!幸好他们还没发现你们!现在要绕开还是战斗?',\n reply_markup=kb10)\n elif query.data == 'treasure:late':\n query.answer('晚起')\n query.edit_message_animation(gifLATE, caption=\n '哦不!你们睡觉的时候海盗偷偷登船并杀死了所有人并带着所有的东西溜之大吉了!')\n elif query.data == 'treasure:away':\n query.answer('绕开')\n query.edit_message_animation(gifAWAY, caption='海盗才不吃你这一套,用大炮把你的船打沉了!')\n elif query.data == 'treasure:face':\n query.answer('迎战')\n query.edit_message_animation(gifFACE, caption=\n '战斗的号角已吹响,你在最前面猛砍猛杀,船员们气势高涨!海盗们还是逃走了几个,不过你也没多想!你犯了一个致命的错误...接下来怎么办?'\n , reply_markup=kb11)\n elif query.data == 'treasuer:happy':\n query.answer('狂欢')\n query.edit_message_animation(gifHAPPY, caption=\n '所有人尽情狂欢,船漂到了百慕大三角,所有人葬身海底')\n elif query.data == 'treasure:check':\n 
query.answer('查看方位')\n query.edit_message_animation(gifCHECK, capyion=\n '你们发现自己离小岛不远了!需要有人放哨吗?', reply_markup=kb12)\n elif query.data == 'treasure:yew':\n query.answer('放哨')\n query.edit_message_animation(gifYEW, caption=\n '幸亏有人放哨,你们及时发现了海盗,把海盗打跑了以后你们取出了埋在沙子里的宝箱!现在要在岛上过夜还是回船?',\n reply_markup=kb13)\n elif query.data == 'treasure:now':\n query.answer('不要放哨')\n query.edit_message_animation(gifNOW, caption='逃跑的海盗冲出来把所有人都绑架了!')\n elif query.data == 'treasure:island':\n query.answer('岛上过夜')\n query.edit_message_animation(gifISLAND, caption=\n '你和你的船员们一起在岛上的树林里露宿去了,结果被岛上的野人发现并杀死了!')\n elif query.data == 'treasure:ship':\n query.answer('回船过夜')\n query.edit_message_animation(gifSHIP, caption=\n '你和你的船员们把财宝带上了船并回到了港口!你成功了!现在你是独一无二的优秀船长!太厉害啦!!!这 15000XP 50AP 200GP 是从宝箱里取出来的奖励!好好使用它吧!寻宝游戏圆满结束啦!使用 /start 来看看玩点别的什么吧!'\n )\n\n\ndef treasure(update, context):\n msg1 = \"\"\"\n 欢迎来到寻宝游戏!这是一场惊悚又危险追逐战,智慧,运气和勇气都是成功的关键!记得要避开海盗!读一读规则吧!\n 1. 系统会自动给你分配即将要发生的事,做好心理准备!\n 2. 好的外表不一定有好的结果...\n 3. 点击按钮开始寻宝!\n -----------------------\n 请开始你的航海之旅吧!祝你玩的开心! \n \"\"\"\n update.message.reply_text(msg1, reply_markup=kb2)\n\n\ndef add_handler(dp: Dispatcher):\n treasure_handler = CommandHandler('treasure', treasure)\n dp.add_handler(treasure_handler)\n dp.add_handler(CallbackQueryHandler(buttonCallback, pattern=\n '^treasure:[A-Za-z0-9_]*'))\n",
"step-4": "from telegram.ext import Dispatcher, CommandHandler, CallbackQueryHandler\nimport random\nfrom telegram import InlineKeyboardMarkup, InlineKeyboardButton\ngifGOODBOAT = 'https://media3.giphy.com/media/3oz8xRQiRlaS1XwnPW/giphy.gif'\ngifBADBOAT = 'https://media1.giphy.com/media/l2Je3n9VXC8z3baTe/giphy.gif'\ngifGOODMAN = 'https://media4.giphy.com/media/dsKALVnvGKgn4bLu2a/giphy.gif'\ngifBADMAN = 'https://media4.giphy.com/media/e37RbTLYjfc1q/giphy.gif'\ngifFAST = (\n 'https://media.tenor.com/images/e3cef91b522243efb296f3f5a9b750a6/tenor.gif'\n )\ngifSLOW = 'https://media4.giphy.com/media/5qVezULI35guQ/200.gif'\ngifYW = 'https://i.imgur.com/L32gUzm.gif'\ngifNW = 'https://media1.giphy.com/media/JRbW288WAWvECVyYwx/giphy.gif'\ngifFIGHT = (\n 'https://www.spiritshunters.com/wp-content/uploads/2018/09/KRAKEN_v2.gif')\ngifRUN = 'https://media3.giphy.com/media/IdTBkruzkGeq1HEnlP/source.gif'\ngifDRINK = (\n 'https://media4.giphy.com/media/5zjdD5R7crDK37nqTX/giphy-downsized-medium.gif'\n )\ngifDANCE = 'https://media2.giphy.com/media/cm6bHfo16WBokQuFqa/giphy.gif'\ngifEARLY = (\n 'https://i.pinimg.com/originals/c6/fb/94/c6fb94ee41fb968e27ad009047f0a4cb.gif'\n )\ngifLATE = 'https://media3.giphy.com/media/U2S7MdmsC0O5WDReuE/giphy.gif'\ngifFACE = 'https://i.gifer.com/9pd0.gif'\ngifAWAY = 'https://media0.giphy.com/media/l0Hedc94pmNdUA2Ji/source.gif'\ngifCHECK = 'https://media4.giphy.com/media/3owzW5c1tPq63MPmWk/giphy.gif'\ngifHAPPY = 'https://media0.giphy.com/media/3o6Mbl0kpk4i9G2GCQ/source.gif'\ngifYEW = 'https://media3.giphy.com/media/3o6Ztn4vuACOmS0Mla/source.gif'\ngifNOW = 'https://media1.giphy.com/media/4EF5LwiRfpBKQUOsoF/giphy.gif'\ngifISLAND = (\n 'https://thumbs.gfycat.com/SmallIncredibleAttwatersprairiechicken-small.gif'\n )\ngifSHIP = 'https://media0.giphy.com/media/3og0ILtBPzSsVnrTry/source.gif'\nstarttreasureButton = InlineKeyboardButton('开始寻宝吧!', callback_data=\n 'treasure:treasure')\ngoodboatButton = InlineKeyboardButton('好船', callback_data='treasure:goodboat')\nbadboatButton = InlineKeyboardButton('坏船', callback_data='treasure:badboat')\ngoodmanButton = InlineKeyboardButton('好水手', callback_data='treasure:goodman')\nbadmanButton = InlineKeyboardButton('坏水手', callback_data='treasure:badman')\nfastButton = InlineKeyboardButton('快一点', callback_data='treasure:fast')\nslowButton = InlineKeyboardButton('慢一点', callback_data='treasure:slow')\nywButton = InlineKeyboardButton('去散步', callback_data='treasure:yw')\nnwButton = InlineKeyboardButton('不去散步', callback_data='treasure:nw')\nfightButton = InlineKeyboardButton('战斗', callback_data='treasure:fight')\nrunButton = InlineKeyboardButton('逃跑', callback_data='treasure:run')\ndrinkButton = InlineKeyboardButton('喝酒', callback_data='treasure:drink')\ndanceButton = InlineKeyboardButton('跳舞', callback_data='treasure:dance')\nearlyButton = InlineKeyboardButton('早起', callback_data='treasure:early')\nlateButton = InlineKeyboardButton('晚起', callback_data='treasure:late')\nawayButton = InlineKeyboardButton('绕开', callback_data='treasure:away')\nfaceButton = InlineKeyboardButton('迎战', callback_data='treasure:face')\nhappyButton = InlineKeyboardButton('狂欢', callback_data='treasure:happy')\ncheckButton = InlineKeyboardButton('查看方位', callback_data='treasure:check')\nyewButton = InlineKeyboardButton('放哨', callback_data='treasure:yew')\nnowButton = InlineKeyboardButton('不要放哨', callback_data='treasure:now')\nislandButton = InlineKeyboardButton('岛上过夜', callback_data='treasure:island')\nshipButton = InlineKeyboardButton('回船过夜', callback_data='treasure:ship')\nkb2 = 
InlineKeyboardMarkup([[starttreasureButton]])\nkb3 = InlineKeyboardMarkup([[goodboatButton], [badboatButton]])\nkb4 = InlineKeyboardMarkup([[goodmanButton], [badmanButton]])\nkb5 = InlineKeyboardMarkup([[fastButton], [slowButton]])\nkb6 = InlineKeyboardMarkup([[ywButton], [nwButton]])\nkb7 = InlineKeyboardMarkup([[fightButton], [runButton]])\nkb8 = InlineKeyboardMarkup([[drinkButton], [danceButton]])\nkb9 = InlineKeyboardMarkup([[earlyButton], [lateButton]])\nkb10 = InlineKeyboardMarkup([[awayButton], [faceButton]])\nkb11 = InlineKeyboardMarkup([[happyButton], [checkButton]])\nkb12 = InlineKeyboardMarkup([[yewButton], [nowButton]])\nkb13 = InlineKeyboardMarkup([[islandButton], [shipButton]])\n\n\ndef buttonCallback(update, context):\n query = update.callback_query\n if query.data == 'treasure:treasure':\n query.answer('开始寻宝')\n query.edit_message_text(\n \"\"\"\n 在一个风平浪静的早晨,你突然萌生了一个想法,出海寻宝!首先,你需要一搜船.选哪艘船好呢?\n \"\"\"\n , reply_markup=kb3)\n elif query.data == 'treasure:goodboat':\n query.answer('好船')\n query.edit_message_animation(gifGOODBOAT, caption=\n '船已经准备好了!现在选好水手还是坏水手?', reply_markup=kb4)\n elif query.data == 'treasure:badboat':\n query.answer('破船')\n query.edit_message_animation(gifBADBOAT, caption=\n '为了省钱,你的船还是太破了。。。你眼睁睁的看着你的积蓄沉入海底,还是回家撸猫吧。。')\n elif query.data == 'treasure:goodman':\n query.answer('好水手')\n query.edit_message_animation(gifGOODMAN, caption=\n '大家和乐融融就像是一家人,接下来选择一个速度吧!', reply_markup=kb5)\n elif query.data == 'treasure:badman':\n query.answer('坏水手')\n query.edit_message_animation(gifBADMAN, caption=\n '你的船员发生了暴动,他们把你扔下了海并开船逃走了。')\n elif query.data == 'treasure:fast':\n query.answer('快一点')\n query.edit_message_animation(gifFAST, caption='你的船开的太快了!彻底散架了!')\n elif query.data == 'treasure:slow':\n query.answer('慢一点')\n query.edit_message_animation(gifSLOW, caption=\n '船员都好兴奋!你想带你都船员们去甲板上散步吗?', reply_markup=kb6)\n elif query.data == 'treasure:yw':\n query.answer('去散步')\n query.edit_message_animation(gifYW, caption=\n '海风吹过面颊,兴奋的船员们安静了下来。哦不!你们遇到了巨乌贼海怪克拉肯!战斗还是逃跑?', reply_markup=kb7)\n elif query.data == 'treasure:nw':\n query.answer('不去散步')\n query.edit_message_animation(gifNW, caption=\n '你的船员都呆在船舱里喝酒而没有看到海怪克拉肯,所有人都被吃掉了!')\n elif query.data == 'treasure:fight':\n query.answer('战斗')\n query.edit_message_animation(gifFIGHT, caption=\n '不愧是久经沙场的老水手,大家齐心协力的把海怪打跑了!接下来做什么呢?', reply_markup=kb8)\n elif query.data == 'treasure:run':\n query.answer('逃跑')\n query.edit_message_animation(gifRUN, caption=\n '只见海怪用力一劈,整艘船断成了两截,所有人直接归西...')\n elif query.data == 'treasure:drink':\n query.answer('喝酒')\n query.edit_message_animation(gifDRINK, caption=\n '所有人喝的烂醉如泥。明天是早起还是晚起呢?', reply_markup=kb9)\n elif query.data == 'treasure:dance':\n query.answer('跳舞')\n query.edit_message_animation(gifDANCE, caption=\n '大家载歌载舞,别提多开心了!明天是早起还是晚起呢?', reply_markup=kb9)\n elif query.data == 'treasure:early':\n query.asnwer('早起')\n query.edit_message_animation(gifEARLY, caption=\n '船员们因为休息太少而无精打采,懒洋洋的。仔细一看,远处有个小黑点,哦不!那是海盗船!幸好他们还没发现你们!现在要绕开还是战斗?',\n reply_markup=kb10)\n elif query.data == 'treasure:late':\n query.answer('晚起')\n query.edit_message_animation(gifLATE, caption=\n '哦不!你们睡觉的时候海盗偷偷登船并杀死了所有人并带着所有的东西溜之大吉了!')\n elif query.data == 'treasure:away':\n query.answer('绕开')\n query.edit_message_animation(gifAWAY, caption='海盗才不吃你这一套,用大炮把你的船打沉了!')\n elif query.data == 'treasure:face':\n query.answer('迎战')\n query.edit_message_animation(gifFACE, caption=\n '战斗的号角已吹响,你在最前面猛砍猛杀,船员们气势高涨!海盗们还是逃走了几个,不过你也没多想!你犯了一个致命的错误...接下来怎么办?'\n , reply_markup=kb11)\n elif query.data == 'treasuer:happy':\n query.answer('狂欢')\n 
query.edit_message_animation(gifHAPPY, caption=\n '所有人尽情狂欢,船漂到了百慕大三角,所有人葬身海底')\n elif query.data == 'treasure:check':\n query.answer('查看方位')\n query.edit_message_animation(gifCHECK, capyion=\n '你们发现自己离小岛不远了!需要有人放哨吗?', reply_markup=kb12)\n elif query.data == 'treasure:yew':\n query.answer('放哨')\n query.edit_message_animation(gifYEW, caption=\n '幸亏有人放哨,你们及时发现了海盗,把海盗打跑了以后你们取出了埋在沙子里的宝箱!现在要在岛上过夜还是回船?',\n reply_markup=kb13)\n elif query.data == 'treasure:now':\n query.answer('不要放哨')\n query.edit_message_animation(gifNOW, caption='逃跑的海盗冲出来把所有人都绑架了!')\n elif query.data == 'treasure:island':\n query.answer('岛上过夜')\n query.edit_message_animation(gifISLAND, caption=\n '你和你的船员们一起在岛上的树林里露宿去了,结果被岛上的野人发现并杀死了!')\n elif query.data == 'treasure:ship':\n query.answer('回船过夜')\n query.edit_message_animation(gifSHIP, caption=\n '你和你的船员们把财宝带上了船并回到了港口!你成功了!现在你是独一无二的优秀船长!太厉害啦!!!这 15000XP 50AP 200GP 是从宝箱里取出来的奖励!好好使用它吧!寻宝游戏圆满结束啦!使用 /start 来看看玩点别的什么吧!'\n )\n\n\ndef treasure(update, context):\n msg1 = \"\"\"\n 欢迎来到寻宝游戏!这是一场惊悚又危险追逐战,智慧,运气和勇气都是成功的关键!记得要避开海盗!读一读规则吧!\n 1. 系统会自动给你分配即将要发生的事,做好心理准备!\n 2. 好的外表不一定有好的结果...\n 3. 点击按钮开始寻宝!\n -----------------------\n 请开始你的航海之旅吧!祝你玩的开心! \n \"\"\"\n update.message.reply_text(msg1, reply_markup=kb2)\n\n\ndef add_handler(dp: Dispatcher):\n treasure_handler = CommandHandler('treasure', treasure)\n dp.add_handler(treasure_handler)\n dp.add_handler(CallbackQueryHandler(buttonCallback, pattern=\n '^treasure:[A-Za-z0-9_]*'))\n",
"step-5": "from telegram.ext import Dispatcher,CommandHandler,CallbackQueryHandler\nimport random\nfrom telegram import InlineKeyboardMarkup, InlineKeyboardButton\n\ngifGOODBOAT = 'https://media3.giphy.com/media/3oz8xRQiRlaS1XwnPW/giphy.gif'\ngifBADBOAT = 'https://media1.giphy.com/media/l2Je3n9VXC8z3baTe/giphy.gif'\ngifGOODMAN = 'https://media4.giphy.com/media/dsKALVnvGKgn4bLu2a/giphy.gif'\ngifBADMAN = 'https://media4.giphy.com/media/e37RbTLYjfc1q/giphy.gif'\ngifFAST = 'https://media.tenor.com/images/e3cef91b522243efb296f3f5a9b750a6/tenor.gif'\ngifSLOW = 'https://media4.giphy.com/media/5qVezULI35guQ/200.gif'\ngifYW = 'https://i.imgur.com/L32gUzm.gif'\ngifNW = 'https://media1.giphy.com/media/JRbW288WAWvECVyYwx/giphy.gif'\ngifFIGHT = 'https://www.spiritshunters.com/wp-content/uploads/2018/09/KRAKEN_v2.gif'\ngifRUN = 'https://media3.giphy.com/media/IdTBkruzkGeq1HEnlP/source.gif'\ngifDRINK = 'https://media4.giphy.com/media/5zjdD5R7crDK37nqTX/giphy-downsized-medium.gif'\ngifDANCE = 'https://media2.giphy.com/media/cm6bHfo16WBokQuFqa/giphy.gif'\ngifEARLY = 'https://i.pinimg.com/originals/c6/fb/94/c6fb94ee41fb968e27ad009047f0a4cb.gif'\ngifLATE = 'https://media3.giphy.com/media/U2S7MdmsC0O5WDReuE/giphy.gif'\ngifFACE = 'https://i.gifer.com/9pd0.gif'\ngifAWAY = 'https://media0.giphy.com/media/l0Hedc94pmNdUA2Ji/source.gif'\ngifCHECK = 'https://media4.giphy.com/media/3owzW5c1tPq63MPmWk/giphy.gif'\ngifHAPPY = 'https://media0.giphy.com/media/3o6Mbl0kpk4i9G2GCQ/source.gif'\ngifYEW = 'https://media3.giphy.com/media/3o6Ztn4vuACOmS0Mla/source.gif'\ngifNOW = 'https://media1.giphy.com/media/4EF5LwiRfpBKQUOsoF/giphy.gif'\ngifISLAND = 'https://thumbs.gfycat.com/SmallIncredibleAttwatersprairiechicken-small.gif'\ngifSHIP = 'https://media0.giphy.com/media/3og0ILtBPzSsVnrTry/source.gif'\n\n\nstarttreasureButton = InlineKeyboardButton('开始寻宝吧!', callback_data='treasure:treasure')\ngoodboatButton = InlineKeyboardButton('好船', callback_data='treasure:goodboat')\nbadboatButton = InlineKeyboardButton('坏船', callback_data='treasure:badboat')\ngoodmanButton = InlineKeyboardButton('好水手', callback_data='treasure:goodman')\nbadmanButton = InlineKeyboardButton('坏水手', callback_data='treasure:badman')\nfastButton = InlineKeyboardButton('快一点', callback_data='treasure:fast')\nslowButton = InlineKeyboardButton('慢一点', callback_data='treasure:slow')\nywButton = InlineKeyboardButton('去散步', callback_data='treasure:yw')\nnwButton = InlineKeyboardButton('不去散步', callback_data='treasure:nw')\nfightButton = InlineKeyboardButton('战斗', callback_data='treasure:fight')\nrunButton = InlineKeyboardButton('逃跑', callback_data='treasure:run')\ndrinkButton = InlineKeyboardButton('喝酒',callback_data='treasure:drink')\ndanceButton = InlineKeyboardButton('跳舞',callback_data='treasure:dance')\nearlyButton = InlineKeyboardButton('早起',callback_data='treasure:early')\nlateButton = InlineKeyboardButton('晚起',callback_data='treasure:late')\nawayButton = InlineKeyboardButton('绕开',callback_data='treasure:away')\nfaceButton = InlineKeyboardButton('迎战',callback_data='treasure:face')\nhappyButton = InlineKeyboardButton('狂欢',callback_data='treasure:happy')\ncheckButton = InlineKeyboardButton('查看方位',callback_data='treasure:check')\nyewButton = InlineKeyboardButton('放哨',callback_data='treasure:yew')\nnowButton = InlineKeyboardButton('不要放哨',callback_data='treasure:now')\nislandButton = InlineKeyboardButton('岛上过夜',callback_data='treasure:island')\nshipButton = InlineKeyboardButton('回船过夜',callback_data='treasure:ship')\n\n\nkb2 = 
InlineKeyboardMarkup([[starttreasureButton]])\nkb3 = InlineKeyboardMarkup([[goodboatButton], [badboatButton]])\nkb4 = InlineKeyboardMarkup([[goodmanButton], [badmanButton]])\nkb5 = InlineKeyboardMarkup([[fastButton], [slowButton]])\nkb6 = InlineKeyboardMarkup([[ywButton],[nwButton]])\nkb7 = InlineKeyboardMarkup([[fightButton],[runButton]])\nkb8 = InlineKeyboardMarkup([[drinkButton],[danceButton]])\nkb9 = InlineKeyboardMarkup([[earlyButton],[lateButton]])\nkb10 = InlineKeyboardMarkup([[awayButton],[faceButton]])\nkb11 = InlineKeyboardMarkup([[happyButton],[checkButton]])\nkb12 = InlineKeyboardMarkup([[yewButton],[nowButton]])\nkb13 = InlineKeyboardMarkup([[islandButton],[shipButton]])\n\n\ndef buttonCallback(update,context):\n query = update.callback_query\n if query.data == 'treasure:treasure':\n query.answer(\"开始寻宝\")\n query.edit_message_text(\"\"\"\n 在一个风平浪静的早晨,你突然萌生了一个想法,出海寻宝!首先,你需要一搜船.选哪艘船好呢?\n \"\"\", reply_markup=kb3)\n elif query.data == 'treasure:goodboat':\n query.answer(\"好船\")\n query.edit_message_animation(gifGOODBOAT,caption='船已经准备好了!现在选好水手还是坏水手?',reply_markup=kb4)\n elif query.data == 'treasure:badboat':\n query.answer(\"破船\")\n query.edit_message_animation(gifBADBOAT,caption='为了省钱,你的船还是太破了。。。你眼睁睁的看着你的积蓄沉入海底,还是回家撸猫吧。。')\n elif query.data == 'treasure:goodman':\n query.answer(\"好水手\")\n query.edit_message_animation(gifGOODMAN,caption='大家和乐融融就像是一家人,接下来选择一个速度吧!',reply_markup=kb5)\n elif query.data == 'treasure:badman':\n query.answer(\"坏水手\")\n query.edit_message_animation(gifBADMAN,caption='你的船员发生了暴动,他们把你扔下了海并开船逃走了。')\n elif query.data == 'treasure:fast':\n query.answer(\"快一点\")\n query.edit_message_animation(gifFAST,caption='你的船开的太快了!彻底散架了!')\n elif query.data == 'treasure:slow':\n query.answer(\"慢一点\")\n query.edit_message_animation(gifSLOW,caption='船员都好兴奋!你想带你都船员们去甲板上散步吗?',reply_markup=kb6)\n elif query.data == 'treasure:yw':\n query.answer(\"去散步\")\n query.edit_message_animation(gifYW,caption='海风吹过面颊,兴奋的船员们安静了下来。哦不!你们遇到了巨乌贼海怪克拉肯!战斗还是逃跑?',reply_markup=kb7)\n elif query.data == 'treasure:nw':\n query.answer(\"不去散步\")\n query.edit_message_animation(gifNW,caption='你的船员都呆在船舱里喝酒而没有看到海怪克拉肯,所有人都被吃掉了!')\n elif query.data == 'treasure:fight':\n query.answer(\"战斗\")\n query.edit_message_animation(gifFIGHT,caption='不愧是久经沙场的老水手,大家齐心协力的把海怪打跑了!接下来做什么呢?',reply_markup=kb8)\n elif query.data == \"treasure:run\":\n query.answer(\"逃跑\")\n query.edit_message_animation(gifRUN,caption='只见海怪用力一劈,整艘船断成了两截,所有人直接归西...')\n elif query.data == 'treasure:drink':\n query.answer(\"喝酒\")\n query.edit_message_animation(gifDRINK,caption='所有人喝的烂醉如泥。明天是早起还是晚起呢?',reply_markup=kb9)\n elif query.data == 'treasure:dance':\n query.answer(\"跳舞\")\n query.edit_message_animation(gifDANCE,caption='大家载歌载舞,别提多开心了!明天是早起还是晚起呢?',reply_markup=kb9)\n elif query.data == 'treasure:early':\n query.asnwer(\"早起\")\n query.edit_message_animation(gifEARLY,caption='船员们因为休息太少而无精打采,懒洋洋的。仔细一看,远处有个小黑点,哦不!那是海盗船!幸好他们还没发现你们!现在要绕开还是战斗?',reply_markup=kb10)\n elif query.data == 'treasure:late':\n query.answer(\"晚起\")\n query.edit_message_animation(gifLATE,caption='哦不!你们睡觉的时候海盗偷偷登船并杀死了所有人并带着所有的东西溜之大吉了!')\n elif query.data == 'treasure:away':\n query.answer(\"绕开\")\n query.edit_message_animation(gifAWAY,caption='海盗才不吃你这一套,用大炮把你的船打沉了!')\n elif query.data == 'treasure:face':\n query.answer(\"迎战\")\n query.edit_message_animation(gifFACE,caption='战斗的号角已吹响,你在最前面猛砍猛杀,船员们气势高涨!海盗们还是逃走了几个,不过你也没多想!你犯了一个致命的错误...接下来怎么办?',reply_markup=kb11)\n elif query.data == 'treasuer:happy':\n query.answer(\"狂欢\")\n 
query.edit_message_animation(gifHAPPY,caption='所有人尽情狂欢,船漂到了百慕大三角,所有人葬身海底')\n elif query.data == 'treasure:check':\n query.answer(\"查看方位\")\n query.edit_message_animation(gifCHECK,capyion='你们发现自己离小岛不远了!需要有人放哨吗?',reply_markup=kb12)\n elif query.data == 'treasure:yew':\n query.answer(\"放哨\")\n query.edit_message_animation(gifYEW,caption='幸亏有人放哨,你们及时发现了海盗,把海盗打跑了以后你们取出了埋在沙子里的宝箱!现在要在岛上过夜还是回船?',reply_markup=kb13)\n elif query.data == 'treasure:now':\n query.answer(\"不要放哨\")\n query.edit_message_animation(gifNOW,caption='逃跑的海盗冲出来把所有人都绑架了!')\n elif query.data == 'treasure:island':\n query.answer(\"岛上过夜\")\n query.edit_message_animation(gifISLAND,caption='你和你的船员们一起在岛上的树林里露宿去了,结果被岛上的野人发现并杀死了!')\n elif query.data == 'treasure:ship':\n query.answer(\"回船过夜\")\n query.edit_message_animation(gifSHIP,caption='你和你的船员们把财宝带上了船并回到了港口!你成功了!现在你是独一无二的优秀船长!太厉害啦!!!这 15000XP 50AP 200GP 是从宝箱里取出来的奖励!好好使用它吧!寻宝游戏圆满结束啦!使用 /start 来看看玩点别的什么吧!')\n\n\ndef treasure(update, context):\n msg1 = \"\"\"\n 欢迎来到寻宝游戏!这是一场惊悚又危险追逐战,智慧,运气和勇气都是成功的关键!记得要避开海盗!读一读规则吧!\n 1. 系统会自动给你分配即将要发生的事,做好心理准备!\n 2. 好的外表不一定有好的结果...\n 3. 点击按钮开始寻宝!\n -----------------------\n 请开始你的航海之旅吧!祝你玩的开心! \n \"\"\"\n update.message.reply_text(msg1, reply_markup=kb2)\n\ndef add_handler(dp:Dispatcher):\n treasure_handler = CommandHandler('treasure', treasure)\n dp.add_handler(treasure_handler)\n dp.add_handler(CallbackQueryHandler(buttonCallback,pattern=\"^treasure:[A-Za-z0-9_]*\"))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import unittest
import math
from python.src.sort.insertion import Insertion
from python.src.sort.selection import Selection
from python.src.sort.shell import Shell
from python.test.util.utilities import Utilities
class ElementarySortTest(unittest.TestCase):
def setUp(self):
self.n = 1000
def test_insertion_sort(self):
insertion = Insertion()
actual = Utilities.generate_random_array(self.n)
expected = list(actual)
actual.sort()
insertion.sort(expected)
self.assertEqual(expected, actual)
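        # (self.n ** 2 - self.n) / 2 is insertion sort's worst case (a fully
        # reversed input), so a random array should land strictly below it.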
self.assertLess(insertion.compares, (self.n ** 2 - self.n) / 2)
self.assertLess(insertion.swaps, (self.n ** 2 - self.n) / 2)
def test_insertion_sort_sub_array(self):
insertion = Insertion()
input = Utilities.generate_random_array(self.n)
low = math.floor(0.1 * self.n)
high = math.floor(0.9 * self.n)
insertion.sort(input, low, high)
self.assertTrue(Utilities.is_sorted(input, low, high))
self.assertFalse(Utilities.is_sorted(input, 0, len(input)))
def test_selection_sort(self):
selection = Selection()
actual = Utilities.generate_random_array(self.n)
expected = list(actual)
actual.sort()
selection.sort(expected)
self.assertEqual(expected, actual)
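        # Selection sort always performs exactly n * (n - 1) / 2 compares,
        # which is 499500 for n = 1000, regardless of input order.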
self.assertEqual(499500, selection.compares)
self.assertGreaterEqual(selection.swaps, 999)
self.assertLessEqual(selection.swaps, 1000)
def test_shell_sort(self):
shell = Shell()
actual = Utilities.generate_random_array(self.n)
expected = list(actual)
actual.sort()
shell.sort(expected)
self.assertEqual(expected, actual)
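        # Note the argument order: assertLess(a, b) checks a < b, so the two
        # assertions below are lower bounds on the instrumented counts.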
self.assertLess(13000, shell.compares)
self.assertLess(8000, shell.swaps)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "779ef8942bfb55bf017a8da9dfe34c03ac574a9a",
"index": 2591,
"step-1": "<mask token>\n\n\nclass ElementarySortTest(unittest.TestCase):\n <mask token>\n\n def test_insertion_sort(self):\n insertion = Insertion()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n insertion.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(insertion.compares, (self.n ** 2 - self.n) / 2)\n self.assertLess(insertion.swaps, (self.n ** 2 - self.n) / 2)\n\n def test_insertion_sort_sub_array(self):\n insertion = Insertion()\n input = Utilities.generate_random_array(self.n)\n low = math.floor(0.1 * self.n)\n high = math.floor(0.9 * self.n)\n insertion.sort(input, low, high)\n self.assertTrue(Utilities.is_sorted(input, low, high))\n self.assertFalse(Utilities.is_sorted(input, 0, len(input)))\n\n def test_selection_sort(self):\n selection = Selection()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n selection.sort(expected)\n self.assertEqual(expected, actual)\n self.assertEqual(499500, selection.compares)\n self.assertGreaterEqual(selection.swaps, 999)\n self.assertLessEqual(selection.swaps, 1000)\n\n def test_shell_sort(self):\n shell = Shell()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n shell.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(13000, shell.compares)\n self.assertLess(8000, shell.swaps)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ElementarySortTest(unittest.TestCase):\n\n def setUp(self):\n self.n = 1000\n\n def test_insertion_sort(self):\n insertion = Insertion()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n insertion.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(insertion.compares, (self.n ** 2 - self.n) / 2)\n self.assertLess(insertion.swaps, (self.n ** 2 - self.n) / 2)\n\n def test_insertion_sort_sub_array(self):\n insertion = Insertion()\n input = Utilities.generate_random_array(self.n)\n low = math.floor(0.1 * self.n)\n high = math.floor(0.9 * self.n)\n insertion.sort(input, low, high)\n self.assertTrue(Utilities.is_sorted(input, low, high))\n self.assertFalse(Utilities.is_sorted(input, 0, len(input)))\n\n def test_selection_sort(self):\n selection = Selection()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n selection.sort(expected)\n self.assertEqual(expected, actual)\n self.assertEqual(499500, selection.compares)\n self.assertGreaterEqual(selection.swaps, 999)\n self.assertLessEqual(selection.swaps, 1000)\n\n def test_shell_sort(self):\n shell = Shell()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n shell.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(13000, shell.compares)\n self.assertLess(8000, shell.swaps)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ElementarySortTest(unittest.TestCase):\n\n def setUp(self):\n self.n = 1000\n\n def test_insertion_sort(self):\n insertion = Insertion()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n insertion.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(insertion.compares, (self.n ** 2 - self.n) / 2)\n self.assertLess(insertion.swaps, (self.n ** 2 - self.n) / 2)\n\n def test_insertion_sort_sub_array(self):\n insertion = Insertion()\n input = Utilities.generate_random_array(self.n)\n low = math.floor(0.1 * self.n)\n high = math.floor(0.9 * self.n)\n insertion.sort(input, low, high)\n self.assertTrue(Utilities.is_sorted(input, low, high))\n self.assertFalse(Utilities.is_sorted(input, 0, len(input)))\n\n def test_selection_sort(self):\n selection = Selection()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n selection.sort(expected)\n self.assertEqual(expected, actual)\n self.assertEqual(499500, selection.compares)\n self.assertGreaterEqual(selection.swaps, 999)\n self.assertLessEqual(selection.swaps, 1000)\n\n def test_shell_sort(self):\n shell = Shell()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n shell.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(13000, shell.compares)\n self.assertLess(8000, shell.swaps)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport math\nfrom python.src.sort.insertion import Insertion\nfrom python.src.sort.selection import Selection\nfrom python.src.sort.shell import Shell\nfrom python.test.util.utilities import Utilities\n\n\nclass ElementarySortTest(unittest.TestCase):\n\n def setUp(self):\n self.n = 1000\n\n def test_insertion_sort(self):\n insertion = Insertion()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n insertion.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(insertion.compares, (self.n ** 2 - self.n) / 2)\n self.assertLess(insertion.swaps, (self.n ** 2 - self.n) / 2)\n\n def test_insertion_sort_sub_array(self):\n insertion = Insertion()\n input = Utilities.generate_random_array(self.n)\n low = math.floor(0.1 * self.n)\n high = math.floor(0.9 * self.n)\n insertion.sort(input, low, high)\n self.assertTrue(Utilities.is_sorted(input, low, high))\n self.assertFalse(Utilities.is_sorted(input, 0, len(input)))\n\n def test_selection_sort(self):\n selection = Selection()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n selection.sort(expected)\n self.assertEqual(expected, actual)\n self.assertEqual(499500, selection.compares)\n self.assertGreaterEqual(selection.swaps, 999)\n self.assertLessEqual(selection.swaps, 1000)\n\n def test_shell_sort(self):\n shell = Shell()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n shell.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(13000, shell.compares)\n self.assertLess(8000, shell.swaps)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
5,
6,
7,
8
]
}
|
[
5,
6,
7,
8
] |
import unittest
from datetime import datetime
from models import *
class Test_PlaceModel(unittest.TestCase):
"""
Test the place model class
"""
def setUp(self):
self.model = Place()
self.model.save()
def test_var_initialization(self):
self.assertTrue(hasattr(self.model, "city_id"))
self.assertTrue(hasattr(self.model, "user_id"))
self.assertTrue(hasattr(self.model, "name"))
self.assertTrue(hasattr(self.model, "description"))
self.assertTrue(hasattr(self.model, "number_rooms"))
self.assertTrue(hasattr(self.model, "number_bathrooms"))
self.assertTrue(hasattr(self.model, "max_guest"))
self.assertTrue(hasattr(self.model, "price_by_night"))
self.assertTrue(hasattr(self.model, "latitude"))
self.assertTrue(hasattr(self.model, "longitude"))
self.assertTrue(hasattr(self.model, "amenities"))
self.assertEqual(self.model.city_id, "")
self.assertEqual(self.model.user_id, "")
self.assertEqual(self.model.name, "")
self.assertEqual(self.model.description, "")
self.assertEqual(self.model.number_rooms, 0)
self.assertEqual(self.model.number_bathrooms, 0)
self.assertEqual(self.model.max_guest, 0)
self.assertEqual(self.model.price_by_night, 0)
self.assertEqual(self.model.latitude, 0.0)
self.assertEqual(self.model.longitude, 0.0)
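        # The expected default is [''] (a one-element list holding an empty
        # string), not an empty list [].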
self.assertEqual(self.model.amenities, [''])
if __name__ == "__main__":
unittest.main()
|
normal
|
{
"blob_id": "c7881c0d06600a43bdc01f5e464127c596db6713",
"index": 7993,
"step-1": "<mask token>\n\n\nclass Test_PlaceModel(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom datetime import datetime\nfrom models import *\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nfrom datetime import datetime\nfrom models import *\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, \"city_id\"))\n self.assertTrue(hasattr(self.model, \"user_id\"))\n self.assertTrue(hasattr(self.model, \"name\"))\n self.assertTrue(hasattr(self.model, \"description\"))\n self.assertTrue(hasattr(self.model, \"number_rooms\"))\n self.assertTrue(hasattr(self.model, \"number_bathrooms\"))\n self.assertTrue(hasattr(self.model, \"max_guest\"))\n self.assertTrue(hasattr(self.model, \"price_by_night\"))\n self.assertTrue(hasattr(self.model, \"latitude\"))\n self.assertTrue(hasattr(self.model, \"longitude\"))\n self.assertTrue(hasattr(self.model, \"amenities\"))\n self.assertEqual(self.model.city_id, \"\")\n self.assertEqual(self.model.user_id, \"\")\n self.assertEqual(self.model.name, \"\")\n self.assertEqual(self.model.description, \"\")\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import requests
import json
r = requests.get('http://pythonspot.com/')
jsondata = str(r.headers).replace("'", '"')
print(jsondata)
#headerObj = json.loads(jsondata)
#ERROR >> json.decoder.JSONDecodeError: Expecting ',' delimiter: line 1 column 556 (char 555)
#print(headerObj)["server"]
#print(headerObj)['content-length']
#print(headerObj)['content-encoding']
#print(headerObj)['content-type']
#print(headerObj)['date']
#print(headerObj)['x-powered-by']
## I could not solve the problem.
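# --- Hedged fix sketch (an addition, not part of the original snippet) ---
# str(r.headers) is a Python repr, not JSON, so swapping quote characters
# breaks as soon as a header value itself contains a quote -- hence the
# JSONDecodeError above. Serializing the headers mapping directly avoids
# hand-rolled quoting. Also note print(headerObj)["server"] subscripts the
# return value of print() (None); the subscript belongs inside the call.
import json
import requests
r = requests.get('http://pythonspot.com/')
headerObj = json.loads(json.dumps(dict(r.headers)))  # round-trips cleanly
print(headerObj.get('Content-Type'))  # keys keep the server's casing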
|
normal
|
{
"blob_id": "7404dd324d54bb072e56985716bbae746b4dd219",
"index": 1395,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(jsondata)\n",
"step-3": "<mask token>\nr = requests.get('http://pythonspot.com/')\njsondata = str(r.headers).replace(\"'\", '\"')\nprint(jsondata)\n",
"step-4": "import requests\nimport json\nr = requests.get('http://pythonspot.com/')\njsondata = str(r.headers).replace(\"'\", '\"')\nprint(jsondata)\n",
"step-5": "import requests\nimport json\nr = requests.get('http://pythonspot.com/')\n\njsondata = str(r.headers).replace(\"'\", '\"')\nprint(jsondata)\n#headerObj = json.loads(jsondata)\n#ERROR >> json.decoder.JSONDecodeError: Expecting ',' delimiter: line 1 column 556 (char 555)\n\n#print(headerObj)[\"server\"]\n#print(headerObj)['content-length']\n#print(headerObj)['content-encoding']\n#print(headerObj)['content-type']\n#print(headerObj)['date']\n#print(headerObj)['x-powered-by']\n\n## I could not the problem.",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy
import matplotlib.pyplot as plt
numpy.random.seed(2)
# create datasets
x = numpy.random.normal(3, 1, 100)
y = numpy.random.normal(150, 40, 100) / x
# displaying original dataset
plt.scatter(x, y)
plt.title("Original dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
# train dataset will be 80% of the data
train_x = x[:80]
train_y = y[:80]
# test dataset will be remaining 20% of the data
test_x = x[80:]
test_y = y[80:]
# displaying train dataset
plt.scatter(train_x, train_y)
plt.title("Train dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
# displaying test dataset
plt.scatter(test_x, test_y)
plt.title("Test dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
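# --- Hedged follow-up sketch (an addition; the degree-4 polynomial model
# is an assumption, not part of the original) ---
# The split above is never used; a typical next step is to fit on the
# train set and score on the held-out test set:
model = numpy.poly1d(numpy.polyfit(train_x, train_y, 4))
pred = model(test_x)
ss_res = numpy.sum((test_y - pred) ** 2)
ss_tot = numpy.sum((test_y - numpy.mean(test_y)) ** 2)
print("Test R^2:", 1 - ss_res / ss_tot)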
|
normal
|
{
"blob_id": "9fd985e9675514f6c8f3ac5b91962eb744e0e82c",
"index": 6514,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnumpy.random.seed(2)\n<mask token>\nplt.scatter(x, y)\nplt.title('Original dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n<mask token>\nplt.scatter(train_x, train_y)\nplt.title('Train dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\nplt.scatter(test_x, test_y)\nplt.title('Test dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n",
"step-3": "<mask token>\nnumpy.random.seed(2)\nx = numpy.random.normal(3, 1, 100)\ny = numpy.random.normal(150, 40, 100) / x\nplt.scatter(x, y)\nplt.title('Original dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\ntrain_x = x[:80]\ntrain_y = y[:80]\ntest_x = x[80:]\ntest_y = y[80:]\nplt.scatter(train_x, train_y)\nplt.title('Train dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\nplt.scatter(test_x, test_y)\nplt.title('Test dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n",
"step-4": "import numpy\nimport matplotlib.pyplot as plt\nnumpy.random.seed(2)\nx = numpy.random.normal(3, 1, 100)\ny = numpy.random.normal(150, 40, 100) / x\nplt.scatter(x, y)\nplt.title('Original dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\ntrain_x = x[:80]\ntrain_y = y[:80]\ntest_x = x[80:]\ntest_y = y[80:]\nplt.scatter(train_x, train_y)\nplt.title('Train dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\nplt.scatter(test_x, test_y)\nplt.title('Test dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n",
"step-5": "import numpy\nimport matplotlib.pyplot as plt\n\nnumpy.random.seed(2)\n\n# create datasets\nx = numpy.random.normal(3, 1, 100)\ny = numpy.random.normal(150, 40, 100) / x\n\n# displaying original dataset\nplt.scatter(x, y)\nplt.title(\"Original dataset\")\nplt.xlabel(\"Minutes\")\nplt.ylabel(\"Spent money\")\nplt.show()\n\n# train dataset will be 80% of the data\ntrain_x = x[:80]\ntrain_y = y[:80]\n\n# test dataset will be remaining 20% of the data\ntest_x = x[80:]\ntest_y = y[80:]\n\n# displaying train dataset\nplt.scatter(train_x, train_y)\nplt.title(\"Train dataset\")\nplt.xlabel(\"Minutes\")\nplt.ylabel(\"Spent money\")\nplt.show()\n\n# displaying test dataset\nplt.scatter(test_x, test_y)\nplt.title(\"Test dataset\")\nplt.xlabel(\"Minutes\")\nplt.ylabel(\"Spent money\")\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# view_rows.py - Fetch and display the rows from a MySQL database query
# import the MySQLdb and sys modules
# katja seltmann April 16, 2013 to run on arthropod data in scan symbiota database
import MySQLdb
import sys
#connection information from mysql
#test database
connect = MySQLdb.connect("", user="", passwd="", db="")
cursor = connect.cursor ()
def InsertMysql(family,genus,specificEpithet):
try:
cursor.execute ("""INSERT INTO sums2 (occid,family,genus,specificEpithet,coleventsLat,georeferenced) VALUES(NULL,"%s","%s","%s",NULL,NULL);"""% (family,genus,specificEpithet))
connect.commit()
except:
connect.rollback()
def ExecuteMysql():
cursor.execute ("""select distinct family,genus,specificEpithet from omoccurrences where specificEpithet != 'UNKNOWN_NULL'""")
data = cursor.fetchall()
for x in data:
family = x[0]
genus = x[1]
specificEpithet = x[2]
InsertMysql(family,genus,specificEpithet)
def ColeventsNOLAT():
cursor.execute ("""select occid,concat(family, genus, specificEpithet) from sums2""")
data = cursor.fetchall()
for x in data:
concat_string = x[1]
occid = str(x[0])
cursor.execute ("""select count(distinct locality,county,stateProvince,municipality,year,month,day) as colevent from omoccurrences where decimalLatitude = '0.0000' and concat(family,genus,specificEpithet) =""" + "'" + concat_string + "'")
data = cursor.fetchone()
colevent = data[0]
if data:
try:
cursor.execute ("""update sums2 set coleventsLat = '%s' where occid = '%s';"""% (colevent,occid))
connect.commit()
except:
connect.rollback()
def GeoCoordinated():
cursor.execute ("""select sums2.occid,omoccurrences.nameOMOConcat,count(distinct decimalLatitude,decimalLongitude,year,month,day) from omoccurrences join sums2 on omoccurrences.nameOMOConcat = sums2.nameConcat where decimalLatitude !='0.0000' and georeferenced is NULL group by omoccurrences.nameOMOConcat limit 20""")
data = cursor.fetchall()
		for x in data:
			occid = x[0]
			georeferenced = x[2]
			concat_string = x[1]
			print occid
			print concat_string
			print georeferenced
			if x:
				try:
					cursor.execute ("""update sums2 set georeferenced = '%s' where occid = '%s';"""% (georeferenced,occid))
					connect.commit()
				except:
					connect.rollback()
#ExecuteMysql()
#ColeventsNOLAT()
GeoCoordinated()
connect.close()
# cursor.execute ("""select occid,nameConcat from sums2 where georeferenced is NULL""")
# data = cursor.fetchall()
# for x in data:
# concat_string = x[1]
# print concat_string
# occid = str(x[0])
# cursor.execute ("""select count(distinct decimalLatitude,decimalLongitude,year,month,day) as locality from omoccurrences where decimalLatitude !='0.0000' and concat(family,genus,specificEpithet) =""" + "'" + concat_string + "'")
# data = cursor.fetchone()
		# georeferenced = data[0]
# if data:
# try:
		# 		cursor.execute ("""update sums2 set georeferenced = '%s' where occid = '%s';"""% (georeferenced,occid))
# connect.commit()
# except:
# connect.rollback()
# +-----------------+--------------+------+-----+---------+----------------+
# | Field | Type | Null | Key | Default | Extra |
# +-----------------+--------------+------+-----+---------+----------------+
# | occid | int(10) | NO | PRI | NULL | auto_increment |
# | family | varchar(255) | YES | | NULL | |
# | scientificName | varchar(255) | YES | | NULL | |
# | genus | varchar(255) | YES | | NULL | |
# | specificEpithet | varchar(255) | YES | | NULL | |
# | coleventsLat | int(10) | YES | | NULL | |
# | georeferenced | int(10) | YES | | NULL | |
# +-----------------+--------------+------+-----+---------+----------------+
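# --- Hedged hardening sketch (an addition, not part of the original script) ---
# The string-formatted SQL above is fragile (quoting bugs, SQL injection).
# MySQLdb supports parameterized queries; e.g. InsertMysql could become:
# def InsertMysql(family,genus,specificEpithet):
# 	try:
# 		cursor.execute("INSERT INTO sums2 (occid,family,genus,specificEpithet,coleventsLat,georeferenced) VALUES (NULL, %s, %s, %s, NULL, NULL)", (family, genus, specificEpithet))
# 		connect.commit()
# 	except:
# 		connect.rollback()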
|
normal
|
{
"blob_id": "4d066a189bf5151534e0227e67cdc2eed5cd387c",
"index": 6745,
"step-1": "#!/usr/bin/python\n# view_rows.py - Fetch and display the rows from a MySQL database query\n# import the MySQLdb and sys modules\n# katja seltmann April 16, 2013 to run on arthropod data in scan symbiota database\n\nimport MySQLdb\nimport sys\n\n#connection information from mysql\n\n#test database\nconnect = MySQLdb.connect(\"\", user=\"\", passwd=\"\", db=\"\")\n\ncursor = connect.cursor ()\n\n\ndef InsertMysql(family,genus,specificEpithet):\n\ttry:\n\t\tcursor.execute (\"\"\"INSERT INTO sums2 (occid,family,genus,specificEpithet,coleventsLat,georeferenced) VALUES(NULL,\"%s\",\"%s\",\"%s\",NULL,NULL);\"\"\"% (family,genus,specificEpithet))\n\t\tconnect.commit()\n\texcept:\n\t\tconnect.rollback()\n\ndef ExecuteMysql():\n\tcursor.execute (\"\"\"select distinct family,genus,specificEpithet from omoccurrences where specificEpithet != 'UNKNOWN_NULL'\"\"\")\n\tdata = cursor.fetchall()\n\tfor x in data:\n\t\tfamily = x[0]\n\t\tgenus = x[1]\n\t\tspecificEpithet = x[2]\n\t\tInsertMysql(family,genus,specificEpithet)\n\ndef ColeventsNOLAT():\n\tcursor.execute (\"\"\"select occid,concat(family, genus, specificEpithet) from sums2\"\"\")\n\tdata = cursor.fetchall()\n\tfor x in data:\n\t\tconcat_string = x[1]\n\t\toccid = str(x[0])\n\n\t\tcursor.execute (\"\"\"select count(distinct locality,county,stateProvince,municipality,year,month,day) as colevent from omoccurrences where decimalLatitude = '0.0000' and concat(family,genus,specificEpithet) =\"\"\" + \"'\" + concat_string + \"'\")\n\t\tdata = cursor.fetchone()\n\t\tcolevent = data[0]\n\t\tif data:\n\t\t\ttry:\n\t\t\t\tcursor.execute (\"\"\"update sums2 set coleventsLat = '%s' where occid = '%s';\"\"\"% (colevent,occid))\n\t\t\t\tconnect.commit()\n\t\t\texcept:\n\t\t\t\tconnect.rollback()\n\t\t\ndef GeoCoordinated():\n\t\tcursor.execute (\"\"\"select sums2.occid,omoccurrences.nameOMOConcat,count(distinct decimalLatitude,decimalLongitude,year,month,day) from omoccurrences join sums2 on omoccurrences.nameOMOConcat = sums2.nameConcat where decimalLatitude !='0.0000' and georeferenced is NULL group by omoccurrences.nameOMOConcat limit 20\"\"\")\n\t\tdata = cursor.fetchall()\n\t\tfor x in data:\n\t\t\toccid = x[0]\n\t\t\tgeorefenced = x[2]\n\t\t\tconcat_string = x[1]\n\t\t\tprint occid\n\t\t\tprint concat_string\n\t\t\tprint georefenced\n\t\t\tif x:\n\t\t\t\ttry:\n\t\t\t\t\tcursor.execute (\"\"\"update sums2 set georeferenced = '%s' where occid = '%s';\"\"\"% (georefenced,occid))\n\t\t\t\t\tconnect.commit()\n\t\t\t\texcept:\n\t\t\t\t\tconnect.rollback()\t\n\n\n#ExecuteMysql()\n#ColeventsNOLAT()\nGeoCoordinated()\nconnect.close()\n\n\n\t# cursor.execute (\"\"\"select occid,nameConcat from sums2 where georeferenced is NULL\"\"\")\n\t# data = cursor.fetchall()\n\t# for x in data:\n\t# \tconcat_string = x[1]\n\t# \tprint concat_string\n\t# \toccid = str(x[0])\n\n\t\t# cursor.execute (\"\"\"select count(distinct decimalLatitude,decimalLongitude,year,month,day) as locality from omoccurrences where decimalLatitude !='0.0000' and concat(family,genus,specificEpithet) =\"\"\" + \"'\" + concat_string + \"'\")\n\t\t# data = cursor.fetchone()\n\t\t# georefenced = data[0]\n\t\t# if data:\n\t\t# \ttry:\n\t\t# \t\tcursor.execute (\"\"\"update sums2 set georeferenced = '%s' where occid = '%s';\"\"\"% (georefenced,occid))\n\t\t# \t\tconnect.commit()\n\t\t# \texcept:\n\t\t# \t\tconnect.rollback()\n\t\t\n# +-----------------+--------------+------+-----+---------+----------------+\n# | Field | Type | Null | Key | Default | Extra |\n# 
+-----------------+--------------+------+-----+---------+----------------+\n# | occid | int(10) | NO | PRI | NULL | auto_increment |\n# | family | varchar(255) | YES | | NULL | |\n# | scientificName | varchar(255) | YES | | NULL | |\n# | genus | varchar(255) | YES | | NULL | |\n# | specificEpithet | varchar(255) | YES | | NULL | |\n# | coleventsLat | int(10) | YES | | NULL | |\n# | georeferenced | int(10) | YES | | NULL | |\n# +-----------------+--------------+------+-----+---------+----------------+\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 3.2.7 on 2021-09-23 07:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sms_consumer', '0006_auto_20210923_0733'),
]
operations = [
migrations.RemoveField(
model_name='smslogmodel',
name='hello',
),
]
|
normal
|
{
"blob_id": "fc9742ceb3c38a5f8c1ad1f030d76103ba0a7a81",
"index": 3857,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('sms_consumer', '0006_auto_20210923_0733')]\n operations = [migrations.RemoveField(model_name='smslogmodel', name=\n 'hello')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('sms_consumer', '0006_auto_20210923_0733')]\n operations = [migrations.RemoveField(model_name='smslogmodel', name=\n 'hello')]\n",
"step-5": "# Generated by Django 3.2.7 on 2021-09-23 07:33\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sms_consumer', '0006_auto_20210923_0733'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='smslogmodel',\n name='hello',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
from multiprocessing import Pool
import glob
import click
import logging
import pandas as pd
from src.resampling.resampling import Resampler
# Default paths
path_in = 'data/hecktor_nii/'
path_out = 'data/resampled/'
path_bb = 'data/bbox.csv'
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores',
type=click.INT,
default=12,
help='The number of workers for parallelization.')
@click.option('--resampling',
type=click.FLOAT,
nargs=3,
default=(1, 1, 1),
              help='Expects 3 positive floats describing the output '
              'resolution of the resampling. To avoid resampling '
              'on one or more dimensions, a value of -1 can be fed, '
              'e.g. --resampling 1.0 1.0 -1 will resample the x '
              'and y axes at 1 mm/px and leave the z axis untouched.')
@click.option('--order',
type=click.INT,
nargs=1,
default=3,
help='The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores, resampling,
order):
""" This command line interface allows to resample NIFTI files within a
given bounding box contain in BOUNDING_BOXES_FILE. The images are
resampled with spline interpolation
of degree --order (default=3) and the segmentation are resampled
by nearest neighbor interpolation.
INPUT_FOLDER is the path of the folder containing the NIFTI to
resample.
OUTPUT_FOLDER is the path of the folder where to store the
resampled NIFTI files.
BOUNDING_BOXES_FILE is the path of the .csv file containing the
bounding boxes of each patient.
"""
logger = logging.getLogger(__name__)
logger.info('Resampling')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
print('resampling is {}'.format(str(resampling)))
bb_df = pd.read_csv(bounding_boxes_file)
bb_df = bb_df.set_index('PatientID')
files_list = [
f for f in glob.glob(input_folder + '/**/*.nii.gz', recursive=True)
]
resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
with Pool(cores) as p:
p.map(resampler, files_list)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logging.captureWarnings(True)
main()
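# --- Hedged usage sketch (an addition; importing this file as `resample`
# from a separate test module is an assumption) ---
# from click.testing import CliRunner
# from resample import main
# result = CliRunner().invoke(main, ['data/hecktor_nii/', 'data/resampled/',
#                                    'data/bbox.csv', '--cores', '4',
#                                    '--resampling', '1.0', '1.0', '1.0'])
# assert result.exit_code == 0, result.output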
|
normal
|
{
"blob_id": "3479276d4769518aa60dcd4e1bb41a8a1a7d6517",
"index": 315,
"step-1": "<mask token>\n\n\[email protected]()\[email protected]('input_folder', type=click.Path(exists=True), default=path_in)\[email protected]('output_folder', type=click.Path(), default=path_out)\[email protected]('bounding_boxes_file', type=click.Path(), default=path_bb)\[email protected]('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\[email protected]('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\[email protected]('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]()\[email protected]('input_folder', type=click.Path(exists=True), default=path_in)\[email protected]('output_folder', type=click.Path(), default=path_out)\[email protected]('bounding_boxes_file', type=click.Path(), default=path_bb)\[email protected]('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\[email protected]('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\[email protected]('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n main()\n",
"step-3": "<mask token>\npath_in = 'data/hecktor_nii/'\npath_out = 'data/resampled/'\npath_bb = 'data/bbox.csv'\n\n\[email protected]()\[email protected]('input_folder', type=click.Path(exists=True), default=path_in)\[email protected]('output_folder', type=click.Path(), default=path_out)\[email protected]('bounding_boxes_file', type=click.Path(), default=path_bb)\[email protected]('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\[email protected]('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\[email protected]('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n main()\n",
"step-4": "import os\nfrom multiprocessing import Pool\nimport glob\nimport click\nimport logging\nimport pandas as pd\nfrom src.resampling.resampling import Resampler\npath_in = 'data/hecktor_nii/'\npath_out = 'data/resampled/'\npath_bb = 'data/bbox.csv'\n\n\[email protected]()\[email protected]('input_folder', type=click.Path(exists=True), default=path_in)\[email protected]('output_folder', type=click.Path(), default=path_out)\[email protected]('bounding_boxes_file', type=click.Path(), default=path_bb)\[email protected]('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\[email protected]('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\[email protected]('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n main()\n",
"step-5": "import os\nfrom multiprocessing import Pool\nimport glob\n\nimport click\nimport logging\nimport pandas as pd\n\nfrom src.resampling.resampling import Resampler\n\n# Default paths\npath_in = 'data/hecktor_nii/'\npath_out = 'data/resampled/'\npath_bb = 'data/bbox.csv'\n\n\[email protected]()\[email protected]('input_folder', type=click.Path(exists=True), default=path_in)\[email protected]('output_folder', type=click.Path(), default=path_out)\[email protected]('bounding_boxes_file', type=click.Path(), default=path_bb)\[email protected]('--cores',\n type=click.INT,\n default=12,\n help='The number of workers for parallelization.')\[email protected]('--resampling',\n type=click.FLOAT,\n nargs=3,\n default=(1, 1, 1),\n help='Expect 3 positive floats describing the output '\n 'resolution of the resampling. To avoid resampling '\n 'on one or more dimension a value of -1 can be fed '\n 'e.g. --resampling 1.0 1.0 -1 will resample the x '\n 'and y axis at 1 mm/px and left the z axis untouched.')\[email protected]('--order',\n type=click.INT,\n nargs=1,\n default=3,\n help='The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores, resampling,\n order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [\n f for f in glob.glob(input_folder + '/**/*.nii.gz', recursive=True)\n ]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
"""This script draws a boxplot of each atom contribution to the cavity."""
import sys
if sys.version < "2.7":
print >> sys.stderr, "ERROR: This script requires Python 2.7.x. "\
"Please install it and try again."
exit(1)
try:
import matplotlib.pyplot as pyplot
import numpy
except ImportError:
print >> sys.stderr, "ERROR:",
print >> sys.stderr, "This script requires matplotlib and numpy. "\
"Please make sure you installed it and that "\
"your PYTHONPATH is set adequately."
exit(1)
def parse_args():
import argparse
import os.path
def isfile(path):
Error = argparse.ArgumentTypeError
if not os.path.exists(path):
raise Error("No such file: '{0}'".format(path))
elif not os.path.isfile(path):
raise Error("Not a valid file: '{0}'".format(path))
return path
hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)
parser.add_argument("filename", type=isfile,
help="contribution data file")
parser.add_argument("-o", "--output",
help="output file name")
parser.add_argument("-n", type=int, default=0,
help="show n greatest contributions")
parser.add_argument("-s", "--stdev", action="store_true",
help="only plot standard deviations")
parser.add_argument("-r", metavar="residue", nargs="+",
help="plot specific residues along time")
return parser.parse_args()
def die(s):
print >> sys.stderr, "ERROR:", s
exit(1)
def show_usage():
print >> sys.stderr, "usage: python " + sys.argv[0] + " <filename.dat>"
def read_contrib(fname):
data = []
with open(fname, "rt") as f:
for line in f:
split = line.split()
k = split[0]
counts = [int(c) for c in split[2:]]
data.append((k, counts))
return data
def med(x):
x = sorted(x)
length = len(x)
if not length % 2:
return (x[length / 2] + x[(length - 1) / 2]) / 2.0
return float(x[length / 2])
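# NOTE (added comment): this hand-rolled median matches numpy.median(x),
# which is already available through the numpy import above.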
def plot_sd(data):
x = numpy.array([i+1 for i in range(len(data[0]))])
d = numpy.std(data[1], axis=1)
pyplot.bar(x, d)
pyplot.xticks(x+.5, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0]-10, ylim[1]+10))
pyplot.xlim((x[0]-1, x[-1]+1))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title("Residue contribution standard deviations")
def plot_barplot(data):
x = [i+1 for i in range(len(data[0]))]
pyplot.boxplot(data[1])
pyplot.xticks(x, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0]-10, ylim[1]+10))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title("Residue contribution")
def plot_residues(data, residues):
def running_average(x, N):
return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
if "all" in residues:
residues = data[0]
for r in residues:
try:
i = data[0].index(r)
except:
die("No residue named '{0}'".format(r))
# y = running_average(data[1][i], 5)
y = data[1][i]
pyplot.plot(y, label=r)
pyplot.legend(loc="best")
def main():
args = parse_args()
data = read_contrib(args.filename)
if args.n:
data = sorted(data, key=lambda x: med(x[1]), reverse=True)
data = data[:args.n]
data = zip(*data)
if args.r:
plot_residues(data, args.r)
elif args.stdev:
plot_sd(data)
else:
plot_barplot(data)
if args.output:
pyplot.savefig(args.output)
else:
pyplot.show()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "9fdcaf65f070b7081afd327442dd20e3284c71eb",
"index": 7905,
"step-1": "<mask token>\n\n\ndef parse_args():\n import argparse\n import os.path\n\n def isfile(path):\n Error = argparse.ArgumentTypeError\n if not os.path.exists(path):\n raise Error(\"No such file: '{0}'\".format(path))\n elif not os.path.isfile(path):\n raise Error(\"Not a valid file: '{0}'\".format(path))\n return path\n hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)\n parser.add_argument('filename', type=isfile, help='contribution data file')\n parser.add_argument('-o', '--output', help='output file name')\n parser.add_argument('-n', type=int, default=0, help=\n 'show n greatest contributions')\n parser.add_argument('-s', '--stdev', action='store_true', help=\n 'only plot standard deviations')\n parser.add_argument('-r', metavar='residue', nargs='+', help=\n 'plot specific residues along time')\n return parser.parse_args()\n\n\ndef die(s):\n print >> sys.stderr, 'ERROR:', s\n exit(1)\n\n\ndef show_usage():\n print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'\n\n\ndef read_contrib(fname):\n data = []\n with open(fname, 'rt') as f:\n for line in f:\n split = line.split()\n k = split[0]\n counts = [int(c) for c in split[2:]]\n data.append((k, counts))\n return data\n\n\ndef med(x):\n x = sorted(x)\n length = len(x)\n if not length % 2:\n return (x[length / 2] + x[(length - 1) / 2]) / 2.0\n return float(x[length / 2])\n\n\ndef plot_sd(data):\n x = numpy.array([(i + 1) for i in range(len(data[0]))])\n d = numpy.std(data[1], axis=1)\n pyplot.bar(x, d)\n pyplot.xticks(x + 0.5, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.xlim((x[0] - 1, x[-1] + 1))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution standard deviations')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args():\n import argparse\n import os.path\n\n def isfile(path):\n Error = argparse.ArgumentTypeError\n if not os.path.exists(path):\n raise Error(\"No such file: '{0}'\".format(path))\n elif not os.path.isfile(path):\n raise Error(\"Not a valid file: '{0}'\".format(path))\n return path\n hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)\n parser.add_argument('filename', type=isfile, help='contribution data file')\n parser.add_argument('-o', '--output', help='output file name')\n parser.add_argument('-n', type=int, default=0, help=\n 'show n greatest contributions')\n parser.add_argument('-s', '--stdev', action='store_true', help=\n 'only plot standard deviations')\n parser.add_argument('-r', metavar='residue', nargs='+', help=\n 'plot specific residues along time')\n return parser.parse_args()\n\n\ndef die(s):\n print >> sys.stderr, 'ERROR:', s\n exit(1)\n\n\ndef show_usage():\n print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'\n\n\ndef read_contrib(fname):\n data = []\n with open(fname, 'rt') as f:\n for line in f:\n split = line.split()\n k = split[0]\n counts = [int(c) for c in split[2:]]\n data.append((k, counts))\n return data\n\n\ndef med(x):\n x = sorted(x)\n length = len(x)\n if not length % 2:\n return (x[length / 2] + x[(length - 1) / 2]) / 2.0\n return float(x[length / 2])\n\n\ndef plot_sd(data):\n x = numpy.array([(i + 1) for i in range(len(data[0]))])\n d = numpy.std(data[1], axis=1)\n pyplot.bar(x, d)\n pyplot.xticks(x + 0.5, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.xlim((x[0] - 1, x[-1] + 1))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution standard deviations')\n\n\ndef plot_barplot(data):\n x = [(i + 1) for i in range(len(data[0]))]\n pyplot.boxplot(data[1])\n pyplot.xticks(x, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_args():\n import argparse\n import os.path\n\n def isfile(path):\n Error = argparse.ArgumentTypeError\n if not os.path.exists(path):\n raise Error(\"No such file: '{0}'\".format(path))\n elif not os.path.isfile(path):\n raise Error(\"Not a valid file: '{0}'\".format(path))\n return path\n hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)\n parser.add_argument('filename', type=isfile, help='contribution data file')\n parser.add_argument('-o', '--output', help='output file name')\n parser.add_argument('-n', type=int, default=0, help=\n 'show n greatest contributions')\n parser.add_argument('-s', '--stdev', action='store_true', help=\n 'only plot standard deviations')\n parser.add_argument('-r', metavar='residue', nargs='+', help=\n 'plot specific residues along time')\n return parser.parse_args()\n\n\ndef die(s):\n print >> sys.stderr, 'ERROR:', s\n exit(1)\n\n\ndef show_usage():\n print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'\n\n\ndef read_contrib(fname):\n data = []\n with open(fname, 'rt') as f:\n for line in f:\n split = line.split()\n k = split[0]\n counts = [int(c) for c in split[2:]]\n data.append((k, counts))\n return data\n\n\ndef med(x):\n x = sorted(x)\n length = len(x)\n if not length % 2:\n return (x[length / 2] + x[(length - 1) / 2]) / 2.0\n return float(x[length / 2])\n\n\ndef plot_sd(data):\n x = numpy.array([(i + 1) for i in range(len(data[0]))])\n d = numpy.std(data[1], axis=1)\n pyplot.bar(x, d)\n pyplot.xticks(x + 0.5, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.xlim((x[0] - 1, x[-1] + 1))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution standard deviations')\n\n\ndef plot_barplot(data):\n x = [(i + 1) for i in range(len(data[0]))]\n pyplot.boxplot(data[1])\n pyplot.xticks(x, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution')\n\n\n<mask token>\n\n\ndef main():\n args = parse_args()\n data = read_contrib(args.filename)\n if args.n:\n data = sorted(data, key=lambda x: med(x[1]), reverse=True)\n data = data[:args.n]\n data = zip(*data)\n if args.r:\n plot_residues(data, args.r)\n elif args.stdev:\n plot_sd(data)\n else:\n plot_barplot(data)\n if args.output:\n pyplot.savefig(args.output)\n else:\n pyplot.show()\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport sys\nif sys.version < '2.7':\n print >> sys.stderr, 'ERROR: This script requires Python 2.7.x. Please install it and try again.'\n exit(1)\ntry:\n import matplotlib.pyplot as pyplot\n import numpy\nexcept ImportError:\n print >> sys.stderr, 'ERROR:'\n print >> sys.stderr, 'This script requires matplotlib and numpy. Please make sure you installed it and that your PYTHONPATH is set adequately.'\n exit(1)\n\n\ndef parse_args():\n import argparse\n import os.path\n\n def isfile(path):\n Error = argparse.ArgumentTypeError\n if not os.path.exists(path):\n raise Error(\"No such file: '{0}'\".format(path))\n elif not os.path.isfile(path):\n raise Error(\"Not a valid file: '{0}'\".format(path))\n return path\n hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)\n parser.add_argument('filename', type=isfile, help='contribution data file')\n parser.add_argument('-o', '--output', help='output file name')\n parser.add_argument('-n', type=int, default=0, help=\n 'show n greatest contributions')\n parser.add_argument('-s', '--stdev', action='store_true', help=\n 'only plot standard deviations')\n parser.add_argument('-r', metavar='residue', nargs='+', help=\n 'plot specific residues along time')\n return parser.parse_args()\n\n\ndef die(s):\n print >> sys.stderr, 'ERROR:', s\n exit(1)\n\n\ndef show_usage():\n print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'\n\n\ndef read_contrib(fname):\n data = []\n with open(fname, 'rt') as f:\n for line in f:\n split = line.split()\n k = split[0]\n counts = [int(c) for c in split[2:]]\n data.append((k, counts))\n return data\n\n\ndef med(x):\n x = sorted(x)\n length = len(x)\n if not length % 2:\n return (x[length / 2] + x[(length - 1) / 2]) / 2.0\n return float(x[length / 2])\n\n\ndef plot_sd(data):\n x = numpy.array([(i + 1) for i in range(len(data[0]))])\n d = numpy.std(data[1], axis=1)\n pyplot.bar(x, d)\n pyplot.xticks(x + 0.5, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.xlim((x[0] - 1, x[-1] + 1))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution standard deviations')\n\n\ndef plot_barplot(data):\n x = [(i + 1) for i in range(len(data[0]))]\n pyplot.boxplot(data[1])\n pyplot.xticks(x, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution')\n\n\ndef plot_residues(data, residues):\n\n def running_average(x, N):\n return numpy.convolve(x, numpy.ones((N,)) / N)[N - 1:]\n if 'all' in residues:\n residues = data[0]\n for r in residues:\n try:\n i = data[0].index(r)\n except:\n die(\"No residue named '{0}'\".format(r))\n y = data[1][i]\n pyplot.plot(y, label=r)\n pyplot.legend(loc='best')\n\n\ndef main():\n args = parse_args()\n data = read_contrib(args.filename)\n if args.n:\n data = sorted(data, key=lambda x: med(x[1]), reverse=True)\n data = data[:args.n]\n data = zip(*data)\n if args.r:\n plot_residues(data, args.r)\n elif args.stdev:\n plot_sd(data)\n else:\n plot_barplot(data)\n if args.output:\n pyplot.savefig(args.output)\n else:\n pyplot.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\n\"\"\"This script draws a boxplot of each atom contribution to the cavity.\"\"\"\n\n\nimport sys\n\nif sys.version < \"2.7\":\n print >> sys.stderr, \"ERROR: This script requires Python 2.7.x. \"\\\n \"Please install it and try again.\"\n exit(1)\n\ntry:\n import matplotlib.pyplot as pyplot\n import numpy\nexcept ImportError:\n print >> sys.stderr, \"ERROR:\",\n print >> sys.stderr, \"This script requires matplotlib and numpy. \"\\\n \"Please make sure you installed it and that \"\\\n \"your PYTHONPATH is set adequately.\"\n exit(1)\n\n\ndef parse_args():\n import argparse\n import os.path\n\n def isfile(path):\n Error = argparse.ArgumentTypeError\n if not os.path.exists(path):\n raise Error(\"No such file: '{0}'\".format(path))\n elif not os.path.isfile(path):\n raise Error(\"Not a valid file: '{0}'\".format(path))\n return path\n\n hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)\n parser.add_argument(\"filename\", type=isfile,\n help=\"contribution data file\")\n parser.add_argument(\"-o\", \"--output\",\n help=\"output file name\")\n parser.add_argument(\"-n\", type=int, default=0,\n help=\"show n greatest contributions\")\n parser.add_argument(\"-s\", \"--stdev\", action=\"store_true\",\n help=\"only plot standard deviations\")\n parser.add_argument(\"-r\", metavar=\"residue\", nargs=\"+\",\n help=\"plot specific residues along time\")\n return parser.parse_args()\n\n\ndef die(s):\n print >> sys.stderr, \"ERROR:\", s\n exit(1)\n\n\ndef show_usage():\n print >> sys.stderr, \"usage: python \" + sys.argv[0] + \" <filename.dat>\"\n\n\ndef read_contrib(fname):\n data = []\n with open(fname, \"rt\") as f:\n for line in f:\n split = line.split()\n k = split[0]\n counts = [int(c) for c in split[2:]]\n data.append((k, counts))\n return data\n\n\ndef med(x):\n x = sorted(x)\n length = len(x)\n if not length % 2:\n return (x[length / 2] + x[(length - 1) / 2]) / 2.0\n return float(x[length / 2])\n\n\ndef plot_sd(data):\n x = numpy.array([i+1 for i in range(len(data[0]))])\n d = numpy.std(data[1], axis=1)\n pyplot.bar(x, d)\n pyplot.xticks(x+.5, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0]-10, ylim[1]+10))\n pyplot.xlim((x[0]-1, x[-1]+1))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title(\"Residue contribution standard deviations\")\n\n\ndef plot_barplot(data):\n x = [i+1 for i in range(len(data[0]))]\n pyplot.boxplot(data[1])\n pyplot.xticks(x, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0]-10, ylim[1]+10))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title(\"Residue contribution\")\n\n\ndef plot_residues(data, residues):\n def running_average(x, N):\n return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]\n if \"all\" in residues:\n residues = data[0]\n for r in residues:\n try:\n i = data[0].index(r)\n except:\n die(\"No residue named '{0}'\".format(r))\n# y = running_average(data[1][i], 5)\n y = data[1][i]\n pyplot.plot(y, label=r)\n pyplot.legend(loc=\"best\")\n\n\ndef main():\n args = parse_args()\n data = read_contrib(args.filename)\n\n if args.n:\n data = sorted(data, key=lambda x: med(x[1]), reverse=True)\n data = data[:args.n]\n\n data = zip(*data)\n\n if args.r:\n plot_residues(data, args.r)\n elif args.stdev:\n plot_sd(data)\n else:\n plot_barplot(data)\n\n if args.output:\n pyplot.savefig(args.output)\n else:\n pyplot.show()\n\n\nif __name__ == 
'__main__':\n main()\n",
"step-ids": [
6,
7,
8,
11,
12
]
}
|
[
6,
7,
8,
11,
12
] |
# coding=utf-8
class Movie:
def __init__(self,movieid,moviename,score,poster):
self.movieid=movieid
self.moviename=moviename
self.score=score
self.poster=poster
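# Emit HTML <option> tags for the values 1-31 (presumably a day-of-month
# picker; the intended use is an assumption).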
for i in range(1,32):
print("<option value =\""+str(i)+"\">"+str(i)+"</option>")
|
normal
|
{
"blob_id": "856e62cf4cd443c7b3397e926f8fc4fece145f5b",
"index": 3447,
"step-1": "<mask token>\n",
"step-2": "class Movie:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Movie:\n\n def __init__(self, movieid, moviename, score, poster):\n self.movieid = movieid\n self.moviename = moviename\n self.score = score\n self.poster = poster\n\n\n<mask token>\n",
"step-4": "class Movie:\n\n def __init__(self, movieid, moviename, score, poster):\n self.movieid = movieid\n self.moviename = moviename\n self.score = score\n self.poster = poster\n\n\nfor i in range(1, 32):\n print('<option value =\"' + str(i) + '\">' + str(i) + '</option>')\n",
"step-5": "# coding=utf-8\r\nclass Movie:\r\n def __init__(self,movieid,moviename,score,poster):\r\n self.movieid=movieid\r\n self.moviename=moviename\r\n self.score=score\r\n self.poster=poster\r\n\r\nfor i in range(1,32):\r\n print(\"<option value =\\\"\"+str(i)+\"\\\">\"+str(i)+\"</option>\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
def layer_forward(x, w):
"""
input:
- inputs (x): (N, d_1, ..., d_k),
- weights (w): (D, M)
"""
# intermediate value (z)
z = None
output = []
cache = (x, w, z, output)
return output, cache
def layer_backward(d_output, cache):
""" Receive derivative of loss with respect
to outputs and cache, and compute derivative
with respect to inputs
"""
# Unpack cache values
x, w, z, output = cache
# Compute derivatives (gradients)
d_x, d_w = None, None
return d_x, d_w
def affine_forward(x, w, b):
"""
A simple linear feedforward (affine)
input:
- inputs (x): (N, d_1, ..., d_k),
- weights (w): (D, M)
- bias (b): (M,)
return:
- output: (N, M)
- cache: (x, w, b)
"""
N = x.shape[0]
# reshape input into rows
output = x.reshape([N, -1]).dot(w) + b
cache = (x, w, b)
return output, cache
def affine_backward(d_output, cache):
"""
input:
- upstream derivative (d_output): (N, M)
- cache (cache): (x, w)
return:
- gradients (dx, d_w, d_b): ((N, d1, ..., d_k)(D, M), (M,))
"""
# Unpack cache values
x, w, b = cache
N = d_output.shape[0]
d_x = d_output.dot(w.T).reshape(x.shape)
d_w = x.reshape([N, -1]).T.dot(d_output)
d_b = np.sum(d_output, axis=0)
return d_x, d_w, d_b
def relu_forward(x):
"""
input:
- inputs (x): (N, d_1, ..., d_k)
return:
- output: (N, d_1, ..., d_k)
- cache: x
"""
output = np.fmax(x, 0)
cache = x
return output, cache
def relu_backward(d_output, cache):
"""
input:
- upstream derivative (d_output): (N, d_1, ..., d_k)
- cache for x (cache): (N, d_1, ..., d_k)
return:
- d_x: gradient with respect to x
"""
x = cache
d_x = np.sign(np.fmax(x, 0)) * d_output
return d_x
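# --- Hedged sanity-check sketch (an addition, not part of the original
# module) ---
# Numeric gradient check for affine_backward using the scalar proxy loss
# L = sum(output * d_output); perturb one weight and compare slopes.
def _check_affine_grads(eps=1e-6):
    rng = np.random.RandomState(0)
    x, w, b = rng.randn(4, 3), rng.randn(3, 2), rng.randn(2)
    out, cache = affine_forward(x, w, b)
    d_out = rng.randn(*out.shape)
    d_x, d_w, d_b = affine_backward(d_out, cache)
    w2 = w.copy()
    w2[0, 0] += eps
    num = ((affine_forward(x, w2, b)[0] - out) * d_out).sum() / eps
    assert abs(num - d_w[0, 0]) < 1e-4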
|
normal
|
{
"blob_id": "c1fd6e940b3b15ae01a102b3c0aba9bd327c77b2",
"index": 8403,
"step-1": "<mask token>\n\n\ndef layer_forward(x, w):\n \"\"\"\n input:\n - inputs (x): (N, d_1, ..., d_k),\n - weights (w): (D, M)\n \"\"\"\n z = None\n output = []\n cache = x, w, z, output\n return output, cache\n\n\n<mask token>\n\n\ndef affine_backward(d_output, cache):\n \"\"\"\n input:\n - upstream derivative (d_output): (N, M)\n - cache (cache): (x, w)\n return:\n - gradients (dx, d_w, d_b): ((N, d1, ..., d_k)(D, M), (M,))\n \"\"\"\n x, w, b = cache\n N = d_output.shape[0]\n d_x = d_output.dot(w.T).reshape(x.shape)\n d_w = x.reshape([N, -1]).T.dot(d_output)\n d_b = np.sum(d_output, axis=0)\n return d_x, d_w, d_b\n\n\ndef relu_forward(x):\n \"\"\"\n input:\n - inputs (x): (N, d_1, ..., d_k)\n return:\n - output: (N, d_1, ..., d_k)\n - cache: x\n \"\"\"\n output = np.fmax(x, 0)\n cache = x\n return output, cache\n\n\ndef relu_backward(d_output, cache):\n \"\"\"\n input:\n - upstream derivative (d_output): (N, d_1, ..., d_k)\n - cache for x (cache): (N, d_1, ..., d_k)\n return:\n - d_x: gradient with respect to x\n \"\"\"\n x = cache\n d_x = np.sign(np.fmax(x, 0)) * d_output\n return d_x\n",
"step-2": "<mask token>\n\n\ndef layer_forward(x, w):\n \"\"\"\n input:\n - inputs (x): (N, d_1, ..., d_k),\n - weights (w): (D, M)\n \"\"\"\n z = None\n output = []\n cache = x, w, z, output\n return output, cache\n\n\n<mask token>\n\n\ndef affine_forward(x, w, b):\n \"\"\"\n A simple linear feedforward (affine)\n input:\n - inputs (x): (N, d_1, ..., d_k),\n - weights (w): (D, M)\n - bias (b): (M,)\n return:\n - output: (N, M)\n - cache: (x, w, b)\n \"\"\"\n N = x.shape[0]\n output = x.reshape([N, -1]).dot(w) + b\n cache = x, w, b\n return output, cache\n\n\ndef affine_backward(d_output, cache):\n \"\"\"\n input:\n - upstream derivative (d_output): (N, M)\n - cache (cache): (x, w)\n return:\n - gradients (dx, d_w, d_b): ((N, d1, ..., d_k)(D, M), (M,))\n \"\"\"\n x, w, b = cache\n N = d_output.shape[0]\n d_x = d_output.dot(w.T).reshape(x.shape)\n d_w = x.reshape([N, -1]).T.dot(d_output)\n d_b = np.sum(d_output, axis=0)\n return d_x, d_w, d_b\n\n\ndef relu_forward(x):\n \"\"\"\n input:\n - inputs (x): (N, d_1, ..., d_k)\n return:\n - output: (N, d_1, ..., d_k)\n - cache: x\n \"\"\"\n output = np.fmax(x, 0)\n cache = x\n return output, cache\n\n\ndef relu_backward(d_output, cache):\n \"\"\"\n input:\n - upstream derivative (d_output): (N, d_1, ..., d_k)\n - cache for x (cache): (N, d_1, ..., d_k)\n return:\n - d_x: gradient with respect to x\n \"\"\"\n x = cache\n d_x = np.sign(np.fmax(x, 0)) * d_output\n return d_x\n",
"step-3": "<mask token>\n\n\ndef layer_forward(x, w):\n \"\"\"\n input:\n - inputs (x): (N, d_1, ..., d_k),\n - weights (w): (D, M)\n \"\"\"\n z = None\n output = []\n cache = x, w, z, output\n return output, cache\n\n\ndef layer_backward(d_output, cache):\n \"\"\" Receive derivative of loss with respect\n to outputs and cache, and compute derivative\n with respect to inputs\n \"\"\"\n x, w, z, output = cache\n d_x, d_w = None, None\n return d_x, d_w\n\n\ndef affine_forward(x, w, b):\n \"\"\"\n A simple linear feedforward (affine)\n input:\n - inputs (x): (N, d_1, ..., d_k),\n - weights (w): (D, M)\n - bias (b): (M,)\n return:\n - output: (N, M)\n - cache: (x, w, b)\n \"\"\"\n N = x.shape[0]\n output = x.reshape([N, -1]).dot(w) + b\n cache = x, w, b\n return output, cache\n\n\ndef affine_backward(d_output, cache):\n \"\"\"\n input:\n - upstream derivative (d_output): (N, M)\n - cache (cache): (x, w)\n return:\n - gradients (dx, d_w, d_b): ((N, d1, ..., d_k)(D, M), (M,))\n \"\"\"\n x, w, b = cache\n N = d_output.shape[0]\n d_x = d_output.dot(w.T).reshape(x.shape)\n d_w = x.reshape([N, -1]).T.dot(d_output)\n d_b = np.sum(d_output, axis=0)\n return d_x, d_w, d_b\n\n\ndef relu_forward(x):\n \"\"\"\n input:\n - inputs (x): (N, d_1, ..., d_k)\n return:\n - output: (N, d_1, ..., d_k)\n - cache: x\n \"\"\"\n output = np.fmax(x, 0)\n cache = x\n return output, cache\n\n\ndef relu_backward(d_output, cache):\n \"\"\"\n input:\n - upstream derivative (d_output): (N, d_1, ..., d_k)\n - cache for x (cache): (N, d_1, ..., d_k)\n return:\n - d_x: gradient with respect to x\n \"\"\"\n x = cache\n d_x = np.sign(np.fmax(x, 0)) * d_output\n return d_x\n",
"step-4": "import numpy as np\n\n\ndef layer_forward(x, w):\n \"\"\"\n input:\n - inputs (x): (N, d_1, ..., d_k),\n - weights (w): (D, M)\n \"\"\"\n z = None\n output = []\n cache = x, w, z, output\n return output, cache\n\n\ndef layer_backward(d_output, cache):\n \"\"\" Receive derivative of loss with respect\n to outputs and cache, and compute derivative\n with respect to inputs\n \"\"\"\n x, w, z, output = cache\n d_x, d_w = None, None\n return d_x, d_w\n\n\ndef affine_forward(x, w, b):\n \"\"\"\n A simple linear feedforward (affine)\n input:\n - inputs (x): (N, d_1, ..., d_k),\n - weights (w): (D, M)\n - bias (b): (M,)\n return:\n - output: (N, M)\n - cache: (x, w, b)\n \"\"\"\n N = x.shape[0]\n output = x.reshape([N, -1]).dot(w) + b\n cache = x, w, b\n return output, cache\n\n\ndef affine_backward(d_output, cache):\n \"\"\"\n input:\n - upstream derivative (d_output): (N, M)\n - cache (cache): (x, w)\n return:\n - gradients (dx, d_w, d_b): ((N, d1, ..., d_k)(D, M), (M,))\n \"\"\"\n x, w, b = cache\n N = d_output.shape[0]\n d_x = d_output.dot(w.T).reshape(x.shape)\n d_w = x.reshape([N, -1]).T.dot(d_output)\n d_b = np.sum(d_output, axis=0)\n return d_x, d_w, d_b\n\n\ndef relu_forward(x):\n \"\"\"\n input:\n - inputs (x): (N, d_1, ..., d_k)\n return:\n - output: (N, d_1, ..., d_k)\n - cache: x\n \"\"\"\n output = np.fmax(x, 0)\n cache = x\n return output, cache\n\n\ndef relu_backward(d_output, cache):\n \"\"\"\n input:\n - upstream derivative (d_output): (N, d_1, ..., d_k)\n - cache for x (cache): (N, d_1, ..., d_k)\n return:\n - d_x: gradient with respect to x\n \"\"\"\n x = cache\n d_x = np.sign(np.fmax(x, 0)) * d_output\n return d_x\n",
"step-5": "import numpy as np\n\n\ndef layer_forward(x, w):\n \"\"\"\n input:\n - inputs (x): (N, d_1, ..., d_k),\n - weights (w): (D, M)\n \"\"\"\n # intermediate value (z)\n z = None\n output = []\n cache = (x, w, z, output)\n\n return output, cache\n\n\ndef layer_backward(d_output, cache):\n \"\"\" Receive derivative of loss with respect\n to outputs and cache, and compute derivative\n with respect to inputs\n \"\"\"\n\n # Unpack cache values\n x, w, z, output = cache\n\n # Compute derivatives (gradients)\n d_x, d_w = None, None\n\n return d_x, d_w\n\n\ndef affine_forward(x, w, b):\n \"\"\"\n A simple linear feedforward (affine)\n input:\n - inputs (x): (N, d_1, ..., d_k),\n - weights (w): (D, M)\n - bias (b): (M,)\n return:\n - output: (N, M)\n - cache: (x, w, b)\n \"\"\"\n N = x.shape[0]\n\n # reshape input into rows\n output = x.reshape([N, -1]).dot(w) + b\n cache = (x, w, b)\n\n return output, cache\n\n\ndef affine_backward(d_output, cache):\n \"\"\"\n input:\n - upstream derivative (d_output): (N, M)\n - cache (cache): (x, w)\n return:\n - gradients (dx, d_w, d_b): ((N, d1, ..., d_k)(D, M), (M,))\n \"\"\"\n\n # Unpack cache values\n x, w, b = cache\n\n N = d_output.shape[0]\n d_x = d_output.dot(w.T).reshape(x.shape)\n d_w = x.reshape([N, -1]).T.dot(d_output)\n d_b = np.sum(d_output, axis=0)\n\n return d_x, d_w, d_b\n\n\ndef relu_forward(x):\n \"\"\"\n input:\n - inputs (x): (N, d_1, ..., d_k)\n return:\n - output: (N, d_1, ..., d_k)\n - cache: x\n \"\"\"\n output = np.fmax(x, 0)\n cache = x\n\n return output, cache\n\n\ndef relu_backward(d_output, cache):\n \"\"\"\n input:\n - upstream derivative (d_output): (N, d_1, ..., d_k)\n - cache for x (cache): (N, d_1, ..., d_k)\n return:\n - d_x: gradient with respect to x\n \"\"\"\n x = cache\n d_x = np.sign(np.fmax(x, 0)) * d_output\n\n return d_x\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
name = 'Ледяная скорбь'
description = 'Тот кто держит этот клинок, должен обладать бесконечной силой. Подобно тому, как он разрывает плоть, он разрывает души.'
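# English gloss of the Russian strings above (translation only, not code):
# name = 'Frostmourne' (lit. 'Icy Sorrow'); description = 'Whoever wields
# this blade must possess boundless power. Just as it rends flesh, it
# rends souls.'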
price = 3000
fightable = True
def fight_use(user, reply, room):
return 200
|
normal
|
{
"blob_id": "7254e74ff3f562613cc610e4816a2d92b6b1cd4c",
"index": 6074,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fight_use(user, reply, room):\n return 200\n",
"step-3": "name = 'Ледяная скорбь'\ndescription = (\n 'Тот кто держит этот клинок, должен обладать бесконечной силой. Подобно тому, как он разрывает плоть, он разрывает души.'\n )\nprice = 3000\nfightable = True\n\n\ndef fight_use(user, reply, room):\n return 200\n",
"step-4": "name = 'Ледяная скорбь'\ndescription = 'Тот кто держит этот клинок, должен обладать бесконечной силой. Подобно тому, как он разрывает плоть, он разрывает души.'\nprice = 3000\n\nfightable = True\n\ndef fight_use(user, reply, room):\n\treturn 200",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
#coding:utf-8
'''
Created on August 29, 2016
@author: lichen
'''
def custom_proc(request):
"""
自定义context_processors
"""
return {
"context_test":"test"
}
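# --- Hedged wiring sketch (an addition; the module path
# "myapp.context_processors" is an assumption) ---
# A context processor only runs once it is registered in settings.py:
# TEMPLATES = [{
#     "BACKEND": "django.template.backends.django.DjangoTemplates",
#     "APP_DIRS": True,
#     "OPTIONS": {
#         "context_processors": [
#             "myapp.context_processors.custom_proc",
#         ],
#     },
# }]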
|
normal
|
{
"blob_id": "43ecb173e3d306284f2122410b5b74945572f683",
"index": 8104,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef custom_proc(request):\n \"\"\"\n 自定义context_processors\n \"\"\"\n return {'context_test': 'test'}\n",
"step-3": "#!/usr/bin/env python\n#coding:utf-8\n\n'''\nCreated on 2016年8月29日\n\n@author: lichen\n'''\n\ndef custom_proc(request):\n \"\"\"\n 自定义context_processors\n \"\"\"\n return {\n \"context_test\":\"test\"\n }",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
复习
面向对象:考虑问题从对象的角度出发.
抽象:从多个事物中,舍弃个别的/非本质的特征(不重要),
抽出共性的本质(重要的)过程。
三大特征:
封装:将每个变化点单独分解到不同的类中。
例如:老张开车去东北
做法:定义人类,定义车类。
继承:重用现有类的功能和概念,并在此基础上进行扩展。
统一概念
例如:图形管理器,统计圆形/矩形.....面积。
做法:用图形类代表/约束,圆形/矩形..具有计算面积的方法.
多态:调用父"抽象的"方法,执行子类"具体的"方法.
重写:覆盖父类那个比较抽象的方法。
例如:图形管理器调用图形的计算面积方法
具体图形必须重写图形的计算面积方法。
继承是共性(计算面积),多态个性(长*宽 / pi *r**2)。
设计原则
开闭原则:允许增加新功能,不允许修改客户端代码.
单一职责:一个有且只有一个改变的原因.
依赖倒置:调用抽象(父),不要调用具体(子);
抽象不要依赖具体.
组合复用:如果仅仅是代码的复用,优先使用组合.
类与类关系
泛化[继承](做成爸爸)
关联(做成成员变量)
依赖(做成方法参数)
"""
|
normal
|
{
"blob_id": "2749a262bf8da99aa340e878c15a6dba01acc38c",
"index": 7025,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\n 复习\n 面向对象:考虑问题从对象的角度出发.\n 抽象:从多个事物中,舍弃个别的/非本质的特征(不重要),\n 抽出共性的本质(重要的)过程。\n 三大特征:\n 封装:将每个变化点单独分解到不同的类中。\n 例如:老张开车去东北\n 做法:定义人类,定义车类。\n\n 继承:重用现有类的功能和概念,并在此基础上进行扩展。\n 统一概念\n 例如:图形管理器,统计圆形/矩形.....面积。\n 做法:用图形类代表/约束,圆形/矩形..具有计算面积的方法.\n\n 多态:调用父\"抽象的\"方法,执行子类\"具体的\"方法.\n 重写:覆盖父类那个比较抽象的方法。\n 例如:图形管理器调用图形的计算面积方法\n 具体图形必须重写图形的计算面积方法。\n 继承是共性(计算面积),多态个性(长*宽 / pi *r**2)。\n\n 设计原则\n 开闭原则:允许增加新功能,不允许修改客户端代码.\n 单一职责:一个有且只有一个改变的原因.\n 依赖倒置:调用抽象(父),不要调用具体(子);\n 抽象不要依赖具体.\n 组合复用:如果仅仅是代码的复用,优先使用组合.\n\n 类与类关系\n 泛化[继承](做成爸爸)\n 关联(做成成员变量)\n 依赖(做成方法参数)\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import whoosh.index as index
from whoosh.fields import *
from whoosh.qparser import MultifieldParser
from whoosh import scoring
w = scoring.BM25F(B=0.75, content_B=1.0, K1=1.5)
fieldnames = ["bill_text", "bill_title", "year", "sponsor_name", "subject"]
boosts = {"bill_text": 1, "bill_title": 2.5, "year": 0, "sponsor_name": 0, "subject": 2.0}
#load index:
ix = index.open_dir("final_index")
writer = ix.writer()
#search:
def results(q):
hits = []
with ix.searcher(weighting=w) as searcher:
query = MultifieldParser(fieldnames, ix.schema, fieldboosts=boosts).parse(q)
results = searcher.search_page(query, 1, pagelen=10)
print "\n" + str(len(results)) + " results found!"
print "Displaying top ten results:"
for result in results:
if result["house_or_senate"] == "h":
hs = "hr"
billnum = "hr" + str(result["bill_number"])
isih = "ih"
elif result["house_or_senate"] == "s":
hs = "s"
billnum = "s" + str(result["bill_number"])
isih = "is"
url = "https://www.govtrack.us/data/congress/" + str(result["congress_number"]) + "/bills/" + hs + "/" + hs + str(result["bill_number"]) + "/text-versions/" + isih + "/document.txt"
hits.append({"bill_title":result["bill_title"], "year":result["year"], "url":url, "sponsor_name":result["sponsor_name"]})
return hits
query = raw_input("\nSearch for a term in bill text: ")
query = query.lstrip()
print results(query)
|
normal
|
{
"blob_id": "6a400419c26c62471dfc6893cc2d1ff6d88e49f4",
"index": 7518,
"step-1": "import whoosh.index as index\nfrom whoosh.fields import *\nfrom whoosh.qparser import MultifieldParser\nfrom whoosh import scoring\n\nw = scoring.BM25F(B=0.75, content_B=1.0, K1=1.5)\nfieldnames = [\"bill_text\", \"bill_title\", \"year\", \"sponsor_name\", \"subject\"]\nboosts = {\"bill_text\": 1, \"bill_title\": 2.5, \"year\": 0, \"sponsor_name\": 0, \"subject\": 2.0}\n\n#load index:\nix = index.open_dir(\"final_index\")\nwriter = ix.writer()\n\n#search:\ndef results(q):\n\thits = []\n\twith ix.searcher(weighting=w) as searcher:\n\t\tquery = MultifieldParser(fieldnames, ix.schema, fieldboosts=boosts).parse(q)\n\t\tresults = searcher.search_page(query, 1, pagelen=10)\n\t\tprint \"\\n\" + str(len(results)) + \" results found!\"\n\t\tprint \"Displaying top ten results:\"\n\t\tfor result in results:\n\t\t\tif result[\"house_or_senate\"] == \"h\":\n\t\t\t\ths = \"hr\"\n\t\t\t\tbillnum = \"hr\" + str(result[\"bill_number\"])\n\t\t\t\tisih = \"ih\"\n\t\t\telif result[\"house_or_senate\"] == \"s\":\n\t\t\t\ths = \"s\"\n\t\t\t\tbillnum = \"s\" + str(result[\"bill_number\"])\n\t\t\t\tisih = \"is\"\n\t\t\t\n\t\t\turl = \"https://www.govtrack.us/data/congress/\" + str(result[\"congress_number\"]) + \"/bills/\" + hs + \"/\" + hs + str(result[\"bill_number\"]) + \"/text-versions/\" + isih + \"/document.txt\" \n\t\t\thits.append({\"bill_title\":result[\"bill_title\"], \"year\":result[\"year\"], \"url\":url, \"sponsor_name\":result[\"sponsor_name\"]})\n\treturn hits\n\nquery = raw_input(\"\\nSearch for a term in bill text: \")\nquery = query.lstrip()\n\nprint results(query)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from tkinter import *
root = Tk()
root.title("Calculator")
e = Entry(root, width = 50, borderwidth = 5)
e.grid(row = 0, column = 0, columnspan = 4, padx = 10, pady = 20)
def button_click(number):
digit = e.get()
e.delete(0, END)
e.insert(0, str(digit) + str(number))
def button_add():
global first_num
global math
math = "addition"
first_num = e.get()
e.delete(0, END)
def button_mul():
global first_num
global math
math = "multiplication"
first_num = e.get()
e.delete(0, END)
def button_sub():
global first_num
global math
math = "subtraction"
first_num = e.get()
e.delete(0, END)
def button_div():
global first_num
global math
math = "division"
first_num = e.get()
e.delete(0, END)
def button_equal():
sec_num = e.get()
e.delete(0, END)
if math == "addition":
e.insert(0, int(first_num) + int(sec_num))
if math == "multiplication":
e.insert(0, int(first_num) * int(sec_num))
if math == "subtraction":
e.insert(0, int(first_num) - int(sec_num))
if math == "division":
e.insert(0, int(first_num) / int(sec_num))
def clear():
e.delete(0, END)
#creating buttons
button_1 = Button(root, text = "1", height = 5, width = 10,command = lambda:button_click(1))
button_2 = Button(root, text = "2", height = 5, width = 10, command = lambda:button_click(2))
button_3 = Button(root, text = "3", height = 5, width = 10, command = lambda:button_click(3))
button_4 = Button(root, text = "4", height = 5, width = 10, command = lambda:button_click(4))
button_5 = Button(root, text = "5", height = 5, width = 10, command = lambda:button_click(5))
button_6 = Button(root, text = "6", height = 5, width = 10, command = lambda:button_click(6))
button_7 = Button(root, text = "7", height = 5, width = 10, command = lambda:button_click(7))
button_8 = Button(root, text = "8", height = 5, width = 10, command = lambda:button_click(8))
button_9 = Button(root, text = "9", height = 5, width = 10, command = lambda:button_click(9))
button_0 = Button(root, text = "0", height = 5, width = 10, command = lambda:button_click(0))
button_add = Button(root, text = "+", height = 5, width = 10, bg = "#A1CAE2", command = button_add)
button_mul = Button(root, text = "*", height = 5, width = 10, bg = "#A1CAE2", command = button_mul)
button_sub = Button(root, text = "-", height = 5, width = 10, bg = "#A1CAE2", command = button_sub)
button_div = Button(root, text = "/", height = 5, width = 10, bg = "#A1CAE2", command = button_div)
button_equal = Button(root, text = "=", height = 5, width = 10, bg = "#A1CAE2", command = button_equal)
button_clear = Button(root, text = "Clear", height = 5, width = 10, bg = "#A1CAE2", command = clear)
#placing buttons
button_1.grid(row = 3, column = 0)
button_2.grid(row = 3, column = 1)
button_3.grid(row = 3, column = 2)
button_4.grid(row = 2, column = 0)
button_5.grid(row = 2, column = 1)
button_6.grid(row = 2, column = 2)
button_7.grid(row = 1, column = 0)
button_8.grid(row = 1, column = 1)
button_9.grid(row = 1, column = 2)
button_0.grid(row = 4, column = 0)
button_add.grid(row = 4, column = 1)
button_sub.grid(row = 1, column = 4)
button_mul.grid(row = 2, column = 4)
button_div.grid(row = 3, column = 4)
button_equal.grid(row = 4, column = 2)
button_clear.grid(row = 4, column = 4)
root.mainloop()
|
normal
|
{
"blob_id": "59a75f78c7a146dcf55d43be90f71abce2bcf753",
"index": 4934,
"step-1": "<mask token>\n\n\ndef button_add():\n global first_num\n global math\n math = 'addition'\n first_num = e.get()\n e.delete(0, END)\n\n\n<mask token>\n\n\ndef button_sub():\n global first_num\n global math\n math = 'subtraction'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_div():\n global first_num\n global math\n math = 'division'\n first_num = e.get()\n e.delete(0, END)\n\n\n<mask token>\n\n\ndef clear():\n e.delete(0, END)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef button_click(number):\n digit = e.get()\n e.delete(0, END)\n e.insert(0, str(digit) + str(number))\n\n\ndef button_add():\n global first_num\n global math\n math = 'addition'\n first_num = e.get()\n e.delete(0, END)\n\n\n<mask token>\n\n\ndef button_sub():\n global first_num\n global math\n math = 'subtraction'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_div():\n global first_num\n global math\n math = 'division'\n first_num = e.get()\n e.delete(0, END)\n\n\n<mask token>\n\n\ndef clear():\n e.delete(0, END)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef button_click(number):\n digit = e.get()\n e.delete(0, END)\n e.insert(0, str(digit) + str(number))\n\n\ndef button_add():\n global first_num\n global math\n math = 'addition'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_mul():\n global first_num\n global math\n math = 'multiplication'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_sub():\n global first_num\n global math\n math = 'subtraction'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_div():\n global first_num\n global math\n math = 'division'\n first_num = e.get()\n e.delete(0, END)\n\n\n<mask token>\n\n\ndef clear():\n e.delete(0, END)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef button_click(number):\n digit = e.get()\n e.delete(0, END)\n e.insert(0, str(digit) + str(number))\n\n\ndef button_add():\n global first_num\n global math\n math = 'addition'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_mul():\n global first_num\n global math\n math = 'multiplication'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_sub():\n global first_num\n global math\n math = 'subtraction'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_div():\n global first_num\n global math\n math = 'division'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_equal():\n sec_num = e.get()\n e.delete(0, END)\n if math == 'addition':\n e.insert(0, int(first_num) + int(sec_num))\n if math == 'multiplication':\n e.insert(0, int(first_num) * int(sec_num))\n if math == 'subtraction':\n e.insert(0, int(first_num) - int(sec_num))\n if math == 'division':\n e.insert(0, int(first_num) / int(sec_num))\n\n\ndef clear():\n e.delete(0, END)\n\n\n<mask token>\n",
"step-5": "from tkinter import *\r\n\r\nroot = Tk()\r\nroot.title(\"Calculator\")\r\n\r\ne = Entry(root, width = 50, borderwidth = 5)\r\ne.grid(row = 0, column = 0, columnspan = 4, padx = 10, pady = 20)\r\n\r\ndef button_click(number):\r\n\tdigit = e.get()\r\n\te.delete(0, END)\r\n\te.insert(0, str(digit) + str(number))\r\n\r\ndef button_add():\r\n\tglobal first_num\r\n\tglobal math\r\n\tmath = \"addition\"\r\n\tfirst_num = e.get()\r\n\te.delete(0, END)\r\n\r\ndef button_mul():\r\n\tglobal first_num\r\n\tglobal math\r\n\tmath = \"multiplication\"\r\n\tfirst_num = e.get()\r\n\te.delete(0, END)\r\n\r\ndef button_sub():\r\n\tglobal first_num\r\n\tglobal math\r\n\tmath = \"subtraction\"\r\n\tfirst_num = e.get()\r\n\te.delete(0, END)\r\n\r\ndef button_div():\r\n\tglobal first_num\r\n\tglobal math\r\n\tmath = \"division\"\r\n\tfirst_num = e.get()\r\n\te.delete(0, END)\r\n\r\ndef button_equal():\t\r\n\tsec_num = e.get()\r\n\te.delete(0, END)\r\n\tif math == \"addition\":\r\n\t\te.insert(0, int(first_num) + int(sec_num))\r\n\tif math == \"multiplication\":\r\n\t\te.insert(0, int(first_num) * int(sec_num))\r\n\tif math == \"subtraction\":\r\n\t\te.insert(0, int(first_num) - int(sec_num))\r\n\tif math == \"division\":\r\n\t\te.insert(0, int(first_num) / int(sec_num))\r\n\r\ndef clear():\r\n\te.delete(0, END)\r\n\r\n\t\r\n#creating buttons\r\nbutton_1 = Button(root, text = \"1\", height = 5, width = 10,command = lambda:button_click(1))\r\nbutton_2 = Button(root, text = \"2\", height = 5, width = 10, command = lambda:button_click(2))\r\nbutton_3 = Button(root, text = \"3\", height = 5, width = 10, command = lambda:button_click(3))\r\nbutton_4 = Button(root, text = \"4\", height = 5, width = 10, command = lambda:button_click(4))\r\nbutton_5 = Button(root, text = \"5\", height = 5, width = 10, command = lambda:button_click(5))\r\nbutton_6 = Button(root, text = \"6\", height = 5, width = 10, command = lambda:button_click(6))\r\nbutton_7 = Button(root, text = \"7\", height = 5, width = 10, command = lambda:button_click(7))\r\nbutton_8 = Button(root, text = \"8\", height = 5, width = 10, command = lambda:button_click(8))\r\nbutton_9 = Button(root, text = \"9\", height = 5, width = 10, command = lambda:button_click(9))\r\nbutton_0 = Button(root, text = \"0\", height = 5, width = 10, command = lambda:button_click(0))\r\n\r\nbutton_add = Button(root, text = \"+\", height = 5, width = 10, bg = \"#A1CAE2\", command = button_add)\r\nbutton_mul = Button(root, text = \"*\", height = 5, width = 10, bg = \"#A1CAE2\", command = button_mul)\r\nbutton_sub = Button(root, text = \"-\", height = 5, width = 10, bg = \"#A1CAE2\", command = button_sub)\r\nbutton_div = Button(root, text = \"/\", height = 5, width = 10, bg = \"#A1CAE2\", command = button_div)\r\nbutton_equal = Button(root, text = \"=\", height = 5, width = 10, bg = \"#A1CAE2\", command = button_equal)\r\nbutton_clear = Button(root, text = \"Clear\", height = 5, width = 10, bg = \"#A1CAE2\", command = clear)\r\n\r\n#placing buttons\r\nbutton_1.grid(row = 3, column = 0)\r\nbutton_2.grid(row = 3, column = 1)\r\nbutton_3.grid(row = 3, column = 2)\r\nbutton_4.grid(row = 2, column = 0)\r\nbutton_5.grid(row = 2, column = 1)\r\nbutton_6.grid(row = 2, column = 2)\r\nbutton_7.grid(row = 1, column = 0)\r\nbutton_8.grid(row = 1, column = 1)\r\nbutton_9.grid(row = 1, column = 2)\r\nbutton_0.grid(row = 4, column = 0)\r\n\r\nbutton_add.grid(row = 4, column = 1)\r\nbutton_sub.grid(row = 1, column = 4)\r\nbutton_mul.grid(row = 2, column = 4)\r\nbutton_div.grid(row = 3, column 
= 4)\r\nbutton_equal.grid(row = 4, column = 2)\r\nbutton_clear.grid(row = 4, column = 4)\r\n\r\nroot.mainloop()",
"step-ids": [
4,
5,
6,
7,
11
]
}
|
[
4,
5,
6,
7,
11
] |
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import math
from tkinter import *
from tkinter.ttk import *
from facedetectandtrack import *
x_vals = []
root = Tk()
counter=0
#def graph():
plt.style.use('seaborn')
def animate(i):
data = pd.read_csv('data.csv')
global x_vals
global counter
x_vals.append(counter)
try:
x = data.iloc[x_vals,0]
y = data.iloc[x_vals,1]
if counter>10:
x_vals.pop(0)
plt.cla()
axes=plt.gca()
axes.set_ylim([0,30])
#plt.plot(x, y)
counter=counter+1
height = root.winfo_screenheight()
width = root.winfo_screenwidth()
screen_x1 = width/2
screen_y1 = height/2
X = screen_x1 - face_x2
Y = screen_y1 - face_y2
d_x = (X*X)
d_y = (Y*Y)
D = d_x + d_y
distance = math.sqrt(D)
#print(distance)
plt.scatter(counter ,distance, s= 50,linewidth=1)
plt.xlabel("Time")
plt.ylabel("Movement of student from the center of screen")
plt.tight_layout()
except IndexError as e:
print('Graph ended')
exit(0)
ani = FuncAnimation(plt.gcf(), animate, interval=1000)
plt.savefig("Scatter_Graph.png")
plt.tight_layout()
plt.show()
|
normal
|
{
"blob_id": "239f055fd76a3ecb5f384c256ad850ea42739b8f",
"index": 9710,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.style.use('seaborn')\n\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals, 0]\n y = data.iloc[x_vals, 1]\n if counter > 10:\n x_vals.pop(0)\n plt.cla()\n axes = plt.gca()\n axes.set_ylim([0, 30])\n counter = counter + 1\n height = root.winfo_screenheight()\n width = root.winfo_screenwidth()\n screen_x1 = width / 2\n screen_y1 = height / 2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = X * X\n d_y = Y * Y\n D = d_x + d_y\n distance = math.sqrt(D)\n plt.scatter(counter, distance, s=50, linewidth=1)\n plt.xlabel('Time')\n plt.ylabel('Movement of student from the center of screen')\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\n\n<mask token>\nplt.savefig('Scatter_Graph.png')\nplt.tight_layout()\nplt.show()\n",
"step-3": "<mask token>\nx_vals = []\nroot = Tk()\ncounter = 0\nplt.style.use('seaborn')\n\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals, 0]\n y = data.iloc[x_vals, 1]\n if counter > 10:\n x_vals.pop(0)\n plt.cla()\n axes = plt.gca()\n axes.set_ylim([0, 30])\n counter = counter + 1\n height = root.winfo_screenheight()\n width = root.winfo_screenwidth()\n screen_x1 = width / 2\n screen_y1 = height / 2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = X * X\n d_y = Y * Y\n D = d_x + d_y\n distance = math.sqrt(D)\n plt.scatter(counter, distance, s=50, linewidth=1)\n plt.xlabel('Time')\n plt.ylabel('Movement of student from the center of screen')\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\nplt.savefig('Scatter_Graph.png')\nplt.tight_layout()\nplt.show()\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport math\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom facedetectandtrack import *\nx_vals = []\nroot = Tk()\ncounter = 0\nplt.style.use('seaborn')\n\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals, 0]\n y = data.iloc[x_vals, 1]\n if counter > 10:\n x_vals.pop(0)\n plt.cla()\n axes = plt.gca()\n axes.set_ylim([0, 30])\n counter = counter + 1\n height = root.winfo_screenheight()\n width = root.winfo_screenwidth()\n screen_x1 = width / 2\n screen_y1 = height / 2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = X * X\n d_y = Y * Y\n D = d_x + d_y\n distance = math.sqrt(D)\n plt.scatter(counter, distance, s=50, linewidth=1)\n plt.xlabel('Time')\n plt.ylabel('Movement of student from the center of screen')\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\nplt.savefig('Scatter_Graph.png')\nplt.tight_layout()\nplt.show()\n",
"step-5": "\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport math\nfrom tkinter import * \nfrom tkinter.ttk import *\nfrom facedetectandtrack import *\n \nx_vals = []\nroot = Tk()\n\n\ncounter=0\n#def graph():\nplt.style.use('seaborn')\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals,0]\n y = data.iloc[x_vals,1] \n if counter>10:\n x_vals.pop(0)\n\n plt.cla()\n axes=plt.gca()\n axes.set_ylim([0,30])\n #plt.plot(x, y)\n counter=counter+1\n\n height = root.winfo_screenheight() \n width = root.winfo_screenwidth() \n screen_x1 = width/2\n screen_y1 = height/2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = (X*X)\n d_y = (Y*Y)\n D = d_x + d_y\n distance = math.sqrt(D)\n #print(distance)\n plt.scatter(counter ,distance, s= 50,linewidth=1)\n\n plt.xlabel(\"Time\")\n plt.ylabel(\"Movement of student from the center of screen\")\n\n\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\nplt.savefig(\"Scatter_Graph.png\")\n\nplt.tight_layout()\nplt.show()",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
g=int(input())
num=0
while(g>0):
num=num+g
g=g-1
print(num)
|
normal
|
{
"blob_id": "8b18f098080c3f5773aa04dffaff0639fe7fa74f",
"index": 8886,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile g > 0:\n num = num + g\n g = g - 1\nprint(num)\n",
"step-3": "g = int(input())\nnum = 0\nwhile g > 0:\n num = num + g\n g = g - 1\nprint(num)\n",
"step-4": "g=int(input())\nnum=0\nwhile(g>0):\n num=num+g\n g=g-1\nprint(num)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import sys
def add_them(a, b):
return a + b
def main():
print add_them(10, 21)
if __name__ == '__main__':
sys.exit(main())
|
normal
|
{
"blob_id": "aebf1d64923c5f325c9d429be092deaa06f20963",
"index": 6232,
"step-1": "#!/usr/bin/env python\n\nimport sys\n\ndef add_them(a, b):\n return a + b\n\ndef main():\n print add_them(10, 21)\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from rest_framework import viewsets
from recruitment.serializers.LocationSerializer import LocationSerializer
from recruitment.models.Location import Location
import django_filters
class LocationViewSet(viewsets.ModelViewSet):
queryset = Location.objects.all().filter(deleted=0)
serializer_class = LocationSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
|
normal
|
{
"blob_id": "aef45cb8ea9fcaeffcca147da7637536bcc4b226",
"index": 6217,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LocationViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LocationViewSet(viewsets.ModelViewSet):\n queryset = Location.objects.all().filter(deleted=0)\n serializer_class = LocationSerializer\n filter_backends = django_filters.rest_framework.DjangoFilterBackend,\n",
"step-4": "from rest_framework import viewsets\nfrom recruitment.serializers.LocationSerializer import LocationSerializer\nfrom recruitment.models.Location import Location\nimport django_filters\n\n\nclass LocationViewSet(viewsets.ModelViewSet):\n queryset = Location.objects.all().filter(deleted=0)\n serializer_class = LocationSerializer\n filter_backends = django_filters.rest_framework.DjangoFilterBackend,\n",
"step-5": "from rest_framework import viewsets\nfrom recruitment.serializers.LocationSerializer import LocationSerializer\nfrom recruitment.models.Location import Location\n\nimport django_filters\n\n\nclass LocationViewSet(viewsets.ModelViewSet):\n queryset = Location.objects.all().filter(deleted=0)\n serializer_class = LocationSerializer\n filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import pandas as pd
from scipy import sparse, io
import cPickle as pickle
import sys
sys.path.append('code')
import models
import split
from itertools import chain
def test_simple_instance(items, item_numbers, negative_items, user):
model = models.Word2VecRecommender(size=200, window=max(item_numbers), min_count=1)
model.fit(items)
user_items = items[user-1]
#negative_items = [str(-1 * int(x)) for x in unliked_list[user-1]]
negative_items = [str(-1 * int(x)) for x in negative_items[user-1]]
#negative_items=[]
recommendations = model.recommend(user_items, negative_items, num_items=50)
return recommendations
def test_simple_instance_test1(test_user, items, item_numbers, negative_items, user):
'''
Returns list of tuples representing item recommendations.
- based on user's liked_list and disliked_list
- contains cosine similarities of the recommendations
'''
model = models.Word2VecRecommender(size=200, window=max(item_numbers), min_count=1)
model.fit(items)
user_items = items[user-1]
negative_items = [str(-1 * int(x)) for x in negative_items[user-1]]
final = model.recommend2(test_user, user_items, negative_items, num_items=100)
return final # movieId
def extract_names(recommendations, movies_contents):
'''
Returns and print the name of the itmes.
It contains the name and the genre.
'''
recommended_list = list(zip(*recommendations)[0]) # movieId
# adjusting index by subtracting 1
recommended_list = [int(x) for x in recommended_list]
extracted = movies_contents.ix[recommended_list,:]
print extracted
print '\n'
return extracted
def examine_the_results2(final_extracted, rtmp, user):
'''
Returns the actual ratings of the recommend items from the user.
Used for evaluating the performance of the recommender.
'''
idx = final_extracted.index
examine_list = []
rt = list(chain.from_iterable(rtmp[user-1].toarray().tolist()))
for i in idx:
r = rt[i-1]
examine_list.append(r)
print examine_list
return examine_list
def examine_the_results3(final_extracted, test_user, user):
'''
Returns the actual ratings of the recommend items from the user.
Used for evaluating the performance of the recommender.
'''
idx = final_extracted.index
examine_list = []
for i in idx:
r = int( test_user[test_user['movieId'] == i].rating )
examine_list.append(r)
print examine_list
return examine_list
def rec_eval(items, item_numbers, disliked_list, movies_contents, rtmp, user):
'''
User-specific processes.
'''
recommendations = test_simple_instance_test1(test_user, items, item_numbers, disliked_list, user)
print recommendations
# what are the names of the recommended movies
print "Recommended movies"
extracted = extract_names(recommendations, movies_contents)
print "User {}'s ratings on Recommended movis".format(user)
examine_list = examine_the_results2(extracted, rtmp, user)
return examine_list
def testing(train_user, test_user, items, item_numbers, disliked_list, rtmp, user):
# old
negative_items = disliked_list
recommendations = test_simple_instance(items, item_numbers, negative_items, user)
print "Recommended movies"
extracted = extract_names(recommendations, movies_contents)
print "User {}'s ratings on Recommended movis".format(user)
examine_list = examine_the_results2(extracted, rtmp, user)
# new
final = test_simple_instance_test1(test_user, items, item_numbers, disliked_list, user)
print final
print "final"
#recommended_list = list(zip(*recommendations)[0])
#recommended_list = [int(x)-1 for x in recommended_list]
# final_extracted = movies_contents.ix[final,:]
# print "User {}'s ratings on Recommended movis".format(user)
# examine_list = examine_the_results2(final_extracted, rtmp, user)
print 'train_user.shape',train_user.shape
print 'test_user.shape',test_user.shape
print 'train_user.movieId.unique()',train_user.movieId.unique().size
print 'test_user.movieId.unique()',test_user.movieId.unique().size
# final=[]
# for movie in recommended_list:
# if movie in train_user.movieId.unique():
# print "movie ",movie,' is in train_user'
# if movie in test_user.movieId.unique():
# final.append(movie)
# print "movie ",movie,' is in test_user'
print "Final movies"
final_extracted = movies_contents.ix[final,:]
print "User {}'s ratings on Final movies".format(user)
final_examine_list = examine_the_results3(final_extracted, test_user, user)
print final_examine_list
# user 654(userId 654) All recommendations
# [('186', 0.999988317489624),
# ('208', 0.9999874234199524),
# ('527', 0.9999861121177673),
# ('153', 0.9999856948852539),
# ('125', 0.9999853372573853),
# ('588', 0.9999845027923584),
# ('204', 0.9999845027923584),
# ('485', 0.9999840259552002),
# ('216', 0.9999839067459106),
# ('172', 0.9999837875366211),
# ('419', 0.9999837875366211),
# ('132', 0.9999836683273315),
# ('451', 0.9999836683273315),
# ('202', 0.9999836087226868),
# ('11', 0.9999832510948181),
# ('182', 0.9999831914901733),
# ('71', 0.9999830722808838),
# ('234', 0.9999829530715942),
# ('83', 0.9999829530715942),
# ('237', 0.9999825954437256),
# ('228', 0.999982476234436),
# ('82', 0.9999821782112122),
# ('223', 0.9999821186065674),
# ('385', 0.9999821186065674),
# ('96', 0.9999818801879883),
# ('501', 0.9999818801879883),
# ('95', 0.999981701374054),
# ('1', 0.9999816417694092),
# ('196', 0.9999814629554749),
# ('684', 0.9999814033508301),
# ('288', 0.9999814033508301),
# ('200', 0.9999813437461853),
# ('199', 0.9999813437461853),
# ('28', 0.9999812841415405),
# ('144', 0.9999812841415405),
# ('121', 0.999981164932251),
# ('423', 0.9999811053276062),
# ('484', 0.9999809265136719),
# ('655', 0.9999808073043823),
# ('663', 0.9999805688858032),
# ('174', 0.9999805688858032),
# ('568', 0.9999803304672241),
# ('432', 0.9999803304672241),
# ('69', 0.9999802112579346),
# ('257', 0.9999802112579346),
# ('183', 0.9999801516532898),
# ('179', 0.9999799728393555),
# ('735', 0.9999799728393555),
# ('168', 0.9999799728393555),
# ('181', 0.9999799728393555)]
# user 654(userId 654) Final recommendations
#[588, 71, 196, 144, 98, 83, 82, 69, 204, 568, 215, 174, 317, 66, 269, 735]
# User 654's ratings on Final movies
# [4, 3, 5, 5, 5, 5, 5, 4, 4, 4, 4, 5, 4, 4, 4, 4]
# later...
# def iter_exam(items, item_numbers, disliked_list, movies_contents, rtmp, user):
# num_iter = 10
# examine_list_iter = []
# for i in xrange(num_iter):
# print 'iteration number: ', i+1
# element = rec_eval(items, item_numbers, disliked_list, movies_contents, rtmp, user)
# examine_list_iter.append(element)
# print examine_list_iter
# return examine_list_iter
def count_num_ratings_per_users(ratings_as_mat):
'''
To identify the number of ratings per users
'''
count_per_users = {}
tmp = sparse.csr_matrix(ratings_as_mat)
num_users = tmp.shape[0]
for i in xrange(num_users):
ratings_list = [int(x) for x in list(chain.from_iterable(tmp[i].toarray().tolist())) if x > 0]
count_per_users[i+1] = len(ratings_list)
if i % 100 == 0:
print '<counting number of ratings for user>', i , ' out of ', num_users
return count_per_users
def recall_at_M(test_user, final_examine_list, num_items):
'''
Number of items user i liked among the top M items (test set)
recall @ M = --------------------------------------------------------------
Total number of items user i likes (test set)
'''
# first define "likes"
# likes = ratings over 4
numerator = len([x for x in final_examine_list if x >=4])
denominator = len(final_examine_list) # M
return float(numerator) / denominator
if __name__=="__main__":
up=5; down=2
user = 181
# how many movies do users rated?
# count_ratings_per_users = count_num_ratings_per_users(ratings_as_mat)
# count_ratings_per_users = pd.DataFrame(count_ratings_per_users.items(), columns = ['userId','num_ratings'])
# count_ratings_per_users = count_ratings_per_users.sort_values(by = 'num_ratings', axis =0, ascending = False)
# In [145]: count_ratings_per_users
# Out[145]:
# userId num_ratings
# 12 13 526
# 404 405 520
# 654 655 443
# 449 450 423
# 275 276 400
# 302 303 385
# 233 234 369
# 536 537 349
# 6 7 328
# 845 846 321
# 392 393 318
# 307 308 314
# 278 279 308
# 180 181 308
# 93 94 306
# 91 92 296
# 428 429 295
# 416 417 292
# 879 880 288
# 757 758 287
# 221 222 286
# 434 435 285
# 292 293 280
# 200 201 279
# 377 378 276
# 560 561 275
# 129 130 273
# 681 682 273
# 591 592 271
# 58 59 270
'''
User 13's ratings on Final movies
[5, 4, 4, 4, 4, 5, 2, 4, 2, 3, 3, 4, 4, 5]
User 405's ratings on Final movies
[3, 5, 4, 5, 3, 4, 3, 4, 5, 3, 4, 5, 5, 3, 5, 5, 5, 3]
User 450's ratings on Final movies
[4, 5, 3, 5, 3, 4, 5, 5, 3, 3, 5, 3, 5, 4, 3, 5, 4, 4, 5, 3, 4, 4]
User 276's ratings on Final movies
[5, 4, 5, 5, 4, 4, 4, 5, 4, 4, 4, 5]
User 276's ratings on Final movies
[5, 4, 4, 4, 5, 4, 4, 4, 4, 4, 4, 5, 4]
User 303's ratings on Final movies
[4]
User 303's ratings on Final movies
[4]
User 234's ratings on Final movies
[3, 3, 4]
User 234's ratings on Final movies
[3, 4, 2, 3]
User 537's ratings on Final movies
[3, 3, 1, 3, 3, 3, 3, 2, 2]
User 537's ratings on Final movies
[2, 1, 3, 3, 2, 3]
User 7's ratings on Final movies
[5, 4, 5, 5, 1, 3, 5, 5, 5, 5, 5, 5, 4]
User 7's ratings on Final movies
[5, 5, 5, 5, 3, 5, 1, 5, 4, 5, 4, 5, 4, 5]
User 846's ratings on Final movies
[]
User 393's ratings on Final movies
[3, 3, 3, 4]
User 308's ratings on Final movies
[4, 3, 4, 3, 3, 4, 4]
User 279's ratings on Final movies
[3, 3, 3, 1, 4, 5, 4, 3, 3, 4, 1, 5, 4, 3]
User 181's ratings on Final movies
[1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2]
User 94's ratings on Final movies
[5, 5, 4, 5, 4, 3, 5]
User 92's ratings on Final movies
[4, 3, 3, 4]
User 429's ratings on Final movies
[5, 4, 5, 5, 5, 5, 4, 4]
User 417's ratings on Final movies
[4, 3]
'''
# preprocessing the data
# ratings_as_mat = pickle.load(open('data/ratings_as_mat_train.pkl', 'rb'))
# movies_as_mat = pickle.load(open('data/movies_as_mat_train_.pkl', 'rb'))
# movies_contents = pd.read_csv("data/movies.csv")
# movies_contents = movies_contents.set_index('movieId')
train_df = pickle.load(open('data/train_df.pkl', 'rb'))
test_df = pickle.load(open('data/test_df.pkl', 'rb'))
train_user = train_df[train_df['userId'] == user] # user already rated. used for training
test_user = test_df[test_df['userId'] == user] # user already rated. used for validation
# items, item_numbers, disliked_list from train_df
items = pickle.load(open('data/items_train_{}.pkl'.format((up,down)), 'rb'))
item_numbers = pickle.load(open('data/item_numbers_train_{}.pkl'.format((up,down)), 'rb'))
disliked_list = pickle.load(open('data/disliked_list_train_{}.pkl'.format((up,down)), 'rb'))
rtmp = sparse.csr_matrix(ratings_as_mat)
# examine_list = rec_eval(items, item_numbers, disliked_list, movies_contents, rtmp, user)
#testing(train_df, test_df, items, item_numbers, disliked_list, rtmp, user)
# copy and paste... of the function testing
negative_items = disliked_list
recommendations = test_simple_instance(items, item_numbers, negative_items, user)
print "Recommended movies"
extracted = extract_names(recommendations, movies_contents)
print "User {}'s ratings on Recommended movis".format(user)
examine_list = examine_the_results2(extracted, rtmp, user)
final = test_simple_instance_test1(test_user, items, item_numbers, disliked_list, user)
print final
print "final"
#recommended_list = list(zip(*recommendations)[0])
#recommended_list = [int(x)-1 for x in recommended_list]
# final_extracted = movies_contents.ix[final,:]
# print "User {}'s ratings on Recommended movis".format(user)
# examine_list = examine_the_results2(final_extracted, rtmp, user)
print 'train_user.shape',train_user.shape
print 'test_user.shape',test_user.shape
print 'train_user.movieId.unique()',train_user.movieId.unique().size
print 'test_user.movieId.unique()',test_user.movieId.unique().size
# final=[]
# for movie in recommended_list:
# if movie in train_user.movieId.unique():
# print "movie ",movie,' is in train_user'
# if movie in test_user.movieId.unique():
# final.append(movie)
# print "movie ",movie,' is in test_user'
print "Final movies"
final_extracted = movies_contents.ix[final,:]
print "User {}'s ratings on Final movies".format(user)
final_examine_list = examine_the_results3(final_extracted, test_user, user)
# later...
#examine_list_iter = iter_exam(items, item_numbers, disliked_list, movies_contents, ratings_as_mat, user=654)
#examine_list_iter = iter_exam(items, item_numbers, disliked_list, movies_contents, rtmp, user=654)
#or row in examine_list_iter for x in row if x > 0])
|
normal
|
{
"blob_id": "04dc4d46a645a23913e33606c500037d37418cd7",
"index": 8114,
"step-1": "import numpy as np\nimport pandas as pd\nfrom scipy import sparse, io\nimport cPickle as pickle\nimport sys\nsys.path.append('code')\nimport models\nimport split\nfrom itertools import chain\n\ndef test_simple_instance(items, item_numbers, negative_items, user):\n model = models.Word2VecRecommender(size=200, window=max(item_numbers), min_count=1)\n model.fit(items)\n user_items = items[user-1]\n #negative_items = [str(-1 * int(x)) for x in unliked_list[user-1]]\n negative_items = [str(-1 * int(x)) for x in negative_items[user-1]]\n #negative_items=[]\n recommendations = model.recommend(user_items, negative_items, num_items=50)\n return recommendations\n\ndef test_simple_instance_test1(test_user, items, item_numbers, negative_items, user):\n '''\n Returns list of tuples representing item recommendations.\n - based on user's liked_list and disliked_list\n - contains cosine similarities of the recommendations\n '''\n model = models.Word2VecRecommender(size=200, window=max(item_numbers), min_count=1)\n model.fit(items)\n user_items = items[user-1]\n negative_items = [str(-1 * int(x)) for x in negative_items[user-1]]\n final = model.recommend2(test_user, user_items, negative_items, num_items=100)\n return final # movieId\n\ndef extract_names(recommendations, movies_contents):\n '''\n Returns and print the name of the itmes.\n It contains the name and the genre.\n '''\n recommended_list = list(zip(*recommendations)[0]) # movieId\n # adjusting index by subtracting 1\n recommended_list = [int(x) for x in recommended_list]\n extracted = movies_contents.ix[recommended_list,:]\n print extracted\n print '\\n'\n return extracted\n\ndef examine_the_results2(final_extracted, rtmp, user):\n '''\n Returns the actual ratings of the recommend items from the user.\n Used for evaluating the performance of the recommender.\n '''\n idx = final_extracted.index\n examine_list = []\n rt = list(chain.from_iterable(rtmp[user-1].toarray().tolist()))\n for i in idx:\n r = rt[i-1]\n examine_list.append(r)\n print examine_list\n return examine_list\n\ndef examine_the_results3(final_extracted, test_user, user):\n '''\n Returns the actual ratings of the recommend items from the user.\n Used for evaluating the performance of the recommender.\n '''\n idx = final_extracted.index\n examine_list = []\n for i in idx:\n r = int( test_user[test_user['movieId'] == i].rating )\n examine_list.append(r)\n print examine_list\n return examine_list\n\ndef rec_eval(items, item_numbers, disliked_list, movies_contents, rtmp, user):\n '''\n User-specific processes.\n '''\n recommendations = test_simple_instance_test1(test_user, items, item_numbers, disliked_list, user)\n print recommendations\n # what are the names of the recommended movies\n print \"Recommended movies\"\n extracted = extract_names(recommendations, movies_contents)\n\n print \"User {}'s ratings on Recommended movis\".format(user)\n examine_list = examine_the_results2(extracted, rtmp, user)\n return examine_list\n\n\ndef testing(train_user, test_user, items, item_numbers, disliked_list, rtmp, user):\n # old\n negative_items = disliked_list\n recommendations = test_simple_instance(items, item_numbers, negative_items, user)\n print \"Recommended movies\"\n extracted = extract_names(recommendations, movies_contents)\n print \"User {}'s ratings on Recommended movis\".format(user)\n examine_list = examine_the_results2(extracted, rtmp, user)\n\n # new\n final = test_simple_instance_test1(test_user, items, item_numbers, disliked_list, user)\n print final\n print 
\"final\"\n #recommended_list = list(zip(*recommendations)[0])\n #recommended_list = [int(x)-1 for x in recommended_list]\n # final_extracted = movies_contents.ix[final,:]\n # print \"User {}'s ratings on Recommended movis\".format(user)\n # examine_list = examine_the_results2(final_extracted, rtmp, user)\n\n print 'train_user.shape',train_user.shape\n print 'test_user.shape',test_user.shape\n print 'train_user.movieId.unique()',train_user.movieId.unique().size\n print 'test_user.movieId.unique()',test_user.movieId.unique().size\n\n # final=[]\n # for movie in recommended_list:\n # if movie in train_user.movieId.unique():\n # print \"movie \",movie,' is in train_user'\n # if movie in test_user.movieId.unique():\n # final.append(movie)\n # print \"movie \",movie,' is in test_user'\n print \"Final movies\"\n final_extracted = movies_contents.ix[final,:]\n print \"User {}'s ratings on Final movies\".format(user)\n final_examine_list = examine_the_results3(final_extracted, test_user, user)\n print final_examine_list\n\n\n# user 654(userId 654) All recommendations\n# [('186', 0.999988317489624),\n# ('208', 0.9999874234199524),\n# ('527', 0.9999861121177673),\n# ('153', 0.9999856948852539),\n# ('125', 0.9999853372573853),\n# ('588', 0.9999845027923584),\n# ('204', 0.9999845027923584),\n# ('485', 0.9999840259552002),\n# ('216', 0.9999839067459106),\n# ('172', 0.9999837875366211),\n# ('419', 0.9999837875366211),\n# ('132', 0.9999836683273315),\n# ('451', 0.9999836683273315),\n# ('202', 0.9999836087226868),\n# ('11', 0.9999832510948181),\n# ('182', 0.9999831914901733),\n# ('71', 0.9999830722808838),\n# ('234', 0.9999829530715942),\n# ('83', 0.9999829530715942),\n# ('237', 0.9999825954437256),\n# ('228', 0.999982476234436),\n# ('82', 0.9999821782112122),\n# ('223', 0.9999821186065674),\n# ('385', 0.9999821186065674),\n# ('96', 0.9999818801879883),\n# ('501', 0.9999818801879883),\n# ('95', 0.999981701374054),\n# ('1', 0.9999816417694092),\n# ('196', 0.9999814629554749),\n# ('684', 0.9999814033508301),\n# ('288', 0.9999814033508301),\n# ('200', 0.9999813437461853),\n# ('199', 0.9999813437461853),\n# ('28', 0.9999812841415405),\n# ('144', 0.9999812841415405),\n# ('121', 0.999981164932251),\n# ('423', 0.9999811053276062),\n# ('484', 0.9999809265136719),\n# ('655', 0.9999808073043823),\n# ('663', 0.9999805688858032),\n# ('174', 0.9999805688858032),\n# ('568', 0.9999803304672241),\n# ('432', 0.9999803304672241),\n# ('69', 0.9999802112579346),\n# ('257', 0.9999802112579346),\n# ('183', 0.9999801516532898),\n# ('179', 0.9999799728393555),\n# ('735', 0.9999799728393555),\n# ('168', 0.9999799728393555),\n# ('181', 0.9999799728393555)]\n\n# user 654(userId 654) Final recommendations\n#[588, 71, 196, 144, 98, 83, 82, 69, 204, 568, 215, 174, 317, 66, 269, 735]\n# User 654's ratings on Final movies\n# [4, 3, 5, 5, 5, 5, 5, 4, 4, 4, 4, 5, 4, 4, 4, 4]\n\n\n# later...\n# def iter_exam(items, item_numbers, disliked_list, movies_contents, rtmp, user):\n# num_iter = 10\n# examine_list_iter = []\n# for i in xrange(num_iter):\n# print 'iteration number: ', i+1\n# element = rec_eval(items, item_numbers, disliked_list, movies_contents, rtmp, user)\n# examine_list_iter.append(element)\n# print examine_list_iter\n# return examine_list_iter\n\ndef count_num_ratings_per_users(ratings_as_mat):\n '''\n To identify the number of ratings per users\n '''\n count_per_users = {}\n tmp = sparse.csr_matrix(ratings_as_mat)\n num_users = tmp.shape[0]\n for i in xrange(num_users):\n ratings_list = [int(x) for x in 
list(chain.from_iterable(tmp[i].toarray().tolist())) if x > 0]\n count_per_users[i+1] = len(ratings_list)\n if i % 100 == 0:\n print '<counting number of ratings for user>', i , ' out of ', num_users\n return count_per_users\n\ndef recall_at_M(test_user, final_examine_list, num_items):\n '''\n Number of items user i liked among the top M items (test set)\n recall @ M = --------------------------------------------------------------\n Total number of items user i likes (test set)\n '''\n # first define \"likes\"\n # likes = ratings over 4\n numerator = len([x for x in final_examine_list if x >=4])\n denominator = len(final_examine_list) # M\n return float(numerator) / denominator\n\n\n\n\n\nif __name__==\"__main__\":\n up=5; down=2\n user = 181\n\n # how many movies do users rated?\n # count_ratings_per_users = count_num_ratings_per_users(ratings_as_mat)\n # count_ratings_per_users = pd.DataFrame(count_ratings_per_users.items(), columns = ['userId','num_ratings'])\n # count_ratings_per_users = count_ratings_per_users.sort_values(by = 'num_ratings', axis =0, ascending = False)\n\n # In [145]: count_ratings_per_users\n # Out[145]:\n # userId num_ratings\n # 12 13 526\n # 404 405 520\n # 654 655 443\n # 449 450 423\n # 275 276 400\n # 302 303 385\n # 233 234 369\n # 536 537 349\n # 6 7 328\n # 845 846 321\n # 392 393 318\n # 307 308 314\n # 278 279 308\n # 180 181 308\n # 93 94 306\n # 91 92 296\n # 428 429 295\n # 416 417 292\n # 879 880 288\n # 757 758 287\n # 221 222 286\n # 434 435 285\n # 292 293 280\n # 200 201 279\n # 377 378 276\n # 560 561 275\n # 129 130 273\n # 681 682 273\n # 591 592 271\n # 58 59 270\n\n '''\n User 13's ratings on Final movies\n [5, 4, 4, 4, 4, 5, 2, 4, 2, 3, 3, 4, 4, 5]\n\n User 405's ratings on Final movies\n [3, 5, 4, 5, 3, 4, 3, 4, 5, 3, 4, 5, 5, 3, 5, 5, 5, 3]\n User 450's ratings on Final movies\n [4, 5, 3, 5, 3, 4, 5, 5, 3, 3, 5, 3, 5, 4, 3, 5, 4, 4, 5, 3, 4, 4]\n\n User 276's ratings on Final movies\n [5, 4, 5, 5, 4, 4, 4, 5, 4, 4, 4, 5]\n User 276's ratings on Final movies\n [5, 4, 4, 4, 5, 4, 4, 4, 4, 4, 4, 5, 4]\n\n User 303's ratings on Final movies\n [4]\n User 303's ratings on Final movies\n [4]\n\n User 234's ratings on Final movies\n [3, 3, 4]\n User 234's ratings on Final movies\n [3, 4, 2, 3]\n\n User 537's ratings on Final movies\n [3, 3, 1, 3, 3, 3, 3, 2, 2]\n User 537's ratings on Final movies\n [2, 1, 3, 3, 2, 3]\n\n User 7's ratings on Final movies\n [5, 4, 5, 5, 1, 3, 5, 5, 5, 5, 5, 5, 4]\n User 7's ratings on Final movies\n [5, 5, 5, 5, 3, 5, 1, 5, 4, 5, 4, 5, 4, 5]\n\n User 846's ratings on Final movies\n []\n\n User 393's ratings on Final movies\n [3, 3, 3, 4]\n\n User 308's ratings on Final movies\n [4, 3, 4, 3, 3, 4, 4]\n\n User 279's ratings on Final movies\n [3, 3, 3, 1, 4, 5, 4, 3, 3, 4, 1, 5, 4, 3]\n\n User 181's ratings on Final movies\n [1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2]\n\n User 94's ratings on Final movies\n [5, 5, 4, 5, 4, 3, 5]\n\n User 92's ratings on Final movies\n [4, 3, 3, 4]\n\n User 429's ratings on Final movies\n [5, 4, 5, 5, 5, 5, 4, 4]\n\n User 417's ratings on Final movies\n [4, 3]\n '''\n # preprocessing the data\n # ratings_as_mat = pickle.load(open('data/ratings_as_mat_train.pkl', 'rb'))\n # movies_as_mat = pickle.load(open('data/movies_as_mat_train_.pkl', 'rb'))\n # movies_contents = pd.read_csv(\"data/movies.csv\")\n # movies_contents = movies_contents.set_index('movieId')\n train_df = pickle.load(open('data/train_df.pkl', 'rb'))\n test_df = pickle.load(open('data/test_df.pkl', 'rb'))\n\n train_user = 
train_df[train_df['userId'] == user] # user already rated. used for training\n test_user = test_df[test_df['userId'] == user] # user already rated. used for validation\n\n # items, item_numbers, disliked_list from train_df\n items = pickle.load(open('data/items_train_{}.pkl'.format((up,down)), 'rb'))\n item_numbers = pickle.load(open('data/item_numbers_train_{}.pkl'.format((up,down)), 'rb'))\n disliked_list = pickle.load(open('data/disliked_list_train_{}.pkl'.format((up,down)), 'rb'))\n rtmp = sparse.csr_matrix(ratings_as_mat)\n\n # examine_list = rec_eval(items, item_numbers, disliked_list, movies_contents, rtmp, user)\n #testing(train_df, test_df, items, item_numbers, disliked_list, rtmp, user)\n\n # copy and paste... of the function testing\n negative_items = disliked_list\n recommendations = test_simple_instance(items, item_numbers, negative_items, user)\n print \"Recommended movies\"\n extracted = extract_names(recommendations, movies_contents)\n\n print \"User {}'s ratings on Recommended movis\".format(user)\n examine_list = examine_the_results2(extracted, rtmp, user)\n\n final = test_simple_instance_test1(test_user, items, item_numbers, disliked_list, user)\n print final\n print \"final\"\n #recommended_list = list(zip(*recommendations)[0])\n #recommended_list = [int(x)-1 for x in recommended_list]\n # final_extracted = movies_contents.ix[final,:]\n # print \"User {}'s ratings on Recommended movis\".format(user)\n # examine_list = examine_the_results2(final_extracted, rtmp, user)\n\n print 'train_user.shape',train_user.shape\n print 'test_user.shape',test_user.shape\n print 'train_user.movieId.unique()',train_user.movieId.unique().size\n print 'test_user.movieId.unique()',test_user.movieId.unique().size\n\n # final=[]\n # for movie in recommended_list:\n # if movie in train_user.movieId.unique():\n # print \"movie \",movie,' is in train_user'\n # if movie in test_user.movieId.unique():\n # final.append(movie)\n # print \"movie \",movie,' is in test_user'\n print \"Final movies\"\n final_extracted = movies_contents.ix[final,:]\n print \"User {}'s ratings on Final movies\".format(user)\n final_examine_list = examine_the_results3(final_extracted, test_user, user)\n\n\n\n\n\n # later...\n #examine_list_iter = iter_exam(items, item_numbers, disliked_list, movies_contents, ratings_as_mat, user=654)\n #examine_list_iter = iter_exam(items, item_numbers, disliked_list, movies_contents, rtmp, user=654)\n #or row in examine_list_iter for x in row if x > 0])\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |