# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Data operations, will be used in run_pretrain.py
"""
import os
import math
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.dataset as de
import mindspore.dataset.transforms as C
from mindspore import log as logger
class BucketDatasetGenerator:
"""
Provide data distribution of different gears for the bert network.
Args:
data_set (Dataset): The training dataset.
batch_size (Int): The training batchsize.
bucket_list (List): List of different sentence lengths,such as [128, 256, 512]. Default: None.
"""
def __init__(self, data_set, batch_size, bucket_list=None):
self.dataset = data_set
self.batch_size = batch_size
self.bucket_list = bucket_list
self.data_bucket = {bucket: [] for bucket in bucket_list}
bucket_size = len(bucket_list)
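        # Pre-draw one target bucket per step: the binomial samples fall in
        # [0, bucket_size - 1], are shifted by 2 modulo bucket_size, and are then
        # mapped to the corresponding sentence lengths in bucket_list.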
self.random_list = np.random.binomial(n=(bucket_size - 1), p=0.5, size=self.__len__())
self.random_list = (self.random_list + 2) % bucket_size
self.random_list = [bucket_list[i] for i in self.random_list]
self.iter = 0
def __next__(self):
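        # Route each incoming sample into the shortest bucket that fits its real
        # token count (sum of input_mask), then emit a batch once the bucket
        # pre-selected for this step has accumulated batch_size samples.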
for item in self.iterator:
for seq_length in self.bucket_list:
if np.sum(item[1]) <= seq_length:
self.data_bucket[seq_length].append(item)
break
for key in self.data_bucket.keys():
data = self.data_bucket[key]
if len(data) >= self.batch_size and self.random_list[self.iter] == key:
self.data_bucket[key] = self.data_bucket[key][self.batch_size:]
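                    # Batch the first batch_size samples by hand: concatenate each
                    # column across samples and reshape it to (batch_size, -1).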
arr = data[0]
for i in range(1, self.batch_size):
current_data = data[i]
for j in range(len(current_data)):
arr[j] = np.concatenate((arr[j], current_data[j]))
res = ()
for label in arr:
newlabel = np.reshape(label, (self.batch_size, -1))
res += (newlabel,)
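                    # Append the bucket length itself as the extra "sentence_flag"
                    # column expected by the GeneratorDataset built below.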
res += (np.array(key, np.int32),)
self.iter += 1
return res
raise StopIteration
def __iter__(self):
self.iterator = self.dataset.create_tuple_iterator(output_numpy=True)
return self
def __len__(self):
return (self.dataset.get_dataset_size() // self.batch_size) - 1
def create_albert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None, schema_dir=None, batch_size=32,
bucket_list=None):
"""create train dataset"""
    # collect all tfrecord files under data_dir
files = os.listdir(data_dir)
data_files = []
for file_name in files:
if "tfrecord" in file_name:
data_files.append(os.path.join(data_dir, file_name))
data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
shuffle=de.Shuffle.FILES if do_shuffle == "true" else False,
num_shards=device_num, shard_id=rank, shard_equal_rows=True)
if bucket_list:
bucket_dataset = BucketDatasetGenerator(data_set, batch_size, bucket_list=bucket_list)
data_set = de.GeneratorDataset(bucket_dataset,
column_names=["input_ids", "input_mask", "segment_ids",
"next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights",
"sentence_flag"],
shuffle=False)
else:
data_set = data_set.batch(batch_size, drop_remainder=True)
ori_dataset_size = data_set.get_dataset_size()
print('origin dataset size: ', ori_dataset_size)
type_cast_op = C.TypeCast(mstype.int32)
data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_positions")
data_set = data_set.map(operations=type_cast_op, input_columns="next_sentence_labels")
data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="input_mask")
data_set = data_set.map(operations=type_cast_op, input_columns="input_ids")
    # log the resulting dataset size and repeat count
logger.info("data size: {}".format(data_set.get_dataset_size()))
logger.info("repeat count: {}".format(data_set.get_repeat_count()))
return data_set
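
# A minimal usage sketch for the bucketed pretraining pipeline (illustrative only:
# the data directory and bucket list below are assumptions, not values shipped with
# this file):
#
#   train_ds = create_albert_dataset(device_num=1, rank=0, do_shuffle="true",
#                                    data_dir="/path/to/pretrain_tfrecords",
#                                    schema_dir="", batch_size=32,
#                                    bucket_list=[128, 256, 512])
#   for columns in train_ds.create_tuple_iterator():
#       ...  # input_ids, input_mask, segment_ids, next_sentence_labels, ...
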
def create_classification_dataset(batch_size=1, repeat_count=1, assessment_method="accuracy",
data_file_path=None, schema_file_path=None, do_shuffle=True,
rank_size=1, rank_id=0):
"""create finetune or evaluation dataset"""
type_cast_op = C.TypeCast(mstype.int32)
ds = de.MindDataset([data_file_path],
columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"], shuffle=do_shuffle,
num_shards=rank_size, shard_id=rank_id)
if assessment_method == "Spearman_correlation":
type_cast_op_float = C.TypeCast(mstype.float32)
ds = ds.map(operations=type_cast_op_float, input_columns="label_ids")
else:
ds = ds.map(operations=type_cast_op, input_columns="label_ids")
ds = ds.map(operations=type_cast_op, input_columns="segment_ids")
ds = ds.map(operations=type_cast_op, input_columns="input_mask")
ds = ds.map(operations=type_cast_op, input_columns="input_ids")
ds = ds.repeat(repeat_count)
# apply batch operations
ds = ds.batch(batch_size, drop_remainder=True)
return ds
def generator_squad(data_features):
for feature in data_features:
yield (feature.input_ids, feature.input_mask, feature.segment_ids, feature.unique_id)
def generator_squad_train(data_features):
for feature in data_features:
yield (feature.input_ids, feature.input_mask, feature.segment_ids, feature.start_position, feature.end_position,
feature.unique_id, feature.is_impossible)
def create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None, schema_file_path=None,
is_training=True, do_shuffle=True, rank_size=1,
rank_id=0):
"""create finetune or evaluation dataset"""
type_cast_op = C.TypeCast(mstype.int32)
if is_training:
print("data_file_path: ", data_file_path)
print("rank_id: ", rank_id)
ds = de.MindDataset([data_file_path],
columns_list=["input_ids", "input_mask", "segment_ids", "start_positions",
"end_positions", "unique_ids", "is_impossible"],
shuffle=do_shuffle, num_shards=rank_size, shard_id=rank_id)
ds = ds.map(operations=type_cast_op, input_columns="start_positions")
ds = ds.map(operations=type_cast_op, input_columns="end_positions")
else:
ds = de.GeneratorDataset(generator_squad(data_file_path), shuffle=do_shuffle,
column_names=["input_ids", "input_mask", "segment_ids", "unique_ids"])
ds = ds.map(operations=type_cast_op, input_columns="input_ids")
ds = ds.map(operations=type_cast_op, input_columns="input_mask")
ds = ds.map(operations=type_cast_op, input_columns="segment_ids")
ds = ds.map(operations=type_cast_op, input_columns="unique_ids")
ds = ds.repeat(repeat_count)
# apply batch operations
ds = ds.batch(batch_size, drop_remainder=True)
return ds
def create_eval_dataset(batchsize=32, device_num=1, rank=0, data_dir=None, schema_dir=None):
"""create evaluation dataset"""
data_files = []
if os.path.isdir(data_dir):
files = os.listdir(data_dir)
for file_name in files:
if "tfrecord" in file_name:
data_files.append(os.path.join(data_dir, file_name))
else:
data_files.append(data_dir)
data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
shard_equal_rows=True)
ori_dataset_size = data_set.get_dataset_size()
print("origin eval size: ", ori_dataset_size)
dtypes = data_set.output_types()
shapes = data_set.output_shapes()
output_batches = math.ceil(ori_dataset_size / device_num / batchsize)
padded_num = output_batches * device_num * batchsize - ori_dataset_size
print("padded num: ", padded_num)
if padded_num > 0:
item = {"input_ids": np.zeros(shapes[0], dtypes[0]),
"input_mask": np.zeros(shapes[1], dtypes[1]),
"segment_ids": np.zeros(shapes[2], dtypes[2]),
"next_sentence_labels": np.zeros(shapes[3], dtypes[3]),
"masked_lm_positions": np.zeros(shapes[4], dtypes[4]),
"masked_lm_ids": np.zeros(shapes[5], dtypes[5]),
"masked_lm_weights": np.zeros(shapes[6], dtypes[6])}
padded_samples = [item for x in range(padded_num)]
padded_ds = de.PaddedDataset(padded_samples)
eval_ds = data_set + padded_ds
sampler = de.DistributedSampler(num_shards=device_num, shard_id=rank, shuffle=False)
eval_ds.use_sampler(sampler)
else:
eval_ds = de.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids",
"next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
num_shards=device_num, shard_id=rank, shard_equal_rows=True)
type_cast_op = C.TypeCast(mstype.int32)
eval_ds = eval_ds.map(input_columns="masked_lm_ids", operations=type_cast_op)
eval_ds = eval_ds.map(input_columns="masked_lm_positions", operations=type_cast_op)
eval_ds = eval_ds.map(input_columns="next_sentence_labels", operations=type_cast_op)
eval_ds = eval_ds.map(input_columns="segment_ids", operations=type_cast_op)
eval_ds = eval_ds.map(input_columns="input_mask", operations=type_cast_op)
eval_ds = eval_ds.map(input_columns="input_ids", operations=type_cast_op)
eval_ds = eval_ds.batch(batchsize, drop_remainder=True)
print("eval data size: {}".format(eval_ds.get_dataset_size()))
print("eval repeat count: {}".format(eval_ds.get_repeat_count()))
return eval_ds
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
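
# This migration drops the `type` column from GuacamoleConnectionGroup, clears the
# unique_together constraints on the Guacamole permission models, and removes their
# `permission` fields.
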
class Migration(migrations.Migration):
dependencies = [
('guac_auth', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='guacamoleconnectiongroup',
name='type',
),
migrations.AlterUniqueTogether(
name='guacamoleconnectiongrouppermission',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='guacamoleconnectionpermission',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='guacamolesystempermission',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='guacamoleuserpermission',
unique_together=set([]),
),
migrations.RemoveField(
model_name='guacamoleconnectiongrouppermission',
name='permission',
),
migrations.RemoveField(
model_name='guacamoleconnectionpermission',
name='permission',
),
migrations.RemoveField(
model_name='guacamolesystempermission',
name='permission',
),
migrations.RemoveField(
model_name='guacamoleuserpermission',
name='permission',
),
]
"""Exercise 9c"""
import time
import numpy as np
import matplotlib.pyplot as plt
from plot_results import plot_2d
from run_simulation import run_simulation
from simulation_parameters import SimulationParameters
def exercise_9c(world, timestep, reset):
"""Exercise 9c"""
n_joints = 10
Rhead = 0.44
Rtail = 0.23
parameter_set = [
SimulationParameters(
simulation_duration=15,
drive=4.0,
amplitudes=None,
phase_lag=None,
turn=None,
amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],
backward = None,
frequency = 1,
# ...
)
#for Rhead in np.linspace(0.2,0.5,10)
#for Rtail in np.linspace(0.5,0.2,10)
# for amplitudes in ...
# for ...
]
    # Grid search (only a single parameter set is simulated here; the Rhead/Rtail sweep above is commented out)
for simulation_i, parameters in enumerate(parameter_set):
reset.reset()
run_simulation(
world,
parameters,
timestep,
int(1000*parameters.simulation_duration/timestep),
logs="./logs/9c/simulation_{}.npz".format(simulation_i)
)
plot_9c(parameter_set)
def main():
n_joints = 10
#Rhead = 0.44
#Rtail = 0.27
parameter_set = [
SimulationParameters(
simulation_duration=15,
drive=4.0,
amplitudes=None,
phase_lag=None,
turn=None,
amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],
backward = None,
frequency = 1,
# ...
)
for Rhead in np.linspace(0.2,0.5,10)
for Rtail in np.linspace(0.5,0.2,10)
# for amplitudes in ...
# for ...
]
plot_9c(parameter_set)
def plot_9c(parameter_set):
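    # Load each simulation log, drop the first 4 s of transient, then compute the
    # mean forward velocity, an energy measure (log10 of the mean summed joint
    # torque * joint velocity) and their ratio, and plot each quantity as a 2D map
    # over the head/tail amplitude grid.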
results_vel = np.zeros([len(parameter_set),3])
results_en = np.zeros([len(parameter_set),3])
ratio_vel_en = np.zeros([len(parameter_set),3])
sal_pos_t = []
sal_pos_t_bad = []
t = time.time()
#path = os.path.dirname(__file__)
path = 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'
print(path)
for i in range(len(parameter_set)):
with np.load(path+'/logs/9c/simulation_'+str(i)+'.npz',allow_pickle=True) as data:
#? initialisation for the computation
position = data["links"][:, 0, :]
n_steps = len(position)
timestep = float(data["timestep"])
results_vel[i][0] = data["amplitude_gradient"][0]
results_vel[i][1] = data["amplitude_gradient"][1]
results_en[i][:2] = results_vel[i][:2]
ratio_vel_en[i][:2] = results_vel[i][:2]
#! Velocity
            begin_step = int(4/timestep)
vel = (position[n_steps-1,:] - position[begin_step,:])**2
results_vel[i][2] = np.sqrt(np.sum(vel))/((n_steps-begin_step)*timestep)
#! Energy
joint_vel = data["joints"][begin_step:,:,1]
joint_tor = data["joints"][begin_step:,:,3]
energy = joint_vel * joint_tor
results_en[i][2] = np.log10(np.mean(np.sum(energy,1)))
#! Ratio
ratio_vel_en[i][2] = results_vel[i][2]/results_en[i][2]
    print('Time elapsed for the velocity plot: ' + str(time.time() - t))
plt.figure("Velocity")
plot_2d(results_vel,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Velocity [m/s]'])
plt.figure("Energy")
plot_2d(results_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', '$log_{10}(Energy)$[J]'])
plt.figure("Ratio")
    plot_2d(ratio_vel_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', r'Ratio V/E $[s\cdot kg^{-1}\cdot m^{-1}]$'])
t = time.time()
plt.show()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "a0284eba1a0e6c498f240068c586e7f8b79cd86c",
"index": 5782,
"step-1": "<mask token>\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel * joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n n_joints = 10\n Rhead = 0.44\n Rtail = 0.23\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1)]\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(world, parameters, timestep, int(1000 * parameters.\n simulation_duration / timestep), logs=\n './logs/9c/simulation_{}.npz'.format(simulation_i))\n plot_9c(parameter_set)\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel * joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n n_joints = 10\n Rhead = 0.44\n Rtail = 0.23\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1)]\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(world, parameters, timestep, int(1000 * parameters.\n simulation_duration / timestep), logs=\n './logs/9c/simulation_{}.npz'.format(simulation_i))\n plot_9c(parameter_set)\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel * joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plot_results import plot_2d\nfrom run_simulation import run_simulation\nfrom simulation_parameters import SimulationParameters\n\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n n_joints = 10\n Rhead = 0.44\n Rtail = 0.23\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1)]\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(world, parameters, timestep, int(1000 * parameters.\n simulation_duration / timestep), logs=\n './logs/9c/simulation_{}.npz'.format(simulation_i))\n plot_9c(parameter_set)\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel * joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"Exercise 9c\"\"\"\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plot_results import plot_2d\nfrom run_simulation import run_simulation\nfrom simulation_parameters import SimulationParameters\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n\n n_joints = 10\n \n Rhead = 0.44\n Rtail = 0.23\n\n\n parameter_set = [\n SimulationParameters(\n simulation_duration=15,\n drive=4.0,\n amplitudes=None,\n phase_lag=None,\n turn=None,\n amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],\n backward = None,\n frequency = 1,\n # ...\n )\n \n #for Rhead in np.linspace(0.2,0.5,10)\n #for Rtail in np.linspace(0.5,0.2,10)\n # for amplitudes in ...\n # for ...\n ]\n\n \n # Grid search\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(\n world,\n parameters,\n timestep,\n int(1000*parameters.simulation_duration/timestep),\n logs=\"./logs/9c/simulation_{}.npz\".format(simulation_i)\n )\n\n \n\n plot_9c(parameter_set)\n \n\n \n\ndef main():\n\n\n n_joints = 10\n\n #Rhead = 0.44\n #Rtail = 0.27 \n \n parameter_set = [\n SimulationParameters(\n simulation_duration=15,\n drive=4.0,\n amplitudes=None,\n phase_lag=None,\n turn=None,\n amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],\n backward = None,\n frequency = 1,\n # ...\n )\n \n for Rhead in np.linspace(0.2,0.5,10)\n for Rtail in np.linspace(0.5,0.2,10)\n # for amplitudes in ...\n # for ...\n ]\n\n plot_9c(parameter_set)\n\n \ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set),3])\n results_en = np.zeros([len(parameter_set),3])\n ratio_vel_en = np.zeros([len(parameter_set),3])\n \n \n sal_pos_t = []\n sal_pos_t_bad = []\n\n \n t = time.time()\n\n #path = os.path.dirname(__file__)\n path = 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path+'/logs/9c/simulation_'+str(i)+'.npz',allow_pickle=True) as data:\n \n #? initialisation for the computation\n position = data[\"links\"][:, 0, :]\n n_steps = len(position)\n \n timestep = float(data[\"timestep\"])\n\n results_vel[i][0] = data[\"amplitude_gradient\"][0]\n results_vel[i][1] = data[\"amplitude_gradient\"][1] \n\n results_en[i][:2] = results_vel[i][:2] \n ratio_vel_en[i][:2] = results_vel[i][:2]\n\n \n\n #! Velocity\n\n begin_step = (int)(4/timestep)\n\n vel = (position[n_steps-1,:] - position[begin_step,:])**2\n results_vel[i][2] = np.sqrt(np.sum(vel))/((n_steps-begin_step)*timestep)\n\n #! Energy\n\n joint_vel = data[\"joints\"][begin_step:,:,1]\n joint_tor = data[\"joints\"][begin_step:,:,3]\n\n energy = joint_vel * joint_tor\n \n results_en[i][2] = np.log10(np.mean(np.sum(energy,1)))\n \n #! Ratio \n\n ratio_vel_en[i][2] = results_vel[i][2]/results_en[i][2]\n \n \n print ('Time elapsed for the velocity plot' + str(time.time()-t))\n\n\n\n plt.figure(\"Velocity\")\n plot_2d(results_vel,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Velocity [m/s]'])\n plt.figure(\"Energy\")\n plot_2d(results_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', '$log_{10}(Energy)$[J]'])\n plt.figure(\"Ratio\")\n plot_2d(ratio_vel_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Ratio V/E $[s\\cdot kg^{-1}\\cdot m^{-1}]$'])\n \n t = time.time()\n \n plt.show() \n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from amqpstorm import management
if __name__ == '__main__':
# If using a self-signed certificate, change verify=True to point at your CA bundle.
# You can disable certificate verification for testing by passing in verify=False.
API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',
'guest', verify=True)
try:
result = API.aliveness_test('/')
if result['status'] == 'ok':
print('RabbitMQ is alive!')
else:
print('RabbitMQ is not alive! :(')
except management.ApiConnectionError as why:
print('Connection Error: %s' % why)
except management.ApiError as why:
print('ApiError: %s' % why)
|
normal
|
{
"blob_id": "0279057b3962e4b9839a86fc2e2683ac1da11b1a",
"index": 8665,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n API = management.ManagementApi('https://rmq.amqpstorm.io:15671',\n 'guest', 'guest', verify=True)\n try:\n result = API.aliveness_test('/')\n if result['status'] == 'ok':\n print('RabbitMQ is alive!')\n else:\n print('RabbitMQ is not alive! :(')\n except management.ApiConnectionError as why:\n print('Connection Error: %s' % why)\n except management.ApiError as why:\n print('ApiError: %s' % why)\n",
"step-3": "from amqpstorm import management\nif __name__ == '__main__':\n API = management.ManagementApi('https://rmq.amqpstorm.io:15671',\n 'guest', 'guest', verify=True)\n try:\n result = API.aliveness_test('/')\n if result['status'] == 'ok':\n print('RabbitMQ is alive!')\n else:\n print('RabbitMQ is not alive! :(')\n except management.ApiConnectionError as why:\n print('Connection Error: %s' % why)\n except management.ApiError as why:\n print('ApiError: %s' % why)\n",
"step-4": "from amqpstorm import management\n\nif __name__ == '__main__':\n # If using a self-signed certificate, change verify=True to point at your CA bundle.\n # You can disable certificate verification for testing by passing in verify=False.\n API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',\n 'guest', verify=True)\n try:\n result = API.aliveness_test('/')\n if result['status'] == 'ok':\n print('RabbitMQ is alive!')\n else:\n print('RabbitMQ is not alive! :(')\n except management.ApiConnectionError as why:\n print('Connection Error: %s' % why)\n except management.ApiError as why:\n print('ApiError: %s' % why)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# encoding:UTF-8
# Problem: Fibonacci sequence.
def fib(n):
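	# Naive recursive definition: exponential time, but fine for small n such as 10.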
if n==1 or n==2:
return 1
return fib(n-1)+fib(n-2)
print (fib(10))
|
normal
|
{
"blob_id": "59376f6565cd72e20087609253a41c04c6327a27",
"index": 6324,
"step-1": "<mask token>\n",
"step-2": "def fib(n):\n if n == 1 or n == 2:\n return 1\n return fib(n - 1) + fib(n - 2)\n\n\n<mask token>\n",
"step-3": "def fib(n):\n if n == 1 or n == 2:\n return 1\n return fib(n - 1) + fib(n - 2)\n\n\nprint(fib(10))\n",
"step-4": "# encoding:UTF-8\n# 题目:斐波那契数列。\ndef fib(n):\n\tif n==1 or n==2:\n\t\treturn 1\n\treturn fib(n-1)+fib(n-2)\nprint (fib(10))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# python >= 3.7
# supported xmanager version <5.1, 5.1, 5.2, 6
import os
import argparse
import configparser
import unicodedata
from win32api import GetComputerName, GetUserName
from win32security import LookupAccountName, ConvertSidToStringSid
from base64 import b64encode, b64decode
from Cryptodome.Hash import MD5, SHA256
from Cryptodome.Cipher import ARC4
USERNAME = GetUserName()
MASTER_PWD = None
SID = ConvertSidToStringSid(LookupAccountName(GetComputerName(), GetUserName())[0])
IS_XSH = True
VERSION = '5.2'
KEY = os.path.join(os.environ["USERPROFILE"], r"Documents\NetSarang\Xshell\Sessions")
IS_DECRYPT = True
def getCipherKey():
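    # Key derivation depends on the product version:
    #   < 5.1     : MD5 of a fixed string ('!X@s#h$e%l^l&' for Xshell, '!X@s#c$e%l^l&' for Xftp)
    #   5.1 - 5.2 : SHA-256 of the user's SID
    #   > 5.2     : SHA-256 of USERNAME + SID, or of the master password if one is set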
if not is_number(VERSION):
raise ValueError('Invalid argument: --Version')
ver = float(VERSION)
if 0 < ver and ver < 5.1:
if IS_XSH:
return MD5.new(b'!X@s#h$e%l^l&').digest()
else:
return MD5.new(b'!X@s#c$e%l^l&').digest()
elif 5.1 <= ver and ver <= 5.2:
return SHA256.new(SID.encode()).digest()
elif 5.2 < ver:
if MASTER_PWD == None:
return SHA256.new((USERNAME + SID).encode()).digest()
else:
return SHA256.new(MASTER_PWD.encode()).digest()
else:
raise ValueError('Invalid argument: --Version')
def encrypt_string(password_string, need_return=False):
if not is_number(VERSION):
raise ValueError('Invalid argument: --Version')
ver = float(VERSION)
Cipher = ARC4.new(getCipherKey())
if ver < 5.1:
en_password = b64encode(Cipher.encrypt(password_string.encode())).decode()
else:
checksum = SHA256.new(password_string.encode()).digest()
ciphertext = Cipher.encrypt(password_string.encode())
en_password = b64encode(ciphertext + checksum).decode()
if need_return:
return en_password
else:
print('%-20s : %s' % ('Version', VERSION))
print('%-20s : %s' % ('Password', password_string))
print('%-20s : %s' % ('Encrypted Password', en_password))
def decrypt_string(password_string, need_return=False):
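    # For versions >= 5.1 the stored value is base64(RC4(password) + SHA-256(password));
    # the trailing digest is verified as a checksum. Older versions store base64(RC4(password)) only.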
if not is_number(VERSION):
raise ValueError('Invalid argument: --Version')
ver = float(VERSION)
Cipher = ARC4.new(getCipherKey())
try:
if ver < 5.1:
de_password = Cipher.decrypt(b64decode(password_string)).decode()
else:
data = b64decode(password_string)
ciphertext, checksum = data[:-SHA256.digest_size], data[-SHA256.digest_size:]
plaintext = Cipher.decrypt(ciphertext)
if SHA256.new(plaintext).digest() != checksum:
raise ValueError('Cannot decrypt string. The key is wrong!')
de_password = plaintext.decode('ascii')
if need_return:
return de_password
else:
print('%-20s : %s' % ('Version', VERSION))
print('%-20s : %s' % ('Password', password_string))
print('%-20s : %s' % ('Decrypted Password', de_password))
except Exception as e:
print("Password is invalid")
def decrypt_file(filepath: str = ''):
if not os.path.isfile(filepath):
print(f"{filepath:=^100}\nError: No file")
return
file = os.path.basename(os.path.realpath(filepath))
if file.endswith(".xsh") or file.endswith(".xfp"):
cfg = configparser.ConfigParser()
try:
cfg.read(filepath)
except UnicodeDecodeError:
cfg.read(filepath, encoding="utf-16")
try:
if file.endswith(".xsh"):
host = cfg["CONNECTION"]["Host"]
port = cfg["CONNECTION"]["Port"]
username = cfg["CONNECTION:AUTHENTICATION"]["UserName"]
password = cfg["CONNECTION:AUTHENTICATION"]["Password"]
version = cfg["SessionInfo"]["Version"]
de_password = decrypt_string(password, True)
else:
host = cfg["Connection"]["Host"]
port = cfg["Connection"]["Port"]
username = cfg["Connection"]["UserName"]
password = cfg["Connection"]["Password"]
version = cfg["SessionInfo"]["Version"]
de_password = decrypt_string(password, True)
print(f"{filepath:=^100}")
print('%-20s : %s' % ('Host', host))
print('%-20s : %s' % ('Port', port))
print('%-20s : %s' % ('Version', version))
print('%-20s : %s' % ('UserName', username))
print('%-20s : %s' % ('Password', de_password))
print('%-20s : %s' % ('Encrypted Password', password))
except Exception as e:
print(f"{filepath:=^100}\nError:{e}")
def decrypt_dir():
for root, dirs, files in os.walk(KEY):
for f in files:
decrypt_file(os.path.join(root, f))
def setDefaultSessionDirByVer():
if not is_number(VERSION):
return
ver = float(VERSION)
    dir = 'Xshell' if IS_XSH else 'Xftp'
global KEY
if ver < 6:
KEY = os.path.join(os.environ["USERPROFILE"], r"Documents\NetSarang\%s\Sessions" % dir)
elif ver == 6:
KEY = os.path.join(os.environ["USERPROFILE"], r"Documents\NetSarang Computer\6\%s\Sessions" % dir)
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="xsh, xfp password decrypt")
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("-e", "--encrypt", default=False,
help="<-e | -d> encrypt password, default -d", action="store_true")
group.add_argument("-d", "--decrypt", default=True,
help="<-e | -d> decrypt encrypted password, default -d", action="store_true")
parser.add_argument("-f", "--ftp", default=False,
help="xftp or xshell. Ignore if it is xshell", action="store_true")
parser.add_argument("-u", "--username", default="", type=str,
help="user `whoami /user` in command. Ignore if it is local. Used by version >= 5.1")
parser.add_argument("-m", "--master_pwd", default="", type=str,
help="user\'s master password. Used by version >= 6")
parser.add_argument("-s", "--sid", default="", type=str,
help="SID `whoami /user` in command. Ignore if it is local. Used by version >= 5.1")
parser.add_argument("-v", "--version", default="", type=str,
help="xsh or xfp version. If not specified, 5.2 will be used.")
parser.add_argument("-k", "--key", default="", nargs='?',
help="the path of sessions directory or file of xsh or xfp, or password or other key")
args = parser.parse_args()
#print(args)
if args.encrypt:
IS_DECRYPT = False
if args.sid:
SID = args.sid
if args.username:
USERNAME = args.username
if args.master_pwd:
MASTER_PWD = args.master_pwd
if args.ftp:
IS_XSH = False
if is_number(args.version):
VERSION = args.version
if args.key:
KEY = args.key
if not args.key and (is_number(args.version) or args.ftp):
setDefaultSessionDirByVer()
if IS_DECRYPT:
if os.path.isdir(KEY):
decrypt_dir()
elif os.path.isfile(KEY):
decrypt_file(KEY)
else:
decrypt_string(KEY)
else:
encrypt_string(KEY)
|
normal
|
{
"blob_id": "5f2427c077d460d109f5a3e94b93f72c090f036d",
"index": 7181,
"step-1": "<mask token>\n\n\ndef decrypt_string(password_string, need_return=False):\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n ver = float(VERSION)\n Cipher = ARC4.new(getCipherKey())\n try:\n if ver < 5.1:\n de_password = Cipher.decrypt(b64decode(password_string)).decode()\n else:\n data = b64decode(password_string)\n ciphertext, checksum = data[:-SHA256.digest_size], data[-SHA256\n .digest_size:]\n plaintext = Cipher.decrypt(ciphertext)\n if SHA256.new(plaintext).digest() != checksum:\n raise ValueError('Cannot decrypt string. The key is wrong!')\n de_password = plaintext.decode('ascii')\n if need_return:\n return de_password\n else:\n print('%-20s : %s' % ('Version', VERSION))\n print('%-20s : %s' % ('Password', password_string))\n print('%-20s : %s' % ('Decrypted Password', de_password))\n except Exception as e:\n print('Password is invalid')\n\n\ndef decrypt_file(filepath: str=''):\n if not os.path.isfile(filepath):\n print(f'{filepath:=^100}\\nError: No file')\n return\n file = os.path.basename(os.path.realpath(filepath))\n if file.endswith('.xsh') or file.endswith('.xfp'):\n cfg = configparser.ConfigParser()\n try:\n cfg.read(filepath)\n except UnicodeDecodeError:\n cfg.read(filepath, encoding='utf-16')\n try:\n if file.endswith('.xsh'):\n host = cfg['CONNECTION']['Host']\n port = cfg['CONNECTION']['Port']\n username = cfg['CONNECTION:AUTHENTICATION']['UserName']\n password = cfg['CONNECTION:AUTHENTICATION']['Password']\n version = cfg['SessionInfo']['Version']\n de_password = decrypt_string(password, True)\n else:\n host = cfg['Connection']['Host']\n port = cfg['Connection']['Port']\n username = cfg['Connection']['UserName']\n password = cfg['Connection']['Password']\n version = cfg['SessionInfo']['Version']\n de_password = decrypt_string(password, True)\n print(f'{filepath:=^100}')\n print('%-20s : %s' % ('Host', host))\n print('%-20s : %s' % ('Port', port))\n print('%-20s : %s' % ('Version', version))\n print('%-20s : %s' % ('UserName', username))\n print('%-20s : %s' % ('Password', de_password))\n print('%-20s : %s' % ('Encrypted Password', password))\n except Exception as e:\n print(f'{filepath:=^100}\\nError:{e}')\n\n\ndef decrypt_dir():\n for root, dirs, files in os.walk(KEY):\n for f in files:\n decrypt_file(os.path.join(root, f))\n\n\ndef setDefaultSessionDirByVer():\n if not is_number(VERSION):\n return\n ver = float(VERSION)\n dir = 'Xshell' if IS_XSH else 'Xftp'\n global KEY\n if ver < 6:\n KEY = os.path.join(os.environ['USERPROFILE'], \n 'Documents\\\\NetSarang\\\\%s\\\\Sessions' % dir)\n elif ver == 6:\n KEY = os.path.join(os.environ['USERPROFILE'], \n 'Documents\\\\NetSarang Computer\\\\6\\\\%s\\\\Sessions' % dir)\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n try:\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getCipherKey():\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n ver = float(VERSION)\n if 0 < ver and ver < 5.1:\n if IS_XSH:\n return MD5.new(b'!X@s#h$e%l^l&').digest()\n else:\n return MD5.new(b'!X@s#c$e%l^l&').digest()\n elif 5.1 <= ver and ver <= 5.2:\n return SHA256.new(SID.encode()).digest()\n elif 5.2 < ver:\n if MASTER_PWD == None:\n return SHA256.new((USERNAME + SID).encode()).digest()\n else:\n return SHA256.new(MASTER_PWD.encode()).digest()\n else:\n raise ValueError('Invalid argument: --Version')\n\n\ndef encrypt_string(password_string, need_return=False):\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n ver = float(VERSION)\n Cipher = ARC4.new(getCipherKey())\n if ver < 5.1:\n en_password = b64encode(Cipher.encrypt(password_string.encode())\n ).decode()\n else:\n checksum = SHA256.new(password_string.encode()).digest()\n ciphertext = Cipher.encrypt(password_string.encode())\n en_password = b64encode(ciphertext + checksum).decode()\n if need_return:\n return en_password\n else:\n print('%-20s : %s' % ('Version', VERSION))\n print('%-20s : %s' % ('Password', password_string))\n print('%-20s : %s' % ('Encrypted Password', en_password))\n\n\ndef decrypt_string(password_string, need_return=False):\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n ver = float(VERSION)\n Cipher = ARC4.new(getCipherKey())\n try:\n if ver < 5.1:\n de_password = Cipher.decrypt(b64decode(password_string)).decode()\n else:\n data = b64decode(password_string)\n ciphertext, checksum = data[:-SHA256.digest_size], data[-SHA256\n .digest_size:]\n plaintext = Cipher.decrypt(ciphertext)\n if SHA256.new(plaintext).digest() != checksum:\n raise ValueError('Cannot decrypt string. 
The key is wrong!')\n de_password = plaintext.decode('ascii')\n if need_return:\n return de_password\n else:\n print('%-20s : %s' % ('Version', VERSION))\n print('%-20s : %s' % ('Password', password_string))\n print('%-20s : %s' % ('Decrypted Password', de_password))\n except Exception as e:\n print('Password is invalid')\n\n\ndef decrypt_file(filepath: str=''):\n if not os.path.isfile(filepath):\n print(f'{filepath:=^100}\\nError: No file')\n return\n file = os.path.basename(os.path.realpath(filepath))\n if file.endswith('.xsh') or file.endswith('.xfp'):\n cfg = configparser.ConfigParser()\n try:\n cfg.read(filepath)\n except UnicodeDecodeError:\n cfg.read(filepath, encoding='utf-16')\n try:\n if file.endswith('.xsh'):\n host = cfg['CONNECTION']['Host']\n port = cfg['CONNECTION']['Port']\n username = cfg['CONNECTION:AUTHENTICATION']['UserName']\n password = cfg['CONNECTION:AUTHENTICATION']['Password']\n version = cfg['SessionInfo']['Version']\n de_password = decrypt_string(password, True)\n else:\n host = cfg['Connection']['Host']\n port = cfg['Connection']['Port']\n username = cfg['Connection']['UserName']\n password = cfg['Connection']['Password']\n version = cfg['SessionInfo']['Version']\n de_password = decrypt_string(password, True)\n print(f'{filepath:=^100}')\n print('%-20s : %s' % ('Host', host))\n print('%-20s : %s' % ('Port', port))\n print('%-20s : %s' % ('Version', version))\n print('%-20s : %s' % ('UserName', username))\n print('%-20s : %s' % ('Password', de_password))\n print('%-20s : %s' % ('Encrypted Password', password))\n except Exception as e:\n print(f'{filepath:=^100}\\nError:{e}')\n\n\ndef decrypt_dir():\n for root, dirs, files in os.walk(KEY):\n for f in files:\n decrypt_file(os.path.join(root, f))\n\n\ndef setDefaultSessionDirByVer():\n if not is_number(VERSION):\n return\n ver = float(VERSION)\n dir = 'Xshell' if IS_XSH else 'Xftp'\n global KEY\n if ver < 6:\n KEY = os.path.join(os.environ['USERPROFILE'], \n 'Documents\\\\NetSarang\\\\%s\\\\Sessions' % dir)\n elif ver == 6:\n KEY = os.path.join(os.environ['USERPROFILE'], \n 'Documents\\\\NetSarang Computer\\\\6\\\\%s\\\\Sessions' % dir)\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n try:\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n return False\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='xsh, xfp password decrypt')\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument('-e', '--encrypt', default=False, help=\n '<-e | -d> encrypt password, default -d', action='store_true')\n group.add_argument('-d', '--decrypt', default=True, help=\n '<-e | -d> decrypt encrypted password, default -d', action='store_true'\n )\n parser.add_argument('-f', '--ftp', default=False, help=\n 'xftp or xshell. Ignore if it is xshell', action='store_true')\n parser.add_argument('-u', '--username', default='', type=str, help=\n 'user `whoami /user` in command. Ignore if it is local. Used by version >= 5.1'\n )\n parser.add_argument('-m', '--master_pwd', default='', type=str, help=\n \"user's master password. Used by version >= 6\")\n parser.add_argument('-s', '--sid', default='', type=str, help=\n 'SID `whoami /user` in command. Ignore if it is local. Used by version >= 5.1'\n )\n parser.add_argument('-v', '--version', default='', type=str, help=\n 'xsh or xfp version. 
If not specified, 5.2 will be used.')\n parser.add_argument('-k', '--key', default='', nargs='?', help=\n 'the path of sessions directory or file of xsh or xfp, or password or other key'\n )\n args = parser.parse_args()\n if args.encrypt:\n IS_DECRYPT = False\n if args.sid:\n SID = args.sid\n if args.username:\n USERNAME = args.username\n if args.master_pwd:\n MASTER_PWD = args.master_pwd\n if args.ftp:\n IS_XSH = False\n if is_number(args.version):\n VERSION = args.version\n if args.key:\n KEY = args.key\n if not args.key and (is_number(args.version) or args.ftp):\n setDefaultSessionDirByVer()\n if IS_DECRYPT:\n if os.path.isdir(KEY):\n decrypt_dir()\n elif os.path.isfile(KEY):\n decrypt_file(KEY)\n else:\n decrypt_string(KEY)\n else:\n encrypt_string(KEY)\n",
"step-3": "<mask token>\nUSERNAME = GetUserName()\nMASTER_PWD = None\nSID = ConvertSidToStringSid(LookupAccountName(GetComputerName(),\n GetUserName())[0])\nIS_XSH = True\nVERSION = '5.2'\nKEY = os.path.join(os.environ['USERPROFILE'],\n 'Documents\\\\NetSarang\\\\Xshell\\\\Sessions')\nIS_DECRYPT = True\n\n\ndef getCipherKey():\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n ver = float(VERSION)\n if 0 < ver and ver < 5.1:\n if IS_XSH:\n return MD5.new(b'!X@s#h$e%l^l&').digest()\n else:\n return MD5.new(b'!X@s#c$e%l^l&').digest()\n elif 5.1 <= ver and ver <= 5.2:\n return SHA256.new(SID.encode()).digest()\n elif 5.2 < ver:\n if MASTER_PWD == None:\n return SHA256.new((USERNAME + SID).encode()).digest()\n else:\n return SHA256.new(MASTER_PWD.encode()).digest()\n else:\n raise ValueError('Invalid argument: --Version')\n\n\ndef encrypt_string(password_string, need_return=False):\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n ver = float(VERSION)\n Cipher = ARC4.new(getCipherKey())\n if ver < 5.1:\n en_password = b64encode(Cipher.encrypt(password_string.encode())\n ).decode()\n else:\n checksum = SHA256.new(password_string.encode()).digest()\n ciphertext = Cipher.encrypt(password_string.encode())\n en_password = b64encode(ciphertext + checksum).decode()\n if need_return:\n return en_password\n else:\n print('%-20s : %s' % ('Version', VERSION))\n print('%-20s : %s' % ('Password', password_string))\n print('%-20s : %s' % ('Encrypted Password', en_password))\n\n\ndef decrypt_string(password_string, need_return=False):\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n ver = float(VERSION)\n Cipher = ARC4.new(getCipherKey())\n try:\n if ver < 5.1:\n de_password = Cipher.decrypt(b64decode(password_string)).decode()\n else:\n data = b64decode(password_string)\n ciphertext, checksum = data[:-SHA256.digest_size], data[-SHA256\n .digest_size:]\n plaintext = Cipher.decrypt(ciphertext)\n if SHA256.new(plaintext).digest() != checksum:\n raise ValueError('Cannot decrypt string. 
The key is wrong!')\n de_password = plaintext.decode('ascii')\n if need_return:\n return de_password\n else:\n print('%-20s : %s' % ('Version', VERSION))\n print('%-20s : %s' % ('Password', password_string))\n print('%-20s : %s' % ('Decrypted Password', de_password))\n except Exception as e:\n print('Password is invalid')\n\n\ndef decrypt_file(filepath: str=''):\n if not os.path.isfile(filepath):\n print(f'{filepath:=^100}\\nError: No file')\n return\n file = os.path.basename(os.path.realpath(filepath))\n if file.endswith('.xsh') or file.endswith('.xfp'):\n cfg = configparser.ConfigParser()\n try:\n cfg.read(filepath)\n except UnicodeDecodeError:\n cfg.read(filepath, encoding='utf-16')\n try:\n if file.endswith('.xsh'):\n host = cfg['CONNECTION']['Host']\n port = cfg['CONNECTION']['Port']\n username = cfg['CONNECTION:AUTHENTICATION']['UserName']\n password = cfg['CONNECTION:AUTHENTICATION']['Password']\n version = cfg['SessionInfo']['Version']\n de_password = decrypt_string(password, True)\n else:\n host = cfg['Connection']['Host']\n port = cfg['Connection']['Port']\n username = cfg['Connection']['UserName']\n password = cfg['Connection']['Password']\n version = cfg['SessionInfo']['Version']\n de_password = decrypt_string(password, True)\n print(f'{filepath:=^100}')\n print('%-20s : %s' % ('Host', host))\n print('%-20s : %s' % ('Port', port))\n print('%-20s : %s' % ('Version', version))\n print('%-20s : %s' % ('UserName', username))\n print('%-20s : %s' % ('Password', de_password))\n print('%-20s : %s' % ('Encrypted Password', password))\n except Exception as e:\n print(f'{filepath:=^100}\\nError:{e}')\n\n\ndef decrypt_dir():\n for root, dirs, files in os.walk(KEY):\n for f in files:\n decrypt_file(os.path.join(root, f))\n\n\ndef setDefaultSessionDirByVer():\n if not is_number(VERSION):\n return\n ver = float(VERSION)\n dir = 'Xshell' if IS_XSH else 'Xftp'\n global KEY\n if ver < 6:\n KEY = os.path.join(os.environ['USERPROFILE'], \n 'Documents\\\\NetSarang\\\\%s\\\\Sessions' % dir)\n elif ver == 6:\n KEY = os.path.join(os.environ['USERPROFILE'], \n 'Documents\\\\NetSarang Computer\\\\6\\\\%s\\\\Sessions' % dir)\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n try:\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n return False\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='xsh, xfp password decrypt')\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument('-e', '--encrypt', default=False, help=\n '<-e | -d> encrypt password, default -d', action='store_true')\n group.add_argument('-d', '--decrypt', default=True, help=\n '<-e | -d> decrypt encrypted password, default -d', action='store_true'\n )\n parser.add_argument('-f', '--ftp', default=False, help=\n 'xftp or xshell. Ignore if it is xshell', action='store_true')\n parser.add_argument('-u', '--username', default='', type=str, help=\n 'user `whoami /user` in command. Ignore if it is local. Used by version >= 5.1'\n )\n parser.add_argument('-m', '--master_pwd', default='', type=str, help=\n \"user's master password. Used by version >= 6\")\n parser.add_argument('-s', '--sid', default='', type=str, help=\n 'SID `whoami /user` in command. Ignore if it is local. Used by version >= 5.1'\n )\n parser.add_argument('-v', '--version', default='', type=str, help=\n 'xsh or xfp version. 
If not specified, 5.2 will be used.')\n parser.add_argument('-k', '--key', default='', nargs='?', help=\n 'the path of sessions directory or file of xsh or xfp, or password or other key'\n )\n args = parser.parse_args()\n if args.encrypt:\n IS_DECRYPT = False\n if args.sid:\n SID = args.sid\n if args.username:\n USERNAME = args.username\n if args.master_pwd:\n MASTER_PWD = args.master_pwd\n if args.ftp:\n IS_XSH = False\n if is_number(args.version):\n VERSION = args.version\n if args.key:\n KEY = args.key\n if not args.key and (is_number(args.version) or args.ftp):\n setDefaultSessionDirByVer()\n if IS_DECRYPT:\n if os.path.isdir(KEY):\n decrypt_dir()\n elif os.path.isfile(KEY):\n decrypt_file(KEY)\n else:\n decrypt_string(KEY)\n else:\n encrypt_string(KEY)\n",
"step-4": "import os\nimport argparse\nimport configparser\nimport unicodedata\nfrom win32api import GetComputerName, GetUserName\nfrom win32security import LookupAccountName, ConvertSidToStringSid\nfrom base64 import b64encode, b64decode\nfrom Cryptodome.Hash import MD5, SHA256\nfrom Cryptodome.Cipher import ARC4\nUSERNAME = GetUserName()\nMASTER_PWD = None\nSID = ConvertSidToStringSid(LookupAccountName(GetComputerName(),\n GetUserName())[0])\nIS_XSH = True\nVERSION = '5.2'\nKEY = os.path.join(os.environ['USERPROFILE'],\n 'Documents\\\\NetSarang\\\\Xshell\\\\Sessions')\nIS_DECRYPT = True\n\n\ndef getCipherKey():\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n ver = float(VERSION)\n if 0 < ver and ver < 5.1:\n if IS_XSH:\n return MD5.new(b'!X@s#h$e%l^l&').digest()\n else:\n return MD5.new(b'!X@s#c$e%l^l&').digest()\n elif 5.1 <= ver and ver <= 5.2:\n return SHA256.new(SID.encode()).digest()\n elif 5.2 < ver:\n if MASTER_PWD == None:\n return SHA256.new((USERNAME + SID).encode()).digest()\n else:\n return SHA256.new(MASTER_PWD.encode()).digest()\n else:\n raise ValueError('Invalid argument: --Version')\n\n\ndef encrypt_string(password_string, need_return=False):\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n ver = float(VERSION)\n Cipher = ARC4.new(getCipherKey())\n if ver < 5.1:\n en_password = b64encode(Cipher.encrypt(password_string.encode())\n ).decode()\n else:\n checksum = SHA256.new(password_string.encode()).digest()\n ciphertext = Cipher.encrypt(password_string.encode())\n en_password = b64encode(ciphertext + checksum).decode()\n if need_return:\n return en_password\n else:\n print('%-20s : %s' % ('Version', VERSION))\n print('%-20s : %s' % ('Password', password_string))\n print('%-20s : %s' % ('Encrypted Password', en_password))\n\n\ndef decrypt_string(password_string, need_return=False):\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n ver = float(VERSION)\n Cipher = ARC4.new(getCipherKey())\n try:\n if ver < 5.1:\n de_password = Cipher.decrypt(b64decode(password_string)).decode()\n else:\n data = b64decode(password_string)\n ciphertext, checksum = data[:-SHA256.digest_size], data[-SHA256\n .digest_size:]\n plaintext = Cipher.decrypt(ciphertext)\n if SHA256.new(plaintext).digest() != checksum:\n raise ValueError('Cannot decrypt string. 
The key is wrong!')\n de_password = plaintext.decode('ascii')\n if need_return:\n return de_password\n else:\n print('%-20s : %s' % ('Version', VERSION))\n print('%-20s : %s' % ('Password', password_string))\n print('%-20s : %s' % ('Decrypted Password', de_password))\n except Exception as e:\n print('Password is invalid')\n\n\ndef decrypt_file(filepath: str=''):\n if not os.path.isfile(filepath):\n print(f'{filepath:=^100}\\nError: No file')\n return\n file = os.path.basename(os.path.realpath(filepath))\n if file.endswith('.xsh') or file.endswith('.xfp'):\n cfg = configparser.ConfigParser()\n try:\n cfg.read(filepath)\n except UnicodeDecodeError:\n cfg.read(filepath, encoding='utf-16')\n try:\n if file.endswith('.xsh'):\n host = cfg['CONNECTION']['Host']\n port = cfg['CONNECTION']['Port']\n username = cfg['CONNECTION:AUTHENTICATION']['UserName']\n password = cfg['CONNECTION:AUTHENTICATION']['Password']\n version = cfg['SessionInfo']['Version']\n de_password = decrypt_string(password, True)\n else:\n host = cfg['Connection']['Host']\n port = cfg['Connection']['Port']\n username = cfg['Connection']['UserName']\n password = cfg['Connection']['Password']\n version = cfg['SessionInfo']['Version']\n de_password = decrypt_string(password, True)\n print(f'{filepath:=^100}')\n print('%-20s : %s' % ('Host', host))\n print('%-20s : %s' % ('Port', port))\n print('%-20s : %s' % ('Version', version))\n print('%-20s : %s' % ('UserName', username))\n print('%-20s : %s' % ('Password', de_password))\n print('%-20s : %s' % ('Encrypted Password', password))\n except Exception as e:\n print(f'{filepath:=^100}\\nError:{e}')\n\n\ndef decrypt_dir():\n for root, dirs, files in os.walk(KEY):\n for f in files:\n decrypt_file(os.path.join(root, f))\n\n\ndef setDefaultSessionDirByVer():\n if not is_number(VERSION):\n return\n ver = float(VERSION)\n dir = 'Xshell' if IS_XSH else 'Xftp'\n global KEY\n if ver < 6:\n KEY = os.path.join(os.environ['USERPROFILE'], \n 'Documents\\\\NetSarang\\\\%s\\\\Sessions' % dir)\n elif ver == 6:\n KEY = os.path.join(os.environ['USERPROFILE'], \n 'Documents\\\\NetSarang Computer\\\\6\\\\%s\\\\Sessions' % dir)\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n try:\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n return False\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='xsh, xfp password decrypt')\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument('-e', '--encrypt', default=False, help=\n '<-e | -d> encrypt password, default -d', action='store_true')\n group.add_argument('-d', '--decrypt', default=True, help=\n '<-e | -d> decrypt encrypted password, default -d', action='store_true'\n )\n parser.add_argument('-f', '--ftp', default=False, help=\n 'xftp or xshell. Ignore if it is xshell', action='store_true')\n parser.add_argument('-u', '--username', default='', type=str, help=\n 'user `whoami /user` in command. Ignore if it is local. Used by version >= 5.1'\n )\n parser.add_argument('-m', '--master_pwd', default='', type=str, help=\n \"user's master password. Used by version >= 6\")\n parser.add_argument('-s', '--sid', default='', type=str, help=\n 'SID `whoami /user` in command. Ignore if it is local. Used by version >= 5.1'\n )\n parser.add_argument('-v', '--version', default='', type=str, help=\n 'xsh or xfp version. 
If not specified, 5.2 will be used.')\n parser.add_argument('-k', '--key', default='', nargs='?', help=\n 'the path of sessions directory or file of xsh or xfp, or password or other key'\n )\n args = parser.parse_args()\n if args.encrypt:\n IS_DECRYPT = False\n if args.sid:\n SID = args.sid\n if args.username:\n USERNAME = args.username\n if args.master_pwd:\n MASTER_PWD = args.master_pwd\n if args.ftp:\n IS_XSH = False\n if is_number(args.version):\n VERSION = args.version\n if args.key:\n KEY = args.key\n if not args.key and (is_number(args.version) or args.ftp):\n setDefaultSessionDirByVer()\n if IS_DECRYPT:\n if os.path.isdir(KEY):\n decrypt_dir()\n elif os.path.isfile(KEY):\n decrypt_file(KEY)\n else:\n decrypt_string(KEY)\n else:\n encrypt_string(KEY)\n",
"step-5": "# -*- coding: utf-8 -*-\n# python >= 3.7\n# supported xmanager version <5.1, 5.1, 5.2, 6\n\nimport os\nimport argparse\nimport configparser\nimport unicodedata\n\nfrom win32api import GetComputerName, GetUserName\nfrom win32security import LookupAccountName, ConvertSidToStringSid\nfrom base64 import b64encode, b64decode\nfrom Cryptodome.Hash import MD5, SHA256\nfrom Cryptodome.Cipher import ARC4\n\nUSERNAME = GetUserName()\nMASTER_PWD = None\nSID = ConvertSidToStringSid(LookupAccountName(GetComputerName(), GetUserName())[0])\nIS_XSH = True\nVERSION = '5.2'\nKEY = os.path.join(os.environ[\"USERPROFILE\"], r\"Documents\\NetSarang\\Xshell\\Sessions\")\nIS_DECRYPT = True\n\ndef getCipherKey():\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n\n ver = float(VERSION)\n if 0 < ver and ver < 5.1:\n if IS_XSH:\n return MD5.new(b'!X@s#h$e%l^l&').digest()\n else:\n return MD5.new(b'!X@s#c$e%l^l&').digest()\n elif 5.1 <= ver and ver <= 5.2:\n return SHA256.new(SID.encode()).digest()\n elif 5.2 < ver:\n if MASTER_PWD == None:\n return SHA256.new((USERNAME + SID).encode()).digest()\n else:\n return SHA256.new(MASTER_PWD.encode()).digest()\n else:\n raise ValueError('Invalid argument: --Version')\n\ndef encrypt_string(password_string, need_return=False):\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n\n ver = float(VERSION)\n\n Cipher = ARC4.new(getCipherKey())\n if ver < 5.1:\n en_password = b64encode(Cipher.encrypt(password_string.encode())).decode()\n else:\n checksum = SHA256.new(password_string.encode()).digest()\n ciphertext = Cipher.encrypt(password_string.encode())\n en_password = b64encode(ciphertext + checksum).decode()\n if need_return:\n return en_password\n else:\n print('%-20s : %s' % ('Version', VERSION))\n print('%-20s : %s' % ('Password', password_string))\n print('%-20s : %s' % ('Encrypted Password', en_password))\n\ndef decrypt_string(password_string, need_return=False):\n if not is_number(VERSION):\n raise ValueError('Invalid argument: --Version')\n\n ver = float(VERSION)\n\n Cipher = ARC4.new(getCipherKey())\n\n try:\n if ver < 5.1:\n de_password = Cipher.decrypt(b64decode(password_string)).decode()\n else:\n data = b64decode(password_string)\n ciphertext, checksum = data[:-SHA256.digest_size], data[-SHA256.digest_size:]\n plaintext = Cipher.decrypt(ciphertext)\n if SHA256.new(plaintext).digest() != checksum:\n raise ValueError('Cannot decrypt string. 
The key is wrong!')\n de_password = plaintext.decode('ascii')\n if need_return:\n return de_password\n else:\n print('%-20s : %s' % ('Version', VERSION))\n print('%-20s : %s' % ('Password', password_string))\n print('%-20s : %s' % ('Decrypted Password', de_password))\n\n except Exception as e:\n print(\"Password is invalid\")\n\ndef decrypt_file(filepath: str = ''):\n if not os.path.isfile(filepath):\n print(f\"{filepath:=^100}\\nError: No file\")\n return\n\n file = os.path.basename(os.path.realpath(filepath))\n\n if file.endswith(\".xsh\") or file.endswith(\".xfp\"):\n cfg = configparser.ConfigParser()\n try:\n cfg.read(filepath)\n except UnicodeDecodeError:\n cfg.read(filepath, encoding=\"utf-16\")\n\n try:\n if file.endswith(\".xsh\"):\n host = cfg[\"CONNECTION\"][\"Host\"]\n port = cfg[\"CONNECTION\"][\"Port\"]\n username = cfg[\"CONNECTION:AUTHENTICATION\"][\"UserName\"]\n password = cfg[\"CONNECTION:AUTHENTICATION\"][\"Password\"]\n version = cfg[\"SessionInfo\"][\"Version\"]\n\n de_password = decrypt_string(password, True)\n else:\n host = cfg[\"Connection\"][\"Host\"]\n port = cfg[\"Connection\"][\"Port\"]\n username = cfg[\"Connection\"][\"UserName\"]\n password = cfg[\"Connection\"][\"Password\"]\n version = cfg[\"SessionInfo\"][\"Version\"]\n\n de_password = decrypt_string(password, True)\n\n print(f\"{filepath:=^100}\")\n print('%-20s : %s' % ('Host', host))\n print('%-20s : %s' % ('Port', port))\n print('%-20s : %s' % ('Version', version))\n print('%-20s : %s' % ('UserName', username))\n print('%-20s : %s' % ('Password', de_password))\n print('%-20s : %s' % ('Encrypted Password', password))\n except Exception as e:\n print(f\"{filepath:=^100}\\nError:{e}\")\n\ndef decrypt_dir():\n for root, dirs, files in os.walk(KEY):\n for f in files:\n decrypt_file(os.path.join(root, f))\n\ndef setDefaultSessionDirByVer():\n if not is_number(VERSION):\n return\n ver = float(VERSION)\n dir = 'Xshell' if IS_XSH else 'Xftp';\n global KEY\n if ver < 6:\n KEY = os.path.join(os.environ[\"USERPROFILE\"], r\"Documents\\NetSarang\\%s\\Sessions\" % dir)\n elif ver == 6:\n KEY = os.path.join(os.environ[\"USERPROFILE\"], r\"Documents\\NetSarang Computer\\6\\%s\\Sessions\" % dir)\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n try:\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n\n return False\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"xsh, xfp password decrypt\")\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\"-e\", \"--encrypt\", default=False,\n help=\"<-e | -d> encrypt password, default -d\", action=\"store_true\")\n group.add_argument(\"-d\", \"--decrypt\", default=True,\n help=\"<-e | -d> decrypt encrypted password, default -d\", action=\"store_true\")\n parser.add_argument(\"-f\", \"--ftp\", default=False,\n help=\"xftp or xshell. Ignore if it is xshell\", action=\"store_true\")\n parser.add_argument(\"-u\", \"--username\", default=\"\", type=str,\n help=\"user `whoami /user` in command. Ignore if it is local. Used by version >= 5.1\")\n parser.add_argument(\"-m\", \"--master_pwd\", default=\"\", type=str,\n help=\"user\\'s master password. Used by version >= 6\")\n parser.add_argument(\"-s\", \"--sid\", default=\"\", type=str,\n help=\"SID `whoami /user` in command. Ignore if it is local. Used by version >= 5.1\")\n parser.add_argument(\"-v\", \"--version\", default=\"\", type=str,\n help=\"xsh or xfp version. 
If not specified, 5.2 will be used.\")\n parser.add_argument(\"-k\", \"--key\", default=\"\", nargs='?',\n help=\"the path of sessions directory or file of xsh or xfp, or password or other key\")\n\n args = parser.parse_args()\n\n #print(args)\n\n if args.encrypt:\n IS_DECRYPT = False\n if args.sid:\n SID = args.sid\n if args.username:\n USERNAME = args.username\n if args.master_pwd:\n MASTER_PWD = args.master_pwd\n if args.ftp:\n IS_XSH = False\n if is_number(args.version):\n VERSION = args.version\n if args.key:\n KEY = args.key\n\n if not args.key and (is_number(args.version) or args.ftp):\n setDefaultSessionDirByVer()\n\n if IS_DECRYPT:\n if os.path.isdir(KEY):\n decrypt_dir()\n elif os.path.isfile(KEY):\n decrypt_file(KEY)\n else:\n decrypt_string(KEY)\n else:\n encrypt_string(KEY)",
"step-ids": [
5,
8,
9,
10,
11
]
}
|
[
5,
8,
9,
10,
11
] |
# coding: utf8
from __future__ import unicode_literals
from nltk.tag import stanford
from .SequenceTagger import SequenceTagger
class POSTagger(SequenceTagger):
"""
>>> tagger = POSTagger(model='resources/postagger.model')
>>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])
[('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]
"""
class StanfordPOSTagger(stanford.StanfordPOSTagger):
"""
>>> tagger = StanfordPOSTagger(model_filename='resources/persian.tagger', path_to_jar='resources/stanford-postagger.jar')
>>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])
[('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]
"""
def __init__(self, model_filename, path_to_jar, *args, **kwargs):
self._SEPARATOR = '/'
super(stanford.StanfordPOSTagger, self).__init__(model_filename=model_filename, path_to_jar=path_to_jar, *args, **kwargs)
def tag(self, tokens):
return self.tag_sents([tokens])[0]
def tag_sents(self, sentences):
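		# Replace spaces inside multi-word tokens with underscores so that the
		# Stanford tagger does not split them on whitespace.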
refined = map(lambda s: [w.replace(' ', '_') for w in s], sentences)
return super(stanford.StanfordPOSTagger, self).tag_sents(refined)
|
normal
|
{
"blob_id": "1ac3630e6433a2d11c716b558640cab7c559f6ba",
"index": 4483,
"step-1": "<mask token>\n\n\nclass StanfordPOSTagger(stanford.StanfordPOSTagger):\n <mask token>\n\n def __init__(self, model_filename, path_to_jar, *args, **kwargs):\n self._SEPARATOR = '/'\n super(stanford.StanfordPOSTagger, self).__init__(*args,\n model_filename=model_filename, path_to_jar=path_to_jar, **kwargs)\n <mask token>\n\n def tag_sents(self, sentences):\n refined = map(lambda s: [w.replace(' ', '_') for w in s], sentences)\n return super(stanford.StanfordPOSTagger, self).tag_sents(refined)\n",
"step-2": "<mask token>\n\n\nclass StanfordPOSTagger(stanford.StanfordPOSTagger):\n \"\"\"\n\t>>> tagger = StanfordPOSTagger(model_filename='resources/persian.tagger', path_to_jar='resources/stanford-postagger.jar')\n\t>>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])\n\t[('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]\n\t\"\"\"\n\n def __init__(self, model_filename, path_to_jar, *args, **kwargs):\n self._SEPARATOR = '/'\n super(stanford.StanfordPOSTagger, self).__init__(*args,\n model_filename=model_filename, path_to_jar=path_to_jar, **kwargs)\n\n def tag(self, tokens):\n return self.tag_sents([tokens])[0]\n\n def tag_sents(self, sentences):\n refined = map(lambda s: [w.replace(' ', '_') for w in s], sentences)\n return super(stanford.StanfordPOSTagger, self).tag_sents(refined)\n",
"step-3": "<mask token>\n\n\nclass POSTagger(SequenceTagger):\n \"\"\"\n\t>>> tagger = POSTagger(model='resources/postagger.model')\n\t>>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])\n\t[('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]\n\t\"\"\"\n\n\nclass StanfordPOSTagger(stanford.StanfordPOSTagger):\n \"\"\"\n\t>>> tagger = StanfordPOSTagger(model_filename='resources/persian.tagger', path_to_jar='resources/stanford-postagger.jar')\n\t>>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])\n\t[('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]\n\t\"\"\"\n\n def __init__(self, model_filename, path_to_jar, *args, **kwargs):\n self._SEPARATOR = '/'\n super(stanford.StanfordPOSTagger, self).__init__(*args,\n model_filename=model_filename, path_to_jar=path_to_jar, **kwargs)\n\n def tag(self, tokens):\n return self.tag_sents([tokens])[0]\n\n def tag_sents(self, sentences):\n refined = map(lambda s: [w.replace(' ', '_') for w in s], sentences)\n return super(stanford.StanfordPOSTagger, self).tag_sents(refined)\n",
"step-4": "from __future__ import unicode_literals\nfrom nltk.tag import stanford\nfrom .SequenceTagger import SequenceTagger\n\n\nclass POSTagger(SequenceTagger):\n \"\"\"\n\t>>> tagger = POSTagger(model='resources/postagger.model')\n\t>>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])\n\t[('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]\n\t\"\"\"\n\n\nclass StanfordPOSTagger(stanford.StanfordPOSTagger):\n \"\"\"\n\t>>> tagger = StanfordPOSTagger(model_filename='resources/persian.tagger', path_to_jar='resources/stanford-postagger.jar')\n\t>>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])\n\t[('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]\n\t\"\"\"\n\n def __init__(self, model_filename, path_to_jar, *args, **kwargs):\n self._SEPARATOR = '/'\n super(stanford.StanfordPOSTagger, self).__init__(*args,\n model_filename=model_filename, path_to_jar=path_to_jar, **kwargs)\n\n def tag(self, tokens):\n return self.tag_sents([tokens])[0]\n\n def tag_sents(self, sentences):\n refined = map(lambda s: [w.replace(' ', '_') for w in s], sentences)\n return super(stanford.StanfordPOSTagger, self).tag_sents(refined)\n",
"step-5": "# coding: utf8\n\nfrom __future__ import unicode_literals\nfrom nltk.tag import stanford\nfrom .SequenceTagger import SequenceTagger\n\n\nclass POSTagger(SequenceTagger):\n\t\"\"\"\n\t>>> tagger = POSTagger(model='resources/postagger.model')\n\t>>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])\n\t[('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]\n\t\"\"\"\n\n\nclass StanfordPOSTagger(stanford.StanfordPOSTagger):\n\t\"\"\"\n\t>>> tagger = StanfordPOSTagger(model_filename='resources/persian.tagger', path_to_jar='resources/stanford-postagger.jar')\n\t>>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])\n\t[('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]\n\t\"\"\"\n\n\tdef __init__(self, model_filename, path_to_jar, *args, **kwargs):\n\t\tself._SEPARATOR = '/'\n\t\tsuper(stanford.StanfordPOSTagger, self).__init__(model_filename=model_filename, path_to_jar=path_to_jar, *args, **kwargs)\n\n\tdef tag(self, tokens):\n\t\treturn self.tag_sents([tokens])[0]\n\n\tdef tag_sents(self, sentences):\n\t\trefined = map(lambda s: [w.replace(' ', '_') for w in s], sentences)\n\t\treturn super(stanford.StanfordPOSTagger, self).tag_sents(refined)\n",
"step-ids": [
3,
5,
7,
8,
9
]
}
|
[
3,
5,
7,
8,
9
] |
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="数据结构"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("二叉树——递归套路")
r2=s2.getRootTopic()
r2.setTitle("二叉树——递归套路")
content={
'递归套路':[
'可解决面试中绝大多数二叉树问题,尤其是树型dp问题',
'本质是利用递归遍历二叉树的便利性'
],
'思路':[
'1.假设以x节点为为头,假设可以向X左树和X右树要任何信息',
'2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',
'3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息',
'4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',
'5.递归函数都返回S,每一棵子树都这么要求',
'6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'
],
'题目1':[
'给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树',
{'思路':[
'1.左子树是否平衡',
'2.右子树是否平衡',
'3.左树与右树高在2以内',
]},
{'实现':[
'Class Info(){',
' boolean isBalanced;',
' int height;',
'}',
'---------------------',
            'Info process(Node head){',
            '   if(head==null){',
            '       return new Info(true,0);',
            '   }',
            '   Info leftInfo=process(head.left);',
            '   Info rightInfo=process(head.right);',
            '   int height=Math.max(leftInfo.height,rightInfo.height)+1;',
            '   boolean isBalanced=false;',
            '   if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){',
            '       isBalanced=true;',
            '   }',
            '   return new Info(isBalanced,height);',
'}'
]}
],
'题目2':[
'给定一棵二叉树的头节点head,任何两个节点之前都存在距离',
'返回整棵二叉树的最大距离',
{'思路':[
{'1.与头节点无关':[
'max(左侧的最大距离,右侧的最大距离)',
]},
{'2.与头节点有头':[
'左树高+右树高+1'
]}
]},
{'实现':[
'Class Info(){',
' int maxDistance;',
' int height;',
'}',
'---------------------',
'Info process(Node head){',
' if(head==null){',
' return new Info(0,0);',
' }',
' Info leftInfo=process(head.left);',
' Info rightInfo=process(head.right);',
' int height=Math.max(leftInfo.height,rightInfo.height)+1;',
' int maxDistance=Math.max(',
' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',
' leftInfo.height+rightInfo.height+1)',
' return new Info(maxDistance,height);',
'}'
]}
]
}
#构建xmind
xmind.build(content,r2)
#保存xmind
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
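
# --- Illustrative sketch (not part of the original script) ---
# The "递归套路" (tree-DP recursion pattern) outlined in the notes above can be
# written directly in Python. Node and Info below are hypothetical helpers for
# illustration only; the notes themselves use Java-style pseudocode.
from collections import namedtuple

Info = namedtuple('Info', ['is_balanced', 'height'])

class Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

def process(head):
    # Every subtree returns the same Info record (steps 4-5 of the notes);
    # the parent combines the answers from its left and right subtrees.
    if head is None:
        return Info(True, 0)
    left, right = process(head.left), process(head.right)
    height = max(left.height, right.height) + 1
    balanced = (left.is_balanced and right.is_balanced
                and abs(left.height - right.height) < 2)
    return Info(balanced, height)

def is_balanced(head):
    return process(head).is_balanced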
|
normal
|
{
"blob_id": "b713e38824db13f919484b071fb35afb29e26baa",
"index": 3803,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, parentdir)\n<mask token>\ns2.setTitle('二叉树——递归套路')\n<mask token>\nr2.setTitle('二叉树——递归套路')\n<mask token>\nxmind.build(content, r2)\nxmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\n",
"step-3": "<mask token>\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, parentdir)\n<mask token>\nxmind_name = '数据结构'\nw = xmind.load(os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\ns2 = w.createSheet()\ns2.setTitle('二叉树——递归套路')\nr2 = s2.getRootTopic()\nr2.setTitle('二叉树——递归套路')\ncontent = {'递归套路': ['可解决面试中绝大多数二叉树问题,尤其是树型dp问题', '本质是利用递归遍历二叉树的便利性'], '思路':\n ['1.假设以x节点为为头,假设可以向X左树和X右树要任何信息', '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',\n '3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息', '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',\n '5.递归函数都返回S,每一棵子树都这么要求', '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'], '题目1': [\n '给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树', {'思路': ['1.左子树是否平衡', '2.右子树是否平衡',\n '3.左树与右树高在2以内']}, {'实现': ['Class Info(){', ' boolean isBalanced;',\n ' int height;', '}', '---------------------',\n 'Info process(Node head){', ' if(node==null){', ' return node;',\n ' }', ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)-1;',\n ' boolean isBalanced=true;',\n ' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){'\n , ' isBalanced=false;', ' }',\n ' return new Info(isBalanced,height);', '}']}], '题目2': [\n '给定一棵二叉树的头节点head,任何两个节点之前都存在距离', '返回整棵二叉树的最大距离', {'思路': [{'1.与头节点无关': [\n 'max(左侧的最大距离,右侧的最大距离)']}, {'2.与头节点有头': ['左树高+右树高+1']}]}, {'实现': [\n 'Class Info(){', ' int maxDistance;', ' int height;', '}',\n '---------------------', 'Info process(Node head){',\n ' if(head==null){', ' return new Info(0,0);', ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)+1;',\n ' int maxDistance=Math.max(',\n ' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',\n ' leftInfo.height+rightInfo.height+1)',\n ' return new Info(maxDistance,height);', '}']}]}\nxmind.build(content, r2)\nxmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\n",
"step-4": "import os, sys\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, parentdir)\nimport xmind\nfrom xmind.core.markerref import MarkerId\nxmind_name = '数据结构'\nw = xmind.load(os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\ns2 = w.createSheet()\ns2.setTitle('二叉树——递归套路')\nr2 = s2.getRootTopic()\nr2.setTitle('二叉树——递归套路')\ncontent = {'递归套路': ['可解决面试中绝大多数二叉树问题,尤其是树型dp问题', '本质是利用递归遍历二叉树的便利性'], '思路':\n ['1.假设以x节点为为头,假设可以向X左树和X右树要任何信息', '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',\n '3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息', '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',\n '5.递归函数都返回S,每一棵子树都这么要求', '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'], '题目1': [\n '给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树', {'思路': ['1.左子树是否平衡', '2.右子树是否平衡',\n '3.左树与右树高在2以内']}, {'实现': ['Class Info(){', ' boolean isBalanced;',\n ' int height;', '}', '---------------------',\n 'Info process(Node head){', ' if(node==null){', ' return node;',\n ' }', ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)-1;',\n ' boolean isBalanced=true;',\n ' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){'\n , ' isBalanced=false;', ' }',\n ' return new Info(isBalanced,height);', '}']}], '题目2': [\n '给定一棵二叉树的头节点head,任何两个节点之前都存在距离', '返回整棵二叉树的最大距离', {'思路': [{'1.与头节点无关': [\n 'max(左侧的最大距离,右侧的最大距离)']}, {'2.与头节点有头': ['左树高+右树高+1']}]}, {'实现': [\n 'Class Info(){', ' int maxDistance;', ' int height;', '}',\n '---------------------', 'Info process(Node head){',\n ' if(head==null){', ' return new Info(0,0);', ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)+1;',\n ' int maxDistance=Math.max(',\n ' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',\n ' leftInfo.height+rightInfo.height+1)',\n ' return new Info(maxDistance,height);', '}']}]}\nxmind.build(content, r2)\nxmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\n",
"step-5": "import os,sys \nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) \nsys.path.insert(0,parentdir) \n\nimport xmind\nfrom xmind.core.markerref import MarkerId\nxmind_name=\"数据结构\"\nw = xmind.load(os.path.dirname(os.path.abspath(__file__))+\"\\\\\"+xmind_name+\".xmind\") \ns2=w.createSheet()\ns2.setTitle(\"二叉树——递归套路\")\nr2=s2.getRootTopic()\nr2.setTitle(\"二叉树——递归套路\")\n\n\ncontent={\n'递归套路':[\n '可解决面试中绝大多数二叉树问题,尤其是树型dp问题',\n '本质是利用递归遍历二叉树的便利性'\n],\n'思路':[\n '1.假设以x节点为为头,假设可以向X左树和X右树要任何信息',\n '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',\n '3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息',\n '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',\n '5.递归函数都返回S,每一棵子树都这么要求',\n '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'\n],\n'题目1':[\n '给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树',\n {'思路':[\n '1.左子树是否平衡',\n '2.右子树是否平衡',\n '3.左树与右树高在2以内',\n ]},\n {'实现':[\n 'Class Info(){',\n ' boolean isBalanced;',\n ' int height;',\n '}',\n '---------------------',\n 'Info process(Node head){',\n ' if(node==null){',\n ' return node;',\n ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)-1;',\n ' boolean isBalanced=true;',\n ' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){',\n ' isBalanced=false;',\n ' }',\n ' return new Info(isBalanced,height);',\n '}'\n ]}\n],\n'题目2':[\n '给定一棵二叉树的头节点head,任何两个节点之前都存在距离',\n '返回整棵二叉树的最大距离',\n {'思路':[\n {'1.与头节点无关':[\n 'max(左侧的最大距离,右侧的最大距离)',\n ]},\n {'2.与头节点有头':[\n '左树高+右树高+1'\n ]}\n ]},\n {'实现':[\n 'Class Info(){',\n ' int maxDistance;',\n ' int height;',\n '}',\n '---------------------',\n 'Info process(Node head){',\n ' if(head==null){',\n ' return new Info(0,0);',\n ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)+1;',\n ' int maxDistance=Math.max(',\n ' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',\n ' leftInfo.height+rightInfo.height+1)',\n ' return new Info(maxDistance,height);',\n '}'\n ]}\n \n]\n\n}\n#构建xmind\nxmind.build(content,r2)\n#保存xmind\nxmind.save(w,os.path.dirname(os.path.abspath(__file__))+\"\\\\\"+xmind_name+\".xmind\") ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-10-28 17:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('KYusers', '0017_caprofile_regs'),
]
operations = [
migrations.AddField(
model_name='message',
name='mard_read',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='caprofile',
name='regs',
field=models.ManyToManyField(blank=True, related_name='regs', to='KYusers.KYProfile'),
),
]
|
normal
|
{
"blob_id": "12c3fe8a3ca1e660eeb90b16eca17eddd47e5de7",
"index": 7124,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('KYusers', '0017_caprofile_regs')]\n operations = [migrations.AddField(model_name='message', name=\n 'mard_read', field=models.BooleanField(default=False)), migrations.\n AlterField(model_name='caprofile', name='regs', field=models.\n ManyToManyField(blank=True, related_name='regs', to=\n 'KYusers.KYProfile'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('KYusers', '0017_caprofile_regs')]\n operations = [migrations.AddField(model_name='message', name=\n 'mard_read', field=models.BooleanField(default=False)), migrations.\n AlterField(model_name='caprofile', name='regs', field=models.\n ManyToManyField(blank=True, related_name='regs', to=\n 'KYusers.KYProfile'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.8 on 2016-10-28 17:08\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('KYusers', '0017_caprofile_regs'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='message',\n name='mard_read',\n field=models.BooleanField(default=False),\n ),\n migrations.AlterField(\n model_name='caprofile',\n name='regs',\n field=models.ManyToManyField(blank=True, related_name='regs', to='KYusers.KYProfile'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
plugins_list = []
class PluginType(type):
def __init__(cls, name, bases, attrs):
super(PluginType, cls).__init__(name, bases, attrs)
        # register the plugin in the list
if not cls in plugins_list:
plugins_list.append(cls)
class PluginBase(object):
'''
    Base class for all plugins
'''
__metaclass__ = PluginType
pass
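
# --- Illustrative usage sketch (not part of the original module) ---
# Under Python 2, where the __metaclass__ hook above applies, merely defining
# a subclass of PluginBase runs PluginType.__init__ and registers the class in
# plugins_list. HelloPlugin is a hypothetical example class; in Python 3 the
# equivalent declaration would be `class PluginBase(metaclass=PluginType)`.

class HelloPlugin(PluginBase):
    def run(self):
        return 'hello'

# Both PluginBase and HelloPlugin now appear in plugins_list, so a host
# application can iterate over plugins_list to discover available plugins.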
|
normal
|
{
"blob_id": "b670655e3a8e88b97eed35e187b01d6524a16af3",
"index": 7709,
"step-1": "<mask token>\n\n\nclass PluginBase(object):\n \"\"\"\n Clase base para todos los plugins\n \"\"\"\n __metaclass__ = PluginType\n pass\n",
"step-2": "<mask token>\n\n\nclass PluginType(type):\n <mask token>\n\n\nclass PluginBase(object):\n \"\"\"\n Clase base para todos los plugins\n \"\"\"\n __metaclass__ = PluginType\n pass\n",
"step-3": "<mask token>\n\n\nclass PluginType(type):\n\n def __init__(cls, name, bases, attrs):\n super(PluginType, cls).__init__(name, bases, attrs)\n if not cls in plugins_list:\n plugins_list.append(cls)\n\n\nclass PluginBase(object):\n \"\"\"\n Clase base para todos los plugins\n \"\"\"\n __metaclass__ = PluginType\n pass\n",
"step-4": "plugins_list = []\n\n\nclass PluginType(type):\n\n def __init__(cls, name, bases, attrs):\n super(PluginType, cls).__init__(name, bases, attrs)\n if not cls in plugins_list:\n plugins_list.append(cls)\n\n\nclass PluginBase(object):\n \"\"\"\n Clase base para todos los plugins\n \"\"\"\n __metaclass__ = PluginType\n pass\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nplugins_list = []\n\nclass PluginType(type):\n def __init__(cls, name, bases, attrs):\n super(PluginType, cls).__init__(name, bases, attrs)\n\n # registrar el plugin en la lista\n if not cls in plugins_list:\n plugins_list.append(cls)\n\n\nclass PluginBase(object):\n '''\n Clase base para todos los plugins\n '''\n\n __metaclass__ = PluginType\n\n pass\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import config
import math
import pygame
import utils
class Rocket:
def __init__(self):
self.x = config.initialPosition['x']*config.game['scale'] + config.game['width']/2;
self.y = config.game['height'] - config.game['floorHeight'] - config.initialPosition['y']*config.game['scale'];
self.angle = config.initialPosition['angle'];
self.angle = utils.wrapToPi(self.angle);
self.dh = config.game['scale']*config.rocket['height']/2; #half display height
        self.dw = config.game['scale']*config.rocket['width']/2; # half display width
self.pl = 0 #left motor power
self.pr = 0 #right motor power
def draw(self, display):
pSin = math.sin(self.angle); # precalculated sin
pCos = math.cos(self.angle); # precalculated cos
#main body
pygame.draw.polygon(
display,
config.colors['green'],
[
[
self.x+self.dw*pSin+self.dh*pCos,
self.y+self.dw*pCos-self.dh*pSin,
], [
self.x-self.dw*pSin+self.dh*pCos,
self.y-self.dw*pCos-self.dh*pSin,
], [
self.x-self.dw*pSin-self.dh*pCos,
self.y-self.dw*pCos+self.dh*pSin,
], [
self.x+self.dw*pSin-self.dh*pCos,
self.y+self.dw*pCos+self.dh*pSin,
]
]
);
#left motor
pygame.draw.polygon(
display,
config.colors['red'],
[
[
self.x
+(-self.dh-self.dw*self.pl)*pCos
+(-self.dw/2)*pSin,
self.y
-(-self.dh-self.dw*self.pl)*pSin
+(-self.dw/2)*pCos,
],[
self.x
+(-self.dh)*pCos
+(-self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(-self.dw/6)*pCos,
],[
self.x
+(-self.dh)*pCos
+(-5*self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(-5*self.dw/6)*pCos,
]
]
)
#right motor
pygame.draw.polygon(
display,
config.colors['red'],
[
[
self.x
+(-self.dh-self.dw*self.pr)*pCos
+(self.dw/2)*pSin,
self.y
-(-self.dh-self.dw*self.pr)*pSin
+(self.dw/2)*pCos,
],[
self.x
+(-self.dh)*pCos
+(self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(self.dw/6)*pCos,
],[
self.x
+(-self.dh)*pCos
+(5*self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(5*self.dw/6)*pCos,
]
]
)
def update(self, x, y, angle, leftPower, rightPower):
self.x = x*config.game['scale'] + config.game['width']/2;
self.y = config.game['height'] - config.game['floorHeight'] - y*config.game['scale'];
self.angle = angle
self.angle = utils.wrapToPi(self.angle);
self.pl = leftPower;
if(self.pl<0):
self.pl = 0
elif self.pl>1:
self.pl = 1
self.pr = rightPower;
if(self.pr<0):
self.pr = 0
elif self.pr>1:
self.pr = 1
|
normal
|
{
"blob_id": "7a1a9d2e773fb783d8522f1ea51e753d5d3782e9",
"index": 7517,
"step-1": "<mask token>\n\n\nclass Rocket:\n <mask token>\n <mask token>\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-2": "<mask token>\n\n\nclass Rocket:\n\n def __init__(self):\n self.x = config.initialPosition['x'] * config.game['scale'\n ] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - config.initialPosition['y'] * config.game['scale']\n self.angle = config.initialPosition['angle']\n self.angle = utils.wrapToPi(self.angle)\n self.dh = config.game['scale'] * config.rocket['height'] / 2\n self.dw = config.game['scale'] * config.rocket['width'] / 2\n self.pl = 0\n self.pr = 0\n <mask token>\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-3": "<mask token>\n\n\nclass Rocket:\n\n def __init__(self):\n self.x = config.initialPosition['x'] * config.game['scale'\n ] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - config.initialPosition['y'] * config.game['scale']\n self.angle = config.initialPosition['angle']\n self.angle = utils.wrapToPi(self.angle)\n self.dh = config.game['scale'] * config.rocket['height'] / 2\n self.dw = config.game['scale'] * config.rocket['width'] / 2\n self.pl = 0\n self.pr = 0\n\n def draw(self, display):\n pSin = math.sin(self.angle)\n pCos = math.cos(self.angle)\n pygame.draw.polygon(display, config.colors['green'], [[self.x + \n self.dw * pSin + self.dh * pCos, self.y + self.dw * pCos - self\n .dh * pSin], [self.x - self.dw * pSin + self.dh * pCos, self.y -\n self.dw * pCos - self.dh * pSin], [self.x - self.dw * pSin - \n self.dh * pCos, self.y - self.dw * pCos + self.dh * pSin], [\n self.x + self.dw * pSin - self.dh * pCos, self.y + self.dw *\n pCos + self.dh * pSin]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pl) * pCos + -self.dw / 2 * pSin, self\n .y - (-self.dh - self.dw * self.pl) * pSin + -self.dw / 2 *\n pCos], [self.x + -self.dh * pCos + -self.dw / 6 * pSin, self.y -\n -self.dh * pSin + -self.dw / 6 * pCos], [self.x + -self.dh *\n pCos + -5 * self.dw / 6 * pSin, self.y - -self.dh * pSin + -5 *\n self.dw / 6 * pCos]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pr) * pCos + self.dw / 2 * pSin, self.\n y - (-self.dh - self.dw * self.pr) * pSin + self.dw / 2 * pCos],\n [self.x + -self.dh * pCos + self.dw / 6 * pSin, self.y - -self.\n dh * pSin + self.dw / 6 * pCos], [self.x + -self.dh * pCos + 5 *\n self.dw / 6 * pSin, self.y - -self.dh * pSin + 5 * self.dw / 6 *\n pCos]])\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-4": "import config\nimport math\nimport pygame\nimport utils\n\n\nclass Rocket:\n\n def __init__(self):\n self.x = config.initialPosition['x'] * config.game['scale'\n ] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - config.initialPosition['y'] * config.game['scale']\n self.angle = config.initialPosition['angle']\n self.angle = utils.wrapToPi(self.angle)\n self.dh = config.game['scale'] * config.rocket['height'] / 2\n self.dw = config.game['scale'] * config.rocket['width'] / 2\n self.pl = 0\n self.pr = 0\n\n def draw(self, display):\n pSin = math.sin(self.angle)\n pCos = math.cos(self.angle)\n pygame.draw.polygon(display, config.colors['green'], [[self.x + \n self.dw * pSin + self.dh * pCos, self.y + self.dw * pCos - self\n .dh * pSin], [self.x - self.dw * pSin + self.dh * pCos, self.y -\n self.dw * pCos - self.dh * pSin], [self.x - self.dw * pSin - \n self.dh * pCos, self.y - self.dw * pCos + self.dh * pSin], [\n self.x + self.dw * pSin - self.dh * pCos, self.y + self.dw *\n pCos + self.dh * pSin]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pl) * pCos + -self.dw / 2 * pSin, self\n .y - (-self.dh - self.dw * self.pl) * pSin + -self.dw / 2 *\n pCos], [self.x + -self.dh * pCos + -self.dw / 6 * pSin, self.y -\n -self.dh * pSin + -self.dw / 6 * pCos], [self.x + -self.dh *\n pCos + -5 * self.dw / 6 * pSin, self.y - -self.dh * pSin + -5 *\n self.dw / 6 * pCos]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pr) * pCos + self.dw / 2 * pSin, self.\n y - (-self.dh - self.dw * self.pr) * pSin + self.dw / 2 * pCos],\n [self.x + -self.dh * pCos + self.dw / 6 * pSin, self.y - -self.\n dh * pSin + self.dw / 6 * pCos], [self.x + -self.dh * pCos + 5 *\n self.dw / 6 * pSin, self.y - -self.dh * pSin + 5 * self.dw / 6 *\n pCos]])\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-5": "import config\nimport math\nimport pygame\nimport utils\n\nclass Rocket:\n\tdef __init__(self):\n\t\tself.x = config.initialPosition['x']*config.game['scale'] + config.game['width']/2;\n\t\tself.y = config.game['height'] - config.game['floorHeight'] - config.initialPosition['y']*config.game['scale'];\n\n\t\tself.angle = config.initialPosition['angle'];\n\t\tself.angle = utils.wrapToPi(self.angle);\n\t\tself.dh = config.game['scale']*config.rocket['height']/2; #half display height\n\t\tself.dw = config.game['scale']*config.rocket['width']/2; # half display height\n\t\tself.pl = 0 #left motor power\n\t\tself.pr = 0 #right motor power\n\n\tdef draw(self, display):\n\t\tpSin = math.sin(self.angle); # precalculated sin\n\t\tpCos = math.cos(self.angle); # precalculated cos\n\t\t\n\t\t#main body\n\t\tpygame.draw.polygon(\n\t\t\tdisplay,\n\t\t\tconfig.colors['green'],\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\tself.x+self.dw*pSin+self.dh*pCos,\n\t\t\t\t\tself.y+self.dw*pCos-self.dh*pSin,\n\t\t\t\t], [\n\t\t\t\t\tself.x-self.dw*pSin+self.dh*pCos,\n\t\t\t\t\tself.y-self.dw*pCos-self.dh*pSin,\n\t\t\t\t], [\n\t\t\t\t\tself.x-self.dw*pSin-self.dh*pCos,\n\t\t\t\t\tself.y-self.dw*pCos+self.dh*pSin,\n\t\t\t\t], [\n\t\t\t\t\tself.x+self.dw*pSin-self.dh*pCos,\n\t\t\t\t\tself.y+self.dw*pCos+self.dh*pSin,\n\t\t\t\t]\n\t\t\t]\n\t\t\n\t\t);\n\n\t\t#left motor\n\t\tpygame.draw.polygon(\n\t\t\tdisplay,\n\t\t\tconfig.colors['red'],\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh-self.dw*self.pl)*pCos\n\t\t\t\t\t+(-self.dw/2)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh-self.dw*self.pl)*pSin\n\t\t\t\t\t+(-self.dw/2)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(-self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(-self.dw/6)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(-5*self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(-5*self.dw/6)*pCos,\n\t\t\t\t]\n\n\t\t\t]\n\t\t)\n\n\t\t#right motor\n\t\tpygame.draw.polygon(\n\t\t\tdisplay,\n\t\t\tconfig.colors['red'],\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh-self.dw*self.pr)*pCos\n\t\t\t\t\t+(self.dw/2)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh-self.dw*self.pr)*pSin\n\t\t\t\t\t+(self.dw/2)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(self.dw/6)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(5*self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(5*self.dw/6)*pCos,\n\t\t\t\t]\n\n\t\t\t]\n\t\t)\n\n\tdef update(self, x, y, angle, leftPower, rightPower):\n\t\tself.x = x*config.game['scale'] + config.game['width']/2;\n\t\tself.y = config.game['height'] - config.game['floorHeight'] - y*config.game['scale'];\n\n\t\tself.angle = angle\n\t\tself.angle = utils.wrapToPi(self.angle);\n\n\t\tself.pl = leftPower;\n\t\tif(self.pl<0):\n\t\t\tself.pl = 0\n\t\telif self.pl>1:\n\t\t\tself.pl = 1\n\n\t\tself.pr = rightPower;\n\t\tif(self.pr<0):\n\t\t\tself.pr = 0\n\t\telif self.pr>1:\n\t\t\tself.pr = 1\n\n\t\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class Node(object):
def __init__(self, d, n=None):
self.data = d
self.next_node = n
def get_data(self):
return self.data
def set_data(self, d):
self.data = d
def get_next(self):
return self.next_node
def set_next(self, n):
self.next_node=n
class LinkedList(object):
def __init__(self, r=None):
self.root = r
self.size = 0
def get_size(self):
return self.size
def add(self, d):
"""
To Add a new node or prepend a new node
"""
        new_node = Node(d, self.root)
self.root = new_node
self.size += 1
return d
def append(self, d):
this_node = self.root
while this_node is not None:
if this_node.get_next() is None:
this_node.set_next(Node(d))
self.size += 1
return d
else:
this_node=this_node.get_next()
def remove(self, d):
this_node = self.root
prev_node = None
while this_node is not None:
            if this_node.get_data() == d:
                if prev_node:
                    prev_node.set_next(this_node.get_next())
                else:
                    self.root = this_node.get_next()
                self.size -= 1
                return True
            else:
                prev_node = this_node
                this_node = this_node.get_next()
        return False
def find(self, d):
this_node = self.root
while this_node is not None:
if this_node.get_data() == d:
return True
else:
this_node=this_node.get_next()
return False
myList=LinkedList()
myList.add(1)
myList.append(2)
print(myList.find(2))
print(myList.get_size())
myList.remove(1)
print(myList.find(2))
print(myList.get_size())
|
normal
|
{
"blob_id": "de3e952ad43fe7e323e8f975a45bbd4eec7192db",
"index": 3481,
"step-1": "class Node(object):\n\n def __init__(self, d, n=None):\n self.data = d\n self.next_node = n\n\n def get_data(self):\n return self.data\n\n def set_data(self, d):\n self.data = d\n\n def get_next(self):\n return self.next_node\n\n def set_next(self, n):\n self.next_node=n\n\n\nclass LinkedList(object):\n\n def __init__(self, r=None):\n self.root = r\n self.size = 0\n\n def get_size(self):\n return self.size\n\n def add(self, d):\n \"\"\"\n To Add a new node or prepend a new node\n \"\"\"\n new_node = Node(d)\n self.root = new_node\n self.size += 1\n return d\n\n def append(self, d):\n this_node = self.root\n while this_node is not None:\n if this_node.get_next() is None:\n this_node.set_next(Node(d))\n self.size += 1\n return d\n else:\n this_node=this_node.get_next()\n\n def remove(self, d):\n this_node = self.root\n prev_node = None\n while this_node is not None:\n if this_node.get_data() == d:\n if prev_node:\n prev_node.set_next(this_node.get_next())\n self.size -= 1\n return True\n else:\n prev_node = this_node\n this_node = this_node.get_next()\n\n return false\n\n def find(self, d):\n this_node = self.root\n while this_node is not None:\n if this_node.get_data() == d:\n return True\n else:\n this_node=this_node.get_next()\n return False\n\n\nmyList=LinkedList()\nmyList.add(1)\nmyList.append(2)\nprint myList.find(2)\nprint myList.get_size()\nmyList.remove(1)\nprint myList.find(2)\nprint myList.get_size()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""
    Description: This module is used for testing. Testing is performed based on a list of commands to execute against a website.
Version : v1.5
History :
v1.0 - 08/01/2016 - Initial version
v1.1 - 08/05/2016 - Modified to accept List input.
v1.2 - 08/05/2016 - Removed dead code in feed_input
v1.3 - 08/05/2016 - Added function get_data_dictionary to return the fetched values
v1.4 - 09/01/2016 - updated _print_ function and added log_process_status variable
v1.5 - 09/22/2016 - variable to suppress output running. Default - output will be written to file.
Open Issues: None.
Pending : Enhance coding standards. Clean up dead code in feed_input function
"""
__version__ = "1.5.0"
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from URL_Status import *
import time # for sleep
import requests #to check status of the page
from Utilities import *
class PatternScraping():
def __init__(self,output_filename=None,developer_mode=False,print_instance=None,browser_instance=None,log_process_status=True,write_output=True):
self.developer_mode = developer_mode
self.log_process_status=log_process_status
if output_filename:
self.output_filename=output_filename
else:
self.output_filename='PatternScraping.' + get_timestamp_for_file() + '.testing.txt'
self.write_output=write_output
self.possible_commands = ['GO', 'GET_VALUE', 'CLICK', 'ENTER_VALUE','EXIT', 'SLEEP', 'GET_VALUES','GET_LINKS']
self.possible_command_types = ['ID', 'XPATH', 'NAME', 'CLASS', 'CSS']
self.browser = None
self.ins_browser=browser_instance
self.initiate_print_instance(instance_instance=print_instance)
def _print_(self,input_string_in,skip_timestamp=False,add_leading_space=True,message_priority=''):
module_name='PatternScraping'
input_string=input_string_in
if isinstance(input_string,str):
input_string = get_html_to_unicode_string(input_string)
if self.print_instance:
self.print_instance.customPrint(input_string,skip_timestamp=skip_timestamp,add_leading_space=add_leading_space,module_name=module_name,message_priority=message_priority)
else:
print_string=u'' + module_name + '\t' + message_priority + '\t' + input_string
if not skip_timestamp:
print_string = log_time_stamp() + print_string
            print(get_printable_string(print_string))
def initiate_print_instance(self,instance_instance=None):
self.print_instance=None
if instance_instance:
try:
if instance_instance.check():
self.print_instance=instance_instance
return True
except:
return False
return False
    def validate_input_commands(self, list_of_commands):  # each command is a tuple/list of fields
print_prefix='validate_input_commands\t'
for i in range(len(list_of_commands)):
if self.developer_mode:
self._print_(print_prefix + 'Current Input:' + str(list_of_commands[i]))
if list_of_commands[i][0] not in self.possible_commands:
self._print_(print_prefix + 'Command not in list:' + str(list_of_commands[i][0]))
custom_exit()
line_no = str(i + 1)
list_length = len(list_of_commands[i])
command_name=list_of_commands[i][0]
if command_name not in ['GO','SLEEP','EXIT'] and list_of_commands[i][1] not in self.possible_command_types:
status="Unknown command type"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'GO':
if not list_of_commands[i][1]:
status = "no link provided" + " in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'GET_VALUE':
if list_length != 4 or any(list_of_commands[i]) is False:
status = "no data provided"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'GET_VALUES':
if list_length != 4 or any(list_of_commands[i]) is False:
status = "no link provided"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'CLICK':
if list_length != 3 and list_length != 5:
status = "click command length error "+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if any(list_of_commands[i]) is False:
status = "click syntax error"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'ENTER_VALUE':
if not (list_length == 4 and list_of_commands[i][2]
and list_of_commands[i][3]):
status = "ENTER VALUE syntax error"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'SLEEP':
if not (list_of_commands[i][1] and (list_length == 2)):
status = "SLEEP time not provided"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'EXIT':
if list_length != 1:
status = "Exit syntax error"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
return True
def feed_input(self, input_commands):
print_prefix='feed_input\t'
self.data_dict = {}
#if self.developer_mode: self._print_(self.browser.page_source)
if isinstance(input_commands,str):
with open(input_commands, "r") as fopen:
self.base_list_of_lists = []
self.command_list = fopen.readlines()
for each_line in self.command_list:
self.base_list_of_lists.append((each_line.replace("\n", "")).split("\t"))
elif isinstance(input_commands,list):
self.base_list_of_lists=input_commands
else:
self._print_(print_prefix + ' Input argument should be either string(filename) or list(commands). Passed:' + str(type(input_commands)))
custom_exit()
input_status=self.validate_input_commands(self.base_list_of_lists)
if self.developer_mode and input_status:
self._print_(print_prefix + 'Input is Valid')
return True
def run(self):
if not self.ins_browser:
if not self.browser:
self.browser = webdriver.PhantomJS()#Chrome()
else:
self.browser=self.ins_browser
i = 0
for each_list in self.base_list_of_lists:
if self.developer_mode:
self._print_('Input:\t' + str(i + 1) + '\t' + str(each_list))
line = '\t'.join(each_list)
if each_list[0] == 'GO':
try:
status = self.go(each_list)
if self.developer_mode: self._print_('Command:\tGO\tStatus\t' + str(status))
self.file_write(line, status)
if status == 'Not available':
return 'Not available'
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'GET_VALUE':
try:
status = self.get_value(each_list)
if self.developer_mode: self._print_('Command:\tGET_VALUE\tStatus\t' + str(status))
self.file_write(line, status)
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'GET_VALUES':
# self._print_(self.browser.page_source.encode('utf-8')
try:
status = self.get_values(each_list)
if self.developer_mode: self._print_('Command:\tGET_VALUES\tStatus\t' + str(status))
self.file_write(line, status)
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'GET_LINKS':
try:
self.file_write(line, "Links as below")
status = self.get_links(each_list)
if self.developer_mode: self._print_('Command:\tGET_LINKS\tStatus\t' + str(status))
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'CLICK':
try:
status = self.click(each_list)
if self.developer_mode: self._print_('Command:\tCLICK\tStatus\t' + str(status))
self.file_write(line, status)
if status == 'Not available':
return 'Not available'
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'ENTER_VALUE':
try:
status = self.enter_value(each_list)
if self.developer_mode: self._print_('Command:\tENTER_VALUE\tStatus\t' + str(status))
self.file_write(line, status)
if status == 'Not available':
return 'Not available'
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'SLEEP':
self.sleep(each_list[1])
status = "Slept for " + each_list[1] + " second(s)"
if self.developer_mode: self._print_('Command:\tSLEEP\tStatus\t' + str(status))
self.file_write(line, status)
elif each_list[0] == 'EXIT':
self.file_write("EXIT", "OK")
if self.developer_mode: self._print_('Command:\tEXIT')
self.browser.quit()
i += 1
def go(self, list_of_values):
self.browser.get(list_of_values[1])
r = requests.get(list_of_values[1])
time.sleep(2)
link_status = r.status_code
return link_status
def close(self):
if not self.ins_browser:
if self.browser:
self.browser.quit()
def click(self, list_of_values):
try:
if list_of_values[1] == 'ID':
a_obj = self.find_by_id(list_of_values[2])
elif list_of_values[1] == 'XPATH':
a_obj = self.find_by_xpath(list_of_values[2])
elif list_of_values[1] == 'NAME':
a_obj = self.find_by_name(list_of_values[2])
elif list_of_values[1] == 'CLASS':
a_obj = self.find_by_class(list_of_values[2])
if len(list_of_values) == 3:
a_obj.click()
return "OK"
elif len(list_of_values) > 3:
if list_of_values[4] == 'Available':
if list_of_values[3] in self.data_dict.keys():
a_obj.click()
return "OK"
else:
return "Not available"
elif list_of_values[4] == 'Not Available':
if list_of_values[3] not in self.data_dict.keys():
a_obj.click()
self._print_('Function:\tclick\tCondition:\t' + 'Available')
return "OK"
else:
return "Not available"
else:
if list_of_values[4] == self.data_dict[list_of_values[3]]:
a_obj.click()
return "OK"
else:
return "Not available"
except NoSuchElementException as e:
self._print_('Function:\tclick\tError:\t' + str(e) + '\t Input:' + str(list_of_values))
return "Not available"
def get_value(self, list_of_values):
if list_of_values[1] == 'ID':
a_obj = self.find_by_id(list_of_values[2])
elif list_of_values[1] == 'XPATH':
a_obj = self.find_by_xpath(list_of_values[2])
elif list_of_values[1] == 'NAME':
a_obj = self.find_by_name(list_of_values[2])
if a_obj:
self.data_dict[list_of_values[3]] = a_obj.text
if self.developer_mode: self._print_('Function\tget_value\tData:\t' + str(self.data_dict))
return a_obj.text
return "Not available"
def get_values(self, list_of_values):
edge_list = []
new_news_list = []
if list_of_values[1] == 'CLASS':
elements = self.find_by_css_selector(list_of_values[2])
elif list_of_values[1] == 'XPATH':
elements = self.find_by_xpath(list_of_values[2])
elif list_of_values[1] == 'NAME':
elements = self.find_by_name(list_of_values[2])
elif list_of_values[1] == 'CSS':
elements = self.find_by_css_selector(list_of_values[2])
if elements:
edge_list = [a.get_attribute("href") for a in elements]
for each in edge_list:
if each and (not each.startswith('mailto')) and each not in new_news_list:
new_news_list.append(each)
return new_news_list
def get_links(self, list_of_values):
edge_list = []
new_news_list = []
if list_of_values[1] == 'CLASS':
path = "div."+list_of_values[2]+" a"
elements = self.find_by_css_selector(path)
elif list_of_values[1] == 'ID':
path = "div#"+list_of_values[2]+" a"
elements = self.find_by_css_selector(path)
if elements:
edge_list = [a.get_attribute("href") for a in elements]
for each in edge_list:
if each and (not each.startswith('mailto')) and each not in new_news_list:
new_news_list.append(each)
if new_news_list: #do we need to check the 4th argument
self.data_dict[list_of_values[3]]=new_news_list
main_window = self.browser.current_window_handle
if self.developer_mode: self._print_('Function\tget_links\tData:\t' + str(new_news_list))
self.file_write("",str(len(new_news_list))+ " link(s) found. Their status are: (link"+"\t"+"is_url_active"+"\t"+"is_redirected"+"\t"+"redirected_to"+")")
for each_link in new_news_list:
res_dict = url_check_status(each_link)
line = each_link+"\t"+res_dict['URL_Active']+"\t"+res_dict['Redirected']
self.file_write(line, res_dict['Redirected_into'])
return new_news_list
def enter_value(self, list_of_values):
if list_of_values[1] == 'ID':
a_obj = self.find_by_id(list_of_values[2])
elif list_of_values[1] == 'XPATH':
a_obj = self.find_by_xpath(list_of_values[2])
elif list_of_values[1] == 'NAME':
a_obj = self.find_by_name(list_of_values[2])
if a_obj:
if list_of_values[3] == "Keys.ENTER":
a_obj.send_keys(Keys.ENTER)
else:
a_obj.send_keys(list_of_values[3])
return "Value entered"
return "Not available"
def sleep(self, sleep_time):
time.sleep(float(sleep_time))
return True
def find_by_id(self, input_id):
input_id_obj = self.browser.find_element_by_id(input_id)
return input_id_obj
def find_elements_by_id(self, input_id):
input_id_obj = self.browser.find_elements_by_id(input_id)
return input_id_obj
def find_by_xpath(self, input_xpath):
input_xpath_obj = self.browser.find_element_by_xpath(input_xpath)
return input_xpath_obj
def find_by_name(self, input_name):
input_id_obj = self.browser.find_element_by_name(input_name)
return input_id_obj
def find_by_class(self, input_name):
input_class_obj = self.browser.find_element_by_class_name(input_name)
return input_class_obj
def find_by_css_selector(self, input_name):
input_class_obj = self.browser.find_elements_by_css_selector(input_name)
return input_class_obj
def file_write(self, command_line, status):
if self.write_output:
with open(self.output_filename, "a") as result_file:
result_file.write(command_line + "\t" + str(status) + "\n")
def get_data_dictionary(self):
return self.data_dict
if __name__ == '__main__':
# input_filename = 'input.txt'
input_filename = 'input_22.txt'
output_filename = 'output.txt'
obj = PatternScraping(developer_mode=True)
obj.feed_input([['GO','https://www.google.com'],['SLEEP','1'],['ENTER_VALUE','ID','lst-ib','Testing Automation'],['CLICK','NAME','btnG'],['SLEEP','5'],['EXIT']])
obj.run()
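
# --- Illustrative sketch (assumption): file-based command input ---
# feed_input() also accepts a filename: each line of the file is one command,
# with fields separated by tabs, mirroring the list form used above. The
# filename and the commands below are hypothetical examples that pass
# validate_input_commands().
def write_sample_command_file(path='sample_commands.txt'):
    sample_commands = [
        'GO\thttps://www.google.com',
        'SLEEP\t1',
        'ENTER_VALUE\tID\tlst-ib\tTesting Automation',
        'CLICK\tNAME\tbtnG',
        'SLEEP\t5',
        'EXIT',
    ]
    with open(path, 'w') as sample_file:
        sample_file.write('\n'.join(sample_commands) + '\n')
    return path
# Usage (not executed here): obj.feed_input(write_sample_command_file())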
|
normal
|
{
"blob_id": "9e77385933cf6e381f25bea9020f909d5dc6817d",
"index": 4744,
"step-1": "# -*- coding: utf-8 -*-\n\"\"\"\n Description: This modules is used for testing. Testing is performed based on the list of commands given to perform in a website\n Version : v1.5\n History :\n v1.0 - 08/01/2016 - Initial version\n v1.1 - 08/05/2016 - Modified to accept List input.\n v1.2 - 08/05/2016 - Removed dead code in feed_input\n v1.3 - 08/05/2016 - Added function get_data_dictionary to return the fetched values\n v1.4 - 09/01/2016 - updated _print_ function and added log_process_status variable\n v1.5 - 09/22/2016 - variable to suppress output running. Default - output will be written to file.\n Open Issues: None.\n Pending : Enhance coding standards. Clean up dead code in feed_input function\n\"\"\"\n__version__ = \"1.0.0\"\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\nfrom URL_Status import *\nimport time # for sleep\nimport requests #to check status of the page\nfrom Utilities import *\nclass PatternScraping():\n\n def __init__(self,output_filename=None,developer_mode=False,print_instance=None,browser_instance=None,log_process_status=True,write_output=True):\n self.developer_mode = developer_mode\n self.log_process_status=log_process_status\n if output_filename:\n self.output_filename=output_filename\n else:\n self.output_filename='PatternScraping.' + get_timestamp_for_file() + '.testing.txt'\n self.write_output=write_output\n self.possible_commands = ['GO', 'GET_VALUE', 'CLICK', 'ENTER_VALUE','EXIT', 'SLEEP', 'GET_VALUES','GET_LINKS']\n self.possible_command_types = ['ID', 'XPATH', 'NAME', 'CLASS', 'CSS']\n self.browser = None\n self.ins_browser=browser_instance\n self.initiate_print_instance(instance_instance=print_instance)\n\n def _print_(self,input_string_in,skip_timestamp=False,add_leading_space=True,message_priority=''):\n module_name='PatternScraping'\n input_string=input_string_in\n if isinstance(input_string,str):\n input_string = get_html_to_unicode_string(input_string)\n if self.print_instance:\n self.print_instance.customPrint(input_string,skip_timestamp=skip_timestamp,add_leading_space=add_leading_space,module_name=module_name,message_priority=message_priority)\n else:\n print_string=u'' + module_name + '\\t' + message_priority + '\\t' + input_string\n if not skip_timestamp:\n print_string = log_time_stamp() + print_string\n print get_printable_string(print_string)\n def initiate_print_instance(self,instance_instance=None):\n self.print_instance=None\n if instance_instance:\n try:\n if instance_instance.check():\n self.print_instance=instance_instance\n return True\n except: \n return False \n return False\n def validate_input_commands(self,list_of_commands):#commands have tupple\n print_prefix='validate_input_commands\\t'\n for i in range(len(list_of_commands)):\n if self.developer_mode:\n self._print_(print_prefix + 'Current Input:' + str(list_of_commands[i]))\n if list_of_commands[i][0] not in self.possible_commands:\n self._print_(print_prefix + 'Command not in list:' + str(list_of_commands[i][0]))\n custom_exit()\n line_no = str(i + 1)\n list_length = len(list_of_commands[i])\n command_name=list_of_commands[i][0]\n if command_name not in ['GO','SLEEP','EXIT'] and list_of_commands[i][1] not in self.possible_command_types:\n status=\"Unknown command type\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'GO':\n if not list_of_commands[i][1]:\n status = \"no link provided\" + \" in line number \"+ 
line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'GET_VALUE':\n if list_length != 4 or any(list_of_commands[i]) is False:\n status = \"no data provided\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'GET_VALUES':\n if list_length != 4 or any(list_of_commands[i]) is False:\n status = \"no link provided\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'CLICK':\n if list_length != 3 and list_length != 5:\n status = \"click command length error \"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if any(list_of_commands[i]) is False:\n status = \"click syntax error\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'ENTER_VALUE':\n if not (list_length == 4 and list_of_commands[i][2]\n and list_of_commands[i][3]):\n status = \"ENTER VALUE syntax error\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'SLEEP':\n if not (list_of_commands[i][1] and (list_length == 2)):\n status = \"SLEEP time not provided\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'EXIT':\n if list_length != 1:\n status = \"Exit syntax error\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n return True\n def feed_input(self, input_commands):\n print_prefix='feed_input\\t'\n self.data_dict = {}\n #if self.developer_mode: self._print_(self.browser.page_source)\n if isinstance(input_commands,str):\n with open(input_commands, \"r\") as fopen:\n self.base_list_of_lists = []\n self.command_list = fopen.readlines()\n for each_line in self.command_list:\n self.base_list_of_lists.append((each_line.replace(\"\\n\", \"\")).split(\"\\t\"))\n elif isinstance(input_commands,list):\n self.base_list_of_lists=input_commands\n else:\n self._print_(print_prefix + ' Input argument should be either string(filename) or list(commands). 
Passed:' + str(type(input_commands)))\n custom_exit()\n input_status=self.validate_input_commands(self.base_list_of_lists)\n if self.developer_mode and input_status:\n self._print_(print_prefix + 'Input is Valid')\n return True\n\n def run(self):\n if not self.ins_browser:\n if not self.browser:\n self.browser = webdriver.PhantomJS()#Chrome()\n else:\n self.browser=self.ins_browser\n i = 0\n for each_list in self.base_list_of_lists:\n if self.developer_mode: \n self._print_('Input:\\t' + str(i + 1) + '\\t' + str(each_list))\n line = '\\t'.join(each_list)\n if each_list[0] == 'GO':\n try:\n status = self.go(each_list)\n if self.developer_mode: self._print_('Command:\\tGO\\tStatus\\t' + str(status))\n self.file_write(line, status)\n if status == 'Not available':\n return 'Not available'\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'GET_VALUE':\n try:\n status = self.get_value(each_list)\n if self.developer_mode: self._print_('Command:\\tGET_VALUE\\tStatus\\t' + str(status))\n self.file_write(line, status)\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'GET_VALUES':\n # self._print_(self.browser.page_source.encode('utf-8')\n try:\n status = self.get_values(each_list)\n if self.developer_mode: self._print_('Command:\\tGET_VALUES\\tStatus\\t' + str(status)) \n self.file_write(line, status)\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'GET_LINKS':\n try:\n self.file_write(line, \"Links as below\")\n status = self.get_links(each_list)\n if self.developer_mode: self._print_('Command:\\tGET_LINKS\\tStatus\\t' + str(status))\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'CLICK':\n try:\n status = self.click(each_list) \n if self.developer_mode: self._print_('Command:\\tCLICK\\tStatus\\t' + str(status))\n self.file_write(line, status)\n if status == 'Not available':\n return 'Not available'\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'ENTER_VALUE':\n try:\n status = self.enter_value(each_list)\n if self.developer_mode: self._print_('Command:\\tENTER_VALUE\\tStatus\\t' + str(status))\n self.file_write(line, status)\n if status == 'Not available':\n return 'Not available'\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'SLEEP':\n self.sleep(each_list[1])\n status = \"Slept for \" + each_list[1] + \" second(s)\"\n if self.developer_mode: self._print_('Command:\\tSLEEP\\tStatus\\t' + str(status))\n self.file_write(line, status)\n elif each_list[0] == 'EXIT':\n self.file_write(\"EXIT\", \"OK\")\n if self.developer_mode: self._print_('Command:\\tEXIT')\n self.browser.quit()\n i += 1\n\n def go(self, list_of_values):\n self.browser.get(list_of_values[1])\n r = requests.get(list_of_values[1])\n time.sleep(2)\n link_status = r.status_code\n return link_status\n def close(self):\n if not self.ins_browser:\n if self.browser:\n self.browser.quit()\n def click(self, list_of_values):\n try:\n if list_of_values[1] == 'ID':\n a_obj = self.find_by_id(list_of_values[2])\n elif list_of_values[1] == 'XPATH':\n a_obj = self.find_by_xpath(list_of_values[2])\n elif list_of_values[1] == 'NAME':\n a_obj = self.find_by_name(list_of_values[2])\n elif list_of_values[1] == 'CLASS':\n a_obj = self.find_by_class(list_of_values[2])\n if len(list_of_values) == 3:\n a_obj.click()\n return \"OK\"\n elif len(list_of_values) > 3:\n if 
list_of_values[4] == 'Available':\n if list_of_values[3] in self.data_dict.keys():\n a_obj.click()\n return \"OK\"\n else:\n return \"Not available\"\n elif list_of_values[4] == 'Not Available':\n if list_of_values[3] not in self.data_dict.keys():\n a_obj.click()\n self._print_('Function:\\tclick\\tCondition:\\t' + 'Available')\n return \"OK\"\n else:\n return \"Not available\"\n else:\n if list_of_values[4] == self.data_dict[list_of_values[3]]:\n a_obj.click()\n return \"OK\"\n else:\n return \"Not available\"\n except NoSuchElementException as e:\n self._print_('Function:\\tclick\\tError:\\t' + str(e) + '\\t Input:' + str(list_of_values))\n return \"Not available\"\n\n def get_value(self, list_of_values):\n if list_of_values[1] == 'ID':\n a_obj = self.find_by_id(list_of_values[2])\n elif list_of_values[1] == 'XPATH':\n a_obj = self.find_by_xpath(list_of_values[2])\n elif list_of_values[1] == 'NAME':\n a_obj = self.find_by_name(list_of_values[2])\n if a_obj:\n self.data_dict[list_of_values[3]] = a_obj.text\n if self.developer_mode: self._print_('Function\\tget_value\\tData:\\t' + str(self.data_dict))\n return a_obj.text\n return \"Not available\"\n\n def get_values(self, list_of_values):\n edge_list = []\n new_news_list = []\n if list_of_values[1] == 'CLASS':\n elements = self.find_by_css_selector(list_of_values[2])\n elif list_of_values[1] == 'XPATH':\n elements = self.find_by_xpath(list_of_values[2])\n elif list_of_values[1] == 'NAME':\n elements = self.find_by_name(list_of_values[2])\n elif list_of_values[1] == 'CSS':\n elements = self.find_by_css_selector(list_of_values[2])\n if elements:\n edge_list = [a.get_attribute(\"href\") for a in elements] \n for each in edge_list:\n if each and (not each.startswith('mailto')) and each not in new_news_list:\n new_news_list.append(each)\n return new_news_list\n\n def get_links(self, list_of_values):\n edge_list = []\n new_news_list = []\n if list_of_values[1] == 'CLASS':\n path = \"div.\"+list_of_values[2]+\" a\"\n elements = self.find_by_css_selector(path)\n elif list_of_values[1] == 'ID':\n path = \"div#\"+list_of_values[2]+\" a\"\n elements = self.find_by_css_selector(path)\n if elements: \n edge_list = [a.get_attribute(\"href\") for a in elements] \n for each in edge_list:\n if each and (not each.startswith('mailto')) and each not in new_news_list:\n new_news_list.append(each)\n if new_news_list: #do we need to check the 4th argument\n self.data_dict[list_of_values[3]]=new_news_list\n main_window = self.browser.current_window_handle \n if self.developer_mode: self._print_('Function\\tget_links\\tData:\\t' + str(new_news_list))\n self.file_write(\"\",str(len(new_news_list))+ \" link(s) found. 
Their status are: (link\"+\"\\t\"+\"is_url_active\"+\"\\t\"+\"is_redirected\"+\"\\t\"+\"redirected_to\"+\")\")\n for each_link in new_news_list:\n res_dict = url_check_status(each_link)\n line = each_link+\"\\t\"+res_dict['URL_Active']+\"\\t\"+res_dict['Redirected']\n self.file_write(line, res_dict['Redirected_into']) \n return new_news_list\n \n def enter_value(self, list_of_values):\n if list_of_values[1] == 'ID':\n a_obj = self.find_by_id(list_of_values[2])\n elif list_of_values[1] == 'XPATH':\n a_obj = self.find_by_xpath(list_of_values[2])\n elif list_of_values[1] == 'NAME':\n a_obj = self.find_by_name(list_of_values[2]) \n if a_obj:\n if list_of_values[3] == \"Keys.ENTER\":\n a_obj.send_keys(Keys.ENTER)\n else:\n a_obj.send_keys(list_of_values[3])\n return \"Value entered\"\n return \"Not available\"\n\n def sleep(self, sleep_time):\n time.sleep(float(sleep_time))\n return True\n\n def find_by_id(self, input_id):\n input_id_obj = self.browser.find_element_by_id(input_id)\n return input_id_obj\n \n def find_elements_by_id(self, input_id):\n input_id_obj = self.browser.find_elements_by_id(input_id)\n return input_id_obj\n\n def find_by_xpath(self, input_xpath):\n input_xpath_obj = self.browser.find_element_by_xpath(input_xpath)\n return input_xpath_obj\n\n def find_by_name(self, input_name):\n input_id_obj = self.browser.find_element_by_name(input_name)\n return input_id_obj\n \n def find_by_class(self, input_name):\n input_class_obj = self.browser.find_element_by_class_name(input_name)\n return input_class_obj\n \n def find_by_css_selector(self, input_name):\n input_class_obj = self.browser.find_elements_by_css_selector(input_name)\n return input_class_obj\n\n def file_write(self, command_line, status):\n if self.write_output:\n with open(self.output_filename, \"a\") as result_file:\n result_file.write(command_line + \"\\t\" + str(status) + \"\\n\")\n def get_data_dictionary(self):\n return self.data_dict\n\nif __name__ == '__main__':\n # input_filename = 'input.txt'\n input_filename = 'input_22.txt'\n output_filename = 'output.txt'\n obj = PatternScraping(developer_mode=True)\n obj.feed_input([['GO','https://www.google.com'],['SLEEP','1'],['ENTER_VALUE','ID','lst-ib','Testing Automation'],['CLICK','NAME','btnG'],['SLEEP','5'],['EXIT']])\n obj.run()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pytest
import mock
from awx.main.models import (
UnifiedJob,
WorkflowJob,
WorkflowJobNode,
Job
)
def test_unified_job_workflow_attributes():
with mock.patch('django.db.ConnectionRouter.db_for_write'):
job = UnifiedJob(id=1, name="job-1", launch_type="workflow")
job.unified_job_node = WorkflowJobNode(workflow_job=WorkflowJob(pk=1))
assert job.spawned_by_workflow is True
assert job.workflow_job_id == 1
@pytest.fixture
def unified_job(mocker):
mocker.patch.object(UnifiedJob, 'can_cancel', return_value=True)
j = UnifiedJob()
j.status = 'pending'
j.cancel_flag = None
j.save = mocker.MagicMock()
j.websocket_emit_status = mocker.MagicMock()
return j
def test_cancel(unified_job):
unified_job.cancel()
assert unified_job.cancel_flag is True
assert unified_job.status == 'canceled'
assert unified_job.job_explanation == ''
    # Note: the websocket emit status check just reflects the state of the current code.
    # More thought may need to go into emitting "canceled" only if/when the job record's
    # status is actually changed to canceled, unlike now, where it is emitted unconditionally.
unified_job.websocket_emit_status.assert_called_with("canceled")
unified_job.save.assert_called_with(update_fields=['cancel_flag', 'status'])
def test_cancel_job_explanation(unified_job):
job_explanation = 'giggity giggity'
unified_job.cancel(job_explanation=job_explanation)
assert unified_job.job_explanation == job_explanation
unified_job.save.assert_called_with(update_fields=['cancel_flag', 'status', 'job_explanation'])
def test_log_representation():
'''
Common representation used inside of log messages
'''
uj = UnifiedJob(status='running', id=4)
job = Job(status='running', id=4)
assert job.log_format == 'job 4 (running)'
assert uj.log_format == 'unified_job 4 (running)'
|
normal
|
{
"blob_id": "80a397b0974e41c4669f07638b5b38830b58cb37",
"index": 9051,
"step-1": "<mask token>\n\n\[email protected]\ndef unified_job(mocker):\n mocker.patch.object(UnifiedJob, 'can_cancel', return_value=True)\n j = UnifiedJob()\n j.status = 'pending'\n j.cancel_flag = None\n j.save = mocker.MagicMock()\n j.websocket_emit_status = mocker.MagicMock()\n return j\n\n\n<mask token>\n\n\ndef test_log_representation():\n \"\"\"\n Common representation used inside of log messages\n \"\"\"\n uj = UnifiedJob(status='running', id=4)\n job = Job(status='running', id=4)\n assert job.log_format == 'job 4 (running)'\n assert uj.log_format == 'unified_job 4 (running)'\n",
"step-2": "<mask token>\n\n\[email protected]\ndef unified_job(mocker):\n mocker.patch.object(UnifiedJob, 'can_cancel', return_value=True)\n j = UnifiedJob()\n j.status = 'pending'\n j.cancel_flag = None\n j.save = mocker.MagicMock()\n j.websocket_emit_status = mocker.MagicMock()\n return j\n\n\ndef test_cancel(unified_job):\n unified_job.cancel()\n assert unified_job.cancel_flag is True\n assert unified_job.status == 'canceled'\n assert unified_job.job_explanation == ''\n unified_job.websocket_emit_status.assert_called_with('canceled')\n unified_job.save.assert_called_with(update_fields=['cancel_flag', 'status']\n )\n\n\ndef test_cancel_job_explanation(unified_job):\n job_explanation = 'giggity giggity'\n unified_job.cancel(job_explanation=job_explanation)\n assert unified_job.job_explanation == job_explanation\n unified_job.save.assert_called_with(update_fields=['cancel_flag',\n 'status', 'job_explanation'])\n\n\ndef test_log_representation():\n \"\"\"\n Common representation used inside of log messages\n \"\"\"\n uj = UnifiedJob(status='running', id=4)\n job = Job(status='running', id=4)\n assert job.log_format == 'job 4 (running)'\n assert uj.log_format == 'unified_job 4 (running)'\n",
"step-3": "<mask token>\n\n\ndef test_unified_job_workflow_attributes():\n with mock.patch('django.db.ConnectionRouter.db_for_write'):\n job = UnifiedJob(id=1, name='job-1', launch_type='workflow')\n job.unified_job_node = WorkflowJobNode(workflow_job=WorkflowJob(pk=1))\n assert job.spawned_by_workflow is True\n assert job.workflow_job_id == 1\n\n\[email protected]\ndef unified_job(mocker):\n mocker.patch.object(UnifiedJob, 'can_cancel', return_value=True)\n j = UnifiedJob()\n j.status = 'pending'\n j.cancel_flag = None\n j.save = mocker.MagicMock()\n j.websocket_emit_status = mocker.MagicMock()\n return j\n\n\ndef test_cancel(unified_job):\n unified_job.cancel()\n assert unified_job.cancel_flag is True\n assert unified_job.status == 'canceled'\n assert unified_job.job_explanation == ''\n unified_job.websocket_emit_status.assert_called_with('canceled')\n unified_job.save.assert_called_with(update_fields=['cancel_flag', 'status']\n )\n\n\ndef test_cancel_job_explanation(unified_job):\n job_explanation = 'giggity giggity'\n unified_job.cancel(job_explanation=job_explanation)\n assert unified_job.job_explanation == job_explanation\n unified_job.save.assert_called_with(update_fields=['cancel_flag',\n 'status', 'job_explanation'])\n\n\ndef test_log_representation():\n \"\"\"\n Common representation used inside of log messages\n \"\"\"\n uj = UnifiedJob(status='running', id=4)\n job = Job(status='running', id=4)\n assert job.log_format == 'job 4 (running)'\n assert uj.log_format == 'unified_job 4 (running)'\n",
"step-4": "import pytest\nimport mock\nfrom awx.main.models import UnifiedJob, WorkflowJob, WorkflowJobNode, Job\n\n\ndef test_unified_job_workflow_attributes():\n with mock.patch('django.db.ConnectionRouter.db_for_write'):\n job = UnifiedJob(id=1, name='job-1', launch_type='workflow')\n job.unified_job_node = WorkflowJobNode(workflow_job=WorkflowJob(pk=1))\n assert job.spawned_by_workflow is True\n assert job.workflow_job_id == 1\n\n\[email protected]\ndef unified_job(mocker):\n mocker.patch.object(UnifiedJob, 'can_cancel', return_value=True)\n j = UnifiedJob()\n j.status = 'pending'\n j.cancel_flag = None\n j.save = mocker.MagicMock()\n j.websocket_emit_status = mocker.MagicMock()\n return j\n\n\ndef test_cancel(unified_job):\n unified_job.cancel()\n assert unified_job.cancel_flag is True\n assert unified_job.status == 'canceled'\n assert unified_job.job_explanation == ''\n unified_job.websocket_emit_status.assert_called_with('canceled')\n unified_job.save.assert_called_with(update_fields=['cancel_flag', 'status']\n )\n\n\ndef test_cancel_job_explanation(unified_job):\n job_explanation = 'giggity giggity'\n unified_job.cancel(job_explanation=job_explanation)\n assert unified_job.job_explanation == job_explanation\n unified_job.save.assert_called_with(update_fields=['cancel_flag',\n 'status', 'job_explanation'])\n\n\ndef test_log_representation():\n \"\"\"\n Common representation used inside of log messages\n \"\"\"\n uj = UnifiedJob(status='running', id=4)\n job = Job(status='running', id=4)\n assert job.log_format == 'job 4 (running)'\n assert uj.log_format == 'unified_job 4 (running)'\n",
"step-5": "import pytest\nimport mock\n\nfrom awx.main.models import (\n UnifiedJob,\n WorkflowJob,\n WorkflowJobNode,\n Job\n)\n\n\ndef test_unified_job_workflow_attributes():\n with mock.patch('django.db.ConnectionRouter.db_for_write'):\n job = UnifiedJob(id=1, name=\"job-1\", launch_type=\"workflow\")\n job.unified_job_node = WorkflowJobNode(workflow_job=WorkflowJob(pk=1))\n\n assert job.spawned_by_workflow is True\n assert job.workflow_job_id == 1\n\n\[email protected]\ndef unified_job(mocker):\n mocker.patch.object(UnifiedJob, 'can_cancel', return_value=True)\n j = UnifiedJob()\n j.status = 'pending'\n j.cancel_flag = None\n j.save = mocker.MagicMock()\n j.websocket_emit_status = mocker.MagicMock()\n return j\n\n\ndef test_cancel(unified_job):\n\n unified_job.cancel()\n\n assert unified_job.cancel_flag is True\n assert unified_job.status == 'canceled'\n assert unified_job.job_explanation == ''\n # Note: the websocket emit status check is just reflecting the state of the current code.\n # Some more thought may want to go into only emitting canceled if/when the job record\n # status is changed to canceled. Unlike, currently, where it's emitted unconditionally.\n unified_job.websocket_emit_status.assert_called_with(\"canceled\")\n unified_job.save.assert_called_with(update_fields=['cancel_flag', 'status'])\n\n\ndef test_cancel_job_explanation(unified_job):\n job_explanation = 'giggity giggity'\n\n unified_job.cancel(job_explanation=job_explanation)\n\n assert unified_job.job_explanation == job_explanation\n unified_job.save.assert_called_with(update_fields=['cancel_flag', 'status', 'job_explanation'])\n\n\ndef test_log_representation():\n '''\n Common representation used inside of log messages\n '''\n uj = UnifiedJob(status='running', id=4)\n job = Job(status='running', id=4)\n assert job.log_format == 'job 4 (running)'\n assert uj.log_format == 'unified_job 4 (running)'\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import tensorflow as tf
from util.helper import focal_loss
from util.helper import conv_elu_bn
from util.helper import deconv_elu_bn
from util.helper import residual_block_elu
from util.helper import conv_elu
from util.helper import conv
from util.helper import reg_l1_loss
from util.helper import conv_bn
from util.helper import deconv
from util.helper import max_pool2d
from util.helper import upsample_layer
from util.helper import hourglass_module
from util.helper import conv_block
from util.helper import bottlenect_block_v1
from util.helper import pyramid_pooling_block
# 0 cat , 1 dog,
class model_objectdetection_ppm_centernet_v1:
def __init__(self, sess, class_count):
self.sess = sess
self.class_count = class_count
self.up_sample_rate = 1
self.feature_channels = 32
#self.hourglass_channel = 32
with tf.variable_scope('CenterNet'):
self._build_net()
def _build_net(self):
self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32, shape=[], name='learning_rate')
print(self.learning_rate_tensor)
self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3], name='X')
print(self.X)
self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')
print(self.keep_layer)
self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self.class_count], 'Y')
self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2], 'Y')
print(self.Y)
## Batch , Height , Width, Class
#X_input = tf.compat.v1.reshape(self.X, [-1, 512, 512, 3])
#Y_input = tf.compat.v1.reshape(self.Y, [-1, 128, 128, self.class_count])
# 512 512 -> 256x 256
with tf.variable_scope('downsamples'):
stage_1_1 = conv_block(self.X, conv_type='conv', filters=16, kernel_size=3, strides=2, training=self.keep_layer)
stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32, kernel_size=3, strides=2, training=self.keep_layer)
stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64, kernel_size=3, strides=2, training=self.keep_layer)
with tf.variable_scope('feature_extraction'):
feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64, kernel_size=3, upsample_rate=2, strides=2, repeat=2, training=self.keep_layer, name='residual1')
feature2 = bottlenect_block_v1(inputs=feature1, filters=64, kernel_size=3, upsample_rate=2, strides=2, repeat=2, training=self.keep_layer, name='residual2')
feature3 = bottlenect_block_v1(inputs=feature2, filters=32, kernel_size=3, upsample_rate=2, strides=1, repeat=2, training=self.keep_layer, name='residual3')
with tf.variable_scope('pyramid_pooling'):
pyramid = pyramid_pooling_block(feature3, kernel_size=32, input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])
with tf.variable_scope('featurefuse'):
feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv', filters=160, kernel_size=1, strides=1, training=self.keep_layer)
print('test',feature_fuse_layer1)
feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])
depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2', [3, 3, 32 * 5, 1], initializer=tf.compat.v1.variance_scaling_initializer())
feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1, 1, 1], padding='SAME')
print('feature_deptiwise conv=', feature_fuse_layer2)
feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(feature_fuse_layer2, scale=True, center=True, momentum=0.9, training=self.keep_layer)
feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)
feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=feature_fuse_layer2, filters=1, kernel_size=1, strides=1, padding='same', kernel_initializer=tf.compat.v1.variance_scaling_initializer())
final_feature = feature_fuse_layer2 + feature_fuse_layer1
final_feature = tf.compat.v1.layers.batch_normalization(final_feature, scale=True, center=True, momentum=0.9, training=self.keep_layer)
final_feature = tf.compat.v1.nn.relu(final_feature)
with tf.variable_scope('classifier'):
classifiter = conv_block(final_feature, conv_type='ds', filters=64, kernel_size=3, strides=1, training=self.keep_layer)
#classifiter = conv_block(classifiter, conv_type='ds', filters=64, kernel_size=3, strides=1, training=self.keep_layer)
print("=== network structure ===")
with tf.variable_scope("detector"):
#self.cls = conv_elu_bn(feature_fuse_layer2, filters=self.feature_channels, training=self.keep_layer, kernel_size=3, strides=1, name='detector_convelu1')
self.cls = conv(classifiter, filters=self.class_count, kernel_size=1, strides=1, name='detector_conv1')
self.cls = tf.compat.v1.nn.sigmoid(self.cls, name="heatmap")
#self.size = conv_elu_bn(feature_fuse_layer2, filters=self.feature_channels, training=self.keep_layer, kernel_size=3, strides=1, name='detector_convelu2')
self.size = conv(classifiter, filters=2, kernel_size=1, strides=1, name='detector_conv2')
self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')
print("heatmap sigmoid=", self.cls)
self.output = self.cls;
print("=== network structure ===")
self.heatmap_loss = focal_loss(self.output, self.Y)
self.size_loss = reg_l1_loss(self.size, self.SIZE)
self.cost = self.heatmap_loss + 0.1 * self.size_loss
# define cost/loss & optimizer
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.compat.v1.control_dependencies(update_ops):
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate_tensor).minimize(self.cost, name='AdamMinimize')
print("==============Node Name List==============")
print("learning rate tensor : ", self.learning_rate_tensor)
print("Input Node Name : ", self.X)
print("Output 4 Train Node Name : ", self.Y)
print("Phase Node Name", self.keep_layer)
print("Output Node Name (heatmap) : ", self.output)
print("Output Node Name (sizemap) : ", self.size)
print("Cost Function Node Name : ", self.cost)
print("Run this operation for a train step :", self.optimizer.name)
print("==============Node Name List==============")
def predict(self, x_test, keep_prop=False):
return self.sess.run([self.output, self.size], feed_dict={self.X: x_test, self.keep_layer: keep_prop})
def get_cost(self, x_test, y_test, y_size, keep_prop=False):
# print(self.sess.run(self.output, feed_dict={self.X: x_test, self.keep_layer: keep_prop}))
return self.sess.run(self.cost, feed_dict={self.X: x_test, self.Y: y_test, self.SIZE:y_size, self.keep_layer: keep_prop})
def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):
return self.sess.run(self.optimizer, feed_dict={self.X: x_data, self.Y: y_data, self.SIZE:y_size, self.keep_layer: keep_prop, self.learning_rate_tensor: learn_rate})
|
normal
|
{
"blob_id": "e24a62f2a3ff0122922f472a7b37f1773dfe9c11",
"index": 7605,
"step-1": "<mask token>\n\n\nclass model_objectdetection_ppm_centernet_v1:\n <mask token>\n\n def _build_net(self):\n self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32,\n shape=[], name='learning_rate')\n print(self.learning_rate_tensor)\n self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3],\n name='X')\n print(self.X)\n self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')\n print(self.keep_layer)\n self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self\n .class_count], 'Y')\n self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2\n ], 'Y')\n print(self.Y)\n with tf.variable_scope('downsamples'):\n stage_1_1 = conv_block(self.X, conv_type='conv', filters=16,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64,\n kernel_size=3, strides=2, training=self.keep_layer)\n with tf.variable_scope('feature_extraction'):\n feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual1')\n feature2 = bottlenect_block_v1(inputs=feature1, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual2')\n feature3 = bottlenect_block_v1(inputs=feature2, filters=32,\n kernel_size=3, upsample_rate=2, strides=1, repeat=2,\n training=self.keep_layer, name='residual3')\n with tf.variable_scope('pyramid_pooling'):\n pyramid = pyramid_pooling_block(feature3, kernel_size=32,\n input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])\n with tf.variable_scope('featurefuse'):\n feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv',\n filters=160, kernel_size=1, strides=1, training=self.keep_layer\n )\n print('test', feature_fuse_layer1)\n feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])\n depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2',\n [3, 3, 32 * 5, 1], initializer=tf.compat.v1.\n variance_scaling_initializer())\n feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=\n feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1,\n 1, 1], padding='SAME')\n print('feature_deptiwise conv=', feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(\n feature_fuse_layer2, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=\n feature_fuse_layer2, filters=1, kernel_size=1, strides=1,\n padding='same', kernel_initializer=tf.compat.v1.\n variance_scaling_initializer())\n final_feature = feature_fuse_layer2 + feature_fuse_layer1\n final_feature = tf.compat.v1.layers.batch_normalization(\n final_feature, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n final_feature = tf.compat.v1.nn.relu(final_feature)\n with tf.variable_scope('classifier'):\n classifiter = conv_block(final_feature, conv_type='ds', filters\n =64, kernel_size=3, strides=1, training=self.keep_layer)\n print('=== network structure ===')\n with tf.variable_scope('detector'):\n self.cls = conv(classifiter, filters=self.class_count,\n kernel_size=1, strides=1, name='detector_conv1')\n self.cls = tf.compat.v1.nn.sigmoid(self.cls, name='heatmap')\n self.size = conv(classifiter, filters=2, kernel_size=1, strides\n =1, name='detector_conv2')\n 
self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')\n print('heatmap sigmoid=', self.cls)\n self.output = self.cls\n print('=== network structure ===')\n self.heatmap_loss = focal_loss(self.output, self.Y)\n self.size_loss = reg_l1_loss(self.size, self.SIZE)\n self.cost = self.heatmap_loss + 0.1 * self.size_loss\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.\n UPDATE_OPS)\n with tf.compat.v1.control_dependencies(update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate\n =self.learning_rate_tensor).minimize(self.cost, name=\n 'AdamMinimize')\n print('==============Node Name List==============')\n print('learning rate tensor : ', self.learning_rate_tensor)\n print('Input Node Name : ', self.X)\n print('Output 4 Train Node Name : ', self.Y)\n print('Phase Node Name', self.keep_layer)\n print('Output Node Name (heatmap) : ', self.output)\n print('Output Node Name (sizemap) : ', self.size)\n print('Cost Function Node Name : ', self.cost)\n print('Run this operation for a train step :', self.\n optimizer.name)\n print('==============Node Name List==============')\n\n def predict(self, x_test, keep_prop=False):\n return self.sess.run([self.output, self.size], feed_dict={self.X:\n x_test, self.keep_layer: keep_prop})\n <mask token>\n\n def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):\n return self.sess.run(self.optimizer, feed_dict={self.X: x_data,\n self.Y: y_data, self.SIZE: y_size, self.keep_layer: keep_prop,\n self.learning_rate_tensor: learn_rate})\n",
"step-2": "<mask token>\n\n\nclass model_objectdetection_ppm_centernet_v1:\n\n def __init__(self, sess, class_count):\n self.sess = sess\n self.class_count = class_count\n self.up_sample_rate = 1\n self.feature_channels = 32\n with tf.variable_scope('CenterNet'):\n self._build_net()\n\n def _build_net(self):\n self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32,\n shape=[], name='learning_rate')\n print(self.learning_rate_tensor)\n self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3],\n name='X')\n print(self.X)\n self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')\n print(self.keep_layer)\n self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self\n .class_count], 'Y')\n self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2\n ], 'Y')\n print(self.Y)\n with tf.variable_scope('downsamples'):\n stage_1_1 = conv_block(self.X, conv_type='conv', filters=16,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64,\n kernel_size=3, strides=2, training=self.keep_layer)\n with tf.variable_scope('feature_extraction'):\n feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual1')\n feature2 = bottlenect_block_v1(inputs=feature1, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual2')\n feature3 = bottlenect_block_v1(inputs=feature2, filters=32,\n kernel_size=3, upsample_rate=2, strides=1, repeat=2,\n training=self.keep_layer, name='residual3')\n with tf.variable_scope('pyramid_pooling'):\n pyramid = pyramid_pooling_block(feature3, kernel_size=32,\n input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])\n with tf.variable_scope('featurefuse'):\n feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv',\n filters=160, kernel_size=1, strides=1, training=self.keep_layer\n )\n print('test', feature_fuse_layer1)\n feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])\n depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2',\n [3, 3, 32 * 5, 1], initializer=tf.compat.v1.\n variance_scaling_initializer())\n feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=\n feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1,\n 1, 1], padding='SAME')\n print('feature_deptiwise conv=', feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(\n feature_fuse_layer2, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=\n feature_fuse_layer2, filters=1, kernel_size=1, strides=1,\n padding='same', kernel_initializer=tf.compat.v1.\n variance_scaling_initializer())\n final_feature = feature_fuse_layer2 + feature_fuse_layer1\n final_feature = tf.compat.v1.layers.batch_normalization(\n final_feature, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n final_feature = tf.compat.v1.nn.relu(final_feature)\n with tf.variable_scope('classifier'):\n classifiter = conv_block(final_feature, conv_type='ds', filters\n =64, kernel_size=3, strides=1, training=self.keep_layer)\n print('=== network structure ===')\n with tf.variable_scope('detector'):\n self.cls = conv(classifiter, filters=self.class_count,\n kernel_size=1, 
strides=1, name='detector_conv1')\n self.cls = tf.compat.v1.nn.sigmoid(self.cls, name='heatmap')\n self.size = conv(classifiter, filters=2, kernel_size=1, strides\n =1, name='detector_conv2')\n self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')\n print('heatmap sigmoid=', self.cls)\n self.output = self.cls\n print('=== network structure ===')\n self.heatmap_loss = focal_loss(self.output, self.Y)\n self.size_loss = reg_l1_loss(self.size, self.SIZE)\n self.cost = self.heatmap_loss + 0.1 * self.size_loss\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.\n UPDATE_OPS)\n with tf.compat.v1.control_dependencies(update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate\n =self.learning_rate_tensor).minimize(self.cost, name=\n 'AdamMinimize')\n print('==============Node Name List==============')\n print('learning rate tensor : ', self.learning_rate_tensor)\n print('Input Node Name : ', self.X)\n print('Output 4 Train Node Name : ', self.Y)\n print('Phase Node Name', self.keep_layer)\n print('Output Node Name (heatmap) : ', self.output)\n print('Output Node Name (sizemap) : ', self.size)\n print('Cost Function Node Name : ', self.cost)\n print('Run this operation for a train step :', self.\n optimizer.name)\n print('==============Node Name List==============')\n\n def predict(self, x_test, keep_prop=False):\n return self.sess.run([self.output, self.size], feed_dict={self.X:\n x_test, self.keep_layer: keep_prop})\n <mask token>\n\n def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):\n return self.sess.run(self.optimizer, feed_dict={self.X: x_data,\n self.Y: y_data, self.SIZE: y_size, self.keep_layer: keep_prop,\n self.learning_rate_tensor: learn_rate})\n",
"step-3": "<mask token>\n\n\nclass model_objectdetection_ppm_centernet_v1:\n\n def __init__(self, sess, class_count):\n self.sess = sess\n self.class_count = class_count\n self.up_sample_rate = 1\n self.feature_channels = 32\n with tf.variable_scope('CenterNet'):\n self._build_net()\n\n def _build_net(self):\n self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32,\n shape=[], name='learning_rate')\n print(self.learning_rate_tensor)\n self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3],\n name='X')\n print(self.X)\n self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')\n print(self.keep_layer)\n self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self\n .class_count], 'Y')\n self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2\n ], 'Y')\n print(self.Y)\n with tf.variable_scope('downsamples'):\n stage_1_1 = conv_block(self.X, conv_type='conv', filters=16,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64,\n kernel_size=3, strides=2, training=self.keep_layer)\n with tf.variable_scope('feature_extraction'):\n feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual1')\n feature2 = bottlenect_block_v1(inputs=feature1, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual2')\n feature3 = bottlenect_block_v1(inputs=feature2, filters=32,\n kernel_size=3, upsample_rate=2, strides=1, repeat=2,\n training=self.keep_layer, name='residual3')\n with tf.variable_scope('pyramid_pooling'):\n pyramid = pyramid_pooling_block(feature3, kernel_size=32,\n input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])\n with tf.variable_scope('featurefuse'):\n feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv',\n filters=160, kernel_size=1, strides=1, training=self.keep_layer\n )\n print('test', feature_fuse_layer1)\n feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])\n depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2',\n [3, 3, 32 * 5, 1], initializer=tf.compat.v1.\n variance_scaling_initializer())\n feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=\n feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1,\n 1, 1], padding='SAME')\n print('feature_deptiwise conv=', feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(\n feature_fuse_layer2, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=\n feature_fuse_layer2, filters=1, kernel_size=1, strides=1,\n padding='same', kernel_initializer=tf.compat.v1.\n variance_scaling_initializer())\n final_feature = feature_fuse_layer2 + feature_fuse_layer1\n final_feature = tf.compat.v1.layers.batch_normalization(\n final_feature, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n final_feature = tf.compat.v1.nn.relu(final_feature)\n with tf.variable_scope('classifier'):\n classifiter = conv_block(final_feature, conv_type='ds', filters\n =64, kernel_size=3, strides=1, training=self.keep_layer)\n print('=== network structure ===')\n with tf.variable_scope('detector'):\n self.cls = conv(classifiter, filters=self.class_count,\n kernel_size=1, 
strides=1, name='detector_conv1')\n self.cls = tf.compat.v1.nn.sigmoid(self.cls, name='heatmap')\n self.size = conv(classifiter, filters=2, kernel_size=1, strides\n =1, name='detector_conv2')\n self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')\n print('heatmap sigmoid=', self.cls)\n self.output = self.cls\n print('=== network structure ===')\n self.heatmap_loss = focal_loss(self.output, self.Y)\n self.size_loss = reg_l1_loss(self.size, self.SIZE)\n self.cost = self.heatmap_loss + 0.1 * self.size_loss\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.\n UPDATE_OPS)\n with tf.compat.v1.control_dependencies(update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate\n =self.learning_rate_tensor).minimize(self.cost, name=\n 'AdamMinimize')\n print('==============Node Name List==============')\n print('learning rate tensor : ', self.learning_rate_tensor)\n print('Input Node Name : ', self.X)\n print('Output 4 Train Node Name : ', self.Y)\n print('Phase Node Name', self.keep_layer)\n print('Output Node Name (heatmap) : ', self.output)\n print('Output Node Name (sizemap) : ', self.size)\n print('Cost Function Node Name : ', self.cost)\n print('Run this operation for a train step :', self.\n optimizer.name)\n print('==============Node Name List==============')\n\n def predict(self, x_test, keep_prop=False):\n return self.sess.run([self.output, self.size], feed_dict={self.X:\n x_test, self.keep_layer: keep_prop})\n\n def get_cost(self, x_test, y_test, y_size, keep_prop=False):\n return self.sess.run(self.cost, feed_dict={self.X: x_test, self.Y:\n y_test, self.SIZE: y_size, self.keep_layer: keep_prop})\n\n def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):\n return self.sess.run(self.optimizer, feed_dict={self.X: x_data,\n self.Y: y_data, self.SIZE: y_size, self.keep_layer: keep_prop,\n self.learning_rate_tensor: learn_rate})\n",
"step-4": "import tensorflow as tf\nfrom util.helper import focal_loss\nfrom util.helper import conv_elu_bn\nfrom util.helper import deconv_elu_bn\nfrom util.helper import residual_block_elu\nfrom util.helper import conv_elu\nfrom util.helper import conv\nfrom util.helper import reg_l1_loss\nfrom util.helper import conv_bn\nfrom util.helper import deconv\nfrom util.helper import max_pool2d\nfrom util.helper import upsample_layer\nfrom util.helper import hourglass_module\nfrom util.helper import conv_block\nfrom util.helper import bottlenect_block_v1\nfrom util.helper import pyramid_pooling_block\n\n\nclass model_objectdetection_ppm_centernet_v1:\n\n def __init__(self, sess, class_count):\n self.sess = sess\n self.class_count = class_count\n self.up_sample_rate = 1\n self.feature_channels = 32\n with tf.variable_scope('CenterNet'):\n self._build_net()\n\n def _build_net(self):\n self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32,\n shape=[], name='learning_rate')\n print(self.learning_rate_tensor)\n self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3],\n name='X')\n print(self.X)\n self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')\n print(self.keep_layer)\n self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self\n .class_count], 'Y')\n self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2\n ], 'Y')\n print(self.Y)\n with tf.variable_scope('downsamples'):\n stage_1_1 = conv_block(self.X, conv_type='conv', filters=16,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32,\n kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64,\n kernel_size=3, strides=2, training=self.keep_layer)\n with tf.variable_scope('feature_extraction'):\n feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual1')\n feature2 = bottlenect_block_v1(inputs=feature1, filters=64,\n kernel_size=3, upsample_rate=2, strides=2, repeat=2,\n training=self.keep_layer, name='residual2')\n feature3 = bottlenect_block_v1(inputs=feature2, filters=32,\n kernel_size=3, upsample_rate=2, strides=1, repeat=2,\n training=self.keep_layer, name='residual3')\n with tf.variable_scope('pyramid_pooling'):\n pyramid = pyramid_pooling_block(feature3, kernel_size=32,\n input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])\n with tf.variable_scope('featurefuse'):\n feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv',\n filters=160, kernel_size=1, strides=1, training=self.keep_layer\n )\n print('test', feature_fuse_layer1)\n feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])\n depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2',\n [3, 3, 32 * 5, 1], initializer=tf.compat.v1.\n variance_scaling_initializer())\n feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=\n feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1,\n 1, 1], padding='SAME')\n print('feature_deptiwise conv=', feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(\n feature_fuse_layer2, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n feature_fuse_layer2 = tf.compat.v1.nn.relu(feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=\n feature_fuse_layer2, filters=1, kernel_size=1, strides=1,\n padding='same', kernel_initializer=tf.compat.v1.\n 
variance_scaling_initializer())\n final_feature = feature_fuse_layer2 + feature_fuse_layer1\n final_feature = tf.compat.v1.layers.batch_normalization(\n final_feature, scale=True, center=True, momentum=0.9,\n training=self.keep_layer)\n final_feature = tf.compat.v1.nn.relu(final_feature)\n with tf.variable_scope('classifier'):\n classifiter = conv_block(final_feature, conv_type='ds', filters\n =64, kernel_size=3, strides=1, training=self.keep_layer)\n print('=== network structure ===')\n with tf.variable_scope('detector'):\n self.cls = conv(classifiter, filters=self.class_count,\n kernel_size=1, strides=1, name='detector_conv1')\n self.cls = tf.compat.v1.nn.sigmoid(self.cls, name='heatmap')\n self.size = conv(classifiter, filters=2, kernel_size=1, strides\n =1, name='detector_conv2')\n self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')\n print('heatmap sigmoid=', self.cls)\n self.output = self.cls\n print('=== network structure ===')\n self.heatmap_loss = focal_loss(self.output, self.Y)\n self.size_loss = reg_l1_loss(self.size, self.SIZE)\n self.cost = self.heatmap_loss + 0.1 * self.size_loss\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.\n UPDATE_OPS)\n with tf.compat.v1.control_dependencies(update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate\n =self.learning_rate_tensor).minimize(self.cost, name=\n 'AdamMinimize')\n print('==============Node Name List==============')\n print('learning rate tensor : ', self.learning_rate_tensor)\n print('Input Node Name : ', self.X)\n print('Output 4 Train Node Name : ', self.Y)\n print('Phase Node Name', self.keep_layer)\n print('Output Node Name (heatmap) : ', self.output)\n print('Output Node Name (sizemap) : ', self.size)\n print('Cost Function Node Name : ', self.cost)\n print('Run this operation for a train step :', self.\n optimizer.name)\n print('==============Node Name List==============')\n\n def predict(self, x_test, keep_prop=False):\n return self.sess.run([self.output, self.size], feed_dict={self.X:\n x_test, self.keep_layer: keep_prop})\n\n def get_cost(self, x_test, y_test, y_size, keep_prop=False):\n return self.sess.run(self.cost, feed_dict={self.X: x_test, self.Y:\n y_test, self.SIZE: y_size, self.keep_layer: keep_prop})\n\n def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):\n return self.sess.run(self.optimizer, feed_dict={self.X: x_data,\n self.Y: y_data, self.SIZE: y_size, self.keep_layer: keep_prop,\n self.learning_rate_tensor: learn_rate})\n",
"step-5": "import tensorflow as tf\n\nfrom util.helper import focal_loss\nfrom util.helper import conv_elu_bn\nfrom util.helper import deconv_elu_bn\nfrom util.helper import residual_block_elu\nfrom util.helper import conv_elu\nfrom util.helper import conv\nfrom util.helper import reg_l1_loss\nfrom util.helper import conv_bn\nfrom util.helper import deconv\nfrom util.helper import max_pool2d\nfrom util.helper import upsample_layer\nfrom util.helper import hourglass_module\n\n\nfrom util.helper import conv_block\nfrom util.helper import bottlenect_block_v1\nfrom util.helper import pyramid_pooling_block\n\n# 0 cat , 1 dog,\n\nclass model_objectdetection_ppm_centernet_v1:\n\n def __init__(self, sess, class_count):\n self.sess = sess\n self.class_count = class_count\n self.up_sample_rate = 1\n self.feature_channels = 32\n #self.hourglass_channel = 32\n\n with tf.variable_scope('CenterNet'):\n self._build_net()\n\n def _build_net(self):\n self.learning_rate_tensor = tf.compat.v1.placeholder(tf.float32, shape=[], name='learning_rate')\n print(self.learning_rate_tensor)\n\n self.X = tf.compat.v1.placeholder(tf.float32, [None, 512, 512, 3], name='X')\n print(self.X)\n\n self.keep_layer = tf.compat.v1.placeholder(tf.bool, name='phase')\n print(self.keep_layer)\n\n self.Y = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, self.class_count], 'Y')\n self.SIZE = tf.compat.v1.placeholder(tf.float32, [None, 128, 128, 2], 'Y')\n print(self.Y)\n\n ## Batch , Height , Width, Class\n #X_input = tf.compat.v1.reshape(self.X, [-1, 512, 512, 3])\n #Y_input = tf.compat.v1.reshape(self.Y, [-1, 128, 128, self.class_count])\n\n\n # 512 512 -> 256x 256\n with tf.variable_scope('downsamples'):\n stage_1_1 = conv_block(self.X, conv_type='conv', filters=16, kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_2 = conv_block(stage_1_1, conv_type='ds', filters=32, kernel_size=3, strides=2, training=self.keep_layer)\n stage_1_3 = conv_block(stage_1_2, conv_type='ds', filters=64, kernel_size=3, strides=2, training=self.keep_layer)\n\n\n\n with tf.variable_scope('feature_extraction'):\n feature1 = bottlenect_block_v1(inputs=stage_1_3, filters=64, kernel_size=3, upsample_rate=2, strides=2, repeat=2, training=self.keep_layer, name='residual1')\n feature2 = bottlenect_block_v1(inputs=feature1, filters=64, kernel_size=3, upsample_rate=2, strides=2, repeat=2, training=self.keep_layer, name='residual2')\n feature3 = bottlenect_block_v1(inputs=feature2, filters=32, kernel_size=3, upsample_rate=2, strides=1, repeat=2, training=self.keep_layer, name='residual3')\n\n\n with tf.variable_scope('pyramid_pooling'):\n pyramid = pyramid_pooling_block(feature3, kernel_size=32, input_width=32, input_height=32, bin_sizes=[2, 4, 6, 8])\n\n\n with tf.variable_scope('featurefuse'):\n feature_fuse_layer1 = conv_block(stage_1_3, conv_type='conv', filters=160, kernel_size=1, strides=1, training=self.keep_layer)\n print('test',feature_fuse_layer1)\n\n feature_fuse_layer2 = upsample_layer(pyramid, [128, 128])\n depthwise_filter = tf.compat.v1.get_variable('feature_fuse_layer2', [3, 3, 32 * 5, 1], initializer=tf.compat.v1.variance_scaling_initializer())\n feature_fuse_layer2 = tf.compat.v1.nn.depthwise_conv2d(input=feature_fuse_layer2, filter=depthwise_filter, strides=[1, 1, 1, 1], padding='SAME')\n print('feature_deptiwise conv=', feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.batch_normalization(feature_fuse_layer2, scale=True, center=True, momentum=0.9, training=self.keep_layer)\n feature_fuse_layer2 = 
tf.compat.v1.nn.relu(feature_fuse_layer2)\n feature_fuse_layer2 = tf.compat.v1.layers.conv2d(inputs=feature_fuse_layer2, filters=1, kernel_size=1, strides=1, padding='same', kernel_initializer=tf.compat.v1.variance_scaling_initializer())\n\n final_feature = feature_fuse_layer2 + feature_fuse_layer1\n final_feature = tf.compat.v1.layers.batch_normalization(final_feature, scale=True, center=True, momentum=0.9, training=self.keep_layer)\n final_feature = tf.compat.v1.nn.relu(final_feature)\n\n\n with tf.variable_scope('classifier'):\n classifiter = conv_block(final_feature, conv_type='ds', filters=64, kernel_size=3, strides=1, training=self.keep_layer)\n #classifiter = conv_block(classifiter, conv_type='ds', filters=64, kernel_size=3, strides=1, training=self.keep_layer)\n\n\n print(\"=== network structure ===\")\n\n with tf.variable_scope(\"detector\"):\n #self.cls = conv_elu_bn(feature_fuse_layer2, filters=self.feature_channels, training=self.keep_layer, kernel_size=3, strides=1, name='detector_convelu1')\n self.cls = conv(classifiter, filters=self.class_count, kernel_size=1, strides=1, name='detector_conv1')\n self.cls = tf.compat.v1.nn.sigmoid(self.cls, name=\"heatmap\")\n\n #self.size = conv_elu_bn(feature_fuse_layer2, filters=self.feature_channels, training=self.keep_layer, kernel_size=3, strides=1, name='detector_convelu2')\n self.size = conv(classifiter, filters=2, kernel_size=1, strides=1, name='detector_conv2')\n self.size = tf.compat.v1.nn.relu(self.size, name='sizemap')\n\n\n print(\"heatmap sigmoid=\", self.cls)\n\n self.output = self.cls;\n print(\"=== network structure ===\")\n\n\n self.heatmap_loss = focal_loss(self.output, self.Y)\n self.size_loss = reg_l1_loss(self.size, self.SIZE)\n self.cost = self.heatmap_loss + 0.1 * self.size_loss\n # define cost/loss & optimizer\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)\n with tf.compat.v1.control_dependencies(update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate_tensor).minimize(self.cost, name='AdamMinimize')\n\n print(\"==============Node Name List==============\")\n print(\"learning rate tensor : \", self.learning_rate_tensor)\n print(\"Input Node Name : \", self.X)\n print(\"Output 4 Train Node Name : \", self.Y)\n print(\"Phase Node Name\", self.keep_layer)\n print(\"Output Node Name (heatmap) : \", self.output)\n print(\"Output Node Name (sizemap) : \", self.size)\n print(\"Cost Function Node Name : \", self.cost)\n print(\"Run this operation for a train step :\", self.optimizer.name)\n print(\"==============Node Name List==============\")\n\n def predict(self, x_test, keep_prop=False):\n return self.sess.run([self.output, self.size], feed_dict={self.X: x_test, self.keep_layer: keep_prop})\n\n def get_cost(self, x_test, y_test, y_size, keep_prop=False):\n # print(self.sess.run(self.output, feed_dict={self.X: x_test, self.keep_layer: keep_prop}))\n return self.sess.run(self.cost, feed_dict={self.X: x_test, self.Y: y_test, self.SIZE:y_size, self.keep_layer: keep_prop})\n\n def train(self, x_data, y_data, y_size, keep_prop=True, learn_rate=0.003):\n return self.sess.run(self.optimizer, feed_dict={self.X: x_data, self.Y: y_data, self.SIZE:y_size, self.keep_layer: keep_prop, self.learning_rate_tensor: learn_rate})",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
n = int(input())
m = int(input())
x = int(input())
y = int(input())
if m < n:
if m - x < x:
x = m - x
if n - y < y:
y = n - y
else:
if n - x < x:
x = n - x
if m - y < y:
y = m - y
if x < y:
print(x)
else:
print(y)
|
normal
|
{
"blob_id": "002cced6d24a4790d29f195355c795d609f744a7",
"index": 9134,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif m < n:\n if m - x < x:\n x = m - x\n if n - y < y:\n y = n - y\nelse:\n if n - x < x:\n x = n - x\n if m - y < y:\n y = m - y\nif x < y:\n print(x)\nelse:\n print(y)\n",
"step-3": "n = int(input())\nm = int(input())\nx = int(input())\ny = int(input())\nif m < n:\n if m - x < x:\n x = m - x\n if n - y < y:\n y = n - y\nelse:\n if n - x < x:\n x = n - x\n if m - y < y:\n y = m - y\nif x < y:\n print(x)\nelse:\n print(y)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# function that removes zeros from the list
def remove_zeros(given_list):
list_without_zero = []
for element in given_list:
if element != 0:
list_without_zero.append(element)
return list_without_zero
# function that sorts the list
def sort_desc(given_list):
# sorted_list = []
# for i in range(0, len(given_list)):
# for element in given_list:
# if element == max(given_list):
# sorted_list.append(element)
# given_list.remove(element)
return sorted(given_list, key=None, reverse=True)
# function that checks whether the number of elements is smaller than a given value
# returns the boolean value of that expression
def length_check(n, given_list):
return n > len(given_list)
# function that subtracts 1 from the first n elements of the list
def substract_one_for_n_elements(n, given_list):
minus_one_list = given_list[:]
for i in range(0, n):
minus_one_list[i] -= 1
return minus_one_list
# the grand finale and the complete Havel-Hakimi algorithm.
# This algorithm will return true if the answers are consistent
# (i.e. it's possible that everyone is telling the truth)
# and false if the answers are inconsistent (i.e. someone must be lying)
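# Illustrative trace (follows the steps implemented below): hh([3, 1, 2, 3, 1, 0])
#   remove zeros -> [3, 1, 2, 3, 1]; sort desc -> [3, 3, 2, 1, 1]
#   pop n=3 -> [3, 2, 1, 1]; subtract 1 from the first 3 -> [2, 1, 0, 1]
#   remove zeros -> [2, 1, 1]; sort desc -> [2, 1, 1]
#   pop n=2 -> [1, 1]; subtract 1 from the first 2 -> [0, 0]
#   remove zeros -> [] -> returns True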
def hh(given_list):
if given_list == []:
return True
else:
# 1
while given_list != []:
given_list = remove_zeros(given_list)
# 2
if given_list == []:
return True
break
else:
# 3
given_list = sort_desc(given_list)
# 4
n = given_list.pop(0)
# 5
if length_check(n, given_list):
return False
break
# 6, 7
else:
given_list = substract_one_for_n_elements(n, given_list)
# *****************************************
# tests
def test_remove_zeros():
assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2, 7, 2, 5]
assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]
assert remove_zeros([1, 2, 3]) == [1, 2, 3]
assert remove_zeros([0, 0, 0]) == []
assert remove_zeros([]) == []
def test_sort_desc():
assert sort_desc([5, 1, 3, 4, 2]) == [5, 4, 3, 2, 1]
assert sort_desc([0, 0, 0, 4, 0]) == [4, 0, 0, 0, 0]
assert sort_desc([1]) == [1]
assert sort_desc([]) == []
def test_length_check():
assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False
assert length_check(5, [5, 5, 5, 5, 5]) is False
assert length_check(5, [5, 5, 5, 5]) is True
assert length_check(3, [1, 1]) is True
assert length_check(1, []) is True
assert length_check(0, []) is False
def test_substract_one_for_n_elements():
assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]
assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, 4, 4, 2]
assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]
assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]
assert substract_one_for_n_elements(1, [1]) == [0]
def test_hh():
assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False
assert hh([4, 2, 0, 1, 5, 0]) is False
assert hh([3, 1, 2, 3, 1, 0]) is True
assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17, 0, 3, 16]) is True
assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6, 4, 7, 12]) is True
assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7, 12, 3]) is False
assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15, 5, 1]) is False
assert hh([2, 2, 0]) is False
assert hh([3, 2, 1]) is False
assert hh([1, 1]) is True
assert hh([1]) is False
assert hh([]) is True
|
normal
|
{
"blob_id": "0779e516e35c41acf0529961e11541dfd1320749",
"index": 6501,
"step-1": "def remove_zeros(given_list):\n list_without_zero = []\n for element in given_list:\n if element != 0:\n list_without_zero.append(element)\n return list_without_zero\n\n\ndef sort_desc(given_list):\n return sorted(given_list, key=None, reverse=True)\n\n\ndef length_check(n, given_list):\n return n > len(given_list)\n\n\n<mask token>\n\n\ndef test_substract_one_for_n_elements():\n assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]\n assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, \n 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, \n 4, 4, 2]\n assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]\n assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]\n assert substract_one_for_n_elements(1, [1]) == [0]\n\n\ndef test_hh():\n assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False\n assert hh([4, 2, 0, 1, 5, 0]) is False\n assert hh([3, 1, 2, 3, 1, 0]) is True\n assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,\n 0, 3, 16]) is True\n assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,\n 4, 7, 12]) is True\n assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,\n 12, 3]) is False\n assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,\n 5, 1]) is False\n assert hh([2, 2, 0]) is False\n assert hh([3, 2, 1]) is False\n assert hh([1, 1]) is True\n assert hh([1]) is False\n assert hh([]) is True\n",
"step-2": "def remove_zeros(given_list):\n list_without_zero = []\n for element in given_list:\n if element != 0:\n list_without_zero.append(element)\n return list_without_zero\n\n\ndef sort_desc(given_list):\n return sorted(given_list, key=None, reverse=True)\n\n\ndef length_check(n, given_list):\n return n > len(given_list)\n\n\n<mask token>\n\n\ndef test_remove_zeros():\n assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2, \n 7, 2, 5]\n assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]\n assert remove_zeros([1, 2, 3]) == [1, 2, 3]\n assert remove_zeros([0, 0, 0]) == []\n assert remove_zeros([]) == []\n\n\n<mask token>\n\n\ndef test_length_check():\n assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False\n assert length_check(5, [5, 5, 5, 5, 5]) is False\n assert length_check(5, [5, 5, 5, 5]) is True\n assert length_check(3, [1, 1]) is True\n assert length_check(1, []) is True\n assert length_check(0, []) is False\n\n\ndef test_substract_one_for_n_elements():\n assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]\n assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, \n 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, \n 4, 4, 2]\n assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]\n assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]\n assert substract_one_for_n_elements(1, [1]) == [0]\n\n\ndef test_hh():\n assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False\n assert hh([4, 2, 0, 1, 5, 0]) is False\n assert hh([3, 1, 2, 3, 1, 0]) is True\n assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,\n 0, 3, 16]) is True\n assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,\n 4, 7, 12]) is True\n assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,\n 12, 3]) is False\n assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,\n 5, 1]) is False\n assert hh([2, 2, 0]) is False\n assert hh([3, 2, 1]) is False\n assert hh([1, 1]) is True\n assert hh([1]) is False\n assert hh([]) is True\n",
"step-3": "def remove_zeros(given_list):\n list_without_zero = []\n for element in given_list:\n if element != 0:\n list_without_zero.append(element)\n return list_without_zero\n\n\ndef sort_desc(given_list):\n return sorted(given_list, key=None, reverse=True)\n\n\ndef length_check(n, given_list):\n return n > len(given_list)\n\n\n<mask token>\n\n\ndef hh(given_list):\n if given_list == []:\n return True\n else:\n while given_list != []:\n given_list = remove_zeros(given_list)\n if given_list == []:\n return True\n break\n else:\n given_list = sort_desc(given_list)\n n = given_list.pop(0)\n if length_check(n, given_list):\n return False\n break\n else:\n given_list = substract_one_for_n_elements(n, given_list)\n\n\ndef test_remove_zeros():\n assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2, \n 7, 2, 5]\n assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]\n assert remove_zeros([1, 2, 3]) == [1, 2, 3]\n assert remove_zeros([0, 0, 0]) == []\n assert remove_zeros([]) == []\n\n\n<mask token>\n\n\ndef test_length_check():\n assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False\n assert length_check(5, [5, 5, 5, 5, 5]) is False\n assert length_check(5, [5, 5, 5, 5]) is True\n assert length_check(3, [1, 1]) is True\n assert length_check(1, []) is True\n assert length_check(0, []) is False\n\n\ndef test_substract_one_for_n_elements():\n assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]\n assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, \n 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, \n 4, 4, 2]\n assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]\n assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]\n assert substract_one_for_n_elements(1, [1]) == [0]\n\n\ndef test_hh():\n assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False\n assert hh([4, 2, 0, 1, 5, 0]) is False\n assert hh([3, 1, 2, 3, 1, 0]) is True\n assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,\n 0, 3, 16]) is True\n assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,\n 4, 7, 12]) is True\n assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,\n 12, 3]) is False\n assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,\n 5, 1]) is False\n assert hh([2, 2, 0]) is False\n assert hh([3, 2, 1]) is False\n assert hh([1, 1]) is True\n assert hh([1]) is False\n assert hh([]) is True\n",
"step-4": "def remove_zeros(given_list):\n list_without_zero = []\n for element in given_list:\n if element != 0:\n list_without_zero.append(element)\n return list_without_zero\n\n\ndef sort_desc(given_list):\n return sorted(given_list, key=None, reverse=True)\n\n\ndef length_check(n, given_list):\n return n > len(given_list)\n\n\ndef substract_one_for_n_elements(n, given_list):\n minus_one_list = given_list[:]\n for i in range(0, n):\n minus_one_list[i] -= 1\n return minus_one_list\n\n\ndef hh(given_list):\n if given_list == []:\n return True\n else:\n while given_list != []:\n given_list = remove_zeros(given_list)\n if given_list == []:\n return True\n break\n else:\n given_list = sort_desc(given_list)\n n = given_list.pop(0)\n if length_check(n, given_list):\n return False\n break\n else:\n given_list = substract_one_for_n_elements(n, given_list)\n\n\ndef test_remove_zeros():\n assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2, \n 7, 2, 5]\n assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]\n assert remove_zeros([1, 2, 3]) == [1, 2, 3]\n assert remove_zeros([0, 0, 0]) == []\n assert remove_zeros([]) == []\n\n\ndef test_sort_desc():\n assert sort_desc([5, 1, 3, 4, 2]) == [5, 4, 3, 2, 1]\n assert sort_desc([0, 0, 0, 4, 0]) == [4, 0, 0, 0, 0]\n assert sort_desc([1]) == [1]\n assert sort_desc([]) == []\n\n\ndef test_length_check():\n assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False\n assert length_check(5, [5, 5, 5, 5, 5]) is False\n assert length_check(5, [5, 5, 5, 5]) is True\n assert length_check(3, [1, 1]) is True\n assert length_check(1, []) is True\n assert length_check(0, []) is False\n\n\ndef test_substract_one_for_n_elements():\n assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]\n assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, \n 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, \n 4, 4, 2]\n assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]\n assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]\n assert substract_one_for_n_elements(1, [1]) == [0]\n\n\ndef test_hh():\n assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False\n assert hh([4, 2, 0, 1, 5, 0]) is False\n assert hh([3, 1, 2, 3, 1, 0]) is True\n assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,\n 0, 3, 16]) is True\n assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,\n 4, 7, 12]) is True\n assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,\n 12, 3]) is False\n assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,\n 5, 1]) is False\n assert hh([2, 2, 0]) is False\n assert hh([3, 2, 1]) is False\n assert hh([1, 1]) is True\n assert hh([1]) is False\n assert hh([]) is True\n",
"step-5": "# funkcja usuwająca zera z listy \n\ndef remove_zeros(given_list):\n\n list_without_zero = []\n\n for element in given_list:\n if element != 0:\n list_without_zero.append(element)\n\n return list_without_zero\n\n# funkcja sortująca listę\n\ndef sort_desc(given_list):\n\n # sorted_list = []\n \n # for i in range(0, len(given_list)):\n # for element in given_list:\n # if element == max(given_list):\n # sorted_list.append(element)\n # given_list.remove(element) \n\n return sorted(given_list, key=None, reverse=True)\n\n# funkcja sprawdzająca czy iilość elementów jest mniejsza od danej wartości\n# zwraca wartość logiczną danego wyrażenia\n\ndef length_check(n, given_list):\n\n return n > len(given_list)\n\n# funkcja odejmująca 1 od pierwszych n-elementów listy\n\ndef substract_one_for_n_elements(n, given_list):\n\n minus_one_list = given_list[:]\n\n for i in range(0, n):\n minus_one_list[i] -= 1\n\n return minus_one_list\n\n# wielki finał i kompletny algorytm Havel-Hakimi.\n# This algorithm will return true if the answers are consistent \n# (i.e. it's possible that everyone is telling the truth) \n# and false if the answers are inconsistent (i.e. someone must be lying)\n\ndef hh(given_list):\n\n if given_list == []:\n return True\n \n else:\n # 1\n while given_list != []:\n given_list = remove_zeros(given_list)\n # 2\n if given_list == []:\n return True\n break\n\n else:\n # 3\n given_list = sort_desc(given_list)\n # 4\n n = given_list.pop(0) \n # 5 \n if length_check(n, given_list):\n return False\n break\n # 6, 7\n else:\n given_list = substract_one_for_n_elements(n, given_list) \n\n# *****************************************\n# testy\n\n\ndef test_remove_zeros():\n\n assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2, 7, 2, 5]\n assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]\n assert remove_zeros([1, 2, 3]) == [1, 2, 3]\n assert remove_zeros([0, 0, 0]) == []\n assert remove_zeros([]) == []\n\n\ndef test_sort_desc():\n\n assert sort_desc([5, 1, 3, 4, 2]) == [5, 4, 3, 2, 1]\n assert sort_desc([0, 0, 0, 4, 0]) == [4, 0, 0, 0, 0]\n assert sort_desc([1]) == [1]\n assert sort_desc([]) == []\n\n\ndef test_length_check():\n\n assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False\n assert length_check(5, [5, 5, 5, 5, 5]) is False\n assert length_check(5, [5, 5, 5, 5]) is True\n assert length_check(3, [1, 1]) is True\n assert length_check(1, []) is True\n assert length_check(0, []) is False\n\n\ndef test_substract_one_for_n_elements():\n\n assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]\n assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, 4, 4, 2]\n assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]\n assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]\n assert substract_one_for_n_elements(1, [1]) == [0]\n\n\ndef test_hh():\n\n assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False\n assert hh([4, 2, 0, 1, 5, 0]) is False\n assert hh([3, 1, 2, 3, 1, 0]) is True\n assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17, 0, 3, 16]) is True\n assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6, 4, 7, 12]) is True\n assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7, 12, 3]) is False\n assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15, 5, 1]) is False\n assert hh([2, 2, 0]) is False\n assert hh([3, 2, 1]) is False\n assert hh([1, 1]) is True\n assert hh([1]) 
is False\n assert hh([]) is True\n",
"step-ids": [
5,
7,
8,
10,
11
]
}
|
[
5,
7,
8,
10,
11
] |
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import random
from utils.misc import *
from utils.adapt_helpers import *
from utils.rotation import rotate_batch, rotate_single_with_label
from utils.model import resnet18
from utils.train_helpers import normalize, te_transforms
from utils.test_helpers import test
device = 'cuda' if torch.cuda.is_available() else 'cpu'
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', default='data/CIFAR-10-C/')
parser.add_argument('--shared', default=None)
########################################################################
parser.add_argument('--depth', default=18, type=int)
parser.add_argument('--group_norm', default=32, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--workers', default=8, type=int)
########################################################################
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--niter', default=1, type=int)
parser.add_argument('--online', action='store_true')
parser.add_argument('--shuffle', action='store_true')
parser.add_argument('--threshold', default=1, type=float)
parser.add_argument('--epsilon', default=0.2, type=float)
parser.add_argument('--dset_size', default=0, type=int)
########################################################################
parser.add_argument('--resume', default=None)
parser.add_argument('--outf', default='.')
parser.add_argument('--epochs', default=10, type=int)
args = parser.parse_args()
args.threshold += 0.001 # to correct for numeric errors
my_makedir(args.outf)
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
def gn_helper(planes):
return nn.GroupNorm(args.group_norm, planes)
norm_layer = gn_helper
net = resnet18(num_classes = 10, norm_layer=norm_layer).to(device)
net = torch.nn.DataParallel(net)
print('Resuming from %s...' %(args.resume))
ckpt = torch.load('%s/best.pth' %(args.resume))
net.load_state_dict(ckpt['net'])
print("Starting Test Error: %.3f" % ckpt['err_cls'])
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr)
trset, trloader = prepare_train_data(args)
teset, teloader = prepare_test_data(args)
print("Lethean Attack")
for i in range(args.epochs):
idx = random.randint(0, len(trset) - 1)
img, lbl = trset[idx]
random_rot = random.randint(1, 3)
rot_img = rotate_single_with_label(img, random_rot)
adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter, args.batch_size)
if i % 50 == 49:
print("%d%%" % ((i + 1) * 100 / 5000))
err_cls, correct_per_cls, total_per_cls = test(teloader, net, verbose=True, print_freq=0)
print("Epoch %d Test error: %.3f" % (i, err_cls))
|
normal
|
{
"blob_id": "1f345a20343eb859cb37bf406623c0fc10722357",
"index": 4826,
"step-1": "<mask token>\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\n<mask token>\nargs.threshold += 0.001\nmy_makedir(args.outf)\n<mask token>\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\n<mask token>\nprint('Resuming from %s...' % args.resume)\n<mask token>\nnet.load_state_dict(ckpt['net'])\nprint('Starting Test Error: %.3f' % ckpt['err_cls'])\n<mask token>\nprint('Lethean Attack')\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,\n args.batch_size)\n if i % 50 == 49:\n print('%d%%' % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net,\n verbose=True, print_freq=0)\n print('Epoch %d Test error: %.3f' % (i, err_cls))\n",
"step-3": "<mask token>\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',\n 'ship', 'truck')\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\nargs = parser.parse_args()\nargs.threshold += 0.001\nmy_makedir(args.outf)\n<mask token>\ncudnn.benchmark = True\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\nnorm_layer = gn_helper\nnet = resnet18(num_classes=10, norm_layer=norm_layer).to(device)\nnet = torch.nn.DataParallel(net)\nprint('Resuming from %s...' % args.resume)\nckpt = torch.load('%s/best.pth' % args.resume)\nnet.load_state_dict(ckpt['net'])\nprint('Starting Test Error: %.3f' % ckpt['err_cls'])\ncriterion = nn.CrossEntropyLoss().to(device)\noptimizer = optim.SGD(net.parameters(), lr=args.lr)\ntrset, trloader = prepare_train_data(args)\nteset, teloader = prepare_test_data(args)\nprint('Lethean Attack')\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,\n args.batch_size)\n if i % 50 == 49:\n print('%d%%' % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net,\n verbose=True, print_freq=0)\n print('Epoch %d Test error: %.3f' % (i, err_cls))\n",
"step-4": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport random\nfrom utils.misc import *\nfrom utils.adapt_helpers import *\nfrom utils.rotation import rotate_batch, rotate_single_with_label\nfrom utils.model import resnet18\nfrom utils.train_helpers import normalize, te_transforms\nfrom utils.test_helpers import test\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',\n 'ship', 'truck')\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\nargs = parser.parse_args()\nargs.threshold += 0.001\nmy_makedir(args.outf)\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\nnorm_layer = gn_helper\nnet = resnet18(num_classes=10, norm_layer=norm_layer).to(device)\nnet = torch.nn.DataParallel(net)\nprint('Resuming from %s...' % args.resume)\nckpt = torch.load('%s/best.pth' % args.resume)\nnet.load_state_dict(ckpt['net'])\nprint('Starting Test Error: %.3f' % ckpt['err_cls'])\ncriterion = nn.CrossEntropyLoss().to(device)\noptimizer = optim.SGD(net.parameters(), lr=args.lr)\ntrset, trloader = prepare_train_data(args)\nteset, teloader = prepare_test_data(args)\nprint('Lethean Attack')\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,\n args.batch_size)\n if i % 50 == 49:\n print('%d%%' % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net,\n verbose=True, print_freq=0)\n print('Epoch %d Test error: %.3f' % (i, err_cls))\n",
"step-5": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport random\n\nfrom utils.misc import *\nfrom utils.adapt_helpers import *\nfrom utils.rotation import rotate_batch, rotate_single_with_label\nfrom utils.model import resnet18\nfrom utils.train_helpers import normalize, te_transforms\nfrom utils.test_helpers import test\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\n########################################################################\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\n########################################################################\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\n########################################################################\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\n\nargs = parser.parse_args()\nargs.threshold += 0.001\t\t# to correct for numeric errors\nmy_makedir(args.outf)\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\nnorm_layer = gn_helper\n\nnet = resnet18(num_classes = 10, norm_layer=norm_layer).to(device)\nnet = torch.nn.DataParallel(net)\n\nprint('Resuming from %s...' %(args.resume))\nckpt = torch.load('%s/best.pth' %(args.resume))\nnet.load_state_dict(ckpt['net'])\nprint(\"Starting Test Error: %.3f\" % ckpt['err_cls'])\n\ncriterion = nn.CrossEntropyLoss().to(device)\noptimizer = optim.SGD(net.parameters(), lr=args.lr)\n\ntrset, trloader = prepare_train_data(args)\nteset, teloader = prepare_test_data(args)\n\nprint(\"Lethean Attack\")\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter, args.batch_size)\n\n if i % 50 == 49:\n print(\"%d%%\" % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net, verbose=True, print_freq=0)\n print(\"Epoch %d Test error: %.3f\" % (i, err_cls))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class Solution:
def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:
h = len(matrix)
w = len(matrix[0])
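        # Toeplitz property: every top-left-to-bottom-right diagonal holds a single value.
        # First check the diagonals starting in column 0, then those starting in row 0.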
for curRow in range(h) :
val = matrix[curRow][0]
i = 0
while i < h-curRow and i < w :
# print(curRow+i,i)
if matrix[curRow+i][i] != val :
return False
i += 1
# print('pass')
for curCol in range(w) :
val = matrix[0][curCol]
i = 0
while i < h and i < w-curCol :
if matrix[i][curCol+i] != val :
return False
i += 1
return True
|
normal
|
{
"blob_id": "774f5d01cd274755626989c2b58bde68df349d8e",
"index": 5845,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def isToeplitzMatrix(self, matrix: List[List[int]]) ->bool:\n h = len(matrix)\n w = len(matrix[0])\n for curRow in range(h):\n val = matrix[curRow][0]\n i = 0\n while i < h - curRow and i < w:\n if matrix[curRow + i][i] != val:\n return False\n i += 1\n for curCol in range(w):\n val = matrix[0][curCol]\n i = 0\n while i < h and i < w - curCol:\n if matrix[i][curCol + i] != val:\n return False\n i += 1\n return True\n",
"step-4": "class Solution:\n def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:\n h = len(matrix)\n w = len(matrix[0])\n for curRow in range(h) :\n val = matrix[curRow][0]\n i = 0\n while i < h-curRow and i < w :\n # print(curRow+i,i)\n if matrix[curRow+i][i] != val :\n return False\n i += 1\n # print('pass')\n for curCol in range(w) :\n val = matrix[0][curCol]\n i = 0\n while i < h and i < w-curCol :\n if matrix[i][curCol+i] != val :\n return False\n i += 1\n return True",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import matplotlib.pyplot as plt
import numpy as np
# Sample data for plotting
#x= np.array([0,1,2,3,4])
y = np.array([2, 2, 3, 4, 5])
print(y)
#print(range(y))
plt.figure(figsize=(10,1))
plt.bar(range(len(y)), y)
plt.savefig('test.png')
plt.clf()
|
normal
|
{
"blob_id": "2f714ed54a19ec26d7ecb1979e79366721b3d0fe",
"index": 6682,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(y)\nplt.figure(figsize=(10, 1))\nplt.bar(range(len(y)), y)\nplt.savefig('test.png')\nplt.clf()\n",
"step-3": "<mask token>\ny = np.array([2, 2, 3, 4, 5])\nprint(y)\nplt.figure(figsize=(10, 1))\nplt.bar(range(len(y)), y)\nplt.savefig('test.png')\nplt.clf()\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\ny = np.array([2, 2, 3, 4, 5])\nprint(y)\nplt.figure(figsize=(10, 1))\nplt.bar(range(len(y)), y)\nplt.savefig('test.png')\nplt.clf()\n",
"step-5": "import matplotlib.pyplot as plt\nimport numpy as np\n# 描画用サンプルデータ\n#x= np.array([0,1,2,3,4])\ny = np.array([2, 2, 3, 4, 5])\nprint(y)\n#print(range(y))\n\nplt.figure(figsize=(10,1))\nplt.bar(range(len(y)), y)\nplt.savefig('test.png')\nplt.clf()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
"""
Author: Adam White, Matthew Schlegel, Mohammad M. Ajallooeian, Sina Ghiassian
Purpose: Skeleton code for Monte Carlo Exploring Starts Control Agent
for use on A3 of Reinforcement learning course University of Alberta Fall 2017
"""
"""
/*
* Copyright (c) HAOTIAN ZHU ,COMPUT301,University Of Alberta All Rights Reserved.
* You May Use, Distribute Or Modify This Code Under Term And
* Condition Of Code Of Students Behavior At University Of Alberta.
*
*
* Author: Haotian Zhu
* If You Have Any Question Please contact [email protected].
*
*/
"""
import numpy as np
import pickle
from importlib import import_module
tile = import_module("tiles3")
iht = tile.IHT(3000)
w = None
currentState = None
lastState = None
alpha = 0.01/50
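# step size of 0.01 scaled down by the number of tilings (50, matching the tile.tiles calls below)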
gamma = 1.0
x = None
def agent_init():
global w,currentState,lastState,x
w = np.zeros(1200)
currentState = np.zeros(1)
lastState = np.zeros(1)
return
def agent_start(state):
global w,currentState,lastState,x
currentState[0] = float(state[0]/200.0)
lastState[0] = currentState[0]
action = chooseAction(state[0])
return action
def agent_step(reward, state):
global w,currentState,lastState,x
state1 = np.zeros(1200)
state2 = np.zeros(1200)
currentState[0] = float(state[0]/200.0)
currentx = tile.tiles(iht,50,currentState)
lastx = tile.tiles(iht,50,lastState)
for index in currentx:
state1[index] = 1
for index in lastx:
state2[index] = 1
w = w + alpha*(reward+gamma*np.dot(w,state1) - np.dot(w,state2))*state2
lastState[0] = currentState[0]
action = chooseAction(state[0])
return action
def agent_end(reward):
global w,currentState,lastState,x
state2 = np.zeros(1200)
lastx = tile.tiles(iht,50,lastState)
for index in lastx:
state2[index] = 1
w = w + alpha*(reward- np.dot(w,state2))*state2
return
def agent_cleanup():
"""
This function is not used
"""
# clean up
return
def agent_message(in_message): # returns string, in_message: string
	global w
"""
Arguments: in_message: string
returns: The value function as a string.
This function is complete. You do not need to add code here.
"""
# should not need to modify this function. Modify at your own risk
if (in_message == 'ValueFunction'):
out = np.zeros(1000)
for i in range(1000):
x = tile.tiles(iht,50,[float(i/200.0)])
state = np.zeros(1200)
for index in x:
state[index] = 1
out[i] = np.dot(w,state)
return out
else:
return "I don't know what to return!!"
def chooseAction(state):
if np.random.randint(2) : #1
result = np.random.randint(100)+1
if result+state>=1000:
return 1000-state
else:
return result
else:
result = (np.random.randint(100)+1)*(-1)
if result+state<=0:
return state*(-1)
else:
return result
|
normal
|
{
"blob_id": "4e02edcf8a512060fa92ede11f33993978584147",
"index": 1997,
"step-1": "\n\n\n\n#!/usr/bin/env python\n\n\"\"\"\n Author: Adam White, Matthew Schlegel, Mohammad M. Ajallooeian, Sina Ghiassian\n Purpose: Skeleton code for Monte Carlo Exploring Starts Control Agent\n\t\t for use on A3 of Reinforcement learning course University of Alberta Fall 2017\n \n\"\"\"\n\"\"\"\n/*\n * Copyright (c) HAOTIAN ZHU ,COMPUT301,University Of Alberta All Rights Reserved.\n * You May Use, Distribute Or Modify This Code Under Term And \n * Condition Of Code Of Students Behavior At University Of Alberta.\n *\n *\n * Author: Haotian Zhu\n * If You Have Any Question Please contact [email protected].\n * \n */\n\"\"\"\n\nimport numpy as np\nimport pickle\n\n\nfrom importlib import import_module\n\n\n\n\ntile = import_module(\"tiles3\")\niht = tile.IHT(3000)\n\n\n\nw = None\ncurrentState = None\nlastState = None\nalpha = 0.01/50\ngamma = 1.0\nx = None\n\n\ndef agent_init():\n\tglobal w,currentState,lastState,x\n\n\tw = np.zeros(1200)\n\tcurrentState = np.zeros(1)\n\tlastState = np.zeros(1)\n\n\n\treturn\n\ndef agent_start(state):\n\tglobal w,currentState,lastState,x\n\n\tcurrentState[0] = float(state[0]/200.0)\n\tlastState[0] = currentState[0]\n\taction = chooseAction(state[0])\n\n\n\n\n\treturn action\n\n\ndef agent_step(reward, state): \n\tglobal w,currentState,lastState,x\n\n\tstate1 = np.zeros(1200)\n\tstate2 = np.zeros(1200)\n\n\tcurrentState[0] = float(state[0]/200.0)\n\tcurrentx = tile.tiles(iht,50,currentState)\n\tlastx = tile.tiles(iht,50,lastState)\n\n\n\tfor index in currentx:\n\t\tstate1[index] = 1\n\tfor index in lastx:\n\t\tstate2[index] = 1\n\n\n\n\tw = w + alpha*(reward+gamma*np.dot(w,state1) - np.dot(w,state2))*state2\n\tlastState[0] = currentState[0]\n\taction = chooseAction(state[0])\n\n\treturn action\n\ndef agent_end(reward):\n\tglobal w,currentState,lastState,x\n\n\n\tstate2 = np.zeros(1200)\n\n\tlastx = tile.tiles(iht,50,lastState)\n\n\tfor index in lastx:\n\t\tstate2[index] = 1\n\n\n\tw = w + alpha*(reward- np.dot(w,state2))*state2\n\n\n\n\treturn\n\ndef agent_cleanup():\n\t\"\"\"\n\tThis function is not used\n\t\"\"\"\n\t# clean up\n\n\treturn\n\ndef agent_message(in_message): # returns string, in_message: string\n \tglobal w\n\t\"\"\"\n\tArguments: in_message: string\n\treturns: The value function as a string.\n\tThis function is complete. You do not need to add code here.\n\t\"\"\"\n\t# should not need to modify this function. Modify at your own risk\n\tif (in_message == 'ValueFunction'):\n\t\tout = np.zeros(1000)\n\t\tfor i in range(1000):\n\t\t\tx = tile.tiles(iht,50,[float(i/200.0)])\n\t\t\tstate = np.zeros(1200)\n\t\t\tfor index in x:\n\t\t\t\tstate[index] = 1\n\n\t\t\tout[i] = np.dot(w,state)\n\t\treturn out\n\telse:\n\t\treturn \"I don't know what to return!!\"\n\n\n\n\n\ndef chooseAction(state):\n\tif np.random.randint(2) : #1\n\t\tresult = np.random.randint(100)+1\n\t\tif result+state>=1000:\n\t\t\treturn 1000-state\n\t\telse:\n\t\t\treturn result\n\n\telse:\n\t\tresult = (np.random.randint(100)+1)*(-1)\n\t\tif result+state<=0:\n\t\t\treturn state*(-1)\n\t\telse:\n\t\t\treturn result \n\n\n\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 2.0.3 on 2018-07-05 04:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('application_manager', '0015_auto_20180705_0415'),
]
operations = [
migrations.RemoveField(
model_name='application',
name='user',
),
]
|
normal
|
{
"blob_id": "7bf81954bef81004b6c9838ed00c624d24fcf0c6",
"index": 3839,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('application_manager', '0015_auto_20180705_0415')]\n operations = [migrations.RemoveField(model_name='application', name='user')\n ]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('application_manager', '0015_auto_20180705_0415')]\n operations = [migrations.RemoveField(model_name='application', name='user')\n ]\n",
"step-5": "# Generated by Django 2.0.3 on 2018-07-05 04:16\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application_manager', '0015_auto_20180705_0415'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='application',\n name='user',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
# from .models import Usuario
# from .models import Lote
# from .models import Fornecedor
# from .models import Cliente
# from .models import Medicamento
# from .models import Medicamento_Entrada
# from .models import Medicamento_Saida
# Register your models here.
#
# class UsuarioAdmin(admin.ModelAdmin):
# list_display = ['nome','login','senha']
# class FornecedorAdmin(admin.ModelAdmin):
# list_display = ['nome','contato']
# class LoteAdmin(admin.ModelAdmin):
# list_display = ['numero','fornecedor','fabricacao','vencimento']
# class ClienteAdmin(admin.ModelAdmin):
# list_display = ['nome','contato']
# class MedicamentoAdmin(admin.ModelAdmin):
# list_display = ['nome','data_insercao','descricao']
# class Medicamento_EntradaAdmin(admin.ModelAdmin):
# list_display = ['medicamento','lote','quantidade','data_entrada','usuario']
# class Medicamento_SaidaAdmin(admin.ModelAdmin):
# list_display = ['medicamento','quantidade','data_saida','usuario']
# admin.site.register(Usuario,UsuarioAdmin)
# admin.site.register(Lote,LoteAdmin)
# admin.site.register(Fornecedor,FornecedorAdmin)
# admin.site.register(Cliente,ClienteAdmin)
# admin.site.register(Medicamento,MedicamentoAdmin)
# admin.site.register(Medicamento_Entrada,Medicamento_EntradaAdmin)
# admin.site.register(Medicamento_Saida,Medicamento_SaidaAdmin)
|
normal
|
{
"blob_id": "63a2258bf0ed779254b68a683e3d30e9fb356b1f",
"index": 139,
"step-1": "<mask token>\n",
"step-2": "from django.contrib import admin\n",
"step-3": "from django.contrib import admin\n# from .models import Usuario\n# from .models import Lote\n# from .models import Fornecedor\n# from .models import Cliente\n# from .models import Medicamento\n# from .models import Medicamento_Entrada\n# from .models import Medicamento_Saida\n# Register your models here.\n#\n# class UsuarioAdmin(admin.ModelAdmin):\n# \tlist_display = ['nome','login','senha']\n\n# class FornecedorAdmin(admin.ModelAdmin):\n# \tlist_display = ['nome','contato']\n\n# class LoteAdmin(admin.ModelAdmin):\n# \tlist_display = ['numero','fornecedor','fabricacao','vencimento']\n\n# class ClienteAdmin(admin.ModelAdmin):\n# \tlist_display = ['nome','contato']\n\n# class MedicamentoAdmin(admin.ModelAdmin):\n# \tlist_display = ['nome','data_insercao','descricao']\n\n# class Medicamento_EntradaAdmin(admin.ModelAdmin):\n# \tlist_display = ['medicamento','lote','quantidade','data_entrada','usuario']\n\n# class Medicamento_SaidaAdmin(admin.ModelAdmin):\n# \tlist_display = ['medicamento','quantidade','data_saida','usuario']\n\n\n# admin.site.register(Usuario,UsuarioAdmin)\n# admin.site.register(Lote,LoteAdmin)\n# admin.site.register(Fornecedor,FornecedorAdmin)\n# admin.site.register(Cliente,ClienteAdmin)\n# admin.site.register(Medicamento,MedicamentoAdmin)\n# admin.site.register(Medicamento_Entrada,Medicamento_EntradaAdmin)\n# admin.site.register(Medicamento_Saida,Medicamento_SaidaAdmin)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
from .metrics import r2_score
class LinearRegression:
def __init__(self):
        self.coef_ = None # coefficients
        self.interception_ = None # intercept
self._theta = None
def fit_normal(self, X_train, y_train):
assert X_train.shape[0] == y_train.shape[0], ""
#!!!important
X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
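        # closed-form fit via the normal equation: theta = (X_b^T X_b)^(-1) X_b^T y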
self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
def fit_gd(self, X_train, y_train, eta=0.01, n_iter=1e4):
assert X_train.shape[0] == y_train.shape[0], ""
def J(theta, X_b, y):
try:
return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
except:
return float('inf')
def dJ(theta, X_b, y):
            # vectorized implementation
return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)
def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-8):
theta = initial_theta
i_iter = 0
while i_iter < n_iter:
gradient = dJ(theta, X_b, y)
last_theta = theta
theta = theta - eta * gradient
if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
break
i_iter += 1
return theta
X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
    # n_iter is the number of passes over the whole training set
def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):
assert X_train.shape[0] == y_train.shape[0], ""
def dJ_sgd(theta, X_b_i, y_i):
return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2
# Stochastic gradient descent
def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):
def learning_rate(t):
return t0 / (t + t1)
theta = initial_theta
m = len(X_b)
for curr_iter in range(n_iter):
indexes = np.random.permutation(m)
X_b_new = X_b[indexes]
y_new = y[indexes]
for i in range(m):
gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
theta = theta - learning_rate(curr_iter * m + i) * gradient
return theta
X_b = np.hstack([np.ones([len(X_train), 1]), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = sgd(X_b, y_train, initial_theta, n_iter)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
def predict(self,X_predict):
assert self.interception_ is not None and self.coef_ is not None,\
"must fit before predict"
assert X_predict.shape[1] == len(self.coef_),\
"the feature number of X_predict must be equal to X_train"
X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
y_predict = X_b.dot(self._theta)
return y_predict
def score(self,X_test,y_test):
y_predict = self.predict(X_test)
return r2_score(y_test,y_predict)
def __repr__(self):
return "LinearRegression()"
|
normal
|
{
"blob_id": "e47e614c88c78fb6e8ff4098ea2b89d21bfa9684",
"index": 6935,
"step-1": "<mask token>\n\n\nclass LinearRegression:\n\n def __init__(self):\n self.coef_ = None\n self.interception_ = None\n self._theta = None\n <mask token>\n\n def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def J(theta, X_b, y):\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08\n ):\n theta = initial_theta\n i_iter = 0\n while i_iter < n_iter:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:\n break\n i_iter += 1\n return theta\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter\n )\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def dJ_sgd(theta, X_b_i, y_i):\n return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2\n\n def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):\n\n def learning_rate(t):\n return t0 / (t + t1)\n theta = initial_theta\n m = len(X_b)\n for curr_iter in range(n_iter):\n indexes = np.random.permutation(m)\n X_b_new = X_b[indexes]\n y_new = y[indexes]\n for i in range(m):\n gradient = dJ_sgd(theta, X_b_new[i], y_new[i])\n theta = theta - learning_rate(curr_iter * m + i) * gradient\n return theta\n X_b = np.hstack([np.ones([len(X_train), 1]), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = sgd(X_b, y_train, initial_theta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n def predict(self, X_predict):\n assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'\n assert X_predict.shape[1] == len(self.coef_\n ), 'the feature number of X_predict must be equal to X_train'\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n y_predict = X_b.dot(self._theta)\n return y_predict\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LinearRegression:\n\n def __init__(self):\n self.coef_ = None\n self.interception_ = None\n self._theta = None\n <mask token>\n\n def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def J(theta, X_b, y):\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08\n ):\n theta = initial_theta\n i_iter = 0\n while i_iter < n_iter:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:\n break\n i_iter += 1\n return theta\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter\n )\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def dJ_sgd(theta, X_b_i, y_i):\n return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2\n\n def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):\n\n def learning_rate(t):\n return t0 / (t + t1)\n theta = initial_theta\n m = len(X_b)\n for curr_iter in range(n_iter):\n indexes = np.random.permutation(m)\n X_b_new = X_b[indexes]\n y_new = y[indexes]\n for i in range(m):\n gradient = dJ_sgd(theta, X_b_new[i], y_new[i])\n theta = theta - learning_rate(curr_iter * m + i) * gradient\n return theta\n X_b = np.hstack([np.ones([len(X_train), 1]), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = sgd(X_b, y_train, initial_theta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n def predict(self, X_predict):\n assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'\n assert X_predict.shape[1] == len(self.coef_\n ), 'the feature number of X_predict must be equal to X_train'\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n y_predict = X_b.dot(self._theta)\n return y_predict\n\n def score(self, X_test, y_test):\n y_predict = self.predict(X_test)\n return r2_score(y_test, y_predict)\n\n def __repr__(self):\n return 'LinearRegression()'\n",
"step-3": "<mask token>\n\n\nclass LinearRegression:\n\n def __init__(self):\n self.coef_ = None\n self.interception_ = None\n self._theta = None\n\n def fit_normal(self, X_train, y_train):\n assert X_train.shape[0] == y_train.shape[0], ''\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def J(theta, X_b, y):\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08\n ):\n theta = initial_theta\n i_iter = 0\n while i_iter < n_iter:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:\n break\n i_iter += 1\n return theta\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter\n )\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def dJ_sgd(theta, X_b_i, y_i):\n return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2\n\n def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):\n\n def learning_rate(t):\n return t0 / (t + t1)\n theta = initial_theta\n m = len(X_b)\n for curr_iter in range(n_iter):\n indexes = np.random.permutation(m)\n X_b_new = X_b[indexes]\n y_new = y[indexes]\n for i in range(m):\n gradient = dJ_sgd(theta, X_b_new[i], y_new[i])\n theta = theta - learning_rate(curr_iter * m + i) * gradient\n return theta\n X_b = np.hstack([np.ones([len(X_train), 1]), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = sgd(X_b, y_train, initial_theta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n def predict(self, X_predict):\n assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'\n assert X_predict.shape[1] == len(self.coef_\n ), 'the feature number of X_predict must be equal to X_train'\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n y_predict = X_b.dot(self._theta)\n return y_predict\n\n def score(self, X_test, y_test):\n y_predict = self.predict(X_test)\n return r2_score(y_test, y_predict)\n\n def __repr__(self):\n return 'LinearRegression()'\n",
"step-4": "import numpy as np\nfrom .metrics import r2_score\n\n\nclass LinearRegression:\n\n def __init__(self):\n self.coef_ = None\n self.interception_ = None\n self._theta = None\n\n def fit_normal(self, X_train, y_train):\n assert X_train.shape[0] == y_train.shape[0], ''\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def J(theta, X_b, y):\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08\n ):\n theta = initial_theta\n i_iter = 0\n while i_iter < n_iter:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:\n break\n i_iter += 1\n return theta\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter\n )\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def dJ_sgd(theta, X_b_i, y_i):\n return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2\n\n def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):\n\n def learning_rate(t):\n return t0 / (t + t1)\n theta = initial_theta\n m = len(X_b)\n for curr_iter in range(n_iter):\n indexes = np.random.permutation(m)\n X_b_new = X_b[indexes]\n y_new = y[indexes]\n for i in range(m):\n gradient = dJ_sgd(theta, X_b_new[i], y_new[i])\n theta = theta - learning_rate(curr_iter * m + i) * gradient\n return theta\n X_b = np.hstack([np.ones([len(X_train), 1]), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = sgd(X_b, y_train, initial_theta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n def predict(self, X_predict):\n assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'\n assert X_predict.shape[1] == len(self.coef_\n ), 'the feature number of X_predict must be equal to X_train'\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n y_predict = X_b.dot(self._theta)\n return y_predict\n\n def score(self, X_test, y_test):\n y_predict = self.predict(X_test)\n return r2_score(y_test, y_predict)\n\n def __repr__(self):\n return 'LinearRegression()'\n",
"step-5": "import numpy as np\nfrom .metrics import r2_score\n\nclass LinearRegression:\n\n def __init__(self):\n self.coef_ = None # 系数\n self.interception_ = None # 截距\n self._theta = None\n\n def fit_normal(self, X_train, y_train):\n assert X_train.shape[0] == y_train.shape[0], \"\"\n\n #!!!important\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n return self\n\n def fit_gd(self, X_train, y_train, eta=0.01, n_iter=1e4):\n\n assert X_train.shape[0] == y_train.shape[0], \"\"\n def J(theta, X_b, y):\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n # 向量化实现\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-8):\n theta = initial_theta\n i_iter = 0\n while i_iter < n_iter:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):\n break\n i_iter += 1\n return theta\n\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n return self\n\n # n_iter 代表观测所有数据几次\n def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):\n\n assert X_train.shape[0] == y_train.shape[0], \"\"\n def dJ_sgd(theta, X_b_i, y_i):\n return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2\n\n # Stochastic gradient descent\n def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):\n\n def learning_rate(t):\n return t0 / (t + t1)\n\n theta = initial_theta\n m = len(X_b)\n for curr_iter in range(n_iter):\n indexes = np.random.permutation(m)\n X_b_new = X_b[indexes]\n y_new = y[indexes]\n for i in range(m):\n gradient = dJ_sgd(theta, X_b_new[i], y_new[i])\n theta = theta - learning_rate(curr_iter * m + i) * gradient\n\n return theta\n\n X_b = np.hstack([np.ones([len(X_train), 1]), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = sgd(X_b, y_train, initial_theta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n def predict(self,X_predict):\n assert self.interception_ is not None and self.coef_ is not None,\\\n \"must fit before predict\"\n assert X_predict.shape[1] == len(self.coef_),\\\n \"the feature number of X_predict must be equal to X_train\"\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n y_predict = X_b.dot(self._theta)\n return y_predict\n\n def score(self,X_test,y_test):\n y_predict = self.predict(X_test)\n return r2_score(y_test,y_predict)\n\n def __repr__(self):\n return \"LinearRegression()\"",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
#!/usr/bin/env python
import re
class Solution:
def __new__(self, p):
nr_counts, nr_consonants, replaced = self.count_vowels_consonants(self, p)
inversed = ''.join(c.lower() if c.isupper() else c.upper() for c in p)
replaced_by_ = p.replace(' ' ,'-')
combined_queries = str(nr_counts) + ' ' + str(nr_consonants) + '::'
combined_queries += str(self.reverse_words(inversed)) + '::'
combined_queries += str(replaced_by_ )+ '::' + str(replaced)
return combined_queries
def count_vowels_consonants(self, text):
vowels_list = ['A', 'E', 'I', 'O', 'U']
consonants = 0
vowels = 0
string = ''
for character in text:
if character.isalpha():
if character.upper() in vowels_list:
vowels += 1
string += 'pv'
else:
consonants += 1
string += character
return (vowels, consonants, string)
def reverse_words(word):
list_string = word.split(' ')
list_string.reverse()
string = ' '.join(list_string)
return string
if __name__ == '__main__':
solutions = Solution('The iterator is just clutter')
# solutions = Solution('The')
print(solutions)
|
normal
|
{
"blob_id": "ec9de8d54113806ab327f05e077edefa74258adb",
"index": 2662,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def __new__(self, p):\n nr_counts, nr_consonants, replaced = self.count_vowels_consonants(self,\n p)\n inversed = ''.join(c.lower() if c.isupper() else c.upper() for c in p)\n replaced_by_ = p.replace(' ', '-')\n combined_queries = str(nr_counts) + ' ' + str(nr_consonants) + '::'\n combined_queries += str(self.reverse_words(inversed)) + '::'\n combined_queries += str(replaced_by_) + '::' + str(replaced)\n return combined_queries\n <mask token>\n\n def reverse_words(word):\n list_string = word.split(' ')\n list_string.reverse()\n string = ' '.join(list_string)\n return string\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def __new__(self, p):\n nr_counts, nr_consonants, replaced = self.count_vowels_consonants(self,\n p)\n inversed = ''.join(c.lower() if c.isupper() else c.upper() for c in p)\n replaced_by_ = p.replace(' ', '-')\n combined_queries = str(nr_counts) + ' ' + str(nr_consonants) + '::'\n combined_queries += str(self.reverse_words(inversed)) + '::'\n combined_queries += str(replaced_by_) + '::' + str(replaced)\n return combined_queries\n\n def count_vowels_consonants(self, text):\n vowels_list = ['A', 'E', 'I', 'O', 'U']\n consonants = 0\n vowels = 0\n string = ''\n for character in text:\n if character.isalpha():\n if character.upper() in vowels_list:\n vowels += 1\n string += 'pv'\n else:\n consonants += 1\n string += character\n return vowels, consonants, string\n\n def reverse_words(word):\n list_string = word.split(' ')\n list_string.reverse()\n string = ' '.join(list_string)\n return string\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def __new__(self, p):\n nr_counts, nr_consonants, replaced = self.count_vowels_consonants(self,\n p)\n inversed = ''.join(c.lower() if c.isupper() else c.upper() for c in p)\n replaced_by_ = p.replace(' ', '-')\n combined_queries = str(nr_counts) + ' ' + str(nr_consonants) + '::'\n combined_queries += str(self.reverse_words(inversed)) + '::'\n combined_queries += str(replaced_by_) + '::' + str(replaced)\n return combined_queries\n\n def count_vowels_consonants(self, text):\n vowels_list = ['A', 'E', 'I', 'O', 'U']\n consonants = 0\n vowels = 0\n string = ''\n for character in text:\n if character.isalpha():\n if character.upper() in vowels_list:\n vowels += 1\n string += 'pv'\n else:\n consonants += 1\n string += character\n return vowels, consonants, string\n\n def reverse_words(word):\n list_string = word.split(' ')\n list_string.reverse()\n string = ' '.join(list_string)\n return string\n\n\nif __name__ == '__main__':\n solutions = Solution('The iterator is just clutter')\n print(solutions)\n",
"step-4": "import re\n\n\nclass Solution:\n\n def __new__(self, p):\n nr_counts, nr_consonants, replaced = self.count_vowels_consonants(self,\n p)\n inversed = ''.join(c.lower() if c.isupper() else c.upper() for c in p)\n replaced_by_ = p.replace(' ', '-')\n combined_queries = str(nr_counts) + ' ' + str(nr_consonants) + '::'\n combined_queries += str(self.reverse_words(inversed)) + '::'\n combined_queries += str(replaced_by_) + '::' + str(replaced)\n return combined_queries\n\n def count_vowels_consonants(self, text):\n vowels_list = ['A', 'E', 'I', 'O', 'U']\n consonants = 0\n vowels = 0\n string = ''\n for character in text:\n if character.isalpha():\n if character.upper() in vowels_list:\n vowels += 1\n string += 'pv'\n else:\n consonants += 1\n string += character\n return vowels, consonants, string\n\n def reverse_words(word):\n list_string = word.split(' ')\n list_string.reverse()\n string = ' '.join(list_string)\n return string\n\n\nif __name__ == '__main__':\n solutions = Solution('The iterator is just clutter')\n print(solutions)\n",
"step-5": "#!/usr/bin/env python\n\nimport re \n\n\n\nclass Solution:\n\n\tdef __new__(self, p):\n\t\tnr_counts, nr_consonants, replaced = self.count_vowels_consonants(self, p)\n\t\tinversed = ''.join(c.lower() if c.isupper() else c.upper() for c in p)\n\t\treplaced_by_ = p.replace(' ' ,'-')\n\t\tcombined_queries = str(nr_counts) + ' ' + str(nr_consonants) + '::' \n\t\tcombined_queries += str(self.reverse_words(inversed)) + '::' \n\t\tcombined_queries += str(replaced_by_ )+ '::' + str(replaced)\n\t\treturn combined_queries\n\n\tdef count_vowels_consonants(self, text):\n\t\tvowels_list = ['A', 'E', 'I', 'O', 'U']\n\t\tconsonants = 0\n\t\tvowels = 0\n\t\tstring = ''\n\t\tfor character in text:\n\t\t if character.isalpha():\n\t\t \tif character.upper() in vowels_list:\n\t\t\t \tvowels += 1\n\t\t\t \tstring += 'pv'\n\t\t \telse:\n\t\t \t\tconsonants += 1\n\t\t string += character\n\t\treturn (vowels, consonants, string)\n\n\tdef reverse_words(word):\n\t\tlist_string = word.split(' ')\n\t\tlist_string.reverse()\n\t\tstring = ' '.join(list_string) \n\t\treturn string\t\n\nif __name__ == '__main__':\n\tsolutions = Solution('The iterator is just clutter')\n\t# solutions = Solution('The')\n\tprint(solutions)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
from odoo import fields, models
class LunchWizard(models.TransientModel):
_name = "lunch.wizard"
_description = "LunchWizard"
lun_type = fields.Char(string="Set New Lunch Type")
lunch_id = fields.Many2one('lunch.lunch', string="Lunch Id")
def action_process_lunch(self):
self.lunch_id.lunch_type = self.lun_type
#self.write( { self.lunch_id.lunch_type : self.lun_type } )
|
normal
|
{
"blob_id": "85e5bf57f7eba2cbee0fbb8a4d37b5180208f9b7",
"index": 3830,
"step-1": "<mask token>\n\n\nclass LunchWizard(models.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LunchWizard(models.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def action_process_lunch(self):\n self.lunch_id.lunch_type = self.lun_type\n",
"step-3": "<mask token>\n\n\nclass LunchWizard(models.TransientModel):\n _name = 'lunch.wizard'\n _description = 'LunchWizard'\n lun_type = fields.Char(string='Set New Lunch Type')\n lunch_id = fields.Many2one('lunch.lunch', string='Lunch Id')\n\n def action_process_lunch(self):\n self.lunch_id.lunch_type = self.lun_type\n",
"step-4": "from odoo import fields, models\n\n\nclass LunchWizard(models.TransientModel):\n _name = 'lunch.wizard'\n _description = 'LunchWizard'\n lun_type = fields.Char(string='Set New Lunch Type')\n lunch_id = fields.Many2one('lunch.lunch', string='Lunch Id')\n\n def action_process_lunch(self):\n self.lunch_id.lunch_type = self.lun_type\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\nfrom odoo import fields, models\n\n\nclass LunchWizard(models.TransientModel):\n _name = \"lunch.wizard\"\n _description = \"LunchWizard\"\n\n lun_type = fields.Char(string=\"Set New Lunch Type\")\n lunch_id = fields.Many2one('lunch.lunch', string=\"Lunch Id\")\n\n def action_process_lunch(self):\n self.lunch_id.lunch_type = self.lun_type\n #self.write( { self.lunch_id.lunch_type : self.lun_type } )",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import cv2
import numpy as np
import show_imgs as si
IMG_PATH = "../sample_imgs"
def blur():
image = cv2.imread(IMG_PATH + "/jjang.jpg")
kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]
filter_imgs = {}
blur_imgs = {}
for ksize in kernel_sizes:
title = f"ksize: {ksize}"
kernel = np.ones(ksize)
kernel /= kernel.size
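        # normalized box (averaging) kernel, so filter2D here should match cv2.blur below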
filter_imgs[title] = cv2.filter2D(image, -1, kernel)
blur_imgs[title] = cv2.blur(image, ksize)
resimg = si.show_imgs(filter_imgs, "cv2.filter2D", 3)
resimg = si.show_imgs(blur_imgs, "cv2.blur", 3)
def gaussian():
image = cv2.imread(IMG_PATH + "/jjang.jpg")
kernel_size = (5, 5)
blur_imgs = {}
blur_imgs["original"] = image
blur_imgs["blur"] = cv2.blur(image, kernel_size)
blur_imgs["GaussianBlur"] = cv2.GaussianBlur(image, kernel_size, 0)
result_img = si.show_imgs(blur_imgs, "GaussianBlur", 3, 1000)
def bilateral():
image = cv2.imread(IMG_PATH + "/jjang.jpg")
kernel_size = (5, 5)
blur_imgs = {}
blur_imgs["original"] = image
blur_imgs["gaussian"] = cv2.GaussianBlur(image, kernel_size, 0)
blur_imgs["bilateral (5,50,50)"] = cv2.bilateralFilter(image, 5, 50, 50)
blur_imgs["bilateral (5,150,150)"] = cv2.bilateralFilter(image, 5, 150, 150)
result_img = si.show_imgs(blur_imgs, "Bilateral Filter", 2)
if __name__ == "__main__":
# gaussian()
bilateral()
|
normal
|
{
"blob_id": "8e5d05d925d47a85ad7c211f26af7951be048d32",
"index": 9351,
"step-1": "<mask token>\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150\n )\n result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)\n\n\nif __name__ == '__main__':\n bilateral()\n",
"step-3": "<mask token>\nIMG_PATH = '../sample_imgs'\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150\n )\n result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)\n\n\nif __name__ == '__main__':\n bilateral()\n",
"step-4": "import cv2\nimport numpy as np\nimport show_imgs as si\nIMG_PATH = '../sample_imgs'\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150\n )\n result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)\n\n\nif __name__ == '__main__':\n bilateral()\n",
"step-5": "import cv2\nimport numpy as np\nimport show_imgs as si\nIMG_PATH = \"../sample_imgs\"\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + \"/jjang.jpg\")\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f\"ksize: {ksize}\"\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, \"cv2.filter2D\", 3)\n resimg = si.show_imgs(blur_imgs, \"cv2.blur\", 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + \"/jjang.jpg\")\n kernel_size = (5, 5)\n blur_imgs = {}\n blur_imgs[\"original\"] = image\n blur_imgs[\"blur\"] = cv2.blur(image, kernel_size)\n blur_imgs[\"GaussianBlur\"] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, \"GaussianBlur\", 3, 1000)\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + \"/jjang.jpg\")\n kernel_size = (5, 5)\n blur_imgs = {}\n blur_imgs[\"original\"] = image\n blur_imgs[\"gaussian\"] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs[\"bilateral (5,50,50)\"] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs[\"bilateral (5,150,150)\"] = cv2.bilateralFilter(image, 5, 150, 150)\n result_img = si.show_imgs(blur_imgs, \"Bilateral Filter\", 2)\n\n\n\nif __name__ == \"__main__\":\n # gaussian()\n bilateral()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from django.http import HttpResponse
from rest_framework.decorators import api_view
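# Minimal health-check view: any GET request returns a plain confirmation response.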
@api_view(['GET'])
def get_status(request):
if request.method == 'GET':
return HttpResponse(content='Service is OK!')
|
normal
|
{
"blob_id": "f021940c16b7ed7fdf1088f2137d3ef724719c80",
"index": 1726,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@api_view(['GET'])\ndef get_status(request):\n if request.method == 'GET':\n return HttpResponse(content='Service is OK!')\n",
"step-3": "from django.http import HttpResponse\nfrom rest_framework.decorators import api_view\n\n\n@api_view(['GET'])\ndef get_status(request):\n if request.method == 'GET':\n return HttpResponse(content='Service is OK!')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import math
import numpy as np
# import tkinter
import tensorflow as tf
from matplotlib import axis
import os
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
class MD(BaseEstimator, TransformerMixin):
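    # Unsupervised malware detector: byte features are embedded, passed through three dense layers,
    # and the network is trained to map samples onto k-means centroids (benign vs. malware).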
def __init__(self, data, input_size, epoch,
batch_size, iteration, alpha=1.0, n_neg_samples=10,
random_seed=2020):
# bind params to class
# network parameters.
self.iteration = iteration
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = 0.01
self.random_seed = random_seed
self.phase = True
self.first_layer_size = 256
self.second_layer_size = 128
self.third_layer_size = 128
self.input_size = input_size
# data.
self.X_train_ben = data[0]
self.X_train_mal = data[1]
self.X_test_ben = data[2]
self.X_test_mal = data[3]
# evaluation.
self.accuracy_list = [] # accuracy during training
self.fmeasure_list = [] # fmeasure during training
self.clusters_dist = [] # distance between clusters centroid
self.evaluation_metrics_list = {'accuracy': [], 'precision': [], 'recall': [],
'fmeasure': []} # evaluation metrics of test data for all epochs
self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0}
# init all variables in a tensorflow graph
self._init_graph()
def _init_graph(self):
'''
Init a tensorflow Graph containing: input data, variables, model, loss, optimizer
'''
self.graph = tf.Graph()
with self.graph.as_default(): # , tf.device('/cpu:0'):
# Set graph level random seed.
tf.set_random_seed(self.random_seed)
# Input data.
self.train_data = tf.placeholder(tf.float32,
shape=[None, self.input_size]) # batch_size * input_size
self.train_labels = tf.placeholder(tf.float32, shape=[None, 1]) # batch_size * 1
self.train_labels_center = tf.placeholder(tf.float32, shape=[None,
self.third_layer_size]) # batch_size * third_layer_size
self.train_labels_center_disagree = tf.placeholder(tf.float32, shape=[None,
self.third_layer_size]) # batch_size * third_layer_size
# Variables.
self.weights = self._initialize_weights()
# the embedding layer.
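            # each byte-valued feature (0-255) is embedded into a 32-dim vector; flattening yields
            # 32 * input_size features for the first dense layer (10368 when input_size == 324).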
self.embedding_layer = tf.keras.layers.Embedding(256, 32, input_length=324)
self.embedding_result = self.embedding_layer(self.train_data)
self.embedding_result = tf.layers.Flatten()(self.embedding_result)
# the first hidden layer.
self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']) # batch_size * first_layer_size
self.layer1 = tf.layers.batch_normalization(self.net1, training=self.phase)
self.layer1 = tf.nn.tanh(self.layer1)
# the second hidden layer.
self.net2 = tf.matmul(self.layer1, self.weights['layer2'])
self.net2 = tf.layers.batch_normalization(self.net2, training=self.phase)
self.net2 = tf.nn.relu(self.net2)
self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=self.phase)
# the third hidden layer.
self.net3 = tf.matmul(self.layer2, self.weights['layer3'])
self.layer3 = tf.nn.tanh(self.net3)
# loss function.
self.cross_entropy = tf.reduce_mean(tf.losses.mean_squared_error(self.train_labels_center, self.layer3))
# optimizer.
self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cross_entropy)
# init.
            self.init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(self.init)
def _initialize_weights(self):
self.all_weights = dict()
self.all_weights['layer1'] = tf.Variable(
tf.random.normal([10368, self.first_layer_size], mean=0.0, stddev=1)) # input_size * attr_dim
self.all_weights['layer2'] = tf.Variable(
tf.random.normal([self.first_layer_size, self.second_layer_size], mean=0.0,
stddev=1)) # input_size * attr_dim
self.all_weights['layer3'] = tf.Variable(
tf.random.normal([self.second_layer_size, self.third_layer_size], mean=0.0,
stddev=1)) # input_size * attr_dim
self.all_weights['layer1'] = tf.Variable(
tf.random.uniform([10368, self.first_layer_size], minval=-1,
maxval=1)) # input_size * attr_dim
self.all_weights['layer2'] = tf.Variable(
tf.random.uniform([self.first_layer_size, self.second_layer_size], minval=-1,
maxval=1)) # input_size * attr_dim
self.all_weights['layer3'] = tf.Variable(
tf.random.uniform([self.second_layer_size, self.third_layer_size], minval=-1,
maxval=1)) # input_size * attr_dim
# --------------------------------------------------------------------------
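        # NOTE: the assignments above are overwritten below, so only the tf.get_variable weights are actually used.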
self.all_weights['layer1'] = tf.get_variable("w", [32 * self.input_size, self.first_layer_size],
initializer=tf.initializers.random_normal(mean=0, stddev=0.8),
regularizer=tf.keras.regularizers.l2(
0.01)) # input_size * attr_dim
self.all_weights['layer2'] = tf.get_variable("w2", [self.first_layer_size, self.second_layer_size],
initializer=tf.initializers.random_normal(mean=0,
stddev=0.8),
regularizer=tf.keras.regularizers.l2(
0.01)) # input_size * attr_dim
self.all_weights['layer3'] = tf.get_variable("w3", [self.second_layer_size, self.third_layer_size],
initializer=tf.initializers.random_normal(mean=0, stddev=0.8),
regularizer=tf.keras.regularizers.l2(
0.01)) # input_size * attr_dim
return self.all_weights
def kmeans_clustering(self, point, size, true_labels):
self.kmeans = KMeans(n_clusters=2, random_state=10, init='k-means++', n_init=20).fit(point)
self.kmeans_labels = self.kmeans.labels_
# find index of samples that are in the first cluster
self.label_list_0 = np.where(self.kmeans_labels == 0)[0]
# get labels of samples that are in the first cluster
temp = [true_labels[i][0] for i in self.label_list_0]
temp.append(2)
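        # the extra label 2 guarantees np.bincount below returns at least three bins, even if the cluster holds a single class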
        # determine the label (cluster center) of the benign and malware groups based on the majority of samples in each cluster
counts = np.bincount(temp)
if counts[0] > counts[1]: # counts[0] : number of benign in the first cluster
benign_center = self.kmeans.cluster_centers_[0]
malware_center = self.kmeans.cluster_centers_[1]
else:
benign_center = self.kmeans.cluster_centers_[1]
malware_center = self.kmeans.cluster_centers_[0]
# set label for each sample
new_labels = np.zeros((size, self.third_layer_size))
for i in range(size):
if true_labels[i][0] == 0.0:
new_labels[i] = benign_center
else:
new_labels[i] = malware_center
self.FinalCenters = {'benignCenter': benign_center, 'malwareCenter': malware_center}
return new_labels
def partial_fit(self, X): # fit a batch
# get network output.
feed_dict = {self.train_data: X['batch_data_train']}
self.points = self.sess.run((self.layer3), feed_dict=feed_dict)
# apply clustering to find expected output.
new_labels = self.kmeans_clustering(self.points, len(X['batch_data_label']), X['batch_data_label'])
self.clusters_dist.append(np.linalg.norm(self.kmeans.cluster_centers_[0] - self.kmeans.cluster_centers_[1]))
feed_dicts = {self.train_data: X['batch_data_train'],
self.train_labels_center: new_labels}
loss, opt = self.sess.run((self.cross_entropy, self.train_step), feed_dict=feed_dicts)
# print(loss)
# print('------------')
metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels, len((X['batch_data_label'])))
self.accuracy_list.append(metrics[0])
self.fmeasure_list.append(metrics[3])
return loss
def evaluate(self, true_labels, kmeans_labels, size):
"""
:param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1
:param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number
:param size: number of samples
:return: accuracy, precision, recall, f_measure
"""
# find index of samples that are in the first cluster
self.label_list_0 = np.where(kmeans_labels == 0)[0]
self.label_list_1 = np.where(kmeans_labels == 1)[0]
# get labels of samples that are in the first cluster
temp = [true_labels[i][0] for i in self.label_list_0]
temp1 = [true_labels[i][0] for i in self.label_list_1]
temp1.append(2)
temp.append(2)
        # determine the label (cluster center) of the benign and malware groups based on the majority of samples in each cluster
counts = np.bincount(temp)
counts2 = np.bincount(temp1)
if counts[0] > counts[1]:
accuracy = (counts[0] + counts2[1]) / size
precision = counts2[1] / (counts2[1] + counts2[0])
recall = counts2[1] / (counts2[1] + counts[1])
f_measure = 2 * ((precision * recall) / (precision + recall))
else:
accuracy = (counts[1] + counts2[0]) / size
precision = counts[1] / (counts[1] + counts[0])
recall = counts[1] / (counts[1] + counts2[1])
f_measure = 2 * ((precision * recall) / (precision + recall))
return accuracy, precision, recall, f_measure
def final_fit(self, X, true_labels):
self.phase = False
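        # note: self.phase was baked into the graph as a Python bool at construction time,
        # so flipping it here does not actually switch batch-norm/dropout to inference mode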
# get network output for test data.
feed_dict = {self.train_data: X['data_test']}
self.points = self.sess.run(self.layer3, feed_dict=feed_dict)
# determine label of each test sample based on the euclidean distance
self.predicted_Labels = []
for i in range(len(true_labels)):
if np.linalg.norm(self.FinalCenters['benignCenter'] - self.points[i]) < np.linalg.norm(
self.FinalCenters['malwareCenter'] - self.points[i]):
self.predicted_Labels.append([0])
else:
self.predicted_Labels.append([1])
tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels).ravel()
accuracy = (tp + tn) / (tp + tn + fn + fp)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f_measure = 2 * (precision * recall) / (precision + recall)
        self.evaluation_metrics_list['accuracy'].append(float("{0:.4f}".format(accuracy)))
        self.evaluation_metrics_list['precision'].append(float("{0:.4f}".format(precision)))
        self.evaluation_metrics_list['recall'].append(float("{0:.4f}".format(recall)))
        self.evaluation_metrics_list['fmeasure'].append(float("{0:.4f}".format(f_measure)))
print("accuracy", "precision", "recall", "f_measure", sep="\t\t\t\t\t")
print(accuracy, precision, recall, f_measure, sep="\t\t\t")
return 0
def train(self): # fit a dataset
for iter in range(self.iteration):
self.log("iteration {} ".format(iter))
for epoch in range(self.epoch):
self.accuracy_list = []
self.fmeasure_list = []
self.clusters_dist = []
self.log("epoch %s" % (epoch))
total_batches = int(len(self.X_train_ben['data']) / self.batch_size)
self.log('total_batches in epoch %s : %s ' % (epoch, total_batches))
start_index = 0
end_index = start_index + self.batch_size
self.counter = 0
# Loop over all batches.
for i in range(total_batches + 1):
self.counter += 1
# generate a batch data
batch_xs = {}
batch_xs['batch_data_train'] = np.concatenate(
[self.X_train_ben['data'][start_index:end_index], \
self.X_train_mal['data'][start_index:end_index]])
batch_xs['batch_data_label'] = np.concatenate(
[self.X_train_ben['label'][start_index:end_index], \
self.X_train_mal['label'][start_index:end_index]])
# Fit training using batch data
end_index = end_index + self.batch_size
cost = self.partial_fit(batch_xs)
# test
batch_test = {}
batch_test["data"] = np.concatenate([self.X_test_ben['data'], self.X_test_mal['data']])
batch_test["label"] = np.concatenate([self.X_test_ben['label'], self.X_test_mal['label']])
self.final_fit(batch_test, batch_test["label"])
# init all variables in a tensorflow graph for the next fold
self.sess.run(self.init)
return self.accuracy_list, self.fmeasure_list, self.clusters_dist, self.evaluation_metrics_list
def log(self, message):
print(message)
    def write_result_to_file(self, variable, message):
        # file = open('result.txt', 'a+')
        # use a context manager so the results file is always closed after writing
        with open('results/' + str(self.batch_size) + '/results.txt', 'a+') as file:
            file.write(message + "\n")
            file.write(str(np.mean(variable['accuracy'])) + '+' + str(np.var(variable['accuracy'])) + '\t' + str(
                np.mean(variable['precision'])) + '\t' + str(
                np.mean(variable['recall'])) + '\t' + str(
                np.mean(variable['fmeasure'])) + '+' + str(np.var(variable['fmeasure'])) + '\n')
|
normal
|
{
"blob_id": "a9947884e805cc8fcb6bff010a5f6e0ff0bb01fe",
"index": 8393,
"step-1": "<mask token>\n\n\nclass MD(BaseEstimator, TransformerMixin):\n <mask token>\n\n def _init_graph(self):\n \"\"\"\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n \"\"\"\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.set_random_seed(self.random_seed)\n self.train_data = tf.placeholder(tf.float32, shape=[None, self.\n input_size])\n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1])\n self.train_labels_center = tf.placeholder(tf.float32, shape=[\n None, self.third_layer_size])\n self.train_labels_center_disagree = tf.placeholder(tf.float32,\n shape=[None, self.third_layer_size])\n self.weights = self._initialize_weights()\n self.embedding_layer = tf.keras.layers.Embedding(256, 32,\n input_length=324)\n self.embedding_result = self.embedding_layer(self.train_data)\n self.embedding_result = tf.layers.Flatten()(self.embedding_result)\n self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']\n )\n self.layer1 = tf.layers.batch_normalization(self.net1, training\n =self.phase)\n self.layer1 = tf.nn.tanh(self.layer1)\n self.net2 = tf.matmul(self.layer1, self.weights['layer2'])\n self.net2 = tf.layers.batch_normalization(self.net2, training=\n self.phase)\n self.net2 = tf.nn.relu(self.net2)\n self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=\n self.phase)\n self.net3 = tf.matmul(self.layer2, self.weights['layer3'])\n self.layer3 = tf.nn.tanh(self.net3)\n self.cross_entropy = tf.reduce_mean(tf.losses.\n mean_squared_error(self.train_labels_center, self.layer3))\n self.train_step = tf.train.AdamOptimizer(self.learning_rate\n ).minimize(self.cross_entropy)\n self.init = tf.initialize_all_variables()\n self.sess = tf.Session()\n self.sess.run(self.init)\n <mask token>\n <mask token>\n <mask token>\n\n def evaluate(self, true_labels, kmeans_labels, size):\n \"\"\"\n :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1\n :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number\n :param size: number of samples\n\n :return: accuracy, precision, recall, f_measure\n\n \"\"\"\n self.label_list_0 = np.where(kmeans_labels == 0)[0]\n self.label_list_1 = np.where(kmeans_labels == 1)[0]\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp1 = [true_labels[i][0] for i in self.label_list_1]\n temp1.append(2)\n temp.append(2)\n counts = np.bincount(temp)\n counts2 = np.bincount(temp1)\n if counts[0] > counts[1]:\n accuracy = (counts[0] + counts2[1]) / size\n precision = counts2[1] / (counts2[1] + counts2[0])\n recall = counts2[1] / (counts2[1] + counts[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n else:\n accuracy = (counts[1] + counts2[0]) / size\n precision = counts[1] / (counts[1] + counts[0])\n recall = counts[1] / (counts[1] + counts2[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n return accuracy, precision, recall, f_measure\n <mask token>\n <mask token>\n\n def log(self, message):\n print(message)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MD(BaseEstimator, TransformerMixin):\n\n def __init__(self, data, input_size, epoch, batch_size, iteration,\n alpha=1.0, n_neg_samples=10, random_seed=2020):\n self.iteration = iteration\n self.epoch = epoch\n self.batch_size = batch_size\n self.learning_rate = 0.01\n self.random_seed = random_seed\n self.phase = True\n self.first_layer_size = 256\n self.second_layer_size = 128\n self.third_layer_size = 128\n self.input_size = input_size\n self.X_train_ben = data[0]\n self.X_train_mal = data[1]\n self.X_test_ben = data[2]\n self.X_test_mal = data[3]\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n self.evaluation_metrics_list = {'accuracy': [], 'precision': [],\n 'recall': [], 'fmeasure': []}\n self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0}\n self._init_graph()\n\n def _init_graph(self):\n \"\"\"\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n \"\"\"\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.set_random_seed(self.random_seed)\n self.train_data = tf.placeholder(tf.float32, shape=[None, self.\n input_size])\n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1])\n self.train_labels_center = tf.placeholder(tf.float32, shape=[\n None, self.third_layer_size])\n self.train_labels_center_disagree = tf.placeholder(tf.float32,\n shape=[None, self.third_layer_size])\n self.weights = self._initialize_weights()\n self.embedding_layer = tf.keras.layers.Embedding(256, 32,\n input_length=324)\n self.embedding_result = self.embedding_layer(self.train_data)\n self.embedding_result = tf.layers.Flatten()(self.embedding_result)\n self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']\n )\n self.layer1 = tf.layers.batch_normalization(self.net1, training\n =self.phase)\n self.layer1 = tf.nn.tanh(self.layer1)\n self.net2 = tf.matmul(self.layer1, self.weights['layer2'])\n self.net2 = tf.layers.batch_normalization(self.net2, training=\n self.phase)\n self.net2 = tf.nn.relu(self.net2)\n self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=\n self.phase)\n self.net3 = tf.matmul(self.layer2, self.weights['layer3'])\n self.layer3 = tf.nn.tanh(self.net3)\n self.cross_entropy = tf.reduce_mean(tf.losses.\n mean_squared_error(self.train_labels_center, self.layer3))\n self.train_step = tf.train.AdamOptimizer(self.learning_rate\n ).minimize(self.cross_entropy)\n self.init = tf.initialize_all_variables()\n self.sess = tf.Session()\n self.sess.run(self.init)\n\n def _initialize_weights(self):\n self.all_weights = dict()\n self.all_weights['layer1'] = tf.Variable(tf.random.normal([10368,\n self.first_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.normal([self.\n first_layer_size, self.second_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.normal([self.\n second_layer_size, self.third_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer1'] = tf.Variable(tf.random.uniform([10368,\n self.first_layer_size], minval=-1, maxval=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.uniform([self.\n first_layer_size, self.second_layer_size], minval=-1, maxval=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.uniform([self.\n second_layer_size, self.third_layer_size], minval=-1, maxval=1))\n self.all_weights['layer1'] = tf.get_variable('w', [32 * self.\n input_size, self.first_layer_size], initializer=tf.initializers\n .random_normal(mean=0, stddev=0.8), 
regularizer=tf.keras.\n regularizers.l2(0.01))\n self.all_weights['layer2'] = tf.get_variable('w2', [self.\n first_layer_size, self.second_layer_size], initializer=tf.\n initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n self.all_weights['layer3'] = tf.get_variable('w3', [self.\n second_layer_size, self.third_layer_size], initializer=tf.\n initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n return self.all_weights\n <mask token>\n\n def partial_fit(self, X):\n feed_dict = {self.train_data: X['batch_data_train']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n new_labels = self.kmeans_clustering(self.points, len(X[\n 'batch_data_label']), X['batch_data_label'])\n self.clusters_dist.append(np.linalg.norm(self.kmeans.\n cluster_centers_[0] - self.kmeans.cluster_centers_[1]))\n feed_dicts = {self.train_data: X['batch_data_train'], self.\n train_labels_center: new_labels}\n loss, opt = self.sess.run((self.cross_entropy, self.train_step),\n feed_dict=feed_dicts)\n metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels,\n len(X['batch_data_label']))\n self.accuracy_list.append(metrics[0])\n self.fmeasure_list.append(metrics[3])\n return loss\n\n def evaluate(self, true_labels, kmeans_labels, size):\n \"\"\"\n :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1\n :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number\n :param size: number of samples\n\n :return: accuracy, precision, recall, f_measure\n\n \"\"\"\n self.label_list_0 = np.where(kmeans_labels == 0)[0]\n self.label_list_1 = np.where(kmeans_labels == 1)[0]\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp1 = [true_labels[i][0] for i in self.label_list_1]\n temp1.append(2)\n temp.append(2)\n counts = np.bincount(temp)\n counts2 = np.bincount(temp1)\n if counts[0] > counts[1]:\n accuracy = (counts[0] + counts2[1]) / size\n precision = counts2[1] / (counts2[1] + counts2[0])\n recall = counts2[1] / (counts2[1] + counts[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n else:\n accuracy = (counts[1] + counts2[0]) / size\n precision = counts[1] / (counts[1] + counts[0])\n recall = counts[1] / (counts[1] + counts2[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n return accuracy, precision, recall, f_measure\n\n def final_fit(self, X, true_labels):\n self.phase = False\n feed_dict = {self.train_data: X['data_test']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n self.predicted_Labels = []\n for i in range(len(true_labels)):\n if np.linalg.norm(self.FinalCenters['benignCenter'] - self.\n points[i]) < np.linalg.norm(self.FinalCenters[\n 'malwareCenter'] - self.points[i]):\n self.predicted_Labels.append([0])\n else:\n self.predicted_Labels.append([1])\n tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels\n ).ravel()\n accuracy = (tp + tn) / (tp + tn + fn + fp)\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_measure = 2 * (precision * recall) / (precision + recall)\n self.evaluation_metrics_list['accuracy'].append(np.float('{0:.4f}'.\n format(accuracy)))\n self.evaluation_metrics_list['precision'].append(np.float('{0:.4f}'\n .format(precision)))\n self.evaluation_metrics_list['recall'].append(np.float('{0:.4f}'.\n format(recall)))\n self.evaluation_metrics_list['fmeasure'].append(np.float('{0:.4f}'.\n format(f_measure)))\n print('accuracy', 
'precision', 'recall', 'f_measure', sep='\\t\\t\\t\\t\\t')\n print(accuracy, precision, recall, f_measure, sep='\\t\\t\\t')\n return 0\n <mask token>\n\n def log(self, message):\n print(message)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MD(BaseEstimator, TransformerMixin):\n\n def __init__(self, data, input_size, epoch, batch_size, iteration,\n alpha=1.0, n_neg_samples=10, random_seed=2020):\n self.iteration = iteration\n self.epoch = epoch\n self.batch_size = batch_size\n self.learning_rate = 0.01\n self.random_seed = random_seed\n self.phase = True\n self.first_layer_size = 256\n self.second_layer_size = 128\n self.third_layer_size = 128\n self.input_size = input_size\n self.X_train_ben = data[0]\n self.X_train_mal = data[1]\n self.X_test_ben = data[2]\n self.X_test_mal = data[3]\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n self.evaluation_metrics_list = {'accuracy': [], 'precision': [],\n 'recall': [], 'fmeasure': []}\n self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0}\n self._init_graph()\n\n def _init_graph(self):\n \"\"\"\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n \"\"\"\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.set_random_seed(self.random_seed)\n self.train_data = tf.placeholder(tf.float32, shape=[None, self.\n input_size])\n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1])\n self.train_labels_center = tf.placeholder(tf.float32, shape=[\n None, self.third_layer_size])\n self.train_labels_center_disagree = tf.placeholder(tf.float32,\n shape=[None, self.third_layer_size])\n self.weights = self._initialize_weights()\n self.embedding_layer = tf.keras.layers.Embedding(256, 32,\n input_length=324)\n self.embedding_result = self.embedding_layer(self.train_data)\n self.embedding_result = tf.layers.Flatten()(self.embedding_result)\n self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']\n )\n self.layer1 = tf.layers.batch_normalization(self.net1, training\n =self.phase)\n self.layer1 = tf.nn.tanh(self.layer1)\n self.net2 = tf.matmul(self.layer1, self.weights['layer2'])\n self.net2 = tf.layers.batch_normalization(self.net2, training=\n self.phase)\n self.net2 = tf.nn.relu(self.net2)\n self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=\n self.phase)\n self.net3 = tf.matmul(self.layer2, self.weights['layer3'])\n self.layer3 = tf.nn.tanh(self.net3)\n self.cross_entropy = tf.reduce_mean(tf.losses.\n mean_squared_error(self.train_labels_center, self.layer3))\n self.train_step = tf.train.AdamOptimizer(self.learning_rate\n ).minimize(self.cross_entropy)\n self.init = tf.initialize_all_variables()\n self.sess = tf.Session()\n self.sess.run(self.init)\n\n def _initialize_weights(self):\n self.all_weights = dict()\n self.all_weights['layer1'] = tf.Variable(tf.random.normal([10368,\n self.first_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.normal([self.\n first_layer_size, self.second_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.normal([self.\n second_layer_size, self.third_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer1'] = tf.Variable(tf.random.uniform([10368,\n self.first_layer_size], minval=-1, maxval=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.uniform([self.\n first_layer_size, self.second_layer_size], minval=-1, maxval=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.uniform([self.\n second_layer_size, self.third_layer_size], minval=-1, maxval=1))\n self.all_weights['layer1'] = tf.get_variable('w', [32 * self.\n input_size, self.first_layer_size], initializer=tf.initializers\n .random_normal(mean=0, stddev=0.8), 
regularizer=tf.keras.\n regularizers.l2(0.01))\n self.all_weights['layer2'] = tf.get_variable('w2', [self.\n first_layer_size, self.second_layer_size], initializer=tf.\n initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n self.all_weights['layer3'] = tf.get_variable('w3', [self.\n second_layer_size, self.third_layer_size], initializer=tf.\n initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n return self.all_weights\n <mask token>\n\n def partial_fit(self, X):\n feed_dict = {self.train_data: X['batch_data_train']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n new_labels = self.kmeans_clustering(self.points, len(X[\n 'batch_data_label']), X['batch_data_label'])\n self.clusters_dist.append(np.linalg.norm(self.kmeans.\n cluster_centers_[0] - self.kmeans.cluster_centers_[1]))\n feed_dicts = {self.train_data: X['batch_data_train'], self.\n train_labels_center: new_labels}\n loss, opt = self.sess.run((self.cross_entropy, self.train_step),\n feed_dict=feed_dicts)\n metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels,\n len(X['batch_data_label']))\n self.accuracy_list.append(metrics[0])\n self.fmeasure_list.append(metrics[3])\n return loss\n\n def evaluate(self, true_labels, kmeans_labels, size):\n \"\"\"\n :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1\n :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number\n :param size: number of samples\n\n :return: accuracy, precision, recall, f_measure\n\n \"\"\"\n self.label_list_0 = np.where(kmeans_labels == 0)[0]\n self.label_list_1 = np.where(kmeans_labels == 1)[0]\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp1 = [true_labels[i][0] for i in self.label_list_1]\n temp1.append(2)\n temp.append(2)\n counts = np.bincount(temp)\n counts2 = np.bincount(temp1)\n if counts[0] > counts[1]:\n accuracy = (counts[0] + counts2[1]) / size\n precision = counts2[1] / (counts2[1] + counts2[0])\n recall = counts2[1] / (counts2[1] + counts[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n else:\n accuracy = (counts[1] + counts2[0]) / size\n precision = counts[1] / (counts[1] + counts[0])\n recall = counts[1] / (counts[1] + counts2[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n return accuracy, precision, recall, f_measure\n\n def final_fit(self, X, true_labels):\n self.phase = False\n feed_dict = {self.train_data: X['data_test']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n self.predicted_Labels = []\n for i in range(len(true_labels)):\n if np.linalg.norm(self.FinalCenters['benignCenter'] - self.\n points[i]) < np.linalg.norm(self.FinalCenters[\n 'malwareCenter'] - self.points[i]):\n self.predicted_Labels.append([0])\n else:\n self.predicted_Labels.append([1])\n tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels\n ).ravel()\n accuracy = (tp + tn) / (tp + tn + fn + fp)\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_measure = 2 * (precision * recall) / (precision + recall)\n self.evaluation_metrics_list['accuracy'].append(np.float('{0:.4f}'.\n format(accuracy)))\n self.evaluation_metrics_list['precision'].append(np.float('{0:.4f}'\n .format(precision)))\n self.evaluation_metrics_list['recall'].append(np.float('{0:.4f}'.\n format(recall)))\n self.evaluation_metrics_list['fmeasure'].append(np.float('{0:.4f}'.\n format(f_measure)))\n print('accuracy', 
'precision', 'recall', 'f_measure', sep='\\t\\t\\t\\t\\t')\n print(accuracy, precision, recall, f_measure, sep='\\t\\t\\t')\n return 0\n\n def train(self):\n for iter in range(self.iteration):\n self.log('iteration {} '.format(iter))\n for epoch in range(self.epoch):\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n self.log('epoch %s' % epoch)\n total_batches = int(len(self.X_train_ben['data']) / self.\n batch_size)\n self.log('total_batches in epoch %s : %s ' % (epoch,\n total_batches))\n start_index = 0\n end_index = start_index + self.batch_size\n self.counter = 0\n for i in range(total_batches + 1):\n self.counter += 1\n batch_xs = {}\n batch_xs['batch_data_train'] = np.concatenate([self.\n X_train_ben['data'][start_index:end_index], self.\n X_train_mal['data'][start_index:end_index]])\n batch_xs['batch_data_label'] = np.concatenate([self.\n X_train_ben['label'][start_index:end_index], self.\n X_train_mal['label'][start_index:end_index]])\n end_index = end_index + self.batch_size\n cost = self.partial_fit(batch_xs)\n batch_test = {}\n batch_test['data'] = np.concatenate([self.X_test_ben['data'],\n self.X_test_mal['data']])\n batch_test['label'] = np.concatenate([self.X_test_ben['label'],\n self.X_test_mal['label']])\n self.final_fit(batch_test, batch_test['label'])\n self.sess.run(self.init)\n return (self.accuracy_list, self.fmeasure_list, self.clusters_dist,\n self.evaluation_metrics_list)\n\n def log(self, message):\n print(message)\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass MD(BaseEstimator, TransformerMixin):\n\n def __init__(self, data, input_size, epoch, batch_size, iteration,\n alpha=1.0, n_neg_samples=10, random_seed=2020):\n self.iteration = iteration\n self.epoch = epoch\n self.batch_size = batch_size\n self.learning_rate = 0.01\n self.random_seed = random_seed\n self.phase = True\n self.first_layer_size = 256\n self.second_layer_size = 128\n self.third_layer_size = 128\n self.input_size = input_size\n self.X_train_ben = data[0]\n self.X_train_mal = data[1]\n self.X_test_ben = data[2]\n self.X_test_mal = data[3]\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n self.evaluation_metrics_list = {'accuracy': [], 'precision': [],\n 'recall': [], 'fmeasure': []}\n self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0}\n self._init_graph()\n\n def _init_graph(self):\n \"\"\"\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n \"\"\"\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.set_random_seed(self.random_seed)\n self.train_data = tf.placeholder(tf.float32, shape=[None, self.\n input_size])\n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1])\n self.train_labels_center = tf.placeholder(tf.float32, shape=[\n None, self.third_layer_size])\n self.train_labels_center_disagree = tf.placeholder(tf.float32,\n shape=[None, self.third_layer_size])\n self.weights = self._initialize_weights()\n self.embedding_layer = tf.keras.layers.Embedding(256, 32,\n input_length=324)\n self.embedding_result = self.embedding_layer(self.train_data)\n self.embedding_result = tf.layers.Flatten()(self.embedding_result)\n self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']\n )\n self.layer1 = tf.layers.batch_normalization(self.net1, training\n =self.phase)\n self.layer1 = tf.nn.tanh(self.layer1)\n self.net2 = tf.matmul(self.layer1, self.weights['layer2'])\n self.net2 = tf.layers.batch_normalization(self.net2, training=\n self.phase)\n self.net2 = tf.nn.relu(self.net2)\n self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=\n self.phase)\n self.net3 = tf.matmul(self.layer2, self.weights['layer3'])\n self.layer3 = tf.nn.tanh(self.net3)\n self.cross_entropy = tf.reduce_mean(tf.losses.\n mean_squared_error(self.train_labels_center, self.layer3))\n self.train_step = tf.train.AdamOptimizer(self.learning_rate\n ).minimize(self.cross_entropy)\n self.init = tf.initialize_all_variables()\n self.sess = tf.Session()\n self.sess.run(self.init)\n\n def _initialize_weights(self):\n self.all_weights = dict()\n self.all_weights['layer1'] = tf.Variable(tf.random.normal([10368,\n self.first_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.normal([self.\n first_layer_size, self.second_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.normal([self.\n second_layer_size, self.third_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer1'] = tf.Variable(tf.random.uniform([10368,\n self.first_layer_size], minval=-1, maxval=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.uniform([self.\n first_layer_size, self.second_layer_size], minval=-1, maxval=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.uniform([self.\n second_layer_size, self.third_layer_size], minval=-1, maxval=1))\n self.all_weights['layer1'] = tf.get_variable('w', [32 * self.\n input_size, self.first_layer_size], initializer=tf.initializers\n .random_normal(mean=0, stddev=0.8), 
regularizer=tf.keras.\n regularizers.l2(0.01))\n self.all_weights['layer2'] = tf.get_variable('w2', [self.\n first_layer_size, self.second_layer_size], initializer=tf.\n initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n self.all_weights['layer3'] = tf.get_variable('w3', [self.\n second_layer_size, self.third_layer_size], initializer=tf.\n initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n return self.all_weights\n\n def kmeans_clustering(self, point, size, true_labels):\n self.kmeans = KMeans(n_clusters=2, random_state=10, init=\n 'k-means++', n_init=20).fit(point)\n self.kmeans_labels = self.kmeans.labels_\n self.label_list_0 = np.where(self.kmeans_labels == 0)[0]\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp.append(2)\n counts = np.bincount(temp)\n if counts[0] > counts[1]:\n benign_center = self.kmeans.cluster_centers_[0]\n malware_center = self.kmeans.cluster_centers_[1]\n else:\n benign_center = self.kmeans.cluster_centers_[1]\n malware_center = self.kmeans.cluster_centers_[0]\n new_labels = np.zeros((size, self.third_layer_size))\n for i in range(size):\n if true_labels[i][0] == 0.0:\n new_labels[i] = benign_center\n else:\n new_labels[i] = malware_center\n self.FinalCenters = {'benignCenter': benign_center, 'malwareCenter':\n malware_center}\n return new_labels\n\n def partial_fit(self, X):\n feed_dict = {self.train_data: X['batch_data_train']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n new_labels = self.kmeans_clustering(self.points, len(X[\n 'batch_data_label']), X['batch_data_label'])\n self.clusters_dist.append(np.linalg.norm(self.kmeans.\n cluster_centers_[0] - self.kmeans.cluster_centers_[1]))\n feed_dicts = {self.train_data: X['batch_data_train'], self.\n train_labels_center: new_labels}\n loss, opt = self.sess.run((self.cross_entropy, self.train_step),\n feed_dict=feed_dicts)\n metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels,\n len(X['batch_data_label']))\n self.accuracy_list.append(metrics[0])\n self.fmeasure_list.append(metrics[3])\n return loss\n\n def evaluate(self, true_labels, kmeans_labels, size):\n \"\"\"\n :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1\n :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number\n :param size: number of samples\n\n :return: accuracy, precision, recall, f_measure\n\n \"\"\"\n self.label_list_0 = np.where(kmeans_labels == 0)[0]\n self.label_list_1 = np.where(kmeans_labels == 1)[0]\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp1 = [true_labels[i][0] for i in self.label_list_1]\n temp1.append(2)\n temp.append(2)\n counts = np.bincount(temp)\n counts2 = np.bincount(temp1)\n if counts[0] > counts[1]:\n accuracy = (counts[0] + counts2[1]) / size\n precision = counts2[1] / (counts2[1] + counts2[0])\n recall = counts2[1] / (counts2[1] + counts[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n else:\n accuracy = (counts[1] + counts2[0]) / size\n precision = counts[1] / (counts[1] + counts[0])\n recall = counts[1] / (counts[1] + counts2[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n return accuracy, precision, recall, f_measure\n\n def final_fit(self, X, true_labels):\n self.phase = False\n feed_dict = {self.train_data: X['data_test']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n self.predicted_Labels = []\n for i in 
range(len(true_labels)):\n if np.linalg.norm(self.FinalCenters['benignCenter'] - self.\n points[i]) < np.linalg.norm(self.FinalCenters[\n 'malwareCenter'] - self.points[i]):\n self.predicted_Labels.append([0])\n else:\n self.predicted_Labels.append([1])\n tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels\n ).ravel()\n accuracy = (tp + tn) / (tp + tn + fn + fp)\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_measure = 2 * (precision * recall) / (precision + recall)\n self.evaluation_metrics_list['accuracy'].append(np.float('{0:.4f}'.\n format(accuracy)))\n self.evaluation_metrics_list['precision'].append(np.float('{0:.4f}'\n .format(precision)))\n self.evaluation_metrics_list['recall'].append(np.float('{0:.4f}'.\n format(recall)))\n self.evaluation_metrics_list['fmeasure'].append(np.float('{0:.4f}'.\n format(f_measure)))\n print('accuracy', 'precision', 'recall', 'f_measure', sep='\\t\\t\\t\\t\\t')\n print(accuracy, precision, recall, f_measure, sep='\\t\\t\\t')\n return 0\n\n def train(self):\n for iter in range(self.iteration):\n self.log('iteration {} '.format(iter))\n for epoch in range(self.epoch):\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n self.log('epoch %s' % epoch)\n total_batches = int(len(self.X_train_ben['data']) / self.\n batch_size)\n self.log('total_batches in epoch %s : %s ' % (epoch,\n total_batches))\n start_index = 0\n end_index = start_index + self.batch_size\n self.counter = 0\n for i in range(total_batches + 1):\n self.counter += 1\n batch_xs = {}\n batch_xs['batch_data_train'] = np.concatenate([self.\n X_train_ben['data'][start_index:end_index], self.\n X_train_mal['data'][start_index:end_index]])\n batch_xs['batch_data_label'] = np.concatenate([self.\n X_train_ben['label'][start_index:end_index], self.\n X_train_mal['label'][start_index:end_index]])\n end_index = end_index + self.batch_size\n cost = self.partial_fit(batch_xs)\n batch_test = {}\n batch_test['data'] = np.concatenate([self.X_test_ben['data'],\n self.X_test_mal['data']])\n batch_test['label'] = np.concatenate([self.X_test_ben['label'],\n self.X_test_mal['label']])\n self.final_fit(batch_test, batch_test['label'])\n self.sess.run(self.init)\n return (self.accuracy_list, self.fmeasure_list, self.clusters_dist,\n self.evaluation_metrics_list)\n\n def log(self, message):\n print(message)\n\n def write_result_to_file(self, variable, message):\n file = open('results/' + str(self.batch_size) + '/results.txt', 'a+')\n file.write(message + '\\n')\n file.write(str(np.mean(variable['accuracy'])) + '+' + str(np.var(\n variable['accuracy'])) + '\\t' + str(np.mean(variable[\n 'precision'])) + '\\t' + str(np.mean(variable['recall'])) + '\\t' +\n str(np.mean(variable['fmeasure'])) + '+' + str(np.var(variable[\n 'fmeasure'])) + '\\n')\n",
"step-5": "import math\nimport numpy as np\n# import tkinter\nimport tensorflow as tf\nfrom matplotlib import axis\nimport os\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import confusion_matrix\n\n\nclass MD(BaseEstimator, TransformerMixin):\n def __init__(self, data, input_size, epoch,\n batch_size, iteration, alpha=1.0, n_neg_samples=10,\n random_seed=2020):\n # bind params to class\n\n # network parameters.\n self.iteration = iteration\n self.epoch = epoch\n self.batch_size = batch_size\n self.learning_rate = 0.01\n self.random_seed = random_seed\n self.phase = True\n self.first_layer_size = 256\n self.second_layer_size = 128\n self.third_layer_size = 128\n self.input_size = input_size\n\n # data.\n self.X_train_ben = data[0]\n self.X_train_mal = data[1]\n self.X_test_ben = data[2]\n self.X_test_mal = data[3]\n\n # evaluation.\n self.accuracy_list = [] # accuracy during training\n self.fmeasure_list = [] # fmeasure during training\n self.clusters_dist = [] # distance between clusters centroid\n self.evaluation_metrics_list = {'accuracy': [], 'precision': [], 'recall': [],\n 'fmeasure': []} # evaluation metrics of test data for all epochs\n\n self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0}\n\n # init all variables in a tensorflow graph\n self._init_graph()\n\n def _init_graph(self):\n '''\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n '''\n self.graph = tf.Graph()\n with self.graph.as_default(): # , tf.device('/cpu:0'):\n\n # Set graph level random seed.\n tf.set_random_seed(self.random_seed)\n\n # Input data.\n\n self.train_data = tf.placeholder(tf.float32,\n shape=[None, self.input_size]) # batch_size * input_size\n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1]) # batch_size * 1\n self.train_labels_center = tf.placeholder(tf.float32, shape=[None,\n self.third_layer_size]) # batch_size * third_layer_size\n self.train_labels_center_disagree = tf.placeholder(tf.float32, shape=[None,\n self.third_layer_size]) # batch_size * third_layer_size\n\n # Variables.\n self.weights = self._initialize_weights()\n\n # the embedding layer.\n self.embedding_layer = tf.keras.layers.Embedding(256, 32, input_length=324)\n self.embedding_result = self.embedding_layer(self.train_data)\n self.embedding_result = tf.layers.Flatten()(self.embedding_result)\n\n # the first hidden layer.\n self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']) # batch_size * first_layer_size\n self.layer1 = tf.layers.batch_normalization(self.net1, training=self.phase)\n self.layer1 = tf.nn.tanh(self.layer1)\n\n # the second hidden layer.\n self.net2 = tf.matmul(self.layer1, self.weights['layer2'])\n self.net2 = tf.layers.batch_normalization(self.net2, training=self.phase)\n self.net2 = tf.nn.relu(self.net2)\n self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=self.phase)\n\n # the third hidden layer.\n self.net3 = tf.matmul(self.layer2, self.weights['layer3'])\n self.layer3 = tf.nn.tanh(self.net3)\n\n # loss function.\n self.cross_entropy = tf.reduce_mean(tf.losses.mean_squared_error(self.train_labels_center, self.layer3))\n\n # optimizer.\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cross_entropy)\n\n # init.\n self.init = tf.initialize_all_variables()\n self.sess = tf.Session()\n self.sess.run(self.init)\n\n def _initialize_weights(self):\n\n self.all_weights = dict()\n\n self.all_weights['layer1'] = tf.Variable(\n 
tf.random.normal([10368, self.first_layer_size], mean=0.0, stddev=1)) # input_size * attr_dim\n self.all_weights['layer2'] = tf.Variable(\n tf.random.normal([self.first_layer_size, self.second_layer_size], mean=0.0,\n stddev=1)) # input_size * attr_dim\n\n self.all_weights['layer3'] = tf.Variable(\n tf.random.normal([self.second_layer_size, self.third_layer_size], mean=0.0,\n stddev=1)) # input_size * attr_dim\n\n self.all_weights['layer1'] = tf.Variable(\n tf.random.uniform([10368, self.first_layer_size], minval=-1,\n maxval=1)) # input_size * attr_dim\n self.all_weights['layer2'] = tf.Variable(\n tf.random.uniform([self.first_layer_size, self.second_layer_size], minval=-1,\n maxval=1)) # input_size * attr_dim\n\n self.all_weights['layer3'] = tf.Variable(\n tf.random.uniform([self.second_layer_size, self.third_layer_size], minval=-1,\n maxval=1)) # input_size * attr_dim\n # --------------------------------------------------------------------------\n self.all_weights['layer1'] = tf.get_variable(\"w\", [32 * self.input_size, self.first_layer_size],\n initializer=tf.initializers.random_normal(mean=0, stddev=0.8),\n regularizer=tf.keras.regularizers.l2(\n 0.01)) # input_size * attr_dim\n self.all_weights['layer2'] = tf.get_variable(\"w2\", [self.first_layer_size, self.second_layer_size],\n initializer=tf.initializers.random_normal(mean=0,\n stddev=0.8),\n regularizer=tf.keras.regularizers.l2(\n 0.01)) # input_size * attr_dim\n\n self.all_weights['layer3'] = tf.get_variable(\"w3\", [self.second_layer_size, self.third_layer_size],\n initializer=tf.initializers.random_normal(mean=0, stddev=0.8),\n regularizer=tf.keras.regularizers.l2(\n 0.01)) # input_size * attr_dim\n\n return self.all_weights\n\n def kmeans_clustering(self, point, size, true_labels):\n self.kmeans = KMeans(n_clusters=2, random_state=10, init='k-means++', n_init=20).fit(point)\n\n self.kmeans_labels = self.kmeans.labels_\n\n # find index of samples that are in the first cluster\n self.label_list_0 = np.where(self.kmeans_labels == 0)[0]\n\n # get labels of samples that are in the first cluster\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp.append(2)\n\n # determine label(cluster center) of benign and malware group based on the majority samples in each cluster\n counts = np.bincount(temp)\n\n if counts[0] > counts[1]: # counts[0] : number of benign in the first cluster\n benign_center = self.kmeans.cluster_centers_[0]\n malware_center = self.kmeans.cluster_centers_[1]\n else:\n benign_center = self.kmeans.cluster_centers_[1]\n malware_center = self.kmeans.cluster_centers_[0]\n\n # set label for each sample\n new_labels = np.zeros((size, self.third_layer_size))\n\n for i in range(size):\n if true_labels[i][0] == 0.0:\n new_labels[i] = benign_center\n else:\n new_labels[i] = malware_center\n\n self.FinalCenters = {'benignCenter': benign_center, 'malwareCenter': malware_center}\n\n return new_labels\n\n def partial_fit(self, X): # fit a batch\n\n # get network output.\n feed_dict = {self.train_data: X['batch_data_train']}\n self.points = self.sess.run((self.layer3), feed_dict=feed_dict)\n\n # apply clustering to find expected output.\n new_labels = self.kmeans_clustering(self.points, len(X['batch_data_label']), X['batch_data_label'])\n self.clusters_dist.append(np.linalg.norm(self.kmeans.cluster_centers_[0] - self.kmeans.cluster_centers_[1]))\n\n feed_dicts = {self.train_data: X['batch_data_train'],\n self.train_labels_center: new_labels}\n loss, opt = self.sess.run((self.cross_entropy, self.train_step), 
feed_dict=feed_dicts)\n\n # print(loss)\n # print('------------')\n\n metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels, len((X['batch_data_label'])))\n self.accuracy_list.append(metrics[0])\n self.fmeasure_list.append(metrics[3])\n\n return loss\n\n def evaluate(self, true_labels, kmeans_labels, size):\n \"\"\"\n :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1\n :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number\n :param size: number of samples\n\n :return: accuracy, precision, recall, f_measure\n\n \"\"\"\n\n # find index of samples that are in the first cluster\n self.label_list_0 = np.where(kmeans_labels == 0)[0]\n self.label_list_1 = np.where(kmeans_labels == 1)[0]\n\n # get labels of samples that are in the first cluster\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp1 = [true_labels[i][0] for i in self.label_list_1]\n temp1.append(2)\n temp.append(2)\n\n # determine label(cluster center) of benign and malware group based on the majority samples in each cluster\n counts = np.bincount(temp)\n counts2 = np.bincount(temp1)\n\n if counts[0] > counts[1]:\n accuracy = (counts[0] + counts2[1]) / size\n precision = counts2[1] / (counts2[1] + counts2[0])\n recall = counts2[1] / (counts2[1] + counts[1])\n f_measure = 2 * ((precision * recall) / (precision + recall))\n else:\n accuracy = (counts[1] + counts2[0]) / size\n precision = counts[1] / (counts[1] + counts[0])\n recall = counts[1] / (counts[1] + counts2[1])\n f_measure = 2 * ((precision * recall) / (precision + recall))\n\n return accuracy, precision, recall, f_measure\n\n def final_fit(self, X, true_labels):\n\n self.phase = False\n\n # get network output for test data.\n feed_dict = {self.train_data: X['data_test']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n\n # determine label of each test sample based on the euclidean distance\n self.predicted_Labels = []\n for i in range(len(true_labels)):\n if np.linalg.norm(self.FinalCenters['benignCenter'] - self.points[i]) < np.linalg.norm(\n self.FinalCenters['malwareCenter'] - self.points[i]):\n self.predicted_Labels.append([0])\n else:\n self.predicted_Labels.append([1])\n\n tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels).ravel()\n\n accuracy = (tp + tn) / (tp + tn + fn + fp)\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_measure = 2 * (precision * recall) / (precision + recall)\n\n self.evaluation_metrics_list['accuracy'].append(np.float(\"{0:.4f}\".format(accuracy)))\n self.evaluation_metrics_list['precision'].append(np.float(\"{0:.4f}\".format(precision)))\n self.evaluation_metrics_list['recall'].append(np.float(\"{0:.4f}\".format(recall)))\n self.evaluation_metrics_list['fmeasure'].append(np.float(\"{0:.4f}\".format(f_measure)))\n\n print(\"accuracy\", \"precision\", \"recall\", \"f_measure\", sep=\"\\t\\t\\t\\t\\t\")\n print(accuracy, precision, recall, f_measure, sep=\"\\t\\t\\t\")\n\n return 0\n\n def train(self): # fit a dataset\n\n for iter in range(self.iteration):\n self.log(\"iteration {} \".format(iter))\n\n for epoch in range(self.epoch):\n\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n\n self.log(\"epoch %s\" % (epoch))\n\n total_batches = int(len(self.X_train_ben['data']) / self.batch_size)\n self.log('total_batches in epoch %s : %s ' % (epoch, total_batches))\n\n start_index = 0\n end_index = start_index + self.batch_size\n self.counter = 0\n\n # Loop over 
all batches.\n for i in range(total_batches + 1):\n self.counter += 1\n\n # generate a batch data\n batch_xs = {}\n\n batch_xs['batch_data_train'] = np.concatenate(\n [self.X_train_ben['data'][start_index:end_index], \\\n self.X_train_mal['data'][start_index:end_index]])\n\n batch_xs['batch_data_label'] = np.concatenate(\n [self.X_train_ben['label'][start_index:end_index], \\\n self.X_train_mal['label'][start_index:end_index]])\n\n # Fit training using batch data\n end_index = end_index + self.batch_size\n cost = self.partial_fit(batch_xs)\n\n\n # test\n batch_test = {}\n batch_test[\"data\"] = np.concatenate([self.X_test_ben['data'], self.X_test_mal['data']])\n batch_test[\"label\"] = np.concatenate([self.X_test_ben['label'], self.X_test_mal['label']])\n\n self.final_fit(batch_test, batch_test[\"label\"])\n\n # init all variables in a tensorflow graph for the next fold\n self.sess.run(self.init)\n\n return self.accuracy_list, self.fmeasure_list, self.clusters_dist, self.evaluation_metrics_list\n\n def log(self, message):\n print(message)\n\n def write_result_to_file(self, variable, message):\n # file = open('result.txt', 'a+')\n file = open('results/' + str(self.batch_size) + '/results.txt', 'a+')\n file.write(message + \"\\n\")\n file.write(str(np.mean(variable['accuracy'])) + '+' + str(np.var(variable['accuracy'])) + '\\t' + str(\n np.mean(variable['precision'])) + '\\t' + str(\n np.mean(variable['recall'])) + '\\t' + str(\n np.mean(variable['fmeasure'])) + '+' + str(np.var(variable['fmeasure'])) + '\\n')\n\n",
"step-ids": [
4,
8,
9,
11,
13
]
}
|
[
4,
8,
9,
11,
13
] |
import openerp
from openerp import pooler
from openerp.report import report_sxw
import xlwt
from openerp.addons.report_xls.report_xls import report_xls
from openerp.tools.translate import _
class openacademy_course_xls_parser(report_sxw.rml_parse):
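    # Report parser: exposes the cursor, uid and translated report title to the XLS renderer via localcontext.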
def __init__(self, cursor, uid, name, context):
super(openacademy_course_xls_parser, self).__init__(cursor, uid, name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
self.localcontext.update({
'cr': cursor,
'uid': uid,
'report_name': _('COURSE LIST'),
})
_column_sizes = [
('0',30),
('1',30),
('2',20)
]
import time
class openacademy_course_xls(report_xls):
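    # XLS renderer: builds a single worksheet and writes one "Subject: <name>" title row per course.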
column_sizes = [x[1] for x in _column_sizes]
def generate_xls_report(self, _p, _xs, data, objects, wb):
ws = wb.add_sheet(_p.report_name[:31])
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0
ws.fit_width_to_pages = 1
row_pos = 6
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
        # write one empty row so the widths defined in _column_sizes are applied to the columns
        c_sizes = self.column_sizes
        c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in range(0, len(c_sizes))]
        row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
        row_pos = self.xls_write_row(ws, row_pos, row_data, set_column_size=True)  # set_column_size assumed supported by report_xls
cell_format = _xs['bold'] + _xs['underline']
so_style = xlwt.easyxf(cell_format)
cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']
table_title_style = xlwt.easyxf(cell_format)
cell_format = _xs['right']
right_style = xlwt.easyxf(cell_format)
cell_format = _xs['underline'] + _xs['right']
underline_style = xlwt.easyxf(cell_format)
for so in objects:
c_specs = [('title',3,0,'text','Subject: %s' %(so.name)),]
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(ws, row_pos, row_data)
ws.set_horz_split_pos(row_pos)
openacademy_course_xls('report.openacademy.course.list.xls','openacademy.course', parser=openacademy_course_xls_parser)
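# Note: the service name registered above ('report.openacademy.course.list.xls') has to match
# a report action declared in the module's XML data. A minimal sketch of such a declaration,
# assuming the usual report_xls conventions (attribute values below are illustrative, not taken
# from this file):
#
#   <report id="report_openacademy_course_xls"
#           model="openacademy.course"
#           string="Course List"
#           report_type="xls"
#           name="openacademy.course.list.xls"
#           auto="False"/>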
#!/usr/bin/env python
import numpy as np
import rospy
import tf
from geometry_msgs.msg import PoseStamped, Twist, TwistStamped, Point
from nav_msgs.msg import Odometry
from visualization_msgs.msg import Marker
from bebop_nmpc_solver import BebopNmpcFormulationParam, bebop_nmpc_casadi_solver
# The frame by default is NWU (x: North/forward, y: West/left, z: Up)
class BebopNmpcControl:
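    """NMPC controller node for a Parrot Bebop quadrotor.

    Subscribes to the vehicle odometry / pose / twist and a goal pose, solves a
    CasADi NLP over a receding horizon at each control step, and publishes
    normalized attitude and velocity commands on /bebop/auto_cmd_vel plus a
    Marker with the planned trajectory for visualization.
    """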
def __init__(self, mpc_form_param):
# MPC formulation settings
self.mpc_form_param_ = mpc_form_param
# bebop param
self.roll_max_ = self.mpc_form_param_.roll_max
self.pitch_max_ = self.mpc_form_param_.pitch_max
self.vz_max_ = self.mpc_form_param_.vz_max
self.yawrate_max_ = self.mpc_form_param_.yawrate_max
self.K_yaw_ = self.mpc_form_param_.K_yaw
self.bebop_size_ = self.mpc_form_param_.bebop_size
# state and goal pose, size
self.bebop_state_current_ = np.zeros(9)
self.bebop_pose_goal_ = np.array([0, 0, 1.0, 0])
# collision avoidance obs param
self.nobs_ = self.mpc_form_param_.nobs
self.obs_size_ = self.mpc_form_param_.obs_size
self.obs_state_current_ = np.array([0, 0, -1.0, 0, 0, 0])
self.obs_state_prediction_ = np.tile(np.array(self.obs_state_current_), (self.mpc_form_param_.N, 1)).T
# MPC settings
self.mpc_dt_ = self.mpc_form_param_.dt
self.mpc_N_ = self.mpc_form_param_.N
self.mpc_Tf_ = self.mpc_form_param_.Tf
self.mpc_nx_ = self.mpc_form_param_.nx
self.mpc_nu_ = self.mpc_form_param_.nu
self.mpc_ns_ = self.mpc_form_param_.ns
self.mpc_np_ = self.mpc_form_param_.nparam
self.mpc_weights_wp_ = self.mpc_form_param_.mpc_weights_wp
self.mpc_weights_input_ = self.mpc_form_param_.mpc_weights_input
self.mpc_weights_coll_ = self.mpc_form_param_.mpc_weights_coll
self.mpc_weights_slack_ = self.mpc_form_param_.mpc_weights_slack
# MPC variables
        self.mpc_nlp_traj_ = np.zeros((self.mpc_nu_ + self.mpc_nx_ + self.mpc_ns_, self.mpc_N_)).reshape(-1)
        self.mpc_nlp_param_ = np.zeros(self.mpc_nx_ + self.mpc_np_ * self.mpc_N_)
self.mpc_x_plan_ = np.zeros((self.mpc_nx_, self.mpc_N_))
self.mpc_u_plan_ = np.zeros((self.mpc_nu_, self.mpc_N_))
self.mpc_s_plan_ = np.zeros((self.mpc_ns_, self.mpc_N_))
self.mpc_u_now_ = np.zeros(self.mpc_nu_)
self.mpc_feasible_ = False
self.mpc_success_ = False
# MPC solver
recompile = False
[self.nlp_solver_complied_, self.nlp_lbx_, self.nlp_ubx_, self.nlp_lbg_, self.nlp_ubg_] = \
bebop_nmpc_casadi_solver(self.mpc_form_param_, recompile)
# ROS subscriber
self.odom_sub_ = rospy.Subscriber("/bebop/odom", Odometry, self.set_bebop_odom) # bebop_odom
self.received_first_odom_ = False
self.odom_received_time_ = rospy.Time.now()
self.odom_time_out_ = 0.2
self.pose_sub_ = rospy.Subscriber("/bebop/pose", PoseStamped, self.set_bebop_pose)
self.twist_sub_ = rospy.Subscriber("/bebop/twist", TwistStamped, self.set_bebop_twist)
self.pose_goal_sub_ = rospy.Subscriber("/bebop/pose_goal", PoseStamped, self.set_bebop_pose_goal)
self.received_first_goal_ = False
# ROS publisher
        self.bebop_cmd_vel_ = np.zeros(4)
self.bebop_cmd_vel_pub_ = rospy.Publisher("/bebop/auto_cmd_vel", Twist, queue_size=1)
self.mpc_traj_plan_vis_pub_ = rospy.Publisher("/bebop/mpc/trajectory_plan_vis", Marker, queue_size=1)
def set_bebop_odom(self, odom_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First odometry received!')
# read data
self.odom_received_time_ = rospy.Time.now()
px = odom_msg.pose.pose.position.x
py = odom_msg.pose.pose.position.y
pz = odom_msg.pose.pose.position.z
vx = odom_msg.twist.twist.linear.x
vy = odom_msg.twist.twist.linear.y
vz = odom_msg.twist.twist.linear.z
rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.orientation.x,
odom_msg.pose.pose.orientation.y,
odom_msg.pose.pose.orientation.z,
odom_msg.pose.pose.orientation.w])
self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0], rpy[1], rpy[2]])
        if self.received_first_goal_ is False:  # no goal received yet: hold the current pose
            self.bebop_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_pose(self, pose_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First pose received!')
self.odom_received_time_ = rospy.Time.now()
px = pose_msg.pose.position.x
py = pose_msg.pose.position.y
pz = pose_msg.pose.position.z
rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.orientation.x,
pose_msg.pose.orientation.y,
pose_msg.pose.orientation.z,
pose_msg.pose.orientation.w])
self.bebop_state_current_[0:3] = np.array([px, py, pz])
self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])
        if self.received_first_goal_ is False:  # no goal received yet: hold the current pose
            self.bebop_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_twist(self, twist_msg):
vx = twist_msg.twist.linear.x
vy = twist_msg.twist.linear.y
vz = twist_msg.twist.linear.z
self.bebop_state_current_[3:6] = np.array([vx, vy, vz])
def set_bebop_pose_goal(self, pose_goal_msg):
if self.received_first_goal_ is False:
self.received_first_goal_ = True
rospy.loginfo('First pose goal received!')
px_goal = pose_goal_msg.pose.position.x
py_goal = pose_goal_msg.pose.position.y
pz_goal = pose_goal_msg.pose.position.z
rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.pose.orientation.x,
pose_goal_msg.pose.orientation.y,
pose_goal_msg.pose.orientation.z,
pose_goal_msg.pose.orientation.w])
self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal, rpy_goal[2]])
    def obs_motion_prediction(self):
        # constant-velocity prediction of the obstacle position over the horizon
        for iStage in range(0, self.mpc_N_):
            self.obs_state_prediction_[0:3, iStage] = self.obs_state_current_[0:3] \
                + self.obs_state_current_[3:6] * (iStage + 1) * self.mpc_dt_
def reset_nlp_solver(self):
# initialize plan
u_reset = np.zeros(self.mpc_nu_)
x_reset = np.zeros(self.mpc_nx_)
s_reset = np.zeros(self.mpc_ns_)
# x_reset = self.bebop_state_current_[:self.mpc_nx_]
x_reset[0:3] = self.bebop_state_current_[0:3]
x_reset[6:8] = self.bebop_state_current_[6:8]
nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(-1)
self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(-1)
def initialize_nlp_solver(self):
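        # warm start: shift the previous optimal plan one stage forward and repeat the last stage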
u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.mpc_u_plan_[:, -1:]), axis=1)
x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.mpc_x_plan_[:, -1:]), axis=1)
s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.mpc_s_plan_[:, -1:]), axis=1)
self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)).reshape(-1)
def set_nlp_params(self):
parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_)) # all parameters on each stage
for iStage in range(0, self.mpc_N_):
parameters_all_stage[self.mpc_form_param_.param_index_bebop_pose_start, iStage] = \
np.array([self.bebop_state_current_[0], self.bebop_state_current_[1], self.bebop_state_current_[2],
self.bebop_state_current_[8]])
parameters_all_stage[self.mpc_form_param_.param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_
parameters_all_stage[self.mpc_form_param_.param_index_bebop_size, iStage] = self.bebop_size_
parameters_all_stage[self.mpc_form_param_.param_index_obs_info, iStage] = np.concatenate((
self.obs_state_prediction_[0:3, iStage], self.obs_size_
))
if iStage == self.mpc_N_ - 1: # terminal weights
parameters_all_stage[self.mpc_form_param_.param_index_mpc_weights, iStage] = np.hstack(
(self.mpc_weights_wp_, 0.1 * self.mpc_weights_input_,
self.mpc_weights_coll_, self.mpc_weights_slack_)
)
else:
parameters_all_stage[self.mpc_form_param_.param_index_mpc_weights, iStage] = np.hstack(
(0.05 * self.mpc_weights_wp_, self.mpc_weights_input_,
self.mpc_weights_coll_, self.mpc_weights_slack_)
)
# set parameters
self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.mpc_nx_],
np.transpose(parameters_all_stage).reshape(-1)))
def run_nlp_solver(self):
# initialize solver
if self.mpc_feasible_ is True:
self.initialize_nlp_solver()
else:
self.reset_nlp_solver()
# set solver params
self.set_nlp_params()
# call the solver
time_before_solver = rospy.get_rostime()
nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_,
p=self.mpc_nlp_param_,
lbx=self.nlp_lbx_,
ubx=self.nlp_ubx_,
lbg=self.nlp_lbg_,
ubg=self.nlp_ubg_)
# deal with infeasibility
if self.nlp_solver_complied_.stats()['success'] is False: # if infeasible
self.mpc_feasible_ = False
self.mpc_success_ = False
rospy.logwarn("MPC infeasible!")
else:
self.mpc_feasible_ = True
self.mpc_success_ = True
solver_time = (rospy.get_rostime() - time_before_solver).to_sec() * 1000.0
solver_iter = self.nlp_solver_complied_.stats()['iter_count']
rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.', solver_iter, solver_time)
# obtain solution
traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self.mpc_ns_, self.mpc_N_))
self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])
self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_+self.mpc_nx_, :])
self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_+self.mpc_nx_:, :])
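        # receding horizon: only the first input of the optimal sequence is applied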
self.mpc_u_now_ = self.mpc_u_plan_[:, 0]
def calculate_bebop_cmd_vel(self):
# if odom received
time_now = rospy.Time.now()
if (time_now - self.odom_received_time_).to_sec() > self.odom_time_out_:
rospy.logwarn('Odometry time out! Will try to make the MAV hover.')
self.bebop_pose_goal_ = np.concatenate((self.bebop_state_current_[0:3], self.bebop_state_current_[8:9]))
else:
# run the nlp solver
self.run_nlp_solver()
# control commands
if self.mpc_success_ is True:
roll_cmd = self.mpc_u_now_[0]
pitch_cmd = self.mpc_u_now_[1]
vz_cmd = self.mpc_u_now_[2]
else:
rospy.logwarn('MPC failure! Default commands sent.')
roll_cmd = 0.0
pitch_cmd = 0.0
vz_cmd = 0.0
# yaw control
yaw_now = self.bebop_state_current_[8]
yaw_ref = self.bebop_pose_goal_[3]
yaw_error = yaw_ref - yaw_now
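            # wrap the yaw error into [-pi, pi] so the shorter rotation is commanded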
while np.abs(yaw_error) > np.pi:
if yaw_error > 0.0:
yaw_error = yaw_error - 2.0 * np.pi
else:
yaw_error = yaw_error + 2.0 * np.pi
yawrate_cmd = self.K_yaw_ * yaw_error
yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.yawrate_max_)
# obtained command
self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd, yawrate_cmd])
def pub_bebop_cmd_vel(self):
try:
cmd_vel_msg = Twist()
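            # assumed bebop_autonomy convention: each Twist component is the command normalized
            # by its configured maximum, so the values sent stay within [-1, 1]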
cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_ # pitch to move along x
cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_ # roll to move along y
cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_
cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_
self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)
        except Exception:
rospy.logwarn('Bebop cmd_vel command not published!')
def pub_mpc_traj_plan_vis(self):
try:
marker_msg = Marker()
marker_msg.header.frame_id = "map"
marker_msg.header.stamp = rospy.Time.now()
marker_msg.type = 8
marker_msg.action = 0
# set the scale of the marker
marker_msg.scale.x = 0.2
marker_msg.scale.y = 0.2
marker_msg.scale.z = 0.2
# set the color
marker_msg.color.r = 1.0
marker_msg.color.g = 0.0
marker_msg.color.b = 0.0
marker_msg.color.a = 1.0
# Set the pose of the marker
marker_msg.pose.position.x = 0.0
marker_msg.pose.position.y = 0.0
marker_msg.pose.position.z = 0.0
marker_msg.pose.orientation.x = 0
marker_msg.pose.orientation.y = 0
marker_msg.pose.orientation.z = 0
marker_msg.pose.orientation.w = 1.0
# points
mpc_traj_plan_points = []
for iStage in range(0, self.mpc_N_):
point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_[1, iStage], self.mpc_x_plan_[2, iStage])
mpc_traj_plan_points.append(point)
marker_msg.points = mpc_traj_plan_points
self.mpc_traj_plan_vis_pub_.publish(marker_msg)
        except Exception:
rospy.logwarn("MPC trajectory plan not published!")
def bebop_nmpc_control():
# create a node
rospy.loginfo("Starting Bebop NMPC Control...")
rospy.init_node("bebop_nmpc_control_node", anonymous=False)
hz = 50
rate = rospy.Rate(hz)
rospy.sleep(1.0)
# formulation
mpc_form_param = BebopNmpcFormulationParam()
# control
bebop_nmpc = BebopNmpcControl(mpc_form_param)
while not rospy.is_shutdown():
if bebop_nmpc.received_first_odom_ is False:
rospy.logwarn('Waiting for first Odometry!')
elif bebop_nmpc.received_first_goal_ is False:
rospy.logwarn('Waiting for first goal pose!')
else:
bebop_nmpc.calculate_bebop_cmd_vel()
bebop_nmpc.pub_bebop_cmd_vel()
bebop_nmpc.pub_mpc_traj_plan_vis()
rate.sleep()
if __name__ == "__main__":
bebop_nmpc_control()
Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n\n # yaw control\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.yawrate_max_)\n\n # obtained command\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd, yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_ # pitch to move along x\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_ # roll to move along y\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = \"map\"\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n # set the scale of the marker\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n # set the color\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n # Set the pose of the marker\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n # points\n mpc_traj_plan_points = []\n for iStage in range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_[1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn(\"MPC trajectory plan not published!\")\n\n\ndef bebop_nmpc_control():\n # create a node\n rospy.loginfo(\"Starting Bebop NMPC Control...\")\n rospy.init_node(\"bebop_nmpc_control_node\", anonymous=False)\n hz = 50\n rate = rospy.Rate(hz)\n rospy.sleep(1.0)\n\n # formulation\n mpc_form_param = BebopNmpcFormulationParam()\n\n # control\n bebop_nmpc = BebopNmpcControl(mpc_form_param)\n\n while not rospy.is_shutdown():\n if bebop_nmpc.received_first_odom_ is False:\n rospy.logwarn('Waiting for first Odometry!')\n elif bebop_nmpc.received_first_goal_ is False:\n rospy.logwarn('Waiting for first goal pose!')\n else:\n bebop_nmpc.calculate_bebop_cmd_vel()\n bebop_nmpc.pub_bebop_cmd_vel()\n bebop_nmpc.pub_mpc_traj_plan_vis()\n rate.sleep()\n\n\nif __name__ == \"__main__\":\n bebop_nmpc_control()\n",
"step-ids": [
10,
12,
13,
15,
18
]
}
# coding=utf-8
"""SCALE UI: feature tests."""
import pytest
import xpaths
from function import (
wait_on_element,
is_element_present,
wait_on_element_disappear
)
from pytest_bdd import (
given,
scenario,
then,
when,
)
@pytest.mark.dependency(name='Set_Group')
@scenario('features/NAS-T1250.feature', 'Verify that you can create a new group')
def test_verify_that_you_can_create_a_new_group():
"""Verify that you can create a new group."""
@given('the browser is open, navigate to the SCALE URL, and login')
def the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip, root_password):
"""the browser is open, navigate to the SCALE URL, and login."""
if nas_ip not in driver.current_url:
driver.get(f"http://{nas_ip}")
assert wait_on_element(driver, 10, xpaths.login.user_Input)
if not is_element_present(driver, xpaths.side_Menu.dashboard):
assert wait_on_element(driver, 10, xpaths.login.user_Input)
driver.find_element_by_xpath(xpaths.login.user_Input).clear()
driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')
driver.find_element_by_xpath(xpaths.login.password_Input).clear()
driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(root_password)
assert wait_on_element(driver, 5, xpaths.login.signin_Button)
driver.find_element_by_xpath(xpaths.login.signin_Button).click()
else:
assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard, 'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()
@when('on the dashboard click on Credentials and Local Groups')
def on_the_dashboard_click_on_credentials_and_local_groups(driver):
"""on the dashboard click on Credentials and Local Groups."""
assert wait_on_element(driver, 10, xpaths.dashboard.title)
assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)
assert wait_on_element(driver, 10, xpaths.side_Menu.credentials, 'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()
assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group, 'clickable')
driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()
@then('on the Groups page, click Add')
def on_the_groups_page_click_add(driver):
"""on the Groups page, click Add."""
assert wait_on_element(driver, 10, xpaths.groups.title)
assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')
driver.find_element_by_xpath(xpaths.button.add).click()
@then('on the Add Group side box input the group name')
def on_the_add_group_side_box_input_the_group_name(driver):
"""on the Add Group side box input the group name."""
assert wait_on_element(driver, 7, xpaths.add_Group.title)
assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')
driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()
driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys('qetest')
@then('click save and verify the group was added')
def click_save_and_verify_the_group_was_added(driver):
"""click save and verify the group was added."""
assert wait_on_element(driver, 7, xpaths.button.save, 'clickable')
driver.find_element_by_xpath(xpaths.button.save).click()
assert wait_on_element_disappear(driver, 20, xpaths.progress.progressbar)
assert wait_on_element(driver, 10, xpaths.groups.title)
assert wait_on_element(driver, 10, xpaths.groups.qetest_Name)
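# ---------------------------------------------------------------------------
# Note: the steps above rely on helpers imported from `function`
# (wait_on_element, is_element_present, wait_on_element_disappear) and on the
# XPath constants in `xpaths`, neither of which is included in this file. The
# sketch below is a minimal, hypothetical version of those helpers built on
# Selenium's WebDriverWait, assuming the third argument is an XPath string and
# the optional fourth argument selects the wait condition ('clickable',
# 'inputable', or plain presence by default). It is only meant to make the
# assertions above easier to read, not to reproduce the project's real code.
# ---------------------------------------------------------------------------
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


def wait_on_element(driver, timeout, xpath, condition=None):
    """Return True once the element meets the condition, False on timeout."""
    if condition == 'clickable':
        expectation = EC.element_to_be_clickable((By.XPATH, xpath))
    elif condition == 'inputable':
        # 'inputable' is treated here as visible, i.e. ready for send_keys()
        expectation = EC.visibility_of_element_located((By.XPATH, xpath))
    else:
        expectation = EC.presence_of_element_located((By.XPATH, xpath))
    try:
        WebDriverWait(driver, timeout).until(expectation)
        return True
    except TimeoutException:
        return False


def is_element_present(driver, xpath):
    """Non-blocking check, used above to decide whether the login form is needed."""
    try:
        driver.find_element_by_xpath(xpath)
        return True
    except NoSuchElementException:
        return False


def wait_on_element_disappear(driver, timeout, xpath):
    """Return True once the element is gone or invisible, False on timeout."""
    try:
        WebDriverWait(driver, timeout).until(
            EC.invisibility_of_element_located((By.XPATH, xpath)))
        return True
    except TimeoutException:
        return False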
{
"blob_id": "f4aaf0449bff68814090552ea4f6ccac85dacf1b",
"index": 5617,
"step-1": "<mask token>\n\n\n@given('the browser is open, navigate to the SCALE URL, and login')\ndef the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,\n root_password):\n \"\"\"the browser is open, navigate to the SCALE URL, and login.\"\"\"\n if nas_ip not in driver.current_url:\n driver.get(f'http://{nas_ip}')\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(\n root_password)\n assert wait_on_element(driver, 5, xpaths.login.signin_Button)\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()\n\n\n@when('on the dashboard click on Credentials and Local Groups')\ndef on_the_dashboard_click_on_credentials_and_local_groups(driver):\n \"\"\"on the dashboard click on Credentials and Local Groups.\"\"\"\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)\n assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()\n assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()\n\n\n<mask token>\n\n\n@then('on the Add Group side box input the group name')\ndef on_the_add_group_side_box_input_the_group_name(driver):\n \"\"\"on the Add Group side box input the group name.\"\"\"\n assert wait_on_element(driver, 7, xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(\n 'qetest')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@given('the browser is open, navigate to the SCALE URL, and login')\ndef the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,\n root_password):\n \"\"\"the browser is open, navigate to the SCALE URL, and login.\"\"\"\n if nas_ip not in driver.current_url:\n driver.get(f'http://{nas_ip}')\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(\n root_password)\n assert wait_on_element(driver, 5, xpaths.login.signin_Button)\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()\n\n\n@when('on the dashboard click on Credentials and Local Groups')\ndef on_the_dashboard_click_on_credentials_and_local_groups(driver):\n \"\"\"on the dashboard click on Credentials and Local Groups.\"\"\"\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)\n assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()\n assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()\n\n\n@then('on the Groups page, click Add')\ndef on_the_groups_page_click_add(driver):\n \"\"\"on the Groups page, click Add.\"\"\"\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')\n driver.find_element_by_xpath(xpaths.button.add).click()\n\n\n@then('on the Add Group side box input the group name')\ndef on_the_add_group_side_box_input_the_group_name(driver):\n \"\"\"on the Add Group side box input the group name.\"\"\"\n assert wait_on_element(driver, 7, xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(\n 'qetest')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@given('the browser is open, navigate to the SCALE URL, and login')\ndef the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,\n root_password):\n \"\"\"the browser is open, navigate to the SCALE URL, and login.\"\"\"\n if nas_ip not in driver.current_url:\n driver.get(f'http://{nas_ip}')\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(\n root_password)\n assert wait_on_element(driver, 5, xpaths.login.signin_Button)\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()\n\n\n@when('on the dashboard click on Credentials and Local Groups')\ndef on_the_dashboard_click_on_credentials_and_local_groups(driver):\n \"\"\"on the dashboard click on Credentials and Local Groups.\"\"\"\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)\n assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()\n assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()\n\n\n@then('on the Groups page, click Add')\ndef on_the_groups_page_click_add(driver):\n \"\"\"on the Groups page, click Add.\"\"\"\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')\n driver.find_element_by_xpath(xpaths.button.add).click()\n\n\n@then('on the Add Group side box input the group name')\ndef on_the_add_group_side_box_input_the_group_name(driver):\n \"\"\"on the Add Group side box input the group name.\"\"\"\n assert wait_on_element(driver, 7, xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(\n 'qetest')\n\n\n@then('click save and verify the group was added')\ndef click_save_and_verify_the_group_was_added(driver):\n \"\"\"click save and verify the group was added.\"\"\"\n assert wait_on_element(driver, 7, xpaths.button.save, 'clickable')\n driver.find_element_by_xpath(xpaths.button.save).click()\n assert wait_on_element_disappear(driver, 20, xpaths.progress.progressbar)\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.groups.qetest_Name)\n",
"step-4": "<mask token>\nimport pytest\nimport xpaths\nfrom function import wait_on_element, is_element_present, wait_on_element_disappear\nfrom pytest_bdd import given, scenario, then, when\n\n\[email protected](name='Set_Group')\n@scenario('features/NAS-T1250.feature',\n 'Verify that you can create a new group')\ndef test_verify_that_you_can_create_a_new_group():\n \"\"\"Verify that you can create a new group.\"\"\"\n\n\n@given('the browser is open, navigate to the SCALE URL, and login')\ndef the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip,\n root_password):\n \"\"\"the browser is open, navigate to the SCALE URL, and login.\"\"\"\n if nas_ip not in driver.current_url:\n driver.get(f'http://{nas_ip}')\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(\n root_password)\n assert wait_on_element(driver, 5, xpaths.login.signin_Button)\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()\n\n\n@when('on the dashboard click on Credentials and Local Groups')\ndef on_the_dashboard_click_on_credentials_and_local_groups(driver):\n \"\"\"on the dashboard click on Credentials and Local Groups.\"\"\"\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)\n assert wait_on_element(driver, 10, xpaths.side_Menu.credentials,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()\n assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group,\n 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()\n\n\n@then('on the Groups page, click Add')\ndef on_the_groups_page_click_add(driver):\n \"\"\"on the Groups page, click Add.\"\"\"\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')\n driver.find_element_by_xpath(xpaths.button.add).click()\n\n\n@then('on the Add Group side box input the group name')\ndef on_the_add_group_side_box_input_the_group_name(driver):\n \"\"\"on the Add Group side box input the group name.\"\"\"\n assert wait_on_element(driver, 7, xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys(\n 'qetest')\n\n\n@then('click save and verify the group was added')\ndef click_save_and_verify_the_group_was_added(driver):\n \"\"\"click save and verify the group was added.\"\"\"\n assert wait_on_element(driver, 7, xpaths.button.save, 'clickable')\n driver.find_element_by_xpath(xpaths.button.save).click()\n assert wait_on_element_disappear(driver, 20, xpaths.progress.progressbar)\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.groups.qetest_Name)\n",
"step-5": "# coding=utf-8\n\"\"\"SCALE UI: feature tests.\"\"\"\n\nimport pytest\nimport xpaths\nfrom function import (\n wait_on_element,\n is_element_present,\n wait_on_element_disappear\n)\nfrom pytest_bdd import (\n given,\n scenario,\n then,\n when,\n)\n\n\[email protected](name='Set_Group')\n@scenario('features/NAS-T1250.feature', 'Verify that you can create a new group')\ndef test_verify_that_you_can_create_a_new_group():\n \"\"\"Verify that you can create a new group.\"\"\"\n\n\n@given('the browser is open, navigate to the SCALE URL, and login')\ndef the_browser_is_open_navigate_to_the_scale_url_and_login(driver, nas_ip, root_password):\n \"\"\"the browser is open, navigate to the SCALE URL, and login.\"\"\"\n if nas_ip not in driver.current_url:\n driver.get(f\"http://{nas_ip}\")\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys('root')\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(root_password)\n assert wait_on_element(driver, 5, xpaths.login.signin_Button)\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard, 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()\n\n\n@when('on the dashboard click on Credentials and Local Groups')\ndef on_the_dashboard_click_on_credentials_and_local_groups(driver):\n \"\"\"on the dashboard click on Credentials and Local Groups.\"\"\"\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)\n assert wait_on_element(driver, 10, xpaths.side_Menu.credentials, 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.credentials).click()\n assert wait_on_element(driver, 10, xpaths.side_Menu.local_Group, 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.local_Group).click()\n\n\n@then('on the Groups page, click Add')\ndef on_the_groups_page_click_add(driver):\n \"\"\"on the Groups page, click Add.\"\"\"\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.button.add, 'clickable')\n driver.find_element_by_xpath(xpaths.button.add).click()\n\n\n@then('on the Add Group side box input the group name')\ndef on_the_add_group_side_box_input_the_group_name(driver):\n \"\"\"on the Add Group side box input the group name.\"\"\"\n assert wait_on_element(driver, 7, xpaths.add_Group.title)\n assert wait_on_element(driver, 7, xpaths.add_Group.name_Input, 'inputable')\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).clear()\n driver.find_element_by_xpath(xpaths.add_Group.name_Input).send_keys('qetest')\n\n\n@then('click save and verify the group was added')\ndef click_save_and_verify_the_group_was_added(driver):\n \"\"\"click save and verify the group was added.\"\"\"\n assert wait_on_element(driver, 7, xpaths.button.save, 'clickable')\n driver.find_element_by_xpath(xpaths.button.save).click()\n assert wait_on_element_disappear(driver, 20, xpaths.progress.progressbar)\n assert wait_on_element(driver, 10, xpaths.groups.title)\n assert wait_on_element(driver, 10, xpaths.groups.qetest_Name)\n",
"step-ids": [
3,
4,
5,
7,
8
]
}
from flask import url_for
from bs4 import BeautifulSoup
from unittest.mock import ANY
import app
from app.notify_client.models import InvitedUser
from tests.conftest import sample_invite as create_sample_invite
from tests.conftest import mock_check_invite_token as mock_check_token_invite
def test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(
client,
service_one,
api_user_active,
sample_invite,
mock_get_service,
mock_check_invite_token,
mock_get_user_by_email,
mock_get_users_by_service,
mock_accept_invite,
mock_add_user_to_service,
):
expected_service = service_one['id']
expected_redirect_location = 'http://localhost/services/{}/dashboard'.format(expected_service)
expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
assert mock_accept_invite.call_count == 1
mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_existing_user_with_no_permissions_accept_invite(
client,
mocker,
service_one,
api_user_active,
sample_invite,
mock_check_invite_token,
mock_get_user_by_email,
mock_get_users_by_service,
mock_add_user_to_service,
mock_get_service,
):
expected_service = service_one['id']
sample_invite['permissions'] = ''
expected_permissions = []
mocker.patch('app.invite_api_client.accept_invite', return_value=sample_invite)
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)
assert response.status_code == 302
def test_if_existing_user_accepts_twice_they_redirect_to_sign_in(
client,
mocker,
sample_invite,
mock_get_service,
):
sample_invite['status'] = 'accepted'
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (
page.h1.string,
page.select('main p')[0].text.strip(),
) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.',
)
def test_existing_user_of_service_get_redirected_to_signin(
client,
mocker,
api_user_active,
sample_invite,
mock_get_service,
mock_get_user_by_email,
mock_accept_invite,
):
sample_invite['email_address'] = api_user_active.email_address
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (
page.h1.string,
page.select('main p')[0].text.strip(),
) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.',
)
assert mock_accept_invite.call_count == 1
def test_existing_signed_out_user_accept_invite_redirects_to_sign_in(
client,
service_one,
api_user_active,
sample_invite,
mock_check_invite_token,
mock_get_user_by_email,
mock_get_users_by_service,
mock_add_user_to_service,
mock_accept_invite,
mock_get_service,
):
expected_service = service_one['id']
expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)
assert mock_accept_invite.call_count == 1
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (
page.h1.string,
page.select('main p')[0].text.strip(),
) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.',
)
def test_new_user_accept_invite_calls_api_and_redirects_to_registration(
client,
service_one,
mock_check_invite_token,
mock_dont_get_user_by_email,
mock_add_user_to_service,
mock_get_users_by_service,
mock_get_service,
):
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_new_user_accept_invite_calls_api_and_views_registration_page(
client,
service_one,
mock_check_invite_token,
mock_dont_get_user_by_email,
mock_add_user_to_service,
mock_get_users_by_service,
mock_get_service,
):
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == 'Create an account'
email_in_page = page.find('main').find('p')
assert email_in_page.text.strip() == 'Your account will be created with this email: [email protected]' # noqa
form = page.find('form')
name = form.find('input', id='name')
password = form.find('input', id='password')
service = form.find('input', type='hidden', id='service')
email = form.find('input', type='hidden', id='email_address')
assert email
assert email.attrs['value'] == '[email protected]'
assert name
assert password
assert service
assert service.attrs['value'] == service_one['id']
def test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(
client,
service_one,
mocker,
mock_get_user,
mock_get_service,
):
cancelled_invitation = create_sample_invite(mocker, service_one, status='cancelled')
mock_check_token_invite(mocker, cancelled_invitation)
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == 'The invitation you were sent has been cancelled'
def test_new_user_accept_invite_completes_new_registration_redirects_to_verify(
client,
service_one,
sample_invite,
api_user_active,
mock_check_invite_token,
mock_dont_get_user_by_email,
mock_is_email_unique,
mock_register_user,
mock_send_verify_code,
mock_accept_invite,
mock_get_users_by_service,
mock_add_user_to_service,
mock_get_service,
):
expected_service = service_one['id']
expected_email = sample_invite['email_address']
expected_from_user = service_one['users'][0]
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
with client.session_transaction() as session:
assert response.status_code == 302
assert response.location == expected_redirect_location
invited_user = session.get('invited_user')
assert invited_user
assert expected_service == invited_user['service']
assert expected_email == invited_user['email_address']
assert expected_from_user == invited_user['from_user']
data = {'service': invited_user['service'],
'email_address': invited_user['email_address'],
'from_user': invited_user['from_user'],
'password': 'longpassword',
'mobile_number': '+447890123456',
'name': 'Invited User'
}
expected_redirect_location = 'http://localhost/verify'
response = client.post(url_for('main.register_from_invite'), data=data)
assert response.status_code == 302
assert response.location == expected_redirect_location
mock_send_verify_code.assert_called_once_with(ANY, 'sms', data['mobile_number'])
mock_register_user.assert_called_with(data['name'],
data['email_address'],
data['mobile_number'],
data['password'])
assert mock_accept_invite.call_count == 1
def test_signed_in_existing_user_cannot_use_anothers_invite(
logged_in_client,
mocker,
api_user_active,
sample_invite,
mock_get_user,
mock_accept_invite,
mock_get_service,
):
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])
response = logged_in_client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 403
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == '403'
flash_banners = page.find_all('div', class_='banner-dangerous')
assert len(flash_banners) == 1
banner_contents = flash_banners[0].text.strip()
assert "You’re signed in as [email protected]." in banner_contents
assert "This invite is for another email address." in banner_contents
assert "Sign out and click the link again to accept this invite." in banner_contents
assert mock_accept_invite.call_count == 0
def test_new_invited_user_verifies_and_added_to_service(
client,
service_one,
sample_invite,
api_user_active,
mock_check_invite_token,
mock_dont_get_user_by_email,
mock_is_email_unique,
mock_register_user,
mock_send_verify_code,
mock_check_verify_code,
mock_get_user,
mock_update_user,
mock_add_user_to_service,
mock_accept_invite,
mock_get_service,
mock_get_service_templates,
mock_get_template_statistics,
mock_get_jobs,
mock_has_permissions,
mock_get_users_by_service,
mock_get_detailed_service,
mock_get_usage,
):
# visit accept token page
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
data = {'service': sample_invite['service'],
'email_address': sample_invite['email_address'],
'from_user': sample_invite['from_user'],
'password': 'longpassword',
'mobile_number': '+447890123456',
'name': 'Invited User'
}
# get redirected to register from invite
response = client.post(url_for('main.register_from_invite'), data=data)
# that sends user on to verify
response = client.post(url_for('main.verify'), data={'sms_code': '12345'}, follow_redirects=True)
    # when they post the code back, the user should be added to the
    # service and sent on to the dashboard
expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']
with client.session_transaction() as session:
new_user_id = session['user_id']
mock_add_user_to_service.assert_called_with(data['service'], new_user_id, expected_permissions)
mock_accept_invite.assert_called_with(data['service'], sample_invite['id'])
mock_check_verify_code.assert_called_once_with(new_user_id, '12345', 'sms')
assert service_one['id'] == session['service_id']
raw_html = response.data.decode('utf-8')
page = BeautifulSoup(raw_html, 'html.parser')
assert page.find('h1').text == 'Dashboard'
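# ---------------------------------------------------------------------------
# Note: the fixtures used above (sample_invite, mock_check_invite_token,
# mock_accept_invite, mock_get_service, ...) live in tests/conftest.py, which
# is not part of this file. The sketch below is a hypothetical illustration of
# the two helpers imported at the top (sample_invite as create_sample_invite
# and mock_check_invite_token as mock_check_token_invite), inferred from how
# the tests call them; the field names and values are assumptions, not the
# project's actual conftest.
# ---------------------------------------------------------------------------
import uuid

from app.notify_client.models import InvitedUser


def sample_invite(mocker, service, status='pending'):
    """Build an invite dict shaped like the ones the tests unpack into InvitedUser.

    `mocker` is accepted only to mirror the call signature used above; it is
    unused in this sketch.
    """
    return {
        'id': str(uuid.uuid4()),
        'service': service['id'],
        'from_user': service['users'][0],
        # mirrors the redacted address the assertions above compare against
        'email_address': '[email protected]',
        # assumed to be a comma-separated string (one test overrides it with '')
        'permissions': 'send_messages,manage_service,manage_api_keys',
        'status': status,
    }


def mock_check_invite_token(mocker, invite):
    """Patch the invite API client so check_token returns the given invite."""
    return mocker.patch(
        'app.invite_api_client.check_token',
        return_value=InvitedUser(**invite))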
{
"blob_id": "0baa133bd9eb8a162a82b23ba4d26cdd34f701c4",
"index": 1507,
"step-1": "<mask token>\n\n\ndef test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(\n client, service_one, api_user_active, sample_invite, mock_get_service,\n mock_check_invite_token, mock_get_user_by_email,\n mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):\n expected_service = service_one['id']\n expected_redirect_location = ('http://localhost/services/{}/dashboard'.\n format(expected_service))\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('[email protected]')\n assert mock_accept_invite.call_count == 1\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_existing_user_with_no_permissions_accept_invite(client, mocker,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_get_service):\n expected_service = service_one['id']\n sample_invite['permissions'] = ''\n expected_permissions = []\n mocker.patch('app.invite_api_client.accept_invite', return_value=\n sample_invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n\n\ndef test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,\n mocker, sample_invite, mock_get_service):\n sample_invite['status'] = 'accepted'\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_existing_user_of_service_get_redirected_to_signin(client, mocker,\n api_user_active, sample_invite, mock_get_service,\n mock_get_user_by_email, mock_accept_invite):\n sample_invite['email_address'] = api_user_active.email_address\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n assert mock_accept_invite.call_count == 1\n\n\ndef test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_accept_invite, mock_get_service):\n expected_service = service_one['id']\n expected_permissions = 
['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('[email protected]')\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert mock_accept_invite.call_count == 1\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('[email protected]')\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_new_user_accept_invite_calls_api_and_views_registration_page(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('[email protected]')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'Create an account'\n email_in_page = page.find('main').find('p')\n assert email_in_page.text.strip(\n ) == 'Your account will be created with this email: [email protected]'\n form = page.find('form')\n name = form.find('input', id='name')\n password = form.find('input', id='password')\n service = form.find('input', type='hidden', id='service')\n email = form.find('input', type='hidden', id='email_address')\n assert email\n assert email.attrs['value'] == '[email protected]'\n assert name\n assert password\n assert service\n assert service.attrs['value'] == service_one['id']\n\n\n<mask token>\n\n\ndef test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,\n mocker, api_user_active, sample_invite, mock_get_user,\n mock_accept_invite, mock_get_service):\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = logged_in_client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 403\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == '403'\n flash_banners = page.find_all('div', class_='banner-dangerous')\n assert len(flash_banners) == 1\n banner_contents = flash_banners[0].text.strip()\n assert 'You’re signed in as [email protected].' in banner_contents\n assert 'This invite is for another email address.' 
in banner_contents\n assert 'Sign out and click the link again to accept this invite.' in banner_contents\n assert mock_accept_invite.call_count == 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(\n client, service_one, api_user_active, sample_invite, mock_get_service,\n mock_check_invite_token, mock_get_user_by_email,\n mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):\n expected_service = service_one['id']\n expected_redirect_location = ('http://localhost/services/{}/dashboard'.\n format(expected_service))\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('[email protected]')\n assert mock_accept_invite.call_count == 1\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_existing_user_with_no_permissions_accept_invite(client, mocker,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_get_service):\n expected_service = service_one['id']\n sample_invite['permissions'] = ''\n expected_permissions = []\n mocker.patch('app.invite_api_client.accept_invite', return_value=\n sample_invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n\n\ndef test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,\n mocker, sample_invite, mock_get_service):\n sample_invite['status'] = 'accepted'\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_existing_user_of_service_get_redirected_to_signin(client, mocker,\n api_user_active, sample_invite, mock_get_service,\n mock_get_user_by_email, mock_accept_invite):\n sample_invite['email_address'] = api_user_active.email_address\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n assert mock_accept_invite.call_count == 1\n\n\ndef test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_accept_invite, mock_get_service):\n expected_service = service_one['id']\n expected_permissions = 
['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('[email protected]')\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert mock_accept_invite.call_count == 1\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('[email protected]')\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_new_user_accept_invite_calls_api_and_views_registration_page(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('[email protected]')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'Create an account'\n email_in_page = page.find('main').find('p')\n assert email_in_page.text.strip(\n ) == 'Your account will be created with this email: [email protected]'\n form = page.find('form')\n name = form.find('input', id='name')\n password = form.find('input', id='password')\n service = form.find('input', type='hidden', id='service')\n email = form.find('input', type='hidden', id='email_address')\n assert email\n assert email.attrs['value'] == '[email protected]'\n assert name\n assert password\n assert service\n assert service.attrs['value'] == service_one['id']\n\n\ndef test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(\n client, service_one, mocker, mock_get_user, mock_get_service):\n cancelled_invitation = create_sample_invite(mocker, service_one, status\n ='cancelled')\n mock_check_token_invite(mocker, cancelled_invitation)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip(\n ) == 'The invitation you were sent has been cancelled'\n\n\n<mask token>\n\n\ndef test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,\n mocker, api_user_active, sample_invite, mock_get_user,\n mock_accept_invite, mock_get_service):\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n 
mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = logged_in_client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 403\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == '403'\n flash_banners = page.find_all('div', class_='banner-dangerous')\n assert len(flash_banners) == 1\n banner_contents = flash_banners[0].text.strip()\n assert 'You’re signed in as [email protected].' in banner_contents\n assert 'This invite is for another email address.' in banner_contents\n assert 'Sign out and click the link again to accept this invite.' in banner_contents\n assert mock_accept_invite.call_count == 0\n\n\ndef test_new_invited_user_verifies_and_added_to_service(client, service_one,\n sample_invite, api_user_active, mock_check_invite_token,\n mock_dont_get_user_by_email, mock_is_email_unique, mock_register_user,\n mock_send_verify_code, mock_check_verify_code, mock_get_user,\n mock_update_user, mock_add_user_to_service, mock_accept_invite,\n mock_get_service, mock_get_service_templates,\n mock_get_template_statistics, mock_get_jobs, mock_has_permissions,\n mock_get_users_by_service, mock_get_detailed_service, mock_get_usage):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n data = {'service': sample_invite['service'], 'email_address':\n sample_invite['email_address'], 'from_user': sample_invite[\n 'from_user'], 'password': 'longpassword', 'mobile_number':\n '+447890123456', 'name': 'Invited User'}\n response = client.post(url_for('main.register_from_invite'), data=data)\n response = client.post(url_for('main.verify'), data={'sms_code':\n '12345'}, follow_redirects=True)\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n with client.session_transaction() as session:\n new_user_id = session['user_id']\n mock_add_user_to_service.assert_called_with(data['service'],\n new_user_id, expected_permissions)\n mock_accept_invite.assert_called_with(data['service'],\n sample_invite['id'])\n mock_check_verify_code.assert_called_once_with(new_user_id, '12345',\n 'sms')\n assert service_one['id'] == session['service_id']\n raw_html = response.data.decode('utf-8')\n page = BeautifulSoup(raw_html, 'html.parser')\n assert page.find('h1').text == 'Dashboard'\n",
"step-3": "<mask token>\n\n\ndef test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(\n client, service_one, api_user_active, sample_invite, mock_get_service,\n mock_check_invite_token, mock_get_user_by_email,\n mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):\n expected_service = service_one['id']\n expected_redirect_location = ('http://localhost/services/{}/dashboard'.\n format(expected_service))\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('[email protected]')\n assert mock_accept_invite.call_count == 1\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_existing_user_with_no_permissions_accept_invite(client, mocker,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_get_service):\n expected_service = service_one['id']\n sample_invite['permissions'] = ''\n expected_permissions = []\n mocker.patch('app.invite_api_client.accept_invite', return_value=\n sample_invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n\n\ndef test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,\n mocker, sample_invite, mock_get_service):\n sample_invite['status'] = 'accepted'\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_existing_user_of_service_get_redirected_to_signin(client, mocker,\n api_user_active, sample_invite, mock_get_service,\n mock_get_user_by_email, mock_accept_invite):\n sample_invite['email_address'] = api_user_active.email_address\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n assert mock_accept_invite.call_count == 1\n\n\ndef test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_accept_invite, mock_get_service):\n expected_service = service_one['id']\n expected_permissions = 
['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('[email protected]')\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert mock_accept_invite.call_count == 1\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('[email protected]')\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_new_user_accept_invite_calls_api_and_views_registration_page(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('[email protected]')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'Create an account'\n email_in_page = page.find('main').find('p')\n assert email_in_page.text.strip(\n ) == 'Your account will be created with this email: [email protected]'\n form = page.find('form')\n name = form.find('input', id='name')\n password = form.find('input', id='password')\n service = form.find('input', type='hidden', id='service')\n email = form.find('input', type='hidden', id='email_address')\n assert email\n assert email.attrs['value'] == '[email protected]'\n assert name\n assert password\n assert service\n assert service.attrs['value'] == service_one['id']\n\n\ndef test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(\n client, service_one, mocker, mock_get_user, mock_get_service):\n cancelled_invitation = create_sample_invite(mocker, service_one, status\n ='cancelled')\n mock_check_token_invite(mocker, cancelled_invitation)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip(\n ) == 'The invitation you were sent has been cancelled'\n\n\ndef test_new_user_accept_invite_completes_new_registration_redirects_to_verify(\n client, service_one, sample_invite, api_user_active,\n mock_check_invite_token, mock_dont_get_user_by_email,\n mock_is_email_unique, mock_register_user, mock_send_verify_code,\n mock_accept_invite, mock_get_users_by_service, mock_add_user_to_service,\n 
mock_get_service):\n expected_service = service_one['id']\n expected_email = sample_invite['email_address']\n expected_from_user = service_one['users'][0]\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n with client.session_transaction() as session:\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n invited_user = session.get('invited_user')\n assert invited_user\n assert expected_service == invited_user['service']\n assert expected_email == invited_user['email_address']\n assert expected_from_user == invited_user['from_user']\n data = {'service': invited_user['service'], 'email_address':\n invited_user['email_address'], 'from_user': invited_user[\n 'from_user'], 'password': 'longpassword', 'mobile_number':\n '+447890123456', 'name': 'Invited User'}\n expected_redirect_location = 'http://localhost/verify'\n response = client.post(url_for('main.register_from_invite'), data=data)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n mock_send_verify_code.assert_called_once_with(ANY, 'sms', data[\n 'mobile_number'])\n mock_register_user.assert_called_with(data['name'], data[\n 'email_address'], data['mobile_number'], data['password'])\n assert mock_accept_invite.call_count == 1\n\n\ndef test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,\n mocker, api_user_active, sample_invite, mock_get_user,\n mock_accept_invite, mock_get_service):\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = logged_in_client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 403\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == '403'\n flash_banners = page.find_all('div', class_='banner-dangerous')\n assert len(flash_banners) == 1\n banner_contents = flash_banners[0].text.strip()\n assert 'You’re signed in as [email protected].' in banner_contents\n assert 'This invite is for another email address.' in banner_contents\n assert 'Sign out and click the link again to accept this invite.' 
in banner_contents\n assert mock_accept_invite.call_count == 0\n\n\ndef test_new_invited_user_verifies_and_added_to_service(client, service_one,\n sample_invite, api_user_active, mock_check_invite_token,\n mock_dont_get_user_by_email, mock_is_email_unique, mock_register_user,\n mock_send_verify_code, mock_check_verify_code, mock_get_user,\n mock_update_user, mock_add_user_to_service, mock_accept_invite,\n mock_get_service, mock_get_service_templates,\n mock_get_template_statistics, mock_get_jobs, mock_has_permissions,\n mock_get_users_by_service, mock_get_detailed_service, mock_get_usage):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n data = {'service': sample_invite['service'], 'email_address':\n sample_invite['email_address'], 'from_user': sample_invite[\n 'from_user'], 'password': 'longpassword', 'mobile_number':\n '+447890123456', 'name': 'Invited User'}\n response = client.post(url_for('main.register_from_invite'), data=data)\n response = client.post(url_for('main.verify'), data={'sms_code':\n '12345'}, follow_redirects=True)\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n with client.session_transaction() as session:\n new_user_id = session['user_id']\n mock_add_user_to_service.assert_called_with(data['service'],\n new_user_id, expected_permissions)\n mock_accept_invite.assert_called_with(data['service'],\n sample_invite['id'])\n mock_check_verify_code.assert_called_once_with(new_user_id, '12345',\n 'sms')\n assert service_one['id'] == session['service_id']\n raw_html = response.data.decode('utf-8')\n page = BeautifulSoup(raw_html, 'html.parser')\n assert page.find('h1').text == 'Dashboard'\n",
"step-4": "from flask import url_for\nfrom bs4 import BeautifulSoup\nfrom unittest.mock import ANY\nimport app\nfrom app.notify_client.models import InvitedUser\nfrom tests.conftest import sample_invite as create_sample_invite\nfrom tests.conftest import mock_check_invite_token as mock_check_token_invite\n\n\ndef test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(\n client, service_one, api_user_active, sample_invite, mock_get_service,\n mock_check_invite_token, mock_get_user_by_email,\n mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):\n expected_service = service_one['id']\n expected_redirect_location = ('http://localhost/services/{}/dashboard'.\n format(expected_service))\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('[email protected]')\n assert mock_accept_invite.call_count == 1\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_existing_user_with_no_permissions_accept_invite(client, mocker,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_get_service):\n expected_service = service_one['id']\n sample_invite['permissions'] = ''\n expected_permissions = []\n mocker.patch('app.invite_api_client.accept_invite', return_value=\n sample_invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n\n\ndef test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,\n mocker, sample_invite, mock_get_service):\n sample_invite['status'] = 'accepted'\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_existing_user_of_service_get_redirected_to_signin(client, mocker,\n api_user_active, sample_invite, mock_get_service,\n mock_get_user_by_email, mock_accept_invite):\n sample_invite['email_address'] = api_user_active.email_address\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n assert mock_accept_invite.call_count == 1\n\n\ndef 
test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_accept_invite, mock_get_service):\n expected_service = service_one['id']\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('[email protected]')\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert mock_accept_invite.call_count == 1\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('[email protected]')\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_new_user_accept_invite_calls_api_and_views_registration_page(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('[email protected]')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'Create an account'\n email_in_page = page.find('main').find('p')\n assert email_in_page.text.strip(\n ) == 'Your account will be created with this email: [email protected]'\n form = page.find('form')\n name = form.find('input', id='name')\n password = form.find('input', id='password')\n service = form.find('input', type='hidden', id='service')\n email = form.find('input', type='hidden', id='email_address')\n assert email\n assert email.attrs['value'] == '[email protected]'\n assert name\n assert password\n assert service\n assert service.attrs['value'] == service_one['id']\n\n\ndef test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(\n client, service_one, mocker, mock_get_user, mock_get_service):\n cancelled_invitation = create_sample_invite(mocker, service_one, status\n ='cancelled')\n mock_check_token_invite(mocker, cancelled_invitation)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip(\n ) == 'The invitation you were sent has been cancelled'\n\n\ndef 
test_new_user_accept_invite_completes_new_registration_redirects_to_verify(\n client, service_one, sample_invite, api_user_active,\n mock_check_invite_token, mock_dont_get_user_by_email,\n mock_is_email_unique, mock_register_user, mock_send_verify_code,\n mock_accept_invite, mock_get_users_by_service, mock_add_user_to_service,\n mock_get_service):\n expected_service = service_one['id']\n expected_email = sample_invite['email_address']\n expected_from_user = service_one['users'][0]\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n with client.session_transaction() as session:\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n invited_user = session.get('invited_user')\n assert invited_user\n assert expected_service == invited_user['service']\n assert expected_email == invited_user['email_address']\n assert expected_from_user == invited_user['from_user']\n data = {'service': invited_user['service'], 'email_address':\n invited_user['email_address'], 'from_user': invited_user[\n 'from_user'], 'password': 'longpassword', 'mobile_number':\n '+447890123456', 'name': 'Invited User'}\n expected_redirect_location = 'http://localhost/verify'\n response = client.post(url_for('main.register_from_invite'), data=data)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n mock_send_verify_code.assert_called_once_with(ANY, 'sms', data[\n 'mobile_number'])\n mock_register_user.assert_called_with(data['name'], data[\n 'email_address'], data['mobile_number'], data['password'])\n assert mock_accept_invite.call_count == 1\n\n\ndef test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,\n mocker, api_user_active, sample_invite, mock_get_user,\n mock_accept_invite, mock_get_service):\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = logged_in_client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 403\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == '403'\n flash_banners = page.find_all('div', class_='banner-dangerous')\n assert len(flash_banners) == 1\n banner_contents = flash_banners[0].text.strip()\n assert 'You’re signed in as [email protected].' in banner_contents\n assert 'This invite is for another email address.' in banner_contents\n assert 'Sign out and click the link again to accept this invite.' 
in banner_contents\n assert mock_accept_invite.call_count == 0\n\n\ndef test_new_invited_user_verifies_and_added_to_service(client, service_one,\n sample_invite, api_user_active, mock_check_invite_token,\n mock_dont_get_user_by_email, mock_is_email_unique, mock_register_user,\n mock_send_verify_code, mock_check_verify_code, mock_get_user,\n mock_update_user, mock_add_user_to_service, mock_accept_invite,\n mock_get_service, mock_get_service_templates,\n mock_get_template_statistics, mock_get_jobs, mock_has_permissions,\n mock_get_users_by_service, mock_get_detailed_service, mock_get_usage):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n data = {'service': sample_invite['service'], 'email_address':\n sample_invite['email_address'], 'from_user': sample_invite[\n 'from_user'], 'password': 'longpassword', 'mobile_number':\n '+447890123456', 'name': 'Invited User'}\n response = client.post(url_for('main.register_from_invite'), data=data)\n response = client.post(url_for('main.verify'), data={'sms_code':\n '12345'}, follow_redirects=True)\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n with client.session_transaction() as session:\n new_user_id = session['user_id']\n mock_add_user_to_service.assert_called_with(data['service'],\n new_user_id, expected_permissions)\n mock_accept_invite.assert_called_with(data['service'],\n sample_invite['id'])\n mock_check_verify_code.assert_called_once_with(new_user_id, '12345',\n 'sms')\n assert service_one['id'] == session['service_id']\n raw_html = response.data.decode('utf-8')\n page = BeautifulSoup(raw_html, 'html.parser')\n assert page.find('h1').text == 'Dashboard'\n",
"step-5": "from flask import url_for\nfrom bs4 import BeautifulSoup\nfrom unittest.mock import ANY\n\nimport app\n\nfrom app.notify_client.models import InvitedUser\nfrom tests.conftest import sample_invite as create_sample_invite\nfrom tests.conftest import mock_check_invite_token as mock_check_token_invite\n\n\ndef test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(\n client,\n service_one,\n api_user_active,\n sample_invite,\n mock_get_service,\n mock_check_invite_token,\n mock_get_user_by_email,\n mock_get_users_by_service,\n mock_accept_invite,\n mock_add_user_to_service,\n):\n\n expected_service = service_one['id']\n expected_redirect_location = 'http://localhost/services/{}/dashboard'.format(expected_service)\n expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('[email protected]')\n assert mock_accept_invite.call_count == 1\n mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)\n\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_existing_user_with_no_permissions_accept_invite(\n client,\n mocker,\n service_one,\n api_user_active,\n sample_invite,\n mock_check_invite_token,\n mock_get_user_by_email,\n mock_get_users_by_service,\n mock_add_user_to_service,\n mock_get_service,\n):\n\n expected_service = service_one['id']\n sample_invite['permissions'] = ''\n expected_permissions = []\n mocker.patch('app.invite_api_client.accept_invite', return_value=sample_invite)\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)\n\n assert response.status_code == 302\n\n\ndef test_if_existing_user_accepts_twice_they_redirect_to_sign_in(\n client,\n mocker,\n sample_invite,\n mock_get_service,\n):\n\n sample_invite['status'] = 'accepted'\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (\n page.h1.string,\n page.select('main p')[0].text.strip(),\n ) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.',\n )\n\n\ndef test_existing_user_of_service_get_redirected_to_signin(\n client,\n mocker,\n api_user_active,\n sample_invite,\n mock_get_service,\n mock_get_user_by_email,\n mock_accept_invite,\n):\n sample_invite['email_address'] = api_user_active.email_address\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (\n page.h1.string,\n page.select('main p')[0].text.strip(),\n ) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.',\n )\n assert 
mock_accept_invite.call_count == 1\n\n\ndef test_existing_signed_out_user_accept_invite_redirects_to_sign_in(\n client,\n service_one,\n api_user_active,\n sample_invite,\n mock_check_invite_token,\n mock_get_user_by_email,\n mock_get_users_by_service,\n mock_add_user_to_service,\n mock_accept_invite,\n mock_get_service,\n):\n\n expected_service = service_one['id']\n expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)\n\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('[email protected]')\n mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)\n assert mock_accept_invite.call_count == 1\n\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (\n page.h1.string,\n page.select('main p')[0].text.strip(),\n ) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.',\n )\n\n\ndef test_new_user_accept_invite_calls_api_and_redirects_to_registration(\n client,\n service_one,\n mock_check_invite_token,\n mock_dont_get_user_by_email,\n mock_add_user_to_service,\n mock_get_users_by_service,\n mock_get_service,\n):\n\n expected_redirect_location = 'http://localhost/register-from-invite'\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('[email protected]')\n\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_new_user_accept_invite_calls_api_and_views_registration_page(\n client,\n service_one,\n mock_check_invite_token,\n mock_dont_get_user_by_email,\n mock_add_user_to_service,\n mock_get_users_by_service,\n mock_get_service,\n):\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)\n\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('[email protected]')\n\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'Create an account'\n\n email_in_page = page.find('main').find('p')\n assert email_in_page.text.strip() == 'Your account will be created with this email: [email protected]' # noqa\n\n form = page.find('form')\n name = form.find('input', id='name')\n password = form.find('input', id='password')\n service = form.find('input', type='hidden', id='service')\n email = form.find('input', type='hidden', id='email_address')\n\n assert email\n assert email.attrs['value'] == '[email protected]'\n assert name\n assert password\n assert service\n assert service.attrs['value'] == service_one['id']\n\n\ndef test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(\n client,\n service_one,\n mocker,\n mock_get_user,\n mock_get_service,\n):\n cancelled_invitation = create_sample_invite(mocker, service_one, status='cancelled')\n mock_check_token_invite(mocker, cancelled_invitation)\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n\n app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 
'html.parser')\n assert page.h1.string.strip() == 'The invitation you were sent has been cancelled'\n\n\ndef test_new_user_accept_invite_completes_new_registration_redirects_to_verify(\n client,\n service_one,\n sample_invite,\n api_user_active,\n mock_check_invite_token,\n mock_dont_get_user_by_email,\n mock_is_email_unique,\n mock_register_user,\n mock_send_verify_code,\n mock_accept_invite,\n mock_get_users_by_service,\n mock_add_user_to_service,\n mock_get_service,\n):\n\n expected_service = service_one['id']\n expected_email = sample_invite['email_address']\n expected_from_user = service_one['users'][0]\n expected_redirect_location = 'http://localhost/register-from-invite'\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n with client.session_transaction() as session:\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n invited_user = session.get('invited_user')\n assert invited_user\n assert expected_service == invited_user['service']\n assert expected_email == invited_user['email_address']\n assert expected_from_user == invited_user['from_user']\n\n data = {'service': invited_user['service'],\n 'email_address': invited_user['email_address'],\n 'from_user': invited_user['from_user'],\n 'password': 'longpassword',\n 'mobile_number': '+447890123456',\n 'name': 'Invited User'\n }\n\n expected_redirect_location = 'http://localhost/verify'\n response = client.post(url_for('main.register_from_invite'), data=data)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n mock_send_verify_code.assert_called_once_with(ANY, 'sms', data['mobile_number'])\n\n mock_register_user.assert_called_with(data['name'],\n data['email_address'],\n data['mobile_number'],\n data['password'])\n\n assert mock_accept_invite.call_count == 1\n\n\ndef test_signed_in_existing_user_cannot_use_anothers_invite(\n logged_in_client,\n mocker,\n api_user_active,\n sample_invite,\n mock_get_user,\n mock_accept_invite,\n mock_get_service,\n):\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])\n\n response = logged_in_client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 403\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == '403'\n flash_banners = page.find_all('div', class_='banner-dangerous')\n assert len(flash_banners) == 1\n banner_contents = flash_banners[0].text.strip()\n assert \"You’re signed in as [email protected].\" in banner_contents\n assert \"This invite is for another email address.\" in banner_contents\n assert \"Sign out and click the link again to accept this invite.\" in banner_contents\n assert mock_accept_invite.call_count == 0\n\n\ndef test_new_invited_user_verifies_and_added_to_service(\n client,\n service_one,\n sample_invite,\n api_user_active,\n mock_check_invite_token,\n mock_dont_get_user_by_email,\n mock_is_email_unique,\n mock_register_user,\n mock_send_verify_code,\n mock_check_verify_code,\n mock_get_user,\n mock_update_user,\n mock_add_user_to_service,\n mock_accept_invite,\n mock_get_service,\n mock_get_service_templates,\n mock_get_template_statistics,\n mock_get_jobs,\n mock_has_permissions,\n mock_get_users_by_service,\n mock_get_detailed_service,\n mock_get_usage,\n):\n\n # visit accept token 
page\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n data = {'service': sample_invite['service'],\n 'email_address': sample_invite['email_address'],\n 'from_user': sample_invite['from_user'],\n 'password': 'longpassword',\n 'mobile_number': '+447890123456',\n 'name': 'Invited User'\n }\n\n # get redirected to register from invite\n response = client.post(url_for('main.register_from_invite'), data=data)\n\n # that sends user on to verify\n response = client.post(url_for('main.verify'), data={'sms_code': '12345'}, follow_redirects=True)\n\n # when they post codes back to admin user should be added to\n # service and sent on to dash board\n expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']\n\n with client.session_transaction() as session:\n new_user_id = session['user_id']\n mock_add_user_to_service.assert_called_with(data['service'], new_user_id, expected_permissions)\n mock_accept_invite.assert_called_with(data['service'], sample_invite['id'])\n mock_check_verify_code.assert_called_once_with(new_user_id, '12345', 'sms')\n assert service_one['id'] == session['service_id']\n\n raw_html = response.data.decode('utf-8')\n page = BeautifulSoup(raw_html, 'html.parser')\n assert page.find('h1').text == 'Dashboard'\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
def search_way(adjacency_list, points):
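    # Greedy cheapest-first search: repeatedly pick the unvisited point with
    # the lowest cost, relax the prices of the edges leaving it, and record
    # the chosen edge for each destination in `way`.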
use = [False for i in range(points.__len__())]
way = [0 for i in range(points.__len__())]
cost = [100000 for i in range(points.__len__())]
cost[0] = 0
checkVar = 0
test = True
while test:
min = 100000
for i in range(points.__len__()):
if (cost[i] < min) and (not use[i]):
checkVar = i
min = cost[i]
for i in range(adjacency_list[checkVar + 1].__len__()):
bestStation = adjacency_list[checkVar + 1][i].arrivalPointId - 1
bestValue = adjacency_list[checkVar + 1][i].price
            # Relax from the currently selected node (checkVar), matching the
            # update on the next line; the original compared against cost[i].
            if cost[checkVar] + bestValue < cost[bestStation]:
                way[bestStation] = adjacency_list[checkVar + 1][i]
                cost[bestStation] = cost[checkVar] + bestValue
use[checkVar] = True
test = False
        # Keep iterating while any point is still unvisited.
        for i in range(points.__len__()):
            if not use[i]:
                test = True
print(cost)
print(points)
    return way
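# A minimal, hedged usage sketch: the _SketchEdge class and the sample graph
# below are illustrative assumptions about the expected inputs (an adjacency
# list keyed from 1 whose edge objects expose `arrivalPointId` and `price`).
class _SketchEdge:
    def __init__(self, arrivalPointId, price):
        self.arrivalPointId = arrivalPointId
        self.price = price


if __name__ == "__main__":
    sketch_points = [1, 2, 3]
    sketch_adjacency = {
        1: [_SketchEdge(2, 5), _SketchEdge(3, 9)],
        2: [_SketchEdge(3, 2)],
        3: [],
    }
    # Expected cheapest costs from point 1: [0, 5, 7]
    print(search_way(sketch_adjacency, sketch_points))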
|
normal
|
{
"blob_id": "1e4d21998b9f8915167166e5965b0c8c87fcf61d",
"index": 3060,
"step-1": "<mask token>\n",
"step-2": "def search_way(adjacency_list, points):\n use = [(False) for i in range(points.__len__())]\n way = [(0) for i in range(points.__len__())]\n cost = [(100000) for i in range(points.__len__())]\n cost[0] = 0\n checkVar = 0\n test = True\n while test:\n min = 100000\n for i in range(points.__len__()):\n if cost[i] < min and not use[i]:\n checkVar = i\n min = cost[i]\n for i in range(adjacency_list[checkVar + 1].__len__()):\n bestStation = adjacency_list[checkVar + 1][i].arrivalPointId - 1\n bestValue = adjacency_list[checkVar + 1][i].price\n if cost[i] + bestValue < cost[bestStation]:\n way[bestStation] = adjacency_list[checkVar + 1][i]\n cost[bestStation] = cost[checkVar] + bestValue\n use[checkVar] = True\n test = False\n for i in range(adjacency_list[checkVar + 1].__len__()):\n if use[i] == False:\n test = True\n print(cost)\n print(points)\n return way\n",
"step-3": "def search_way(adjacency_list, points):\n use = [False for i in range(points.__len__())]\n way = [0 for i in range(points.__len__())]\n cost = [100000 for i in range(points.__len__())]\n cost[0] = 0\n checkVar = 0\n test = True\n while test:\n min = 100000\n for i in range(points.__len__()):\n if (cost[i] < min) and (not use[i]):\n checkVar = i\n min = cost[i]\n for i in range(adjacency_list[checkVar + 1].__len__()):\n bestStation = adjacency_list[checkVar + 1][i].arrivalPointId - 1\n bestValue = adjacency_list[checkVar + 1][i].price\n if(cost[i] + bestValue < cost[bestStation]):\n way[bestStation] = adjacency_list[checkVar + 1][i]\n cost[bestStation] = cost[checkVar] + bestValue\n use[checkVar] = True\n test = False\n for i in range(adjacency_list[checkVar + 1].__len__()):\n if use[i] == False:\n test = True\n print(cost)\n print(points)\n return way;",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
__author__ = 'Administrator'
# Main logic for scraping proxy IP addresses
from urllib import request
import urllib.parse
import logging
from multiprocessing import pool
from time import sleep
import random
from lxml import etree
def getRandomUserAgnet():
user_agents=[
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S"
]
userAgent=random.choice(user_agents)
return userAgent
def getProxies():
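    # Scrape candidate proxies (as "ip:port" strings) from the xicidaili
    # listing pages.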
proxies=[]
for i in range(1,10):
url="http://www.xicidaili.com/nn/{0}".format(i)
userAgent=getRandomUserAgnet()
headers={"User-Agent":userAgent}
opener=urllib.request.build_opener()
        # addheaders expects a list of (name, value) tuples, not a dict.
        opener.addheaders = list(headers.items())
try:
data=opener.open(url,timeout=5).read()
sleep(3)
        except Exception as e:
            logging.debug(e)
            # Skip this page if the fetch failed; otherwise `data` is stale or undefined.
            continue
selector=etree.HTML(data)
ip_addr=selector.xpath("//tr[@class='odd']/td[2]/text()")
port=selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time=selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time=selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip=ip_addr[j]+":"+port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
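    # Return [currentIp] if the test URL can be fetched through that proxy,
    # otherwise an empty list.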
tmp_proxies=[]
testUrl="http://www.baidu.com"
userAgent=getRandomUserAgnet()
proxy_support=urllib.request.ProxyHandler({"http":currentIp})
opener=urllib.request.build_opener(proxy_support)
opener.addheaders=[("User-Agent",userAgent)]
urllib.request.install_opener(opener)
try:
res=urllib.request.urlopen(testUrl,timeout=5).read()
if len(res)!=0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2,'code'):
logging.debug("unvalid ipaddress"+currentIp+str(er2.code))
if hasattr(er2,"reason"):
logging.debug("reason is the "+currentIp+str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
if __name__=="__main__":
getProxies()
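    # A hedged sketch of how the two helpers could be combined (kept as
    # comments so the original entry point is unchanged; verifying every
    # candidate makes real network requests):
    #
    # working = []
    # for candidate in getProxies():
    #     working.extend(verify_ip(candidate))
    # print(working)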
|
normal
|
{
"blob_id": "911631e96d21bdf22a219007f1bdc04a5e6965dc",
"index": 739,
"step-1": "<mask token>\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\nif __name__ == '__main__':\n getProxies()\n",
"step-3": "__author__ = 'Administrator'\n<mask token>\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\nif __name__ == '__main__':\n getProxies()\n",
"step-4": "__author__ = 'Administrator'\nfrom urllib import request\nimport urllib.parse\nimport logging\nfrom multiprocessing import pool\nfrom time import sleep\nimport random\nfrom lxml import etree\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\nif __name__ == '__main__':\n getProxies()\n",
"step-5": "__author__ = 'Administrator'\n# 抓取IP的主要逻辑\nfrom urllib import request\nimport urllib.parse\nimport logging\nfrom multiprocessing import pool\nfrom time import sleep\nimport random\nfrom lxml import etree\ndef getRandomUserAgnet():\n user_agents=[\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S\"\n ]\n userAgent=random.choice(user_agents)\n return userAgent\ndef getProxies():\n proxies=[]\n for i in range(1,10):\n url=\"http://www.xicidaili.com/nn/{0}\".format(i)\n userAgent=getRandomUserAgnet()\n headers={\"User-Agent\":userAgent}\n opener=urllib.request.build_opener()\n opener.addheaders=[headers]\n try:\n data=opener.open(url,timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector=etree.HTML(data)\n ip_addr=selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port=selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time=selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time=selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip=ip_addr[j]+\":\"+port[j]\n proxies.append(ip)\n return proxies\ndef verify_ip(currentIp):\n tmp_proxies=[]\n testUrl=\"http://www.baidu.com\"\n userAgent=getRandomUserAgnet()\n proxy_support=urllib.request.ProxyHandler({\"http\":currentIp})\n opener=urllib.request.build_opener(proxy_support)\n opener.addheaders=[(\"User-Agent\",userAgent)]\n urllib.request.install_opener(opener)\n try:\n res=urllib.request.urlopen(testUrl,timeout=5).read()\n if len(res)!=0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2,'code'):\n logging.debug(\"unvalid ipaddress\"+currentIp+str(er2.code))\n if hasattr(er2,\"reason\"):\n logging.debug(\"reason is the \"+currentIp+str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\nif __name__==\"__main__\":\n getProxies()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Moving Averages Code
# Load the necessary packages and modules
import pandas as pd
import matplotlib.pyplot as plt
import data.stock as st
# Simple Moving Average
def SMA(data, ndays):
SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')
# SMA = pd.Series(pd.rolling_mean(data['close'], ndays), name='SMA')
data = data.join(SMA)
return data
# Exponentially-weighted Moving Average
def EWMA(data, ndays):
EMA = pd.Series(pd.DataFrame.ewm(data['close'],
span=ndays,
min_periods=ndays - 1).mean(),
name='EWMA')
data = data.join(EMA)
return data
# Retrieve the Nifty data from Yahoo finance:
# XSHE000002_data = st.get_csv_data('000002.XSHE', 'price')
# close = XSHE000002_data['close']
#
# # Compute the 50-day SMA for NIFTY
# n = 50
# SMA_NIFTY = SMA(XSHE000002_data, n)
# SMA_NIFTY = SMA_NIFTY.dropna()
# SMA = SMA_NIFTY['SMA']
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
def get_ewma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
ewma_data = EWMA(stock_data, ndays)
ewma_data = ewma_data.dropna()
return ewma_data['EWMA']
# Compute the 200-day EWMA for NIFTY
# ew = 200
# EWMA_NIFTY = EWMA(XSHE000002_data, ew)
# EWMA_NIFTY = EWMA_NIFTY.dropna()
# EWMA = EWMA_NIFTY['EWMA_200']
# Plotting the NIFTY Price Series chart and Moving Averages below
# plt.figure(figsize=(9, 5))
# plt.plot(XSHE000002_data['close'], lw=1, label='NSE Prices')
# plt.plot(SMA, 'g', lw=1, label='50-day SMA (green)')
# plt.plot(EWMA, 'r', lw=1, label='200-day EWMA (red)')
# plt.legend(loc=2, prop={'size': 11})
# plt.grid(True)
# plt.setp(plt.gca().get_xticklabels(), rotation=30)
# plt.show()
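# A minimal, hedged usage sketch: it assumes the repository's
# data.stock.get_csv_data helper and its CSV files are available locally; the
# stock code and the 50/200-day windows are taken from the commented-out
# examples above.
if __name__ == '__main__':
    sma_50 = get_sma('000002.XSHE', 50)
    ewma_200 = get_ewma('000002.XSHE', 200)
    plt.figure(figsize=(9, 5))
    plt.plot(sma_50, 'g', lw=1, label='50-day SMA')
    plt.plot(ewma_200, 'r', lw=1, label='200-day EWMA')
    plt.legend(loc=2, prop={'size': 11})
    plt.grid(True)
    plt.show()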
|
normal
|
{
"blob_id": "4c9f2b6fd119daa58b7f1dd7153c90df747e62cb",
"index": 1249,
"step-1": "<mask token>\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n data = data.join(SMA)\n return data\n\n\n<mask token>\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n",
"step-3": "<mask token>\n\n\ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n data = data.join(SMA)\n return data\n\n\ndef EWMA(data, ndays):\n EMA = pd.Series(pd.DataFrame.ewm(data['close'], span=ndays, min_periods\n =ndays - 1).mean(), name='EWMA')\n data = data.join(EMA)\n return data\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport data.stock as st\n\n\ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n data = data.join(SMA)\n return data\n\n\ndef EWMA(data, ndays):\n EMA = pd.Series(pd.DataFrame.ewm(data['close'], span=ndays, min_periods\n =ndays - 1).mean(), name='EWMA')\n data = data.join(EMA)\n return data\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n",
"step-5": "# Moving Averages Code\n\n# Load the necessary packages and modules\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport data.stock as st\n\n\n# Simple Moving Average \ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n # SMA = pd.Series(pd.rolling_mean(data['close'], ndays), name='SMA')\n data = data.join(SMA)\n return data\n\n\n# Exponentially-weighted Moving Average\ndef EWMA(data, ndays):\n EMA = pd.Series(pd.DataFrame.ewm(data['close'],\n span=ndays,\n min_periods=ndays - 1).mean(),\n name='EWMA')\n data = data.join(EMA)\n return data\n\n\n# Retrieve the Nifty data from Yahoo finance:\n# XSHE000002_data = st.get_csv_data('000002.XSHE', 'price')\n# close = XSHE000002_data['close']\n#\n# # Compute the 50-day SMA for NIFTY\n# n = 50\n# SMA_NIFTY = SMA(XSHE000002_data, n)\n# SMA_NIFTY = SMA_NIFTY.dropna()\n# SMA = SMA_NIFTY['SMA']\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n\n# Compute the 200-day EWMA for NIFTY\n# ew = 200\n# EWMA_NIFTY = EWMA(XSHE000002_data, ew)\n# EWMA_NIFTY = EWMA_NIFTY.dropna()\n# EWMA = EWMA_NIFTY['EWMA_200']\n\n# Plotting the NIFTY Price Series chart and Moving Averages below\n# plt.figure(figsize=(9, 5))\n# plt.plot(XSHE000002_data['close'], lw=1, label='NSE Prices')\n# plt.plot(SMA, 'g', lw=1, label='50-day SMA (green)')\n# plt.plot(EWMA, 'r', lw=1, label='200-day EWMA (red)')\n# plt.legend(loc=2, prop={'size': 11})\n# plt.grid(True)\n# plt.setp(plt.gca().get_xticklabels(), rotation=30)\n# plt.show()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import numpy as np
from board_specs import *
from board_components import *
import constants
import board_test
# List of resources available to be distributed on the board
RESOURCE_NAMES = constants.RESOURCE_NAMES
# Create a dictionary of each resource and a corresponding number id
res_dict = dict(zip(RESOURCE_NAMES, np.arange(0, len(RESOURCE_NAMES))))
# List of available ports that can be distributed around the board
PORTS_NAMES = constants.PORTS_NAMES
# Create a dictionary of each port and a corresponding number id
port_dict = dict(zip(PORTS_NAMES, np.arange(0, len(PORTS_NAMES))))
class Board:
def __init__(self):
"""
Do not forget to ensure 6 and 8 are not next to each other:
no 6-6 no 6-8 no 8-8
"""
# Array of each resource id number repeated the amount of times that
# the resource is available on the board.
# This will be used to distribute the resources into slots on the board
self.board_resources = np.array(
[res_dict["desert"]]
+ [res_dict["brick"]] * 3
+ [res_dict["ore"]] * 3
+ [res_dict["hay"]] * 4
+ [res_dict["wood"]] * 4
+ [res_dict["sheep"]] * 4
)
# Shuffle the resource array for randomized distribution
np.random.shuffle(self.board_resources)
# replace lines #42 and #44 with the following:
# self.roll_numbers = board_test.roll_numbers
        # 0 marks the desert tile; it can never actually be rolled
self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9, 9, 10, 10, 11, 11, 12])
# shuffle number options
np.random.shuffle(self.roll_numbers)
# Array of the port ids, amount of times each port is available -
self.ports = np.array(
[port_dict["3:1"]] * 4
+ [port_dict["2brick:1"]]
+ [port_dict["2ore:1"]]
+ [port_dict["2hay:1"]]
+ [port_dict["2wood:1"]]
+ [port_dict["2sheep:1"]]
)
# shuffle the ports for randomized distribution
np.random.shuffle(self.ports)
# Zero_tile_nr will represent where the 0 number exists
zero_tile_nr = np.where(self.roll_numbers == 0)
# Desert_tile_nr will represent where the desert resource exists
desert_tile_nr = np.where(self.board_resources == res_dict["desert"])
# Robber will keep track of where the robber is and it starts in
# the desert. Robber will be an integer.
# Numpy returns a tuple of which the first is a list with the index.
# We'll extract it, and add 1 since terrain keys start at 1, not 0.
self.robber = desert_tile_nr[0][0] + 1
        # Swap the resources so the desert ends up on the tile whose roll
        # number is 0, and the resource that was on that tile moves to the
        # old desert position.
self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr] =\
(self.board_resources[desert_tile_nr], self.board_resources[zero_tile_nr])
        # The following code creates the board objects: terrains, edges, intersections.
# Initialize a list for each attribute type.
self.edges = self.initialize_edges()
self.intersections = self.initialize_intersections()
self.terrains = self.initialize_terrains()
# Assign the correct attributes for each attribute.
self.assign_specs()
"""
Cards are initialized and tracked in catan.py
self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)
self.dev_cards=random.shuffle(dev_cards)
"""
def __str__(self):
# A message, of how the board is displayed.
s = '\nThe board is arranged as follows:\n'
s += ' /\\ /\\ /\\ \n'
s += ' |01|02|03| \n'
s += ' \\/ \\/ \\/ \n'
s += ' /\\ /\\ /\\ /\\ \n'
s += ' |04|05|06|07| \n'
s += ' \\/ \\/ \\/ \\/ \n'
s += ' /\\ /\\ /\\ /\\ /\\ \n'
s += '|08|09|10|11|12| \n'
s += ' \\/ \\/ \\/ \\/ \\/ \n'
s += ' /\\ /\\ /\\ /\\ \n'
s += ' |13|14|15|16| \n'
s += ' \\/ \\/ \\/ \\/ \n'
s += ' /\\ /\\ /\\ \n'
s += ' |17|18|19| \n'
s += ' \\/ \\/ \\/ \n'
        # Display each terrain; the identifying numbers correspond to
# the above diagram.
s += 'Following is the content of each terrain:\n\n'
for item in self.terrains:
if self.robber == item:
s += '\nRobber is on the following tile (number {0})'.format(
self.terrains[item].identifier)
s += str(self.terrains[item])
return s
# The following methods will initialize all objects with default
# arguments; their attribute objects will be reassigned later. This
    # is because the objects refer to each other as attributes, and they
# must exist before being assigned. The objects will be stored in a
# dictionary, with reference numbers as keys.
def initialize_edges(self):
edges = {}
for x in range(1, 73):
edges[x] = Edge(x, intersections=[], terrains=[])
return edges
def initialize_intersections(self):
intersections = {}
for x in range(1, 55):
intersections[x] = Intersection(x, edges=[], terrains=[])
return intersections
def initialize_terrains(self):
terrains = {}
for x in range(1, 20):
terrains[x] = Terrain(x, x, 0)
return terrains
# The following method will assign the correct attributes for each
# object. It does not matter if the object that's assigned already
# has it's own attributes referred to properly, or if it will be
# assigned later. The pointers remain unchanged, and all objects
# will have their proper attributes. This circular relationship is
# interesting. An object's attribute's attribute can be the initial
# object.
def assign_specs(self) -> None:
# First, it loops through the list of terrains from the board_specs
# file. The first item is the key/identifier. Then there are two
# tuples: the intersections, and the edges.
for item in terrains_specs:
# Create a local variable to hold the edges for this terrain.
local_egdes = []
for subitem in item[1]:
# Each integer in the tuple refers to a key in the edges
# dictionary. This edge will be added to the list.
# Additionally, this edge's terrains attribute will be updated
# to hold the terrain we're working on now.
local_egdes.append(self.edges[subitem])
self.edges[subitem].terrains.append(self.terrains[item[0]])
# The same process is repeated for the intersections.
local_intersections = []
for subitem in item[2]:
local_intersections.append(self.intersections[subitem])
self.intersections[subitem].terrains.append(self.terrains[item[0]])
# The local lists are converted to tuples and passed to the terrain.
self.terrains[item[0]].edges = (tuple(local_egdes))
self.terrains[item[0]].intersections = (tuple(local_intersections))
            # Lastly, assign the landscape (resource) and the roll number. (The
            # lists were shuffled, so the assignment is random.) I deduct 1 from the
            # list index, since the dictionary keys start at 1, and lists start at 0.
self.terrains[item[0]].resource = self.board_resources[item[0]-1]
self.terrains[item[0]].resource_num = self.roll_numbers[item[0]-1]
# Using the next list from the board_specs file, the intersections and
# edges will reference each other. Additionally, the ports will be added.
for item in intersections_specs:
            # It uses the same approach as above: loop through the intersection's
            # edges to build a list, and add this intersection to each edge processed.
local_egdes = []
for subitem in item[1]:
local_egdes.append(self.edges[subitem])
self.edges[subitem].intersections.append(self.intersections[item[0]])
self.intersections[item[0]].edges = local_egdes
# If that item contains a port, assign it here.
if len(item) == 3:
self.intersections[item[0]].port = self.ports[item[2]]
"""
Cards are initialized and tracked in catan.py
def buy_dev_card(self,current_player):
# pop the card from the dev card and add it to the players dev cards
#TODO need to see if you can purchase not sure how to use that method
self.card=dev_cards.pop()
player(current_player).development_cards.insert(card)
player(current_player).resource_cards.remove('sheep')
player(current_player).resource_cards.remove('wheat')
player(current_player).resource_cards.remove('ore')
"""
# Create and display the board object.
def main():
b = Board()
print(b)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "ee22d6226f734c67be91a3ccf1c8c0024bb7dc08",
"index": 5818,
"step-1": "<mask token>\n\n\nclass Board:\n\n def __init__(self):\n \"\"\"\n Do not forget to ensure 6 and 8 are not next to each other:\n no 6-6 no 6-8 no 8-8\n \"\"\"\n self.board_resources = np.array([res_dict['desert']] + [res_dict[\n 'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +\n [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)\n np.random.shuffle(self.board_resources)\n self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,\n 9, 10, 10, 11, 11, 12])\n np.random.shuffle(self.roll_numbers)\n self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[\n '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +\n [port_dict['2wood:1']] + [port_dict['2sheep:1']])\n np.random.shuffle(self.ports)\n zero_tile_nr = np.where(self.roll_numbers == 0)\n desert_tile_nr = np.where(self.board_resources == res_dict['desert'])\n self.robber = desert_tile_nr[0][0] + 1\n self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr\n ] = self.board_resources[desert_tile_nr], self.board_resources[\n zero_tile_nr]\n self.edges = self.initialize_edges()\n self.intersections = self.initialize_intersections()\n self.terrains = self.initialize_terrains()\n self.assign_specs()\n \"\"\" \n Cards are initialized and tracked in catan.py\n self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)\n self.dev_cards=random.shuffle(dev_cards)\n \"\"\"\n\n def __str__(self):\n s = '\\nThe board is arranged as follows:\\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |01|02|03| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |04|05|06|07| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += '|08|09|10|11|12| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |13|14|15|16| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |17|18|19| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += 'Following is the content of each terrain:\\n\\n'\n for item in self.terrains:\n if self.robber == item:\n s += '\\nRobber is on the following tile (number {0})'.format(\n self.terrains[item].identifier)\n s += str(self.terrains[item])\n return s\n\n def initialize_edges(self):\n edges = {}\n for x in range(1, 73):\n edges[x] = Edge(x, intersections=[], terrains=[])\n return edges\n\n def initialize_intersections(self):\n intersections = {}\n for x in range(1, 55):\n intersections[x] = Intersection(x, edges=[], terrains=[])\n return intersections\n\n def initialize_terrains(self):\n terrains = {}\n for x in range(1, 20):\n terrains[x] = Terrain(x, x, 0)\n return terrains\n\n def assign_specs(self) ->None:\n for item in terrains_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].terrains.append(self.terrains[item[0]])\n local_intersections = []\n for subitem in item[2]:\n local_intersections.append(self.intersections[subitem])\n self.intersections[subitem].terrains.append(self.terrains[\n item[0]])\n self.terrains[item[0]].edges = tuple(local_egdes)\n self.terrains[item[0]].intersections = tuple(local_intersections)\n self.terrains[item[0]].resource = self.board_resources[item[0] - 1]\n self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1\n ]\n for item in intersections_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].intersections.append(self.intersections\n 
[item[0]])\n self.intersections[item[0]].edges = local_egdes\n if len(item) == 3:\n self.intersections[item[0]].port = self.ports[item[2]]\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Board:\n\n def __init__(self):\n \"\"\"\n Do not forget to ensure 6 and 8 are not next to each other:\n no 6-6 no 6-8 no 8-8\n \"\"\"\n self.board_resources = np.array([res_dict['desert']] + [res_dict[\n 'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +\n [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)\n np.random.shuffle(self.board_resources)\n self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,\n 9, 10, 10, 11, 11, 12])\n np.random.shuffle(self.roll_numbers)\n self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[\n '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +\n [port_dict['2wood:1']] + [port_dict['2sheep:1']])\n np.random.shuffle(self.ports)\n zero_tile_nr = np.where(self.roll_numbers == 0)\n desert_tile_nr = np.where(self.board_resources == res_dict['desert'])\n self.robber = desert_tile_nr[0][0] + 1\n self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr\n ] = self.board_resources[desert_tile_nr], self.board_resources[\n zero_tile_nr]\n self.edges = self.initialize_edges()\n self.intersections = self.initialize_intersections()\n self.terrains = self.initialize_terrains()\n self.assign_specs()\n \"\"\" \n Cards are initialized and tracked in catan.py\n self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)\n self.dev_cards=random.shuffle(dev_cards)\n \"\"\"\n\n def __str__(self):\n s = '\\nThe board is arranged as follows:\\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |01|02|03| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |04|05|06|07| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += '|08|09|10|11|12| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |13|14|15|16| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |17|18|19| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += 'Following is the content of each terrain:\\n\\n'\n for item in self.terrains:\n if self.robber == item:\n s += '\\nRobber is on the following tile (number {0})'.format(\n self.terrains[item].identifier)\n s += str(self.terrains[item])\n return s\n\n def initialize_edges(self):\n edges = {}\n for x in range(1, 73):\n edges[x] = Edge(x, intersections=[], terrains=[])\n return edges\n\n def initialize_intersections(self):\n intersections = {}\n for x in range(1, 55):\n intersections[x] = Intersection(x, edges=[], terrains=[])\n return intersections\n\n def initialize_terrains(self):\n terrains = {}\n for x in range(1, 20):\n terrains[x] = Terrain(x, x, 0)\n return terrains\n\n def assign_specs(self) ->None:\n for item in terrains_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].terrains.append(self.terrains[item[0]])\n local_intersections = []\n for subitem in item[2]:\n local_intersections.append(self.intersections[subitem])\n self.intersections[subitem].terrains.append(self.terrains[\n item[0]])\n self.terrains[item[0]].edges = tuple(local_egdes)\n self.terrains[item[0]].intersections = tuple(local_intersections)\n self.terrains[item[0]].resource = self.board_resources[item[0] - 1]\n self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1\n ]\n for item in intersections_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].intersections.append(self.intersections\n 
[item[0]])\n self.intersections[item[0]].edges = local_egdes\n if len(item) == 3:\n self.intersections[item[0]].port = self.ports[item[2]]\n \"\"\"\n Cards are initialized and tracked in catan.py\n def buy_dev_card(self,current_player):\n # pop the card from the dev card and add it to the players dev cards\n #TODO need to see if you can purchase not sure how to use that method\n self.card=dev_cards.pop()\n player(current_player).development_cards.insert(card)\n player(current_player).resource_cards.remove('sheep')\n player(current_player).resource_cards.remove('wheat')\n player(current_player).resource_cards.remove('ore')\n \"\"\"\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Board:\n\n def __init__(self):\n \"\"\"\n Do not forget to ensure 6 and 8 are not next to each other:\n no 6-6 no 6-8 no 8-8\n \"\"\"\n self.board_resources = np.array([res_dict['desert']] + [res_dict[\n 'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +\n [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)\n np.random.shuffle(self.board_resources)\n self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,\n 9, 10, 10, 11, 11, 12])\n np.random.shuffle(self.roll_numbers)\n self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[\n '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +\n [port_dict['2wood:1']] + [port_dict['2sheep:1']])\n np.random.shuffle(self.ports)\n zero_tile_nr = np.where(self.roll_numbers == 0)\n desert_tile_nr = np.where(self.board_resources == res_dict['desert'])\n self.robber = desert_tile_nr[0][0] + 1\n self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr\n ] = self.board_resources[desert_tile_nr], self.board_resources[\n zero_tile_nr]\n self.edges = self.initialize_edges()\n self.intersections = self.initialize_intersections()\n self.terrains = self.initialize_terrains()\n self.assign_specs()\n \"\"\" \n Cards are initialized and tracked in catan.py\n self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)\n self.dev_cards=random.shuffle(dev_cards)\n \"\"\"\n\n def __str__(self):\n s = '\\nThe board is arranged as follows:\\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |01|02|03| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |04|05|06|07| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += '|08|09|10|11|12| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |13|14|15|16| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |17|18|19| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += 'Following is the content of each terrain:\\n\\n'\n for item in self.terrains:\n if self.robber == item:\n s += '\\nRobber is on the following tile (number {0})'.format(\n self.terrains[item].identifier)\n s += str(self.terrains[item])\n return s\n\n def initialize_edges(self):\n edges = {}\n for x in range(1, 73):\n edges[x] = Edge(x, intersections=[], terrains=[])\n return edges\n\n def initialize_intersections(self):\n intersections = {}\n for x in range(1, 55):\n intersections[x] = Intersection(x, edges=[], terrains=[])\n return intersections\n\n def initialize_terrains(self):\n terrains = {}\n for x in range(1, 20):\n terrains[x] = Terrain(x, x, 0)\n return terrains\n\n def assign_specs(self) ->None:\n for item in terrains_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].terrains.append(self.terrains[item[0]])\n local_intersections = []\n for subitem in item[2]:\n local_intersections.append(self.intersections[subitem])\n self.intersections[subitem].terrains.append(self.terrains[\n item[0]])\n self.terrains[item[0]].edges = tuple(local_egdes)\n self.terrains[item[0]].intersections = tuple(local_intersections)\n self.terrains[item[0]].resource = self.board_resources[item[0] - 1]\n self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1\n ]\n for item in intersections_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].intersections.append(self.intersections\n 
[item[0]])\n self.intersections[item[0]].edges = local_egdes\n if len(item) == 3:\n self.intersections[item[0]].port = self.ports[item[2]]\n \"\"\"\n Cards are initialized and tracked in catan.py\n def buy_dev_card(self,current_player):\n # pop the card from the dev card and add it to the players dev cards\n #TODO need to see if you can purchase not sure how to use that method\n self.card=dev_cards.pop()\n player(current_player).development_cards.insert(card)\n player(current_player).resource_cards.remove('sheep')\n player(current_player).resource_cards.remove('wheat')\n player(current_player).resource_cards.remove('ore')\n \"\"\"\n\n\ndef main():\n b = Board()\n print(b)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import numpy as np\nfrom board_specs import *\nfrom board_components import *\nimport constants\nimport board_test\nRESOURCE_NAMES = constants.RESOURCE_NAMES\nres_dict = dict(zip(RESOURCE_NAMES, np.arange(0, len(RESOURCE_NAMES))))\nPORTS_NAMES = constants.PORTS_NAMES\nport_dict = dict(zip(PORTS_NAMES, np.arange(0, len(PORTS_NAMES))))\n\n\nclass Board:\n\n def __init__(self):\n \"\"\"\n Do not forget to ensure 6 and 8 are not next to each other:\n no 6-6 no 6-8 no 8-8\n \"\"\"\n self.board_resources = np.array([res_dict['desert']] + [res_dict[\n 'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +\n [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)\n np.random.shuffle(self.board_resources)\n self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,\n 9, 10, 10, 11, 11, 12])\n np.random.shuffle(self.roll_numbers)\n self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[\n '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +\n [port_dict['2wood:1']] + [port_dict['2sheep:1']])\n np.random.shuffle(self.ports)\n zero_tile_nr = np.where(self.roll_numbers == 0)\n desert_tile_nr = np.where(self.board_resources == res_dict['desert'])\n self.robber = desert_tile_nr[0][0] + 1\n self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr\n ] = self.board_resources[desert_tile_nr], self.board_resources[\n zero_tile_nr]\n self.edges = self.initialize_edges()\n self.intersections = self.initialize_intersections()\n self.terrains = self.initialize_terrains()\n self.assign_specs()\n \"\"\" \n Cards are initialized and tracked in catan.py\n self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)\n self.dev_cards=random.shuffle(dev_cards)\n \"\"\"\n\n def __str__(self):\n s = '\\nThe board is arranged as follows:\\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |01|02|03| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |04|05|06|07| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += '|08|09|10|11|12| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |13|14|15|16| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |17|18|19| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += 'Following is the content of each terrain:\\n\\n'\n for item in self.terrains:\n if self.robber == item:\n s += '\\nRobber is on the following tile (number {0})'.format(\n self.terrains[item].identifier)\n s += str(self.terrains[item])\n return s\n\n def initialize_edges(self):\n edges = {}\n for x in range(1, 73):\n edges[x] = Edge(x, intersections=[], terrains=[])\n return edges\n\n def initialize_intersections(self):\n intersections = {}\n for x in range(1, 55):\n intersections[x] = Intersection(x, edges=[], terrains=[])\n return intersections\n\n def initialize_terrains(self):\n terrains = {}\n for x in range(1, 20):\n terrains[x] = Terrain(x, x, 0)\n return terrains\n\n def assign_specs(self) ->None:\n for item in terrains_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].terrains.append(self.terrains[item[0]])\n local_intersections = []\n for subitem in item[2]:\n local_intersections.append(self.intersections[subitem])\n self.intersections[subitem].terrains.append(self.terrains[\n item[0]])\n self.terrains[item[0]].edges = tuple(local_egdes)\n self.terrains[item[0]].intersections = tuple(local_intersections)\n 
self.terrains[item[0]].resource = self.board_resources[item[0] - 1]\n self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1\n ]\n for item in intersections_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].intersections.append(self.intersections\n [item[0]])\n self.intersections[item[0]].edges = local_egdes\n if len(item) == 3:\n self.intersections[item[0]].port = self.ports[item[2]]\n \"\"\"\n Cards are initialized and tracked in catan.py\n def buy_dev_card(self,current_player):\n # pop the card from the dev card and add it to the players dev cards\n #TODO need to see if you can purchase not sure how to use that method\n self.card=dev_cards.pop()\n player(current_player).development_cards.insert(card)\n player(current_player).resource_cards.remove('sheep')\n player(current_player).resource_cards.remove('wheat')\n player(current_player).resource_cards.remove('ore')\n \"\"\"\n\n\ndef main():\n b = Board()\n print(b)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import numpy as np\nfrom board_specs import *\nfrom board_components import *\nimport constants\nimport board_test\n\n\n# List of resources available to be distributed on the board\nRESOURCE_NAMES = constants.RESOURCE_NAMES\n# Create a dictionary of each resource and a corresponding number id\nres_dict = dict(zip(RESOURCE_NAMES, np.arange(0, len(RESOURCE_NAMES))))\n# List of available ports that can be distributed around the board\nPORTS_NAMES = constants.PORTS_NAMES\n# Create a dictionary of each port and a corresponding number id\nport_dict = dict(zip(PORTS_NAMES, np.arange(0, len(PORTS_NAMES))))\n\n\nclass Board:\n def __init__(self):\n \"\"\"\n Do not forget to ensure 6 and 8 are not next to each other:\n no 6-6 no 6-8 no 8-8\n \"\"\"\n # Array of each resource id number repeated the amount of times that\n # the resource is available on the board.\n # This will be used to distribute the resources into slots on the board\n self.board_resources = np.array(\n [res_dict[\"desert\"]]\n + [res_dict[\"brick\"]] * 3\n + [res_dict[\"ore\"]] * 3\n + [res_dict[\"hay\"]] * 4\n + [res_dict[\"wood\"]] * 4\n + [res_dict[\"sheep\"]] * 4\n )\n # Shuffle the resource array for randomized distribution\n np.random.shuffle(self.board_resources)\n \n # replace lines #42 and #44 with the following:\n # self.roll_numbers = board_test.roll_numbers\n \n # number associated with the desert and 0 can not actually be rolled\n self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9, 9, 10, 10, 11, 11, 12])\n # shuffle number options\n np.random.shuffle(self.roll_numbers)\n \n # Array of the port ids, amount of times each port is available -\n self.ports = np.array(\n [port_dict[\"3:1\"]] * 4\n + [port_dict[\"2brick:1\"]]\n + [port_dict[\"2ore:1\"]]\n + [port_dict[\"2hay:1\"]]\n + [port_dict[\"2wood:1\"]]\n + [port_dict[\"2sheep:1\"]]\n )\n # shuffle the ports for randomized distribution\n np.random.shuffle(self.ports)\n # Zero_tile_nr will represent where the 0 number exists\n zero_tile_nr = np.where(self.roll_numbers == 0)\n # Desert_tile_nr will represent where the desert resource exists\n desert_tile_nr = np.where(self.board_resources == res_dict[\"desert\"])\n # Robber will keep track of where the robber is and it starts in\n # the desert. 
Robber will be an integer.\n # Numpy returns a tuple of which the first is a list with the index.\n # We'll extract it, and add 1 since terrain keys start at 1, not 0.\n self.robber = desert_tile_nr[0][0] + 1\n # as the desert tile and replace whatever was already in the desert\n # tile into the empty zero tile\n self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr] =\\\n (self.board_resources[desert_tile_nr], self.board_resources[zero_tile_nr])\n\n # The following code create the board objects: terrains, edges, intersections.\n\n # Initialize a list for each attribute type.\n self.edges = self.initialize_edges()\n self.intersections = self.initialize_intersections()\n self.terrains = self.initialize_terrains()\n # Assign the correct attributes for each attribute.\n self.assign_specs()\n\n \"\"\" \n Cards are initialized and tracked in catan.py\n self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)\n self.dev_cards=random.shuffle(dev_cards)\n \"\"\"\n\n def __str__(self):\n # A message, of how the board is displayed.\n s = '\\nThe board is arranged as follows:\\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |01|02|03| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |04|05|06|07| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += '|08|09|10|11|12| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |13|14|15|16| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |17|18|19| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n # Display each terrains; the identifying numbers correspond to\n # the above diagram.\n s += 'Following is the content of each terrain:\\n\\n'\n for item in self.terrains:\n if self.robber == item:\n s += '\\nRobber is on the following tile (number {0})'.format(\n self.terrains[item].identifier)\n s += str(self.terrains[item])\n return s\n\n # The following methods will initialize all objects with default\n # arguments; their attribute objects will be reassigned later. This\n # is because the objects refer each other as attributes, and they\n # must exist before being assigned. The objects will be stored in a\n # dictionary, with reference numbers as keys.\n def initialize_edges(self):\n edges = {}\n for x in range(1, 73):\n edges[x] = Edge(x, intersections=[], terrains=[])\n return edges\n\n def initialize_intersections(self):\n intersections = {}\n for x in range(1, 55):\n intersections[x] = Intersection(x, edges=[], terrains=[])\n return intersections\n\n def initialize_terrains(self):\n terrains = {}\n for x in range(1, 20):\n terrains[x] = Terrain(x, x, 0)\n return terrains\n\n # The following method will assign the correct attributes for each\n # object. It does not matter if the object that's assigned already\n # has it's own attributes referred to properly, or if it will be\n # assigned later. The pointers remain unchanged, and all objects\n # will have their proper attributes. This circular relationship is\n # interesting. An object's attribute's attribute can be the initial\n # object.\n def assign_specs(self) -> None:\n # First, it loops through the list of terrains from the board_specs\n # file. The first item is the key/identifier. 
Then there are two\n # tuples: the intersections, and the edges.\n for item in terrains_specs:\n # Create a local variable to hold the edges for this terrain.\n local_egdes = []\n for subitem in item[1]:\n # Each integer in the tuple refers to a key in the edges\n # dictionary. This edge will be added to the list.\n # Additionally, this edge's terrains attribute will be updated\n # to hold the terrain we're working on now.\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].terrains.append(self.terrains[item[0]])\n\n # The same process is repeated for the intersections.\n local_intersections = []\n for subitem in item[2]:\n local_intersections.append(self.intersections[subitem])\n self.intersections[subitem].terrains.append(self.terrains[item[0]])\n\n # The local lists are converted to tuples and passed to the terrain.\n self.terrains[item[0]].edges = (tuple(local_egdes))\n self.terrains[item[0]].intersections = (tuple(local_intersections))\n\n # Assign the last landscape and resource number. (The lists\n # were shuffled, so it's random.) I deduct 1 from the list index,\n # since the dictionary uses keys starting at 1, and lists start at 0.\n self.terrains[item[0]].resource = self.board_resources[item[0]-1]\n self.terrains[item[0]].resource_num = self.roll_numbers[item[0]-1]\n\n # Using the next list from the board_specs file, the intersections and\n # edges will reference each other. Additionally, the ports will be added.\n for item in intersections_specs:\n # It uses the same method as above: loops throught he intersections\n # to add a list of edges, and adds self to the edge being processed.\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].intersections.append(self.intersections[item[0]])\n\n self.intersections[item[0]].edges = local_egdes\n # If that item contains a port, assign it here.\n if len(item) == 3:\n self.intersections[item[0]].port = self.ports[item[2]]\n\n \"\"\"\n Cards are initialized and tracked in catan.py\n def buy_dev_card(self,current_player):\n # pop the card from the dev card and add it to the players dev cards\n #TODO need to see if you can purchase not sure how to use that method\n self.card=dev_cards.pop()\n player(current_player).development_cards.insert(card)\n player(current_player).resource_cards.remove('sheep')\n player(current_player).resource_cards.remove('wheat')\n player(current_player).resource_cards.remove('ore')\n \"\"\"\n\n\n# Create and display the board object.\ndef main():\n b = Board()\n print(b)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
7,
8,
10,
12,
13
]
}
|
[
7,
8,
10,
12,
13
] |
# Given a natural number, determine whether
# the year with that number is a leap year.
# If the year is a leap year, print `YES`, otherwise print `NO`.
# Recall that, according to the Gregorian calendar, a year is a leap year
# if its number is divisible by 4 but not by 100, and also if it is divisible by 400.
year = int(input('enter a year '))
if year % 4 == 0 and not year % 100 == 0:
print('YES')
elif year % 400 == 0:
    print('YES')
else:
print('NO')
|
normal
|
{
"blob_id": "99e6e734c7d638e3cf4d50d9605c99d5e700e82a",
"index": 1699,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif year % 4 == 0 and not year % 100 == 0:\n print('YES')\nelif year % 400 == 0:\n print('yes')\nelse:\n print('NO')\n",
"step-3": "year = int(input('введите год '))\nif year % 4 == 0 and not year % 100 == 0:\n print('YES')\nelif year % 400 == 0:\n print('yes')\nelse:\n print('NO')\n",
"step-4": "# Дано натуральное число. Требуется определить,\n# является ли год с данным номером високосным.\n# Если год является високосным, то выведите `YES`, иначе выведите `NO`.\n# Напомним, что в соответствии с григорианским календарем, год является високосным,\n# если его номер кратен 4, но не кратен 100, а также если он кратен 400.\n\nyear = int(input('введите год '))\nif year % 4 == 0 and not year % 100 == 0:\n print('YES')\nelif year % 400 == 0:\n print('yes')\nelse:\n print('NO')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding:utf-8 -*-
from src.Client.Conf.config import *
class SaveConfigFile():
"""
    This class is responsible for saving the configuration file; it performs the actual file operations.
"""
def __init__(self, fileName='../conf/main.ini'):
self.config = ConfigParser.ConfigParser()
self.fileName = fileName
def saveConfigFile(self, configMainName, configSubName, value):
"""
        :param configMainName: the section name in the config file
        :param configSubName: the option name under that section
        :param value: the value to write back
        :return: None
"""
try:
            # Defensive programming: if value were not a string, the conversion would happen here
if configMainName is None or configSubName is None:
return None
            # Write the value back to the configuration file
self.config.read(self.fileName)
self.config.set(configMainName, configSubName, value)
self.config.write(open(self.fileName, "r+"))
            # Print a debug log message
if DEBUG and SYSTEM_TOOLS_DEBUG:
print('{SYS}{MISSION_DEBUG} config has been save in file successfully')
except Exception as e:
            # Open the error log file
wrongFile = open('data/wrongMessage.dat', 'a+')
            # Get the current time
currentTime = str(
datetime.datetime.strptime(time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()), '%Y-%m-%d-%H-%M-%S'))
            # Build the error message to record
wrongMessage = {
'|currentTime': currentTime,
'|file': 'SystemTools-ConfFileRead-saveConfigFile',
'|configMainName': configMainName,
'|configSubName': configSubName,
'|value': value,
'|wrongMessage': str(e)
}
            # Write it to the file
wrongFile.write(str(wrongMessage))
            # Append a newline character
wrongFile.write('\n')
wrongFile.close()
# Configuration file save test
if __name__ == '__main__':
s = SaveConfigFile(fileName='F:\python17\pythonPro\MemortAssit\conf\main.ini')
print(s.saveConfigFile('VERSION', 'version', 'v1.0'))
|
normal
|
{
"blob_id": "b61bb47f3e059c607447cea92ce1712825735822",
"index": 2373,
"step-1": "<mask token>\n\n\nclass SaveConfigFile:\n <mask token>\n\n def __init__(self, fileName='../conf/main.ini'):\n self.config = ConfigParser.ConfigParser()\n self.fileName = fileName\n\n def saveConfigFile(self, configMainName, configSubName, value):\n \"\"\"\n\n :param missionId: 需要保存的任务id (int 或者 string)\n :return:\n \"\"\"\n try:\n if configMainName is None or configSubName is None:\n return None\n self.config.read(self.fileName)\n self.config.set(configMainName, configSubName, value)\n self.config.write(open(self.fileName, 'r+'))\n if DEBUG and SYSTEM_TOOLS_DEBUG:\n print(\n '{SYS}{MISSION_DEBUG} config has been save in file successfully'\n )\n except Exception as e:\n wrongFile = open('data/wrongMessage.dat', 'a+')\n currentTime = str(datetime.datetime.strptime(time.strftime(\n '%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))\n wrongMessage = {'|currentTime': currentTime, '|file':\n 'SystemTools-ConfFileRead-saveConfigFile',\n '|configMainName': configMainName, '|configSubName':\n configSubName, '|value': value, '|wrongMessage': str(e)}\n wrongFile.write(str(wrongMessage))\n wrongFile.write('\\n')\n wrongFile.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SaveConfigFile:\n \"\"\"\n 该类负责保存配置文件,属于实际操作类\n \"\"\"\n\n def __init__(self, fileName='../conf/main.ini'):\n self.config = ConfigParser.ConfigParser()\n self.fileName = fileName\n\n def saveConfigFile(self, configMainName, configSubName, value):\n \"\"\"\n\n :param missionId: 需要保存的任务id (int 或者 string)\n :return:\n \"\"\"\n try:\n if configMainName is None or configSubName is None:\n return None\n self.config.read(self.fileName)\n self.config.set(configMainName, configSubName, value)\n self.config.write(open(self.fileName, 'r+'))\n if DEBUG and SYSTEM_TOOLS_DEBUG:\n print(\n '{SYS}{MISSION_DEBUG} config has been save in file successfully'\n )\n except Exception as e:\n wrongFile = open('data/wrongMessage.dat', 'a+')\n currentTime = str(datetime.datetime.strptime(time.strftime(\n '%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))\n wrongMessage = {'|currentTime': currentTime, '|file':\n 'SystemTools-ConfFileRead-saveConfigFile',\n '|configMainName': configMainName, '|configSubName':\n configSubName, '|value': value, '|wrongMessage': str(e)}\n wrongFile.write(str(wrongMessage))\n wrongFile.write('\\n')\n wrongFile.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SaveConfigFile:\n \"\"\"\n 该类负责保存配置文件,属于实际操作类\n \"\"\"\n\n def __init__(self, fileName='../conf/main.ini'):\n self.config = ConfigParser.ConfigParser()\n self.fileName = fileName\n\n def saveConfigFile(self, configMainName, configSubName, value):\n \"\"\"\n\n :param missionId: 需要保存的任务id (int 或者 string)\n :return:\n \"\"\"\n try:\n if configMainName is None or configSubName is None:\n return None\n self.config.read(self.fileName)\n self.config.set(configMainName, configSubName, value)\n self.config.write(open(self.fileName, 'r+'))\n if DEBUG and SYSTEM_TOOLS_DEBUG:\n print(\n '{SYS}{MISSION_DEBUG} config has been save in file successfully'\n )\n except Exception as e:\n wrongFile = open('data/wrongMessage.dat', 'a+')\n currentTime = str(datetime.datetime.strptime(time.strftime(\n '%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))\n wrongMessage = {'|currentTime': currentTime, '|file':\n 'SystemTools-ConfFileRead-saveConfigFile',\n '|configMainName': configMainName, '|configSubName':\n configSubName, '|value': value, '|wrongMessage': str(e)}\n wrongFile.write(str(wrongMessage))\n wrongFile.write('\\n')\n wrongFile.close()\n\n\nif __name__ == '__main__':\n s = SaveConfigFile(fileName=\n 'F:\\\\python17\\\\pythonPro\\\\MemortAssit\\\\conf\\\\main.ini')\n print(s.saveConfigFile('VERSION', 'version', 'v1.0'))\n",
"step-4": "from src.Client.Conf.config import *\n\n\nclass SaveConfigFile:\n \"\"\"\n 该类负责保存配置文件,属于实际操作类\n \"\"\"\n\n def __init__(self, fileName='../conf/main.ini'):\n self.config = ConfigParser.ConfigParser()\n self.fileName = fileName\n\n def saveConfigFile(self, configMainName, configSubName, value):\n \"\"\"\n\n :param missionId: 需要保存的任务id (int 或者 string)\n :return:\n \"\"\"\n try:\n if configMainName is None or configSubName is None:\n return None\n self.config.read(self.fileName)\n self.config.set(configMainName, configSubName, value)\n self.config.write(open(self.fileName, 'r+'))\n if DEBUG and SYSTEM_TOOLS_DEBUG:\n print(\n '{SYS}{MISSION_DEBUG} config has been save in file successfully'\n )\n except Exception as e:\n wrongFile = open('data/wrongMessage.dat', 'a+')\n currentTime = str(datetime.datetime.strptime(time.strftime(\n '%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))\n wrongMessage = {'|currentTime': currentTime, '|file':\n 'SystemTools-ConfFileRead-saveConfigFile',\n '|configMainName': configMainName, '|configSubName':\n configSubName, '|value': value, '|wrongMessage': str(e)}\n wrongFile.write(str(wrongMessage))\n wrongFile.write('\\n')\n wrongFile.close()\n\n\nif __name__ == '__main__':\n s = SaveConfigFile(fileName=\n 'F:\\\\python17\\\\pythonPro\\\\MemortAssit\\\\conf\\\\main.ini')\n print(s.saveConfigFile('VERSION', 'version', 'v1.0'))\n",
"step-5": "# -*- coding:utf-8 -*-\n\n\nfrom src.Client.Conf.config import *\n\n\nclass SaveConfigFile():\n \"\"\"\n 该类负责保存配置文件,属于实际操作类\n \"\"\"\n\n def __init__(self, fileName='../conf/main.ini'):\n self.config = ConfigParser.ConfigParser()\n self.fileName = fileName\n\n def saveConfigFile(self, configMainName, configSubName, value):\n \"\"\"\n\n :param missionId: 需要保存的任务id (int 或者 string)\n :return:\n \"\"\"\n try:\n # 防御编程 若value不是string,转换则在这转换\n if configMainName is None or configSubName is None:\n return None\n # 写回配置文件\n self.config.read(self.fileName)\n self.config.set(configMainName, configSubName, value)\n self.config.write(open(self.fileName, \"r+\"))\n # 打印debug日志\n if DEBUG and SYSTEM_TOOLS_DEBUG:\n print('{SYS}{MISSION_DEBUG} config has been save in file successfully')\n except Exception as e:\n # 打开错误日志文件\n wrongFile = open('data/wrongMessage.dat', 'a+')\n # 获取当前时间\n currentTime = str(\n datetime.datetime.strptime(time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime()), '%Y-%m-%d-%H-%M-%S'))\n # 生成报错的错误信息\n wrongMessage = {\n '|currentTime': currentTime,\n '|file': 'SystemTools-ConfFileRead-saveConfigFile',\n '|configMainName': configMainName,\n '|configSubName': configSubName,\n '|value': value,\n '|wrongMessage': str(e)\n }\n # 存入文件\n wrongFile.write(str(wrongMessage))\n # 增加换行符\n wrongFile.write('\\n')\n wrongFile.close()\n\n\n# 配置文件读取测试\nif __name__ == '__main__':\n s = SaveConfigFile(fileName='F:\\python17\\pythonPro\\MemortAssit\\conf\\main.ini')\n print(s.saveConfigFile('VERSION', 'version', 'v1.0'))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
def emphasize(sentence):
words = sentence.split(" ")
for i, word in enumerate(words):
words[i] = word[0].upper() + word[1:].lower()
return " ".join(words)
exp1 = "Hello World"
ans1 = emphasize("hello world")
assert ans1 == exp1, f"expected {exp1}, got {ans1}"
exp2 = "Good Morning"
ans2 = emphasize("GOOD MORNING")
assert ans2 == exp2, f"expected {exp2}, got {ans2}"
exp3 = "99 Red Balloons!"
ans3 = emphasize("99 red balloons!")
assert ans3 == exp3, f"expected {exp3}, got {ans3}"
print("everything okay")
|
normal
|
{
"blob_id": "518dcdca8f5e6b42624083e4327143dfba59b2ba",
"index": 9785,
"step-1": "<mask token>\n",
"step-2": "def emphasize(sentence):\n words = sentence.split(' ')\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return ' '.join(words)\n\n\n<mask token>\n",
"step-3": "def emphasize(sentence):\n words = sentence.split(' ')\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return ' '.join(words)\n\n\n<mask token>\nassert ans1 == exp1, f'expected {exp1}, got {ans1}'\n<mask token>\nassert ans2 == exp2, f'expected {exp2}, got {ans2}'\n<mask token>\nassert ans3 == exp3, f'expected {exp3}, got {ans3}'\nprint('everything okay')\n",
"step-4": "def emphasize(sentence):\n words = sentence.split(' ')\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return ' '.join(words)\n\n\nexp1 = 'Hello World'\nans1 = emphasize('hello world')\nassert ans1 == exp1, f'expected {exp1}, got {ans1}'\nexp2 = 'Good Morning'\nans2 = emphasize('GOOD MORNING')\nassert ans2 == exp2, f'expected {exp2}, got {ans2}'\nexp3 = '99 Red Balloons!'\nans3 = emphasize('99 red balloons!')\nassert ans3 == exp3, f'expected {exp3}, got {ans3}'\nprint('everything okay')\n",
"step-5": "def emphasize(sentence):\n words = sentence.split(\" \")\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return \" \".join(words)\n\n\nexp1 = \"Hello World\"\nans1 = emphasize(\"hello world\")\nassert ans1 == exp1, f\"expected {exp1}, got {ans1}\"\n\nexp2 = \"Good Morning\"\nans2 = emphasize(\"GOOD MORNING\")\nassert ans2 == exp2, f\"expected {exp2}, got {ans2}\"\n\nexp3 = \"99 Red Balloons!\"\nans3 = emphasize(\"99 red balloons!\")\nassert ans3 == exp3, f\"expected {exp3}, got {ans3}\"\n\nprint(\"everything okay\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import division, print_function, unicode_literals
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from pyglet.gl import *
from pyglet.window import key
from cocos.actions import *
from cocos.director import director
from cocos.layer import Layer
from cocos.scene import Scene
from cocos.sprite import Sprite
from haiku import generate_haiku
from time import time
def get_steps(index):
return Scene(FontLayer(title="", subtitle='\n'.join(generate_haiku())), SpriteMoveTo(index))
class SpriteLayer(Layer):
is_event_handler = True #: enable pyglet's events
def __init__(self, index=1):
super(SpriteLayer, self).__init__()
self.index = index
self.image = pyglet.resource.image('flat-black-l.png')
self.image.anchor_x = self.image.width
self.image.anchor_y = self.image.height
def on_key_release(self, keys, mod):
# LEFT: go to previous scene
# RIGTH: go to next scene
# ENTER: restart scene
max_steps = 8
if keys == key.LEFT:
self.index -= 1
if self.index < 0:
self.index = max_steps - 1
elif keys == key.RIGHT:
self.index += 1
if self.index >= 8:
self.index = 0
if keys in (key.LEFT, key.RIGHT, key.ENTER):
director.replace(get_steps(self.index))
return True
# def on_exit( self ):
# for o in self.objects:
# o.stop()
class SpriteMoveTo(SpriteLayer):
def on_enter(self):
super(SpriteMoveTo, self).on_enter()
sprite3 = Sprite(self.image)
self.add(sprite3)
x, y = divmod(self.index, 3)
        sprite3.position = x * 100 + 100, y * 100 + 100
# sprite3.do(MoveTo((620, 300), 1))
class FontLayer(Layer):
    def __init__(self, title="Sprite Example #", subtitle="Goto()"):
super(FontLayer, self).__init__()
self.title = title
self.subtitle = subtitle
self.batch = pyglet.graphics.Batch()
self.text_title = pyglet.text.Label(self.title,
font_size=32,
x=5,
y=director.get_window_size()[1],
anchor_x='left',
anchor_y='top',
batch=self.batch)
self.text_subtitle = pyglet.text.Label(self.subtitle,
multiline=True,
width=600,
font_size=16,
x=5,
y=director.get_window_size()[1] - 80,
anchor_x='left',
anchor_y='top',
batch=self.batch)
self.text_help = pyglet.text.Label("Press LEFT / RIGHT for prev/next test, "
"ENTER to restart test",
font_size=16,
x=director.get_window_size()[0] // 2,
y=20,
anchor_x='center',
anchor_y='center',
batch=self.batch)
def draw(self):
super(FontLayer, self).draw()
self.batch.draw()
if __name__ == "__main__":
director.init(resizable=True, caption='SuperStepper')
director.run(get_steps(1))
|
normal
|
{
"blob_id": "2678aac08104a580e866984bc4cf4adf8cb8ac5c",
"index": 5930,
"step-1": "<mask token>\n\n\nclass SpriteMoveTo(SpriteLayer):\n <mask token>\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SpriteLayer(Layer):\n is_event_handler = True\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n max_steps = 8\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n sprite3.position = x * 100 + 100, y * 100 + 100\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_steps(index):\n return Scene(FontLayer(title='', subtitle='\\n'.join(generate_haiku())),\n SpriteMoveTo(index))\n\n\nclass SpriteLayer(Layer):\n is_event_handler = True\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n max_steps = 8\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n sprite3.position = x * 100 + 100, y * 100 + 100\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n<mask token>\n",
"step-4": "<mask token>\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\n<mask token>\n\n\ndef get_steps(index):\n return Scene(FontLayer(title='', subtitle='\\n'.join(generate_haiku())),\n SpriteMoveTo(index))\n\n\nclass SpriteLayer(Layer):\n is_event_handler = True\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n max_steps = 8\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n sprite3.position = x * 100 + 100, y * 100 + 100\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\nif __name__ == '__main__':\n director.init(resizable=True, caption='SuperStepper')\n director.run(get_steps(1))\n",
"step-5": "from __future__ import division, print_function, unicode_literals\n\nimport sys\nimport os\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\n\nfrom pyglet.gl import *\nfrom pyglet.window import key\n\nfrom cocos.actions import *\nfrom cocos.director import director\nfrom cocos.layer import Layer\nfrom cocos.scene import Scene\nfrom cocos.sprite import Sprite\nfrom haiku import generate_haiku\n\nfrom time import time\n\ndef get_steps(index):\n \n return Scene(FontLayer(title=\"\", subtitle='\\n'.join(generate_haiku())), SpriteMoveTo(index))\n\nclass SpriteLayer(Layer):\n\n is_event_handler = True #: enable pyglet's events\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n # LEFT: go to previous scene\n # RIGTH: go to next scene\n # ENTER: restart scene\n max_steps = 8\n\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n # def on_exit( self ):\n # for o in self.objects:\n # o.stop()\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n\n sprite3.position = x * 100 +100 , y * 100 + 100\n # sprite3.do(MoveTo((620, 300), 1))\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title=\"Sprite Exmaple #\", subtitle=\"Goto()\"):\n super(FontLayer, self).__init__()\n\n self.title = title\n self.subtitle = subtitle\n\n self.batch = pyglet.graphics.Batch()\n\n self.text_title = pyglet.text.Label(self.title,\n font_size=32,\n x=5,\n y=director.get_window_size()[1],\n anchor_x='left',\n anchor_y='top',\n batch=self.batch)\n\n self.text_subtitle = pyglet.text.Label(self.subtitle,\n multiline=True,\n width=600,\n font_size=16,\n x=5,\n y=director.get_window_size()[1] - 80,\n anchor_x='left',\n anchor_y='top',\n batch=self.batch)\n\n self.text_help = pyglet.text.Label(\"Press LEFT / RIGHT for prev/next test, \"\n \"ENTER to restart test\",\n font_size=16,\n x=director.get_window_size()[0] // 2,\n y=20,\n anchor_x='center',\n anchor_y='center',\n batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n\nif __name__ == \"__main__\":\n director.init(resizable=True, caption='SuperStepper')\n director.run(get_steps(1))",
"step-ids": [
4,
9,
10,
11,
13
]
}
|
[
4,
9,
10,
11,
13
] |
import xml.parsers.expat
import urllib2
import threading
def check_url(checkurl, checkstring, checkname):
try:
opener = urllib2.urlopen(checkurl, timeout = 5)
if checkstring[0] == "!":
if checkstring.encode('utf-8')[1:] not in opener.read():
print "Open",checkname
else:
#print "Closed",checkname
pass
else:
if checkstring.encode('utf-8') in opener.read():
print "Open",checkname
else:
#print "Closed",checkname
pass
except IOError:
#print "Broken",checkname
pass
p = xml.parsers.expat.ParserCreate()
tname = ""
url = ""
check = ""
mode = ""
enabled = ""
def char_data(data):
global tname, url, check, mode, enabled
if mode == "name":
tname += data
elif mode == "check":
check += data
elif mode == "signup":
url += data
elif mode == "type":
enabled += data
def end_element(name):
global tname, url, check, mode, enabled
mode = ""
if name == "tracker" and enabled[0] == "T":
threading.Thread(target=check_url, args=(url, check, tname)).start()
tname = ""
url = ""
enabled = ""
check = ""
def start_element(name, attrs):
global tname, url, check, mode, enabled
if name == "name":
mode = "name"
elif name == "signup":
mode = "signup"
elif name == "check":
mode = "check"
elif name == "type":
mode = "type"
p.StartElementHandler = start_element
p.EndElementHandler = end_element
p.CharacterDataHandler = char_data
f = open("trackers.xml")
p.Parse(f.read(),1)
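# Illustrative shape of trackers.xml, inferred from the handlers above; the
# root element name and all values are made up:
#   <trackers>
#     <tracker>
#       <name>ExampleTracker</name>
#       <signup>http://tracker.example.org/signup.php</signup>
#       <check>Signup</check>
#       <type>T</type>
#     </tracker>
#   </trackers>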
|
normal
|
{
"blob_id": "9d3d7000ed13a2623a53705d55b5dbb42662ce2f",
"index": 4296,
"step-1": "import xml.parsers.expat\nimport urllib2\nimport threading\n\n\n\ndef check_url(checkurl, checkstring, checkname):\n try:\n opener = urllib2.urlopen(checkurl, timeout = 5)\n if checkstring[0] == \"!\":\n if checkstring.encode('utf-8')[1:] not in opener.read():\n print \"Open\",checkname\n else:\n #print \"Closed\",checkname\n pass\n else:\n if checkstring.encode('utf-8') in opener.read():\n print \"Open\",checkname\n else:\n #print \"Closed\",checkname\n pass\n except IOError:\n #print \"Broken\",checkname\n pass\np = xml.parsers.expat.ParserCreate()\n\ntname = \"\"\nurl = \"\"\ncheck = \"\"\nmode = \"\"\nenabled = \"\"\n\ndef char_data(data):\n global tname, url, check, mode, enabled\n if mode == \"name\":\n tname += data\n elif mode == \"check\":\n check += data\n elif mode == \"signup\":\n url += data\n elif mode == \"type\":\n enabled += data\n \ndef end_element(name):\n global tname, url, check, mode, enabled\n mode = \"\"\n if name == \"tracker\" and enabled[0] == \"T\":\n threading.Thread(target=check_url, args=(url, check, tname)).start()\n tname = \"\"\n url = \"\"\n enabled = \"\"\n check = \"\"\n \n \ndef start_element(name, attrs):\n global tname, url, check, mode, enabled\n if name == \"name\":\n mode = \"name\"\n elif name == \"signup\":\n mode = \"signup\"\n elif name == \"check\":\n mode = \"check\"\n elif name == \"type\":\n mode = \"type\"\np.StartElementHandler = start_element\np.EndElementHandler = end_element\np.CharacterDataHandler = char_data\n\nf = open(\"trackers.xml\")\np.Parse(f.read(),1)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django import forms
from django.core.exceptions import ValidationError
from django.db import connection
from customer.helper_funcs import dictfetchall
class OrderForm(forms.Form):
item_id = forms.IntegerField(required=True)
quantity = forms.IntegerField(required=True)
def clean(self):
cleaned_data = super().clean()
item_id = cleaned_data.get("item_id")
quantity = cleaned_data.get("quantity")
if item_id and quantity:
cursor = connection.cursor()
query = "SELECT item_id, quantity FROM item"
cursor.execute(query + ";")
items = dictfetchall(cursor)
id_exists = False
for item in items:
if item["item_id"] == item_id:
id_exists = True
if item["quantity"] - quantity < 0:
raise ValidationError("Not enough units in stock.")
if not id_exists:
raise ValidationError("Id does not exist")
|
normal
|
{
"blob_id": "b32784bf398a58ba4b6e86fedcdc3ac9de0e8d51",
"index": 3137,
"step-1": "<mask token>\n\n\nclass OrderForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass OrderForm(forms.Form):\n <mask token>\n <mask token>\n\n def clean(self):\n cleaned_data = super().clean()\n item_id = cleaned_data.get('item_id')\n quantity = cleaned_data.get('quantity')\n if item_id and quantity:\n cursor = connection.cursor()\n query = 'SELECT item_id, quantity FROM item'\n cursor.execute(query + ';')\n items = dictfetchall(cursor)\n id_exists = False\n for item in items:\n if item['item_id'] == item_id:\n id_exists = True\n if item['quantity'] - quantity < 0:\n raise ValidationError('Not enough units in stock.')\n if not id_exists:\n raise ValidationError('Id does not exist')\n",
"step-3": "<mask token>\n\n\nclass OrderForm(forms.Form):\n item_id = forms.IntegerField(required=True)\n quantity = forms.IntegerField(required=True)\n\n def clean(self):\n cleaned_data = super().clean()\n item_id = cleaned_data.get('item_id')\n quantity = cleaned_data.get('quantity')\n if item_id and quantity:\n cursor = connection.cursor()\n query = 'SELECT item_id, quantity FROM item'\n cursor.execute(query + ';')\n items = dictfetchall(cursor)\n id_exists = False\n for item in items:\n if item['item_id'] == item_id:\n id_exists = True\n if item['quantity'] - quantity < 0:\n raise ValidationError('Not enough units in stock.')\n if not id_exists:\n raise ValidationError('Id does not exist')\n",
"step-4": "from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db import connection\nfrom customer.helper_funcs import dictfetchall\n\n\nclass OrderForm(forms.Form):\n item_id = forms.IntegerField(required=True)\n quantity = forms.IntegerField(required=True)\n\n def clean(self):\n cleaned_data = super().clean()\n item_id = cleaned_data.get('item_id')\n quantity = cleaned_data.get('quantity')\n if item_id and quantity:\n cursor = connection.cursor()\n query = 'SELECT item_id, quantity FROM item'\n cursor.execute(query + ';')\n items = dictfetchall(cursor)\n id_exists = False\n for item in items:\n if item['item_id'] == item_id:\n id_exists = True\n if item['quantity'] - quantity < 0:\n raise ValidationError('Not enough units in stock.')\n if not id_exists:\n raise ValidationError('Id does not exist')\n",
"step-5": "from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db import connection\nfrom customer.helper_funcs import dictfetchall\n\n\nclass OrderForm(forms.Form):\n item_id = forms.IntegerField(required=True)\n quantity = forms.IntegerField(required=True)\n\n def clean(self):\n cleaned_data = super().clean()\n item_id = cleaned_data.get(\"item_id\")\n quantity = cleaned_data.get(\"quantity\")\n\n if item_id and quantity:\n cursor = connection.cursor()\n query = \"SELECT item_id, quantity FROM item\"\n cursor.execute(query + \";\")\n items = dictfetchall(cursor)\n id_exists = False\n for item in items:\n if item[\"item_id\"] == item_id:\n id_exists = True\n if item[\"quantity\"] - quantity < 0:\n raise ValidationError(\"Not enough units in stock.\")\n\n if not id_exists:\n raise ValidationError(\"Id does not exist\")",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from sklearn import preprocessing
from random import shuffle
import numpy as np
import collections
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras import backend as K
from gensim.models.keyedvectors import KeyedVectors
from nltk.tokenize import TreebankWordTokenizer
import re
import pickle
import os
import yaml
import pandas
from typing import List
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import losses, optimizers
from early_stopping import EarlyStoppingAtMaxMacroF1
import json
import hashlib
SEED = 7
def read_csv_json(file_name) -> pandas.DataFrame:
if file_name.endswith('json') or file_name.endswith('jsonl'):
df = pandas.read_json(file_name, lines=True)
elif file_name.endswith('csv'):
df = pandas.read_csv(file_name)
else:
raise NotImplementedError
return df
def use_only_alphanumeric(input):
pattern = re.compile('[\W^\'\"]+')
output = pattern.sub(' ', input).strip()
return output
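
# Illustrative example (not in the original module): the regex collapses each run of
# punctuation, whitespace, and quotes into a single space and strips the ends, e.g.
#   use_only_alphanumeric("Hello,   world! It's 9am.") -> "Hello world It s 9am"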
def tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims):
vectorized_data = []
# probably could be optimized further
ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]
token_list = [tokenizer.tokenize(sample) for sample in ds1]
for tokens in token_list:
vecs = []
for token in tokens:
try:
vecs.append(embedding_vector[token].tolist())
except KeyError:
# print('token not found: (%s) in sentence: %s' % (token, ' '.join(tokens)))
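                # Seeding NumPy from the SHA-1 hash of the token makes the fallback vector
                # reproducible: the same out-of-vocabulary word always maps to the same
                # pseudo-random embedding across runs.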
np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(), 16) % (10 ** 6))
unk_vec = np.random.rand(embedding_dims)
vecs.append(unk_vec.tolist())
continue
vectorized_data.append(vecs)
return vectorized_data
def pad_trunc(data, maxlen):
"""
For a given dataset pad with zero vectors or truncate to maxlen
"""
new_data = []
# Create a vector of 0s the length of our word vectors
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = list(sample)
# Append the appropriate number 0 vectors to the list
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
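
# A small worked example (not in the original module), assuming 2-dimensional word
# vectors and maxlen=3:
#   pad_trunc([[[1.0, 2.0]]], 3)      -> [[[1.0, 2.0], [0.0, 0.0], [0.0, 0.0]]]
#   pad_trunc([[[1.0, 2.0]] * 5], 3)  keeps only the first 3 token vectors of the sample.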
def save(model, le, path, history):
'''
save model based on model, encoder
'''
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print(f'saving model to {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes')
with open(structure_file, "w") as json_file:
json_file.write(model.to_json())
model.save_weights(weight_file)
np.save(labels_file, le.categories_[0])
with open(os.path.join(path, "log.json"), 'w') as f:
json.dump(history.history, f)
def load(path):
print(f'loading model from {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes.npy')
with open(structure_file, "r") as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights(weight_file)
model._make_predict_function()
#le = preprocessing.LabelEncoder()
categories = np.load(labels_file)
le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
le.fit([[c] for c in categories])
json_file.close()
return model, le
def predict(session, graph, model, vectorized_input, num_classes):
    if session is None:
        raise ValueError("Session is not initialized")
    if graph is None:
        raise ValueError("Graph is not initialized")
    if model is None:
        raise ValueError("Model is not initialized")
with session.as_default():
with graph.as_default():
probs = model.predict_proba(vectorized_input)
preds = model.predict_classes(vectorized_input)
preds = to_categorical(preds, num_classes=num_classes)
return (probs, preds)
class Model:
def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):
with open(config_path, 'r') as f:
self.model_cfg = yaml.safe_load(f)['model']
self.tokenizer = TreebankWordTokenizer()
with open(word2vec_pkl_path, 'rb') as f:
self.vectors = pickle.load(f)
self.model = None
self.session = None
self.graph = None
self.le_encoder = None
self.label_smoothing = label_smoothing
def train(self, tr_set_path: str, save_path: str, va_split: float=0.1, stratified_split: bool=False, early_stopping: bool=True):
"""
Train a model for a given dataset
Dataset should be a list of tuples consisting of
training sentence and the class label
Args:
tr_set_path: path to training data
save_path: path to save model weights and labels
va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True.
stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split.
early_stopping: whether to do early stopping
Returns:
history of training including average loss for each training epoch
"""
df_tr = read_csv_json(tr_set_path)
if stratified_split:
df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=va_split, random_state=SEED))
df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]
va_messages, va_labels = list(df_va.text), list(df_va.intent)
va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for i in range(len(df_va))]
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]
(x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)
(x_va, y_va, _) = self.__preprocess(va_dataset, le_encoder)
else:
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]
(x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
session.run(tf.global_variables_initializer())
model = self.__build_model(num_classes=len(le_encoder.categories_[0]))
model.compile(
loss=losses.CategoricalCrossentropy(label_smoothing=self.label_smoothing),
#metrics=['categorical_accuracy'],
optimizer=self.model_cfg.get('optimizer', 'adam') #default lr at 0.001
#optimizer=optimizers.Adam(learning_rate=5e-4)
)
# early stopping callback using validation loss
callback = tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=0,
patience=5,
verbose=0,
mode="auto",
baseline=None,
restore_best_weights=True,
)
#callback = EarlyStoppingAtMaxMacroF1(
# patience=100, # record all epochs
# validation=(x_va, y_va)
#)
print('start training')
history = model.fit(x_train, y_train,
batch_size=self.model_cfg['batch_size'],
epochs=100,
validation_split=va_split if not stratified_split else 0,
validation_data=(x_va, y_va) if stratified_split else None,
callbacks=[callback] if early_stopping else None)
history.history['train_data'] = tr_set_path
print(f'finished training in {len(history.history["loss"])} epochs')
save(model, le_encoder, save_path, history)
self.model = model
self.session = session
self.graph = graph
self.le_encoder = le_encoder
# return training history
return history.history
def __preprocess(self, dataset, le_encoder=None):
'''
Preprocess the dataset, transform the categorical labels into numbers.
Get word embeddings for the training data.
'''
shuffle(dataset)
data = [s['data'] for s in dataset]
#labels = [s['label'] for s in dataset]
labels = [[s['label']] for s in dataset]
#le_encoder = preprocessing.LabelEncoder()
if le_encoder is None:
le_encoder = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
le_encoder.fit(labels)
encoded_labels = le_encoder.transform(labels)
print('%s intents with %s samples' % (len(le_encoder.get_feature_names()), len(data)))
#print('train %s intents with %s samples' % (len(set(labels)), len(data)))
#print(collections.Counter(labels))
print(le_encoder.categories_[0])
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, data, self.model_cfg['embedding_dims'])
# split_point = int(len(vectorized_data) * .9)
x_train = vectorized_data # vectorized_data[:split_point]
y_train = encoded_labels # encoded_labels[:split_point]
x_train = pad_trunc(x_train, self.model_cfg['maxlen'])
x_train = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))
y_train = np.array(y_train)
return x_train, y_train, le_encoder
def __build_model(self, num_classes=2, type='keras'):
print('Build model')
model = Sequential()
layers = self.model_cfg.get('layers', 1)
for l in range(layers):
self.__addLayers(model, self.model_cfg)
model.add(Dense(num_classes))
model.add(Activation('softmax'))
return model
def __addLayers(self, model, model_cfg):
maxlen = model_cfg.get('maxlen', 400)
strides = model_cfg.get('strides', 1)
embedding_dims = model_cfg.get('embedding_dims', 300)
filters = model_cfg.get('filters', 250)
activation_type = model_cfg.get('activation', 'relu')
kernel_size = model_cfg.get('kernel_size', 3)
hidden_dims = model_cfg.get('hidden_dims', 200)
model.add(Conv1D(
filters,
kernel_size,
padding='valid',
activation=activation_type,
strides=strides,
input_shape=(maxlen, embedding_dims)))
model.add(GlobalMaxPooling1D())
model.add(Dense(hidden_dims))
model.add(Activation(activation_type))
def load(self, path):
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
self.session = session
self.graph = graph
(model, le) = load(path)
self.model = model
self.le_encoder = le
def predict(self, input: List[str]):
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, input, self.model_cfg['embedding_dims'])
x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])
vectorized_input = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))
(probs, preds) = predict(self.session, self.graph, self.model, vectorized_input, len(self.le_encoder.categories_[0]))
probs = probs.tolist()
results = self.le_encoder.inverse_transform(preds)
output = [{'input': input[i],
'embeddings': x_train[i],
#'label': r,
'label': r.item(),
'highestProb': max(probs[i]),
#'prob': dict(zip(self.le_encoder.classes_, probs[i]))
'prob': dict(zip(self.le_encoder.categories_[0], probs[i]))
} for i, r in enumerate(results)]
return output
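

# A minimal usage sketch (illustrative only; the file paths below are hypothetical
# placeholders, not files shipped with this module): train a Model on a CSV/JSONL
# intent dataset with columns "text" and "intent", then reload it and classify new text.
if __name__ == "__main__":
    clf = Model(word2vec_pkl_path="word2vec.pkl", config_path="config.yml")
    clf.train(tr_set_path="train.jsonl", save_path="saved_model",
              stratified_split=True, early_stopping=True)

    clf.load("saved_model")
    for result in clf.predict(["where is my order"]):
        print(result["label"], result["highestProb"])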
|
normal
|
{
"blob_id": "23f491bbf26ede9052ecdab04b8c00cc78db5a7e",
"index": 8831,
"step-1": "<mask token>\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\n<mask token>\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. \n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in {len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, 
history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-2": "<mask token>\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\n<mask token>\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n \"\"\"\n save model based on model, encoder\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, 'w') as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, 'log.json'), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, 'r') as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is None:\n raise 'Session is not initialized'\n if graph is None:\n raise 'Graph is not initialized'\n if model is None:\n raise 'Model is not initialized'\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return probs, preds\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. 
If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. \n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in {len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n 
model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-3": "<mask token>\ntf.disable_v2_behavior()\n<mask token>\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\ndef use_only_alphanumeric(input):\n pattern = re.compile('[\\\\W^\\'\"]+')\n output = pattern.sub(' ', input).strip()\n return output\n\n\ndef tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims\n ):\n vectorized_data = []\n ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]\n token_list = [tokenizer.tokenize(sample) for sample in ds1]\n for tokens in token_list:\n vecs = []\n for token in tokens:\n try:\n vecs.append(embedding_vector[token].tolist())\n except KeyError:\n np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(),\n 16) % 10 ** 6)\n unk_vec = np.random.rand(embedding_dims)\n vecs.append(unk_vec.tolist())\n continue\n vectorized_data.append(vecs)\n return vectorized_data\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n \"\"\"\n save model based on model, encoder\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, 'w') as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, 'log.json'), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, 'r') as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is None:\n raise 'Session is not initialized'\n if graph is None:\n raise 'Graph is not initialized'\n if model is None:\n raise 'Model is not initialized'\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return probs, preds\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = 
None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. \n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in {len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s 
samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-4": "from sklearn import preprocessing\nfrom random import shuffle\nimport numpy as np\nimport collections\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D\nfrom tensorflow.keras.models import Sequential, model_from_json\nfrom tensorflow.keras import backend as K\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom nltk.tokenize import TreebankWordTokenizer\nimport re\nimport pickle\nimport os\nimport yaml\nimport pandas\nfrom typing import List\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import losses, optimizers\nfrom early_stopping import EarlyStoppingAtMaxMacroF1\nimport json\nimport hashlib\nSEED = 7\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\ndef use_only_alphanumeric(input):\n pattern = re.compile('[\\\\W^\\'\"]+')\n output = pattern.sub(' ', input).strip()\n return output\n\n\ndef tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims\n ):\n vectorized_data = []\n ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]\n token_list = [tokenizer.tokenize(sample) for sample in ds1]\n for tokens in token_list:\n vecs = []\n for token in tokens:\n try:\n vecs.append(embedding_vector[token].tolist())\n except KeyError:\n np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(),\n 16) % 10 ** 6)\n unk_vec = np.random.rand(embedding_dims)\n vecs.append(unk_vec.tolist())\n continue\n vectorized_data.append(vecs)\n return vectorized_data\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n \"\"\"\n save model based on model, encoder\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, 'w') as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, 'log.json'), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, 'r') as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is 
None:\n raise 'Session is not initialized'\n if graph is None:\n raise 'Graph is not initialized'\n if model is None:\n raise 'Model is not initialized'\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return probs, preds\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. \n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in 
{len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-5": "from sklearn import preprocessing\nfrom random import shuffle\nimport numpy as np\nimport collections\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D\nfrom tensorflow.keras.models import Sequential, model_from_json\nfrom tensorflow.keras import backend as K\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom nltk.tokenize import TreebankWordTokenizer\nimport re\nimport pickle\nimport os\n\nimport yaml\nimport pandas\nfrom typing import List\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import losses, optimizers\nfrom early_stopping import EarlyStoppingAtMaxMacroF1\nimport json\nimport hashlib\n\nSEED = 7\n\n\ndef read_csv_json(file_name) -> pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\ndef use_only_alphanumeric(input):\n pattern = re.compile('[\\W^\\'\\\"]+')\n output = pattern.sub(' ', input).strip()\n return output\n\n\ndef tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims):\n vectorized_data = []\n # probably could be optimized further\n ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]\n token_list = [tokenizer.tokenize(sample) for sample in ds1]\n\n for tokens in token_list:\n vecs = []\n for token in tokens:\n try:\n vecs.append(embedding_vector[token].tolist())\n except KeyError:\n # print('token not found: (%s) in sentence: %s' % (token, ' '.join(tokens)))\n np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(), 16) % (10 ** 6))\n unk_vec = np.random.rand(embedding_dims)\n vecs.append(unk_vec.tolist())\n continue\n vectorized_data.append(vecs)\n return vectorized_data\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n # Create a vector of 0s the length of our word vectors\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n # Append the appropriate number 0 vectors to the list\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n '''\n save model based on model, encoder\n '''\n\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, \"w\") as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, \"log.json\"), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, \"r\") as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n #le = 
preprocessing.LabelEncoder()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is None:\n raise (\"Session is not initialized\")\n if graph is None:\n raise (\"Graph is not initialized\")\n if model is None:\n raise (\"Model is not initialized\")\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return (probs, preds)\n\n\nclass Model:\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1, stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. 
\n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]\n (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)\n (x_va, y_va, _) = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]\n (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)\n\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.categories_[0]))\n model.compile(\n loss=losses.CategoricalCrossentropy(label_smoothing=self.label_smoothing),\n #metrics=['categorical_accuracy'],\n optimizer=self.model_cfg.get('optimizer', 'adam') #default lr at 0.001\n #optimizer=optimizers.Adam(learning_rate=5e-4)\n )\n # early stopping callback using validation loss \n callback = tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n min_delta=0,\n patience=5,\n verbose=0,\n mode=\"auto\",\n baseline=None,\n restore_best_weights=True,\n )\n #callback = EarlyStoppingAtMaxMacroF1(\n # patience=100, # record all epochs\n # validation=(x_va, y_va)\n #)\n\n print('start training')\n history = model.fit(x_train, y_train,\n batch_size=self.model_cfg['batch_size'],\n epochs=100,\n validation_split=va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else None,\n callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(f'finished training in {len(history.history[\"loss\"])} epochs')\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n # return training history \n return history.history\n \n def __preprocess(self, dataset, le_encoder=None):\n '''\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n '''\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n #labels = [s['label'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n #le_encoder = preprocessing.LabelEncoder()\n if le_encoder is None: \n le_encoder = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.get_feature_names()), len(data)))\n #print('train %s intents with %s samples' % (len(set(labels)), len(data)))\n #print(collections.Counter(labels))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, data, self.model_cfg['embedding_dims'])\n\n # split_point = int(len(vectorized_data) * .9)\n x_train = vectorized_data # vectorized_data[:split_point]\n y_train = encoded_labels # 
encoded_labels[:split_point]\n\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n\n model.add(Conv1D(\n filters,\n kernel_size,\n padding='valid',\n activation=activation_type,\n strides=strides,\n input_shape=(maxlen, embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n (model, le) = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n (probs, preds) = predict(self.session, self.graph, self.model, vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i],\n 'embeddings': x_train[i],\n #'label': r,\n 'label': r.item(),\n 'highestProb': max(probs[i]),\n #'prob': dict(zip(self.le_encoder.classes_, probs[i]))\n 'prob': dict(zip(self.le_encoder.categories_[0], probs[i]))\n } for i, r in enumerate(results)]\n return output\n",
"step-ids": [
9,
13,
16,
18,
19
]
}
|
[
9,
13,
16,
18,
19
] |
"""
r - reading from a file
w - writing to a file (creates the file if needed and truncates any existing contents)
a - append to file / add to the end of the file - always writes at the end
r+ - read and write to file (writing starts at the Python cursor position) -> by default the cursor is at the beginning of the file -> writing won't insert and shift things over,
it will overwrite the existing contents. -> r+ can only be used with already existing files.
"""
with open("haiku.txt", "w") as file:
file.write("This is the line 1 of the haiku\n")
file.write("Following the line 2 of the haiku\n")
file.write("Finishing off with the line 3 of the haiku\n")
with open("haiku.txt", "a") as file:
file.write("This is the line 1 of the haiku\n")
file.write("Following the line 2 of the haiku\n")
file.write("Finishing off with the line 3 of the haiku\n")
with open("existing_file.txt", "r+") as file:
file.write("This is the line 1 of the haiku\n")
file.write("Following the line 2 of the haiku\n")
file.write("Finishing off with the line 3 of the haiku\n")
|
normal
|
{
"blob_id": "cde2454c68a0d6a0c86b7d647e41a86d3aa97a0d",
"index": 8267,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('haiku.txt', 'w') as file:\n file.write('This is the line 1 of the haiku\\n')\n file.write('Following the line 2 of the haiku\\n')\n file.write('Finishing off with the line 3 of the haiku\\n')\nwith open('haiku.txt', 'a') as file:\n file.write('This is the line 1 of the haiku\\n')\n file.write('Following the line 2 of the haiku\\n')\n file.write('Finishing off with the line 3 of the haiku\\n')\nwith open('existing_file.txt', 'r+') as file:\n file.write('This is the line 1 of the haiku\\n')\n file.write('Following the line 2 of the haiku\\n')\n file.write('Finishing off with the line 3 of the haiku\\n')\n",
"step-3": "\"\"\"\nr - reading fike\nw - writing to file\na - append to file / add to the end of the file - always at the end\nr+ - read and write to file (writing based on python cursor position) -> by default at the beginning of file -> won't insert and shift things over,\nwill overwrite the contents. -> r+ can only be used with already existing files.\n\n\"\"\"\n\nwith open(\"haiku.txt\", \"w\") as file:\n file.write(\"This is the line 1 of the haiku\\n\")\n file.write(\"Following the line 2 of the haiku\\n\")\n file.write(\"Finishing off with the line 3 of the haiku\\n\")\n\nwith open(\"haiku.txt\", \"a\") as file:\n file.write(\"This is the line 1 of the haiku\\n\")\n file.write(\"Following the line 2 of the haiku\\n\")\n file.write(\"Finishing off with the line 3 of the haiku\\n\")\n\nwith open(\"existing_file.txt\", \"r+\") as file:\n file.write(\"This is the line 1 of the haiku\\n\")\n file.write(\"Following the line 2 of the haiku\\n\")\n file.write(\"Finishing off with the line 3 of the haiku\\n\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# PDE:
# add_library('hype')
# processing.py:
from hype.core.util import H
from hype.core.interfaces import HCallback
from hype.extended.behavior import HOscillator
from hype.extended.drawable import HCanvas, HRect
from hype.extended.layout import HGridLayout
from hype.extended.util import HDrawablePool
from random import choice
rectRadius = 50
numSquares = 25
canvas = None
pool = None
color1 = 0x406B2B24 # #6B2B24
color2 = 0xc4831521 # #831521
def setup():
global canvas, pool
size(568, 568)
H.init(this).background(0xffE0DFE2) # #E0DFE2
smooth()
canvas = H.add(HCanvas()).autoClear(False).fade(5)
pool = HDrawablePool(numSquares)
pool.autoParent(canvas)\
.add(HRect()
.size(rectRadius * 2)
.noStroke())\
.layout(HGridLayout()
.startLoc(rectRadius * 2 - 20, rectRadius * 2 - 20)
.spacing(rectRadius * 2 + 1, rectRadius * 2 + 1)
.cols(5))\
.onCreate(Callback())\
.requestAll()
def draw():
H.drawStage()
class Callback(HCallback):
def __init__(self):
pass
@staticmethod
def run(drawable):
drawable.anchorAt(H.CENTER)\
.fill(choice([color1, color2]))
HOscillator()\
.target(drawable)\
.property(H.ROTATION)\
.range(-5, 5)\
.speed(1)\
.freq(4)\
.currentStep(pool.currentIndex() * random(2, 25))
|
normal
|
{
"blob_id": "b8a41c56a31acab0181ec364f76010ac12119074",
"index": 5489,
"step-1": "<mask token>\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-2": "<mask token>\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(4292927458)\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()\n ).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -\n 20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(\n Callback()).requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-3": "<mask token>\nrectRadius = 50\nnumSquares = 25\ncanvas = None\npool = None\ncolor1 = 1080765220\ncolor2 = 3296924961\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(4292927458)\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()\n ).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -\n 20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(\n Callback()).requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-4": "from hype.core.util import H\nfrom hype.core.interfaces import HCallback\nfrom hype.extended.behavior import HOscillator\nfrom hype.extended.drawable import HCanvas, HRect\nfrom hype.extended.layout import HGridLayout\nfrom hype.extended.util import HDrawablePool\nfrom random import choice\nrectRadius = 50\nnumSquares = 25\ncanvas = None\npool = None\ncolor1 = 1080765220\ncolor2 = 3296924961\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(4292927458)\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()\n ).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -\n 20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(\n Callback()).requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-5": "# PDE:\n# add_library('hype')\n# processing.py:\nfrom hype.core.util import H\nfrom hype.core.interfaces import HCallback\nfrom hype.extended.behavior import HOscillator\nfrom hype.extended.drawable import HCanvas, HRect\nfrom hype.extended.layout import HGridLayout\nfrom hype.extended.util import HDrawablePool\n\nfrom random import choice\n\n\nrectRadius = 50\nnumSquares = 25\ncanvas = None\npool = None\ncolor1 = 0x406B2B24 # #6B2B24\ncolor2 = 0xc4831521 # #831521\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(0xffE0DFE2) # #E0DFE2\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas)\\\n .add(HRect()\n .size(rectRadius * 2)\n .noStroke())\\\n .layout(HGridLayout()\n .startLoc(rectRadius * 2 - 20, rectRadius * 2 - 20)\n .spacing(rectRadius * 2 + 1, rectRadius * 2 + 1)\n .cols(5))\\\n .onCreate(Callback())\\\n .requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER)\\\n .fill(choice([color1, color2]))\n HOscillator()\\\n .target(drawable)\\\n .property(H.ROTATION)\\\n .range(-5, 5)\\\n .speed(1)\\\n .freq(4)\\\n .currentStep(pool.currentIndex() * random(2, 25))\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import sys
import HTSeq
import re
import string
import glob
import os
import time
import difflib
import argparse
def parse_input():
parser = argparse.ArgumentParser(description="""
USAGE: python make_figs.py -f data_file
""")
# If the -b option is used, tRNAs with no tails are not counted.
# This speeds up the removal of duplicates for large datasets
#parser.add_option("-b", "--blanks", action="store_false", dest="includeBlankTails", default=True)
parser.add_argument("-f", "--data_file", action="store",
dest="data_file",
help="Filename of data.")
args = parser.parse_args()
return args
def write_most_common_tails(inserts, base_filename, control=False):
for exp in inserts:
with open("%s_%s" % (base_filename,
os.path.basename(exp).rstrip('.inserts').rstrip(
'.fastq')),
'w') as f:
if(not control):
lines = inserts[exp].write_table_of_most_common_tails(control)
if(control):
lines = inserts[exp].write_table_of_most_common_tails(
control, get_pvalues=True)
f.write(lines)
def parse_data_file(filename):
data = {}
print "Opening %s with file size %i..." % (
filename, os.path.getsize(filename))
with open(filename, 'r') as f:
dataset = ""
for li in f:
#print li
s = li.strip('\n').split('\t')
m = re.match(r'number tails in ([^:]+):.*', li)
if(m is not None):
dataset = m.group(1)
dataset = os.path.basename(dataset)
cur_dataset = dataset
data[dataset] = {'n_tails': s[1:]}
continue
m = re.match(r'([AGCTN]):.*', s[0])
if(m is not None):
data[dataset][m.group(1)] = s[1:]
continue
m = re.match(r'tail length:.*', li)
if(m is not None):
data[dataset]['tail_len'] = s[1:]
continue
m = re.match(r'.*Number of unique.*', li)
if(m is not None):
data[dataset]['n_unique'] = s[1:]
continue
return data
def check_data_agreement(data):
for exp in data:
max_range = min(len(data[exp]['n_tails']),
len(data[exp]['tail_len']),
len(data[exp]['n_unique']))
n_tails = 0
for index in range(1, max_range-1):
try:
n_tails += float(data[exp]['n_tails'][index])
except:
print "Error at %s, %i" % (exp, index)
print "%s: total tails=%f" % (exp, n_tails)
def write_for_R(data, src_path):
src_path = os.path.dirname(os.path.realpath(__file__))
files_for_R = list()
check_data_agreement(data)
for exp in data:
with open("%s/figs/%s.forR" % (
src_path, exp.rstrip('.fastq.inserts')
), 'w') as f:
li = "tail_len\tn_tails\tn_unique\tA\tC\tT\tG\n"
max_range = min(len(data[exp]['n_tails']),
len(data[exp]['tail_len']),
len(data[exp]['n_unique']))
for index in range(0, max_range):
li += "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (
data[exp]['tail_len'][index],
data[exp]['n_tails'][index],
data[exp]['n_unique'][index],
data[exp]['A'][index],
data[exp]['C'][index],
data[exp]['T'][index],
data[exp]['G'][index])
f.write(li)
files_for_R.append("%s/figs/%s.forR" % (
src_path, exp.rstrip('.fastq.inserts')))
return files_for_R
def r_script_for_barplot(files_for_R, src_path):
for filename in files_for_R:
li = """
f = read.table("%s", head=T)""" % filename
li += """
bases = as.data.frame(cbind(f$A, f$C, f$T, f$G))
m = as.matrix(bases)
outfname = "%s/figs/barplot_%s.eps"
""" % (src_path, os.path.basename(filename))
li += r'''
library(RColorBrewer)
my_cols <- brewer.pal(4, "RdBu")
setEPS(width=5,height=3); postscript(outfname)
barplot(t(m), xlab = 'Tail length',
ylab = 'Percent base composition',
legend=c('A','C','T','G'), col=my_cols)
dev.off()
'''
li += """
outfname = "%s/figs/plot_%s.eps"
""" % (src_path, os.path.basename(filename))
li += r'''
library(RColorBrewer)
my_cols <- brewer.pal(4, "RdBu")
setEPS(width=5,height=10); postscript(outfname)
par(mfrow=c(3,1))
plot(f$n_tails, x=f$tail_len, type='l', xlab='Tail length',
ylab='Number of tails')
plot(f$n_unique, x=f$tail_len, type='l', xlab='Tail length',
ylab='Number of unique tails')
barplot(t(m), xlab = 'Tail length',
ylab = 'Percent base composition',
legend=c('A','C','T','G'), col=my_cols)
dev.off()
'''
with open('tmp.r', 'w') as f:
f.write(li)
cmdl = """R CMD BATCH tmp.r"""
os.system(cmdl)
def make_figs(data_filename, src_path):
print "In make_figs. Processing file %s" % data_filename
data = parse_data_file(data_filename)
if(not os.path.exists(src_path + "/figs")):
print "making %s/figs" % src_path
os.system("mkdir %s/figs" % src_path)
files_for_R = write_for_R(data, src_path)
r_script_for_barplot(files_for_R, src_path)
if __name__ == '__main__':
src_path = os.path.dirname(os.path.realpath(__file__))
args = parse_input()
data = parse_data_file(args.data_file)
if(not os.path.exists(src_path + '/figs')):
os.system('mkdir ' + src_path + '/figs')
    files_for_R = write_for_R(data, src_path)
    r_script_for_barplot(files_for_R, src_path)
|
normal
|
{
"blob_id": "05f5931a53c9916f151f42910575f9c5533bfceb",
"index": 9921,
"step-1": "import sys\nimport HTSeq\nimport re\nimport string\nimport glob\nimport os\nimport time\nimport difflib\nimport argparse\n\n\ndef parse_input():\n parser = argparse.ArgumentParser(description=\"\"\"\n USAGE: python make_figs.py -f data_file\n \"\"\")\n\n # If the -b option is used, tRNAs with no tails are not counted.\n # This speeds up the removal of duplicates for large datasets\n #parser.add_option(\"-b\", \"--blanks\", action=\"store_false\", dest=\"includeBlankTails\", default=True)\n\n parser.add_argument(\"-f\", \"--data_file\", action=\"store\",\n dest=\"data_file\",\n help=\"Filename of data.\")\n args = parser.parse_args()\n return args\n\n\ndef write_most_common_tails(inserts, base_filename, control=False):\n for exp in inserts:\n with open(\"%s_%s\" % (base_filename,\n os.path.basename(exp).rstrip('.inserts').rstrip(\n '.fastq')),\n 'w') as f:\n if(not control):\n lines = inserts[exp].write_table_of_most_common_tails(control)\n if(control):\n lines = inserts[exp].write_table_of_most_common_tails(\n control, get_pvalues=True)\n f.write(lines)\n\n\ndef parse_data_file(filename):\n data = {}\n print \"Opening %s with file size %i...\" % (\n filename, os.path.getsize(filename))\n with open(filename, 'r') as f:\n dataset = \"\"\n for li in f:\n #print li\n s = li.strip('\\n').split('\\t')\n m = re.match(r'number tails in ([^:]+):.*', li)\n if(m is not None):\n dataset = m.group(1)\n dataset = os.path.basename(dataset)\n cur_dataset = dataset\n data[dataset] = {'n_tails': s[1:]}\n continue\n m = re.match(r'([AGCTN]):.*', s[0])\n if(m is not None):\n data[dataset][m.group(1)] = s[1:]\n continue\n m = re.match(r'tail length:.*', li)\n if(m is not None):\n data[dataset]['tail_len'] = s[1:]\n continue\n m = re.match(r'.*Number of unique.*', li)\n if(m is not None):\n data[dataset]['n_unique'] = s[1:]\n continue\n return data\n \n\ndef check_data_agreement(data):\n for exp in data:\n max_range = min(len(data[exp]['n_tails']),\n len(data[exp]['tail_len']),\n len(data[exp]['n_unique']))\n n_tails = 0\n for index in range(1, max_range-1):\n try:\n n_tails += float(data[exp]['n_tails'][index])\n except:\n print \"Error at %s, %i\" % (exp, index)\n print \"%s: total tails=%f\" % (exp, n_tails)\n \n\ndef write_for_R(data, src_path):\n src_path = os.path.dirname(os.path.realpath(__file__))\n files_for_R = list()\n check_data_agreement(data)\n for exp in data:\n with open(\"%s/figs/%s.forR\" % (\n src_path, exp.rstrip('.fastq.inserts')\n ), 'w') as f:\n li = \"tail_len\\tn_tails\\tn_unique\\tA\\tC\\tT\\tG\\n\"\n max_range = min(len(data[exp]['n_tails']),\n len(data[exp]['tail_len']),\n len(data[exp]['n_unique']))\n for index in range(0, max_range):\n li += \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (\n data[exp]['tail_len'][index],\n data[exp]['n_tails'][index],\n data[exp]['n_unique'][index],\n data[exp]['A'][index],\n data[exp]['C'][index],\n data[exp]['T'][index],\n data[exp]['G'][index])\n f.write(li)\n files_for_R.append(\"%s/figs/%s.forR\" % (\n src_path, exp.rstrip('.fastq.inserts')))\n return files_for_R\n\n\ndef r_script_for_barplot(files_for_R, src_path):\n for filename in files_for_R:\n li = \"\"\"\n f = read.table(\"%s\", head=T)\"\"\" % filename\n li += \"\"\"\n bases = as.data.frame(cbind(f$A, f$C, f$T, f$G))\n m = as.matrix(bases)\n outfname = \"%s/figs/barplot_%s.eps\"\n \"\"\" % (src_path, os.path.basename(filename))\n li += r'''\n library(RColorBrewer)\n my_cols <- brewer.pal(4, \"RdBu\")\n setEPS(width=5,height=3); postscript(outfname)\n barplot(t(m), xlab = 'Tail 
length',\n ylab = 'Percent base composition',\n legend=c('A','C','T','G'), col=my_cols)\n dev.off()\n '''\n li += \"\"\"\n outfname = \"%s/figs/plot_%s.eps\"\n\"\"\" % (src_path, os.path.basename(filename))\n li += r'''\n library(RColorBrewer)\n my_cols <- brewer.pal(4, \"RdBu\")\n setEPS(width=5,height=10); postscript(outfname)\n par(mfrow=c(3,1))\n plot(f$n_tails, x=f$tail_len, type='l', xlab='Tail length',\n ylab='Number of tails')\n plot(f$n_unique, x=f$tail_len, type='l', xlab='Tail length',\n ylab='Number of unique tails')\n barplot(t(m), xlab = 'Tail length',\n ylab = 'Percent base composition',\n legend=c('A','C','T','G'), col=my_cols)\n dev.off()\n '''\n with open('tmp.r', 'w') as f:\n f.write(li)\n cmdl = \"\"\"R CMD BATCH tmp.r\"\"\"\n os.system(cmdl)\n\n\ndef make_figs(data_filename, src_path):\n print \"In make_figs. Processing file %s\" % data_filename\n data = parse_data_file(data_filename)\n if(not os.path.exists(src_path + \"/figs\")):\n print \"making %s/figs\" % src_path\n os.system(\"mkdir %s/figs\" % src_path)\n files_for_R = write_for_R(data, src_path)\n r_script_for_barplot(files_for_R, src_path)\n\n \nif __name__ == '__main__':\n src_path = os.path.dirname(os.path.realpath(__file__))\n args = parse_input()\n data = parse_data_file(args.data_file)\n if(not os.path.exists(src_path + '/figs')):\n os.system('mkdir ' + src_path + '/figs')\n files_for_R = write_for_R(data)\n r_script_for_barplot(files_for_R)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import sys
import sucessor
import expande
from collections import deque
def busca_caminho(nodo_final, nodo_inicial):
	pilha_acoes = deque() # we push the actions onto a stack, since at this point they come out in reverse order
v = nodo_final
while v != nodo_inicial:
pilha_acoes.append(v.acao)
v = v.pai
return pilha_acoes
def busca_dfs(nodo_inicial, custo_maximo_atual):
objetivo = "12345678_"
	custo_maximo_absoluto = 100 # maximum tolerated depth
explorados = set()
fronteira = deque()
fronteira.append(nodo_inicial)
	if custo_maximo_atual > custo_maximo_absoluto: # if the current depth limit exceeds the maximum tolerated depth, return -1, since a solution most likely does not exist
return -1
while True:
		if not fronteira: # if the frontier is empty
explorados = None
			return busca_dfs(nodo_inicial, custo_maximo_atual + 1) # run the search again, this time with a larger depth limit
		v = fronteira.pop() # pop instead of popleft so the frontier is treated as a stack
if v.estado == objetivo:
return busca_caminho(v, nodo_inicial)
if v not in explorados:
explorados.add(v)
estados_sucessores = sucessor.sucessor(v.estado)
			# Each state reachable from v is added to the frontier as long as the depth of the new states does not exceed the maximum depth
if (v.custo + 1) < custo_maximo_atual:
for e in estados_sucessores:
filho = expande.Nodo(e[1], v, e[0], v.custo + 1)
fronteira.append(filho)
def main():
	# since I did not want to modify the classes that already existed, I used the cost of each state as a synonym for depth, because new states always have cost = parent's cost + 1
estado_inicial = sys.argv[1]
custo_inicial = 0
pai = expande.Nodo(estado_inicial, 0, "", custo_inicial)
caminho = busca_dfs(pai, 1)
while caminho:
print(caminho.pop(), end = " ")
print()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "a85a7ad6ffb2b9aa5f5326d11c75ddbee680fac4",
"index": 673,
"step-1": "<mask token>\n\n\ndef busca_dfs(nodo_inicial, custo_maximo_atual):\n objetivo = '12345678_'\n custo_maximo_absoluto = 100\n explorados = set()\n fronteira = deque()\n fronteira.append(nodo_inicial)\n if custo_maximo_atual > custo_maximo_absoluto:\n return -1\n while True:\n if not fronteira:\n explorados = None\n return busca_dfs(nodo_inicial, custo_maximo_atual + 1)\n v = fronteira.pop()\n if v.estado == objetivo:\n return busca_caminho(v, nodo_inicial)\n if v not in explorados:\n explorados.add(v)\n estados_sucessores = sucessor.sucessor(v.estado)\n if v.custo + 1 < custo_maximo_atual:\n for e in estados_sucessores:\n filho = expande.Nodo(e[1], v, e[0], v.custo + 1)\n fronteira.append(filho)\n\n\ndef main():\n estado_inicial = sys.argv[1]\n custo_inicial = 0\n pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)\n caminho = busca_dfs(pai, 1)\n while caminho:\n print(caminho.pop(), end=' ')\n print()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef busca_caminho(nodo_final, nodo_inicial):\n pilha_acoes = deque()\n v = nodo_final\n while v != nodo_inicial:\n pilha_acoes.append(v.acao)\n v = v.pai\n return pilha_acoes\n\n\ndef busca_dfs(nodo_inicial, custo_maximo_atual):\n objetivo = '12345678_'\n custo_maximo_absoluto = 100\n explorados = set()\n fronteira = deque()\n fronteira.append(nodo_inicial)\n if custo_maximo_atual > custo_maximo_absoluto:\n return -1\n while True:\n if not fronteira:\n explorados = None\n return busca_dfs(nodo_inicial, custo_maximo_atual + 1)\n v = fronteira.pop()\n if v.estado == objetivo:\n return busca_caminho(v, nodo_inicial)\n if v not in explorados:\n explorados.add(v)\n estados_sucessores = sucessor.sucessor(v.estado)\n if v.custo + 1 < custo_maximo_atual:\n for e in estados_sucessores:\n filho = expande.Nodo(e[1], v, e[0], v.custo + 1)\n fronteira.append(filho)\n\n\ndef main():\n estado_inicial = sys.argv[1]\n custo_inicial = 0\n pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)\n caminho = busca_dfs(pai, 1)\n while caminho:\n print(caminho.pop(), end=' ')\n print()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef busca_caminho(nodo_final, nodo_inicial):\n pilha_acoes = deque()\n v = nodo_final\n while v != nodo_inicial:\n pilha_acoes.append(v.acao)\n v = v.pai\n return pilha_acoes\n\n\ndef busca_dfs(nodo_inicial, custo_maximo_atual):\n objetivo = '12345678_'\n custo_maximo_absoluto = 100\n explorados = set()\n fronteira = deque()\n fronteira.append(nodo_inicial)\n if custo_maximo_atual > custo_maximo_absoluto:\n return -1\n while True:\n if not fronteira:\n explorados = None\n return busca_dfs(nodo_inicial, custo_maximo_atual + 1)\n v = fronteira.pop()\n if v.estado == objetivo:\n return busca_caminho(v, nodo_inicial)\n if v not in explorados:\n explorados.add(v)\n estados_sucessores = sucessor.sucessor(v.estado)\n if v.custo + 1 < custo_maximo_atual:\n for e in estados_sucessores:\n filho = expande.Nodo(e[1], v, e[0], v.custo + 1)\n fronteira.append(filho)\n\n\ndef main():\n estado_inicial = sys.argv[1]\n custo_inicial = 0\n pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)\n caminho = busca_dfs(pai, 1)\n while caminho:\n print(caminho.pop(), end=' ')\n print()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport sucessor\nimport expande\nfrom collections import deque\n\n\ndef busca_caminho(nodo_final, nodo_inicial):\n pilha_acoes = deque()\n v = nodo_final\n while v != nodo_inicial:\n pilha_acoes.append(v.acao)\n v = v.pai\n return pilha_acoes\n\n\ndef busca_dfs(nodo_inicial, custo_maximo_atual):\n objetivo = '12345678_'\n custo_maximo_absoluto = 100\n explorados = set()\n fronteira = deque()\n fronteira.append(nodo_inicial)\n if custo_maximo_atual > custo_maximo_absoluto:\n return -1\n while True:\n if not fronteira:\n explorados = None\n return busca_dfs(nodo_inicial, custo_maximo_atual + 1)\n v = fronteira.pop()\n if v.estado == objetivo:\n return busca_caminho(v, nodo_inicial)\n if v not in explorados:\n explorados.add(v)\n estados_sucessores = sucessor.sucessor(v.estado)\n if v.custo + 1 < custo_maximo_atual:\n for e in estados_sucessores:\n filho = expande.Nodo(e[1], v, e[0], v.custo + 1)\n fronteira.append(filho)\n\n\ndef main():\n estado_inicial = sys.argv[1]\n custo_inicial = 0\n pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)\n caminho = busca_dfs(pai, 1)\n while caminho:\n print(caminho.pop(), end=' ')\n print()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import sys\nimport sucessor\nimport expande\nfrom collections import deque\n\ndef busca_caminho(nodo_final, nodo_inicial):\n\tpilha_acoes = deque() # iremos empilhar as acoes já que a estaremos com a ordem reversa a priori\n\tv = nodo_final\n\twhile v != nodo_inicial:\n\t\tpilha_acoes.append(v.acao)\n\t\tv = v.pai\n\treturn pilha_acoes\n\ndef busca_dfs(nodo_inicial, custo_maximo_atual):\n\tobjetivo = \"12345678_\"\n\tcusto_maximo_absoluto = 100 #profundedade maxima tolerada\n\texplorados = set()\n\tfronteira = deque()\n\tfronteira.append(nodo_inicial)\n\tif custo_maximo_atual > custo_maximo_absoluto: #se a profundedade maxima atual é maior do que a profundedade maxima tolerada retorna -1 pois provavelmente não existe uma solução\n\t\treturn -1\n\twhile True:\n\t\tif not fronteira: # Se a fronteira esta vazia\n\t\t\texplorados = None\n\t\t\treturn busca_dfs(nodo_inicial, custo_maximo_atual + 1) #executa a função novamente mas dessa vez com uma profundedade maxima maior\n\t\tv = fronteira.pop() #pop em vez de popleft para tratar a fronteira como pilha\n\t\tif v.estado == objetivo:\n\t\t\treturn busca_caminho(v, nodo_inicial)\n\t\tif v not in explorados:\n\t\t\texplorados.add(v)\n\t\t\testados_sucessores = sucessor.sucessor(v.estado)\n\t\t\t# Cada estado atingível a partir de v é acrescentado à fronteira caso a profundidade dos novos estados não exceda a profundidade máxima\n\t\t\tif (v.custo + 1) < custo_maximo_atual:\n\t\t\t\tfor e in estados_sucessores:\n\t\t\t\t\tfilho = expande.Nodo(e[1], v, e[0], v.custo + 1)\n\t\t\t\t\tfronteira.append(filho)\n\ndef main():\n\t#como eu não queria ter que modificar as classes que já existiam, usei o custo de cada estado como um sinônimo de profundidade, já que os novos estados sempre tem custo = custo do pai + 1\n\testado_inicial = sys.argv[1]\n\tcusto_inicial = 0\n\tpai = expande.Nodo(estado_inicial, 0, \"\", custo_inicial)\n\tcaminho = busca_dfs(pai, 1)\n\n\twhile caminho:\n\t\tprint(caminho.pop(), end = \" \")\n\tprint()\n\nif __name__ == '__main__':\n\tmain()\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 18:39:26 2020
@author: Fanny Fredriksson and Karen Marie Sandø Ambrosen
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm # count for loops
import math
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn import preprocessing
from sklearn.utils import shuffle
from sklearn.linear_model import Lasso
from utils_runOnce_classification import getEgillX, getEgillParameters
from utils_runOnce_classification import significant_connected_areasBAitaSigX, getBAitaSigParameters, getBAitaParameters
import seaborn as sns
from utils_joint import getNewestFolderDate, get_Xy
import pdb
#{}
#[]
##############################################################################
def leaveKout_CV(X, y, n_scz_te, rep, perms, classifiers, parameters, count,
freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig = None):
"""
Calculates the leave K out cross validation.
Parameters
----------
X : array of arrays
Matrix containing a vector with all the features for each subject.
Dimension (number of subjects)x(number of features).
y : array
A vector containing the class-information.
        Remember: 1 = healthy controls, 0 = schizophrenic
n_scz_te : int
Desired number of schizophrenic patients in each test set.
rep : integer
        The number of the repetition that has been used so far.
perms : range(*)
Range with desired number (*) of permutations.
*=1 indicates no permutations.
classifiers : dictionary
Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}
parameters : dictionary
Dictionary containing parameters to the classifiers as in "classifiers"
count : integer
        Used to keep track of how many loops have been made, given the
        pre-allocated space for AUC.
freq_bands : list of strings
        Either ['all'] or ['delta','theta','alpha','beta1','beta2','gamma'].
x_size : integer
The size each X has which changes depending on freq_bands.
auc : dictionary
Contains the auc-scores for each loop, either divided into bands or
with the key "all".
nz_coef_idx : dictionary
Contains the non-zero coefficient indices for each loop, either
divided into bands or with the key "all".
nz_coef_val : dictionary
Contains the non-zero coefficient values (the weights) for each
loop, either divided into bands or with the key "all".
n_BAitaSig : list of integers, optional
The number of connections in each band when BAitaSig is used.
The default is None.
Returns
-------
auc : dictionary
Contains the updated auc-scores for each loop, either divided into
bands or with the key "all".
nz_coef_idx : dictionary
Contains the updated non-zero coefficient indices for each loop,
either divided into bands or with the key "all".
nz_coef_val : dictionary
Contains the updated non-zero coefficient values (the weights) for
each loop, either divided into bands or with the key "all".
count : integer
        Used to keep track of how many loops have been made, given the
        pre-allocated space for AUC.
"""
skf = StratifiedKFold(n_splits=int(sum(y==0)//n_scz_te),shuffle=True, random_state = rep)
count_plt = 0
fig, ax = plt.subplots(2,3 , figsize=(10,6.5))
for tr_idx, te_idx in skf.split(X,y):
# Compute test and train targets
y_tr = np.ravel(y[tr_idx])
y_te = np.ravel(y[te_idx])
# Make gridsearch function
clf_name = list(classifiers.keys())[0]
count += 1
sns.set(font_scale=1.5)
for i in range(1): #range(len(freq_bands)):
if count_plt == 6:
plt.suptitle('Example of line search for the regularization parameter', fontsize= 18)
plt.tight_layout()
plt.subplots_adjust(top = 0.84, bottom = 0.15, hspace = 0.5, wspace = 0.45)
fig.legend(['Train', 'Validation'], bbox_to_anchor = (0.5, 0.89),
borderaxespad = 0., loc = 'upper center', ncol = 2)
plt.show()
fig.savefig('/share/FannyMaster/PythonNew/Figures/LineSearchEx.jpg', bbox_inches = 'tight')
sns.reset_orig()
raise NameError('This is just a dumb way of stopping the code after 6 iterations')
i = 1
clf = GridSearchCV(classifiers[clf_name], {'alpha' :parameters[freq_bands[i]]},
cv = StratifiedKFold(n_splits = int(sum(y_tr==0)//n_scz_te)),
scoring = 'roc_auc', n_jobs = -1, return_train_score=True)
# Compute test and train sets
if n_BAitaSig == None:
X_tr = X[tr_idx, x_size*i:x_size*(i+1)]
X_te = X[te_idx, x_size*i:x_size*(i+1)]
else:
if x_size == sum(n_BAitaSig):
X_tr = X[tr_idx, :]
X_te = X[te_idx, :]
else:
n_temp = [0]
n_temp.extend(np.cumsum(n_BAitaSig))
X_tr = X[tr_idx, n_temp[i]:n_temp[i+1]]
X_te = X[te_idx, n_temp[i]:n_temp[i+1]]
# Standardize
scaler_out = preprocessing.StandardScaler().fit(X_tr)
X_tr = scaler_out.transform(X_tr)
X_te = scaler_out.transform(X_te)
# Fit data and save auc scores
fit = clf.fit(X_tr, y_tr)
auc[freq_bands[i]][count] = fit.score(X_te, y_te)
# Make parameter plot
#plot_grid_search(clf.cv_results_, 'score', parameters[freq_bands[i]], 'log($\lambda$) ' + freq_bands[i])
cv_results = clf.cv_results_
metric = 'score'
grid_param_1 = parameters[freq_bands[i]]
scores_mean = cv_results[('mean_test_' + metric)]
# scores_sd = cv_results[('std_test_' + metric)]
scores_mean_tr = cv_results[('mean_train_' + metric)]
# Set plot style
#plt.style.use('seaborn')
# Plot Grid search scores
sns.set(font_scale=1.5)
df1 = pd.DataFrame({'log($\lambda$)':[math.log(i) for i in grid_param_1], 'CV Average AUC' : scores_mean_tr, 'type' : ['train']*len(scores_mean_tr)})
df2 = pd.DataFrame({'log($\lambda$)':[math.log(i) for i in grid_param_1], 'CV Average AUC' : scores_mean, 'type' : ['test']*len(scores_mean_tr)})
sns.lineplot(x = 'log($\lambda$)', y = 'CV Average AUC', style='type', legend = False, markers = "o", data = df1, ax = ax[count_plt//3][count_plt%3])
sns.lineplot(x = 'log($\lambda$)', y = 'CV Average AUC', style='type', legend = False, markers = "o", data = df2, ax = ax[count_plt//3][count_plt%3])
ax[count_plt//3][count_plt%3].set_xlabel('log($\lambda$)', fontsize=14)
ax[count_plt//3][count_plt%3].set_ylabel('CV Average AUC' , fontsize=14)
#pprint(clf.cv_results_)
#pdb.set_trace() # Type "exit" to get out, type "c" to continue
count_plt += 1
if len(perms) == 1:
coef_idx = np.nonzero(fit.best_estimator_.coef_)
nz_coef_idx[freq_bands[i]].append(coef_idx)
nz_coef_val[freq_bands[i]].append(fit.best_estimator_.coef_[coef_idx])
return auc, nz_coef_idx, nz_coef_val, count
##############################################################################
def CV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save,
classifiers, parameters, n_BAitaSig = None):
"""
Parameters
----------
X : np.array
Matrix with dimension (subjects)x(feature vector).
y : np.array
Vector with classifications (0: healthy, 1: schizo).
n_scz_te : int
Desired number of schizophrenic patients in each test set.
reps : range(*)
Range with desired number (*) of extra times the code should run.
separate_bands : boolean
        True = separate data into frequency bands. False = don't separate.
perms : range(*)
Range with desired number (*) of permutations.
*=1 indicates no permutations.
dir_save : string
Directory path to where the results should be saved.
classifiers : dictionary
Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}
parameters : dictionary
Dictionary containing parameters to the classifiers as in "classifiers"
Notes
-------
Saves three different values in the dir_save path:
auc : dictionary
Contains the auc-scores for each loop, either divided into bands or
with the key "all".
nz_coef_idx : dictionary
Contains the non-zero coefficient indices for each loop, either
divided into bands or with the key "all".
nz_coef_val : dictionary
Contains the non-zero coefficient values (the weights) for each
loop, either divided into bands or with the key "all".
"""
    # Check if data should be separated into bands or not:
if separate_bands:
freq_bands = ['delta', 'theta', 'alpha', 'beta1', 'beta2', 'gamma']
else:
freq_bands = ['all']
if len(perms) > 1:
y_org = y
tqdm_perms = tqdm(perms)
tqdm_reps = reps
else:
tqdm_perms = perms
tqdm_reps = tqdm(reps)
# Initialize space for values
auc = {}
nz_coef_idx= {}
nz_coef_val= {}
nb_loops = len(reps)*(sum(y==0)//n_scz_te)*len(perms)
# Define the size of X
x_size = int(X.shape[1]/len(freq_bands))
for i in freq_bands:
auc[i] = np.zeros(nb_loops) # e.g. auc = {'delta':[] , 'theta': [], 'alpha': [], ....}
nz_coef_idx[i] = []
nz_coef_val[i] = []
count = -1
for perm in tqdm_perms:
if len(perms) > 1:
y = shuffle(y_org, random_state=perm).reset_index(drop=True)
for rep in tqdm_reps:
auc, nz_coef_idx, nz_coef_val, count = leaveKout_CV(X, y, n_scz_te, rep,
perms, classifiers, parameters, count,
freq_bands, x_size, auc, nz_coef_idx,
nz_coef_val, n_BAitaSig)
#%%
con_type = 'lps'
separate_bands = True # False = All bands together
partialData = True
atlas = 'BAita' # DKEgill, BAita, BAitaSig
sns.set(font_scale=1.5)
freq_band_type = 'DiLorenzo'
# Directories
dir_folders = r'/share/FannyMaster/PythonNew/' + atlas + '_timeseries_'
newest_date = getNewestFolderDate(dir_folders)
dir_features = dir_folders + newest_date + '/' + freq_band_type + '/Features'
dir_y_ID = r'/share/FannyMaster/PythonNew/Age_Gender.csv'
n_scz_te = 2
reps = range(1)
classifiers = {'lasso' : Lasso(max_iter = 10000)}
dir_save = dir_folders + newest_date + '/' + freq_band_type + '/classificationResults/' + con_type.capitalize()
X,y = get_Xy(dir_features, dir_y_ID, con_type, partialData)
if atlas == 'DKEgill':
X = getEgillX(X)
n_BAitaSig = None
parameters = getEgillParameters(con_type, separate_bands)
elif atlas == 'BAitaSig':
X, n_BAitaSig = significant_connected_areasBAitaSigX(X)
parameters = getBAitaSigParameters(con_type, separate_bands)
elif atlas == 'BAita':
parameters = getBAitaParameters(con_type, separate_bands)
n_BAitaSig = None
perms = range(1) # 1 = No permutations
CV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save,
              classifiers, parameters, n_BAitaSig)
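
# Illustrative sketch, not part of the original analysis: "_check_outer_fold_sizes" is a
# hypothetical helper added only to show why n_splits = sum(y == 0) // n_scz_te is used
# above. With synthetic labels (0 = schizophrenia, 1 = healthy control), every test fold
# of the stratified outer CV ends up with roughly n_scz_te schizophrenia patients.
# The function is only defined here; call it manually if the check is wanted.
def _check_outer_fold_sizes(n_scz=20, n_hc=30, n_scz_te=2, seed=0):
    y_toy = np.array([0] * n_scz + [1] * n_hc)
    X_toy = np.zeros((len(y_toy), 1))
    skf_toy = StratifiedKFold(n_splits=int(sum(y_toy == 0) // n_scz_te),
                              shuffle=True, random_state=seed)
    for _, te_idx_toy in skf_toy.split(X_toy, y_toy):
        print('test fold: %d schizophrenia, %d controls'
              % (sum(y_toy[te_idx_toy] == 0), sum(y_toy[te_idx_toy] == 1)))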
|
normal
|
{
"blob_id": "69511933697905fb4f365c895264596f19dc1d8d",
"index": 5021,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef leaveKout_CV(X, y, n_scz_te, rep, perms, classifiers, parameters, count,\n freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig=None):\n \"\"\"\n Calculates the leave K out cross validation. \n\n Parameters\n ----------\n X : array of arrays\n Matrix containing a vector with all the features for each subject.\n Dimension (number of subjects)x(number of features).\n y : array\n A vector containing the class-information. \n Remember: 1 = healty controls, 0 = schizophrenic \n n_scz_te : int\n Desired number of schizophrenic patients in each test set.\n rep : integer\n The number of repition that has been used so far.\n perms : range(*)\n Range with desired number (*) of permutations. \n *=1 indicates no permutations.\n classifiers : dictionary\n Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}\n parameters : dictionary\n Dictionary containing parameters to the classifiers as in \"classifiers\"\n count : integer\n Used to know how many loops that have been made due to the pre \n allocated space for AUC.\n freq_bands : list of strings\n Either ['all'] or ['detla','theta','alpha','beta1','beta2','gamma'].\n x_size : integer\n The size each X has which changes depending on freq_bands.\n auc : dictionary\n Contains the auc-scores for each loop, either divided into bands or \n with the key \"all\".\n nz_coef_idx : dictionary\n Contains the non-zero coefficient indices for each loop, either \n divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the non-zero coefficient values (the weights) for each \n loop, either divided into bands or with the key \"all\".\n n_BAitaSig : list of integers, optional\n The number of connections in each band when BAitaSig is used. 
\n The default is None.\n Returns\n -------\n auc : dictionary\n Contains the updated auc-scores for each loop, either divided into \n bands or with the key \"all\".\n nz_coef_idx : dictionary\n Contains the updated non-zero coefficient indices for each loop, \n either divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the updated non-zero coefficient values (the weights) for \n each loop, either divided into bands or with the key \"all\".\n count : integer\n Used to know how many loops that have been made due to the pre \n allocated space for AUC.\n\n \"\"\"\n skf = StratifiedKFold(n_splits=int(sum(y == 0) // n_scz_te), shuffle=\n True, random_state=rep)\n count_plt = 0\n fig, ax = plt.subplots(2, 3, figsize=(10, 6.5))\n for tr_idx, te_idx in skf.split(X, y):\n y_tr = np.ravel(y[tr_idx])\n y_te = np.ravel(y[te_idx])\n clf_name = list(classifiers.keys())[0]\n count += 1\n sns.set(font_scale=1.5)\n for i in range(1):\n if count_plt == 6:\n plt.suptitle(\n 'Example of line search for the regularization parameter',\n fontsize=18)\n plt.tight_layout()\n plt.subplots_adjust(top=0.84, bottom=0.15, hspace=0.5,\n wspace=0.45)\n fig.legend(['Train', 'Validation'], bbox_to_anchor=(0.5, \n 0.89), borderaxespad=0.0, loc='upper center', ncol=2)\n plt.show()\n fig.savefig(\n '/share/FannyMaster/PythonNew/Figures/LineSearchEx.jpg',\n bbox_inches='tight')\n sns.reset_orig()\n raise NameError(\n 'This is just a dumb way of stopping the code after 6 iterations'\n )\n i = 1\n clf = GridSearchCV(classifiers[clf_name], {'alpha': parameters[\n freq_bands[i]]}, cv=StratifiedKFold(n_splits=int(sum(y_tr ==\n 0) // n_scz_te)), scoring='roc_auc', n_jobs=-1,\n return_train_score=True)\n if n_BAitaSig == None:\n X_tr = X[tr_idx, x_size * i:x_size * (i + 1)]\n X_te = X[te_idx, x_size * i:x_size * (i + 1)]\n elif x_size == sum(n_BAitaSig):\n X_tr = X[tr_idx, :]\n X_te = X[te_idx, :]\n else:\n n_temp = [0]\n n_temp.extend(np.cumsum(n_BAitaSig))\n X_tr = X[tr_idx, n_temp[i]:n_temp[i + 1]]\n X_te = X[te_idx, n_temp[i]:n_temp[i + 1]]\n scaler_out = preprocessing.StandardScaler().fit(X_tr)\n X_tr = scaler_out.transform(X_tr)\n X_te = scaler_out.transform(X_te)\n fit = clf.fit(X_tr, y_tr)\n auc[freq_bands[i]][count] = fit.score(X_te, y_te)\n cv_results = clf.cv_results_\n metric = 'score'\n grid_param_1 = parameters[freq_bands[i]]\n scores_mean = cv_results['mean_test_' + metric]\n scores_mean_tr = cv_results['mean_train_' + metric]\n sns.set(font_scale=1.5)\n df1 = pd.DataFrame({'log($\\\\lambda$)': [math.log(i) for i in\n grid_param_1], 'CV Average AUC': scores_mean_tr, 'type': [\n 'train'] * len(scores_mean_tr)})\n df2 = pd.DataFrame({'log($\\\\lambda$)': [math.log(i) for i in\n grid_param_1], 'CV Average AUC': scores_mean, 'type': [\n 'test'] * len(scores_mean_tr)})\n sns.lineplot(x='log($\\\\lambda$)', y='CV Average AUC', style=\n 'type', legend=False, markers='o', data=df1, ax=ax[\n count_plt // 3][count_plt % 3])\n sns.lineplot(x='log($\\\\lambda$)', y='CV Average AUC', style=\n 'type', legend=False, markers='o', data=df2, ax=ax[\n count_plt // 3][count_plt % 3])\n ax[count_plt // 3][count_plt % 3].set_xlabel('log($\\\\lambda$)',\n fontsize=14)\n ax[count_plt // 3][count_plt % 3].set_ylabel('CV Average AUC',\n fontsize=14)\n count_plt += 1\n if len(perms) == 1:\n coef_idx = np.nonzero(fit.best_estimator_.coef_)\n nz_coef_idx[freq_bands[i]].append(coef_idx)\n nz_coef_val[freq_bands[i]].append(fit.best_estimator_.coef_\n [coef_idx])\n return auc, nz_coef_idx, nz_coef_val, count\n\n\ndef 
CV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save,\n classifiers, parameters, n_BAitaSig=None):\n \"\"\"\n Parameters\n ----------\n X : np.array \n Matrix with dimension (subjects)x(feature vector).\n y : np.array\n Vector with classifications (0: healthy, 1: schizo).\n n_scz_te : int\n Desired number of schizophrenic patients in each test set.\n reps : range(*)\n Range with desired number (*) of extra times the code should run.\n separate_bands : boolean\n True = seperate data into frequency bands. False = don't separate.\n perms : range(*)\n Range with desired number (*) of permutations. \n *=1 indicates no permutations.\n dir_save : string\n Directory path to where the results should be saved.\n classifiers : dictionary\n Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}\n parameters : dictionary\n Dictionary containing parameters to the classifiers as in \"classifiers\"\n\n Notes\n -------\n Saves three different values in the dir_save path: \n auc : dictionary\n Contains the auc-scores for each loop, either divided into bands or \n with the key \"all\".\n nz_coef_idx : dictionary\n Contains the non-zero coefficient indices for each loop, either \n divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the non-zero coefficient values (the weights) for each \n loop, either divided into bands or with the key \"all\".\n \n \"\"\"\n if separate_bands:\n freq_bands = ['delta', 'theta', 'alpha', 'beta1', 'beta2', 'gamma']\n else:\n freq_bands = ['all']\n if len(perms) > 1:\n y_org = y\n tqdm_perms = tqdm(perms)\n tqdm_reps = reps\n else:\n tqdm_perms = perms\n tqdm_reps = tqdm(reps)\n auc = {}\n nz_coef_idx = {}\n nz_coef_val = {}\n nb_loops = len(reps) * (sum(y == 0) // n_scz_te) * len(perms)\n x_size = int(X.shape[1] / len(freq_bands))\n for i in freq_bands:\n auc[i] = np.zeros(nb_loops)\n nz_coef_idx[i] = []\n nz_coef_val[i] = []\n count = -1\n for perm in tqdm_perms:\n if len(perms) > 1:\n y = shuffle(y_org, random_state=perm).reset_index(drop=True)\n for rep in tqdm_reps:\n auc, nz_coef_idx, nz_coef_val, count = leaveKout_CV(X, y,\n n_scz_te, rep, perms, classifiers, parameters, count,\n freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig)\n\n\n<mask token>\nsns.set(font_scale=1.5)\n<mask token>\nif atlas == 'DKEgill':\n X = getEgillX(X)\n n_BAitaSig = None\n parameters = getEgillParameters(con_type, separate_bands)\nelif atlas == 'BAitaSig':\n X, n_BAitaSig = significant_connected_areasBAitaSigX(X)\n parameters = getBAitaSigParameters(con_type, separate_bands)\nelif atlas == 'BAita':\n parameters = getBAitaParameters(con_type, separate_bands)\n n_BAitaSig = None\n<mask token>\nCV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save,\n classifiers, parameters)\n",
"step-3": "<mask token>\n\n\ndef leaveKout_CV(X, y, n_scz_te, rep, perms, classifiers, parameters, count,\n freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig=None):\n \"\"\"\n Calculates the leave K out cross validation. \n\n Parameters\n ----------\n X : array of arrays\n Matrix containing a vector with all the features for each subject.\n Dimension (number of subjects)x(number of features).\n y : array\n A vector containing the class-information. \n Remember: 1 = healty controls, 0 = schizophrenic \n n_scz_te : int\n Desired number of schizophrenic patients in each test set.\n rep : integer\n The number of repition that has been used so far.\n perms : range(*)\n Range with desired number (*) of permutations. \n *=1 indicates no permutations.\n classifiers : dictionary\n Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}\n parameters : dictionary\n Dictionary containing parameters to the classifiers as in \"classifiers\"\n count : integer\n Used to know how many loops that have been made due to the pre \n allocated space for AUC.\n freq_bands : list of strings\n Either ['all'] or ['detla','theta','alpha','beta1','beta2','gamma'].\n x_size : integer\n The size each X has which changes depending on freq_bands.\n auc : dictionary\n Contains the auc-scores for each loop, either divided into bands or \n with the key \"all\".\n nz_coef_idx : dictionary\n Contains the non-zero coefficient indices for each loop, either \n divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the non-zero coefficient values (the weights) for each \n loop, either divided into bands or with the key \"all\".\n n_BAitaSig : list of integers, optional\n The number of connections in each band when BAitaSig is used. 
\n The default is None.\n Returns\n -------\n auc : dictionary\n Contains the updated auc-scores for each loop, either divided into \n bands or with the key \"all\".\n nz_coef_idx : dictionary\n Contains the updated non-zero coefficient indices for each loop, \n either divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the updated non-zero coefficient values (the weights) for \n each loop, either divided into bands or with the key \"all\".\n count : integer\n Used to know how many loops that have been made due to the pre \n allocated space for AUC.\n\n \"\"\"\n skf = StratifiedKFold(n_splits=int(sum(y == 0) // n_scz_te), shuffle=\n True, random_state=rep)\n count_plt = 0\n fig, ax = plt.subplots(2, 3, figsize=(10, 6.5))\n for tr_idx, te_idx in skf.split(X, y):\n y_tr = np.ravel(y[tr_idx])\n y_te = np.ravel(y[te_idx])\n clf_name = list(classifiers.keys())[0]\n count += 1\n sns.set(font_scale=1.5)\n for i in range(1):\n if count_plt == 6:\n plt.suptitle(\n 'Example of line search for the regularization parameter',\n fontsize=18)\n plt.tight_layout()\n plt.subplots_adjust(top=0.84, bottom=0.15, hspace=0.5,\n wspace=0.45)\n fig.legend(['Train', 'Validation'], bbox_to_anchor=(0.5, \n 0.89), borderaxespad=0.0, loc='upper center', ncol=2)\n plt.show()\n fig.savefig(\n '/share/FannyMaster/PythonNew/Figures/LineSearchEx.jpg',\n bbox_inches='tight')\n sns.reset_orig()\n raise NameError(\n 'This is just a dumb way of stopping the code after 6 iterations'\n )\n i = 1\n clf = GridSearchCV(classifiers[clf_name], {'alpha': parameters[\n freq_bands[i]]}, cv=StratifiedKFold(n_splits=int(sum(y_tr ==\n 0) // n_scz_te)), scoring='roc_auc', n_jobs=-1,\n return_train_score=True)\n if n_BAitaSig == None:\n X_tr = X[tr_idx, x_size * i:x_size * (i + 1)]\n X_te = X[te_idx, x_size * i:x_size * (i + 1)]\n elif x_size == sum(n_BAitaSig):\n X_tr = X[tr_idx, :]\n X_te = X[te_idx, :]\n else:\n n_temp = [0]\n n_temp.extend(np.cumsum(n_BAitaSig))\n X_tr = X[tr_idx, n_temp[i]:n_temp[i + 1]]\n X_te = X[te_idx, n_temp[i]:n_temp[i + 1]]\n scaler_out = preprocessing.StandardScaler().fit(X_tr)\n X_tr = scaler_out.transform(X_tr)\n X_te = scaler_out.transform(X_te)\n fit = clf.fit(X_tr, y_tr)\n auc[freq_bands[i]][count] = fit.score(X_te, y_te)\n cv_results = clf.cv_results_\n metric = 'score'\n grid_param_1 = parameters[freq_bands[i]]\n scores_mean = cv_results['mean_test_' + metric]\n scores_mean_tr = cv_results['mean_train_' + metric]\n sns.set(font_scale=1.5)\n df1 = pd.DataFrame({'log($\\\\lambda$)': [math.log(i) for i in\n grid_param_1], 'CV Average AUC': scores_mean_tr, 'type': [\n 'train'] * len(scores_mean_tr)})\n df2 = pd.DataFrame({'log($\\\\lambda$)': [math.log(i) for i in\n grid_param_1], 'CV Average AUC': scores_mean, 'type': [\n 'test'] * len(scores_mean_tr)})\n sns.lineplot(x='log($\\\\lambda$)', y='CV Average AUC', style=\n 'type', legend=False, markers='o', data=df1, ax=ax[\n count_plt // 3][count_plt % 3])\n sns.lineplot(x='log($\\\\lambda$)', y='CV Average AUC', style=\n 'type', legend=False, markers='o', data=df2, ax=ax[\n count_plt // 3][count_plt % 3])\n ax[count_plt // 3][count_plt % 3].set_xlabel('log($\\\\lambda$)',\n fontsize=14)\n ax[count_plt // 3][count_plt % 3].set_ylabel('CV Average AUC',\n fontsize=14)\n count_plt += 1\n if len(perms) == 1:\n coef_idx = np.nonzero(fit.best_estimator_.coef_)\n nz_coef_idx[freq_bands[i]].append(coef_idx)\n nz_coef_val[freq_bands[i]].append(fit.best_estimator_.coef_\n [coef_idx])\n return auc, nz_coef_idx, nz_coef_val, count\n\n\ndef 
CV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save,\n classifiers, parameters, n_BAitaSig=None):\n \"\"\"\n Parameters\n ----------\n X : np.array \n Matrix with dimension (subjects)x(feature vector).\n y : np.array\n Vector with classifications (0: healthy, 1: schizo).\n n_scz_te : int\n Desired number of schizophrenic patients in each test set.\n reps : range(*)\n Range with desired number (*) of extra times the code should run.\n separate_bands : boolean\n True = seperate data into frequency bands. False = don't separate.\n perms : range(*)\n Range with desired number (*) of permutations. \n *=1 indicates no permutations.\n dir_save : string\n Directory path to where the results should be saved.\n classifiers : dictionary\n Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}\n parameters : dictionary\n Dictionary containing parameters to the classifiers as in \"classifiers\"\n\n Notes\n -------\n Saves three different values in the dir_save path: \n auc : dictionary\n Contains the auc-scores for each loop, either divided into bands or \n with the key \"all\".\n nz_coef_idx : dictionary\n Contains the non-zero coefficient indices for each loop, either \n divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the non-zero coefficient values (the weights) for each \n loop, either divided into bands or with the key \"all\".\n \n \"\"\"\n if separate_bands:\n freq_bands = ['delta', 'theta', 'alpha', 'beta1', 'beta2', 'gamma']\n else:\n freq_bands = ['all']\n if len(perms) > 1:\n y_org = y\n tqdm_perms = tqdm(perms)\n tqdm_reps = reps\n else:\n tqdm_perms = perms\n tqdm_reps = tqdm(reps)\n auc = {}\n nz_coef_idx = {}\n nz_coef_val = {}\n nb_loops = len(reps) * (sum(y == 0) // n_scz_te) * len(perms)\n x_size = int(X.shape[1] / len(freq_bands))\n for i in freq_bands:\n auc[i] = np.zeros(nb_loops)\n nz_coef_idx[i] = []\n nz_coef_val[i] = []\n count = -1\n for perm in tqdm_perms:\n if len(perms) > 1:\n y = shuffle(y_org, random_state=perm).reset_index(drop=True)\n for rep in tqdm_reps:\n auc, nz_coef_idx, nz_coef_val, count = leaveKout_CV(X, y,\n n_scz_te, rep, perms, classifiers, parameters, count,\n freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig)\n\n\ncon_type = 'lps'\nseparate_bands = True\npartialData = True\natlas = 'BAita'\nsns.set(font_scale=1.5)\nfreq_band_type = 'DiLorenzo'\ndir_folders = '/share/FannyMaster/PythonNew/' + atlas + '_timeseries_'\nnewest_date = getNewestFolderDate(dir_folders)\ndir_features = dir_folders + newest_date + '/' + freq_band_type + '/Features'\ndir_y_ID = '/share/FannyMaster/PythonNew/Age_Gender.csv'\nn_scz_te = 2\nreps = range(1)\nclassifiers = {'lasso': Lasso(max_iter=10000)}\ndir_save = (dir_folders + newest_date + '/' + freq_band_type +\n '/classificationResults/' + con_type.capitalize())\nX, y = get_Xy(dir_features, dir_y_ID, con_type, partialData)\nif atlas == 'DKEgill':\n X = getEgillX(X)\n n_BAitaSig = None\n parameters = getEgillParameters(con_type, separate_bands)\nelif atlas == 'BAitaSig':\n X, n_BAitaSig = significant_connected_areasBAitaSigX(X)\n parameters = getBAitaSigParameters(con_type, separate_bands)\nelif atlas == 'BAita':\n parameters = getBAitaParameters(con_type, separate_bands)\n n_BAitaSig = None\nperms = range(1)\nCV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save,\n classifiers, parameters)\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom tqdm import tqdm\nimport math\nfrom sklearn.model_selection import GridSearchCV, StratifiedKFold\nfrom sklearn import preprocessing\nfrom sklearn.utils import shuffle\nfrom sklearn.linear_model import Lasso\nfrom utils_runOnce_classification import getEgillX, getEgillParameters\nfrom utils_runOnce_classification import significant_connected_areasBAitaSigX, getBAitaSigParameters, getBAitaParameters\nimport seaborn as sns\nfrom utils_joint import getNewestFolderDate, get_Xy\nimport pdb\n\n\ndef leaveKout_CV(X, y, n_scz_te, rep, perms, classifiers, parameters, count,\n freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig=None):\n \"\"\"\n Calculates the leave K out cross validation. \n\n Parameters\n ----------\n X : array of arrays\n Matrix containing a vector with all the features for each subject.\n Dimension (number of subjects)x(number of features).\n y : array\n A vector containing the class-information. \n Remember: 1 = healty controls, 0 = schizophrenic \n n_scz_te : int\n Desired number of schizophrenic patients in each test set.\n rep : integer\n The number of repition that has been used so far.\n perms : range(*)\n Range with desired number (*) of permutations. \n *=1 indicates no permutations.\n classifiers : dictionary\n Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}\n parameters : dictionary\n Dictionary containing parameters to the classifiers as in \"classifiers\"\n count : integer\n Used to know how many loops that have been made due to the pre \n allocated space for AUC.\n freq_bands : list of strings\n Either ['all'] or ['detla','theta','alpha','beta1','beta2','gamma'].\n x_size : integer\n The size each X has which changes depending on freq_bands.\n auc : dictionary\n Contains the auc-scores for each loop, either divided into bands or \n with the key \"all\".\n nz_coef_idx : dictionary\n Contains the non-zero coefficient indices for each loop, either \n divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the non-zero coefficient values (the weights) for each \n loop, either divided into bands or with the key \"all\".\n n_BAitaSig : list of integers, optional\n The number of connections in each band when BAitaSig is used. 
\n The default is None.\n Returns\n -------\n auc : dictionary\n Contains the updated auc-scores for each loop, either divided into \n bands or with the key \"all\".\n nz_coef_idx : dictionary\n Contains the updated non-zero coefficient indices for each loop, \n either divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the updated non-zero coefficient values (the weights) for \n each loop, either divided into bands or with the key \"all\".\n count : integer\n Used to know how many loops that have been made due to the pre \n allocated space for AUC.\n\n \"\"\"\n skf = StratifiedKFold(n_splits=int(sum(y == 0) // n_scz_te), shuffle=\n True, random_state=rep)\n count_plt = 0\n fig, ax = plt.subplots(2, 3, figsize=(10, 6.5))\n for tr_idx, te_idx in skf.split(X, y):\n y_tr = np.ravel(y[tr_idx])\n y_te = np.ravel(y[te_idx])\n clf_name = list(classifiers.keys())[0]\n count += 1\n sns.set(font_scale=1.5)\n for i in range(1):\n if count_plt == 6:\n plt.suptitle(\n 'Example of line search for the regularization parameter',\n fontsize=18)\n plt.tight_layout()\n plt.subplots_adjust(top=0.84, bottom=0.15, hspace=0.5,\n wspace=0.45)\n fig.legend(['Train', 'Validation'], bbox_to_anchor=(0.5, \n 0.89), borderaxespad=0.0, loc='upper center', ncol=2)\n plt.show()\n fig.savefig(\n '/share/FannyMaster/PythonNew/Figures/LineSearchEx.jpg',\n bbox_inches='tight')\n sns.reset_orig()\n raise NameError(\n 'This is just a dumb way of stopping the code after 6 iterations'\n )\n i = 1\n clf = GridSearchCV(classifiers[clf_name], {'alpha': parameters[\n freq_bands[i]]}, cv=StratifiedKFold(n_splits=int(sum(y_tr ==\n 0) // n_scz_te)), scoring='roc_auc', n_jobs=-1,\n return_train_score=True)\n if n_BAitaSig == None:\n X_tr = X[tr_idx, x_size * i:x_size * (i + 1)]\n X_te = X[te_idx, x_size * i:x_size * (i + 1)]\n elif x_size == sum(n_BAitaSig):\n X_tr = X[tr_idx, :]\n X_te = X[te_idx, :]\n else:\n n_temp = [0]\n n_temp.extend(np.cumsum(n_BAitaSig))\n X_tr = X[tr_idx, n_temp[i]:n_temp[i + 1]]\n X_te = X[te_idx, n_temp[i]:n_temp[i + 1]]\n scaler_out = preprocessing.StandardScaler().fit(X_tr)\n X_tr = scaler_out.transform(X_tr)\n X_te = scaler_out.transform(X_te)\n fit = clf.fit(X_tr, y_tr)\n auc[freq_bands[i]][count] = fit.score(X_te, y_te)\n cv_results = clf.cv_results_\n metric = 'score'\n grid_param_1 = parameters[freq_bands[i]]\n scores_mean = cv_results['mean_test_' + metric]\n scores_mean_tr = cv_results['mean_train_' + metric]\n sns.set(font_scale=1.5)\n df1 = pd.DataFrame({'log($\\\\lambda$)': [math.log(i) for i in\n grid_param_1], 'CV Average AUC': scores_mean_tr, 'type': [\n 'train'] * len(scores_mean_tr)})\n df2 = pd.DataFrame({'log($\\\\lambda$)': [math.log(i) for i in\n grid_param_1], 'CV Average AUC': scores_mean, 'type': [\n 'test'] * len(scores_mean_tr)})\n sns.lineplot(x='log($\\\\lambda$)', y='CV Average AUC', style=\n 'type', legend=False, markers='o', data=df1, ax=ax[\n count_plt // 3][count_plt % 3])\n sns.lineplot(x='log($\\\\lambda$)', y='CV Average AUC', style=\n 'type', legend=False, markers='o', data=df2, ax=ax[\n count_plt // 3][count_plt % 3])\n ax[count_plt // 3][count_plt % 3].set_xlabel('log($\\\\lambda$)',\n fontsize=14)\n ax[count_plt // 3][count_plt % 3].set_ylabel('CV Average AUC',\n fontsize=14)\n count_plt += 1\n if len(perms) == 1:\n coef_idx = np.nonzero(fit.best_estimator_.coef_)\n nz_coef_idx[freq_bands[i]].append(coef_idx)\n nz_coef_val[freq_bands[i]].append(fit.best_estimator_.coef_\n [coef_idx])\n return auc, nz_coef_idx, nz_coef_val, count\n\n\ndef 
CV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save,\n classifiers, parameters, n_BAitaSig=None):\n \"\"\"\n Parameters\n ----------\n X : np.array \n Matrix with dimension (subjects)x(feature vector).\n y : np.array\n Vector with classifications (0: healthy, 1: schizo).\n n_scz_te : int\n Desired number of schizophrenic patients in each test set.\n reps : range(*)\n Range with desired number (*) of extra times the code should run.\n separate_bands : boolean\n True = seperate data into frequency bands. False = don't separate.\n perms : range(*)\n Range with desired number (*) of permutations. \n *=1 indicates no permutations.\n dir_save : string\n Directory path to where the results should be saved.\n classifiers : dictionary\n Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}\n parameters : dictionary\n Dictionary containing parameters to the classifiers as in \"classifiers\"\n\n Notes\n -------\n Saves three different values in the dir_save path: \n auc : dictionary\n Contains the auc-scores for each loop, either divided into bands or \n with the key \"all\".\n nz_coef_idx : dictionary\n Contains the non-zero coefficient indices for each loop, either \n divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the non-zero coefficient values (the weights) for each \n loop, either divided into bands or with the key \"all\".\n \n \"\"\"\n if separate_bands:\n freq_bands = ['delta', 'theta', 'alpha', 'beta1', 'beta2', 'gamma']\n else:\n freq_bands = ['all']\n if len(perms) > 1:\n y_org = y\n tqdm_perms = tqdm(perms)\n tqdm_reps = reps\n else:\n tqdm_perms = perms\n tqdm_reps = tqdm(reps)\n auc = {}\n nz_coef_idx = {}\n nz_coef_val = {}\n nb_loops = len(reps) * (sum(y == 0) // n_scz_te) * len(perms)\n x_size = int(X.shape[1] / len(freq_bands))\n for i in freq_bands:\n auc[i] = np.zeros(nb_loops)\n nz_coef_idx[i] = []\n nz_coef_val[i] = []\n count = -1\n for perm in tqdm_perms:\n if len(perms) > 1:\n y = shuffle(y_org, random_state=perm).reset_index(drop=True)\n for rep in tqdm_reps:\n auc, nz_coef_idx, nz_coef_val, count = leaveKout_CV(X, y,\n n_scz_te, rep, perms, classifiers, parameters, count,\n freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig)\n\n\ncon_type = 'lps'\nseparate_bands = True\npartialData = True\natlas = 'BAita'\nsns.set(font_scale=1.5)\nfreq_band_type = 'DiLorenzo'\ndir_folders = '/share/FannyMaster/PythonNew/' + atlas + '_timeseries_'\nnewest_date = getNewestFolderDate(dir_folders)\ndir_features = dir_folders + newest_date + '/' + freq_band_type + '/Features'\ndir_y_ID = '/share/FannyMaster/PythonNew/Age_Gender.csv'\nn_scz_te = 2\nreps = range(1)\nclassifiers = {'lasso': Lasso(max_iter=10000)}\ndir_save = (dir_folders + newest_date + '/' + freq_band_type +\n '/classificationResults/' + con_type.capitalize())\nX, y = get_Xy(dir_features, dir_y_ID, con_type, partialData)\nif atlas == 'DKEgill':\n X = getEgillX(X)\n n_BAitaSig = None\n parameters = getEgillParameters(con_type, separate_bands)\nelif atlas == 'BAitaSig':\n X, n_BAitaSig = significant_connected_areasBAitaSigX(X)\n parameters = getBAitaSigParameters(con_type, separate_bands)\nelif atlas == 'BAita':\n parameters = getBAitaParameters(con_type, separate_bands)\n n_BAitaSig = None\nperms = range(1)\nCV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save,\n classifiers, parameters)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 26 18:39:26 2020\n\n@author: Fanny Fredriksson and Karen Marie Sandø Ambrosen\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom tqdm import tqdm #count ffor loops\nimport math\nfrom sklearn.model_selection import GridSearchCV, StratifiedKFold\nfrom sklearn import preprocessing\nfrom sklearn.utils import shuffle\nfrom sklearn.linear_model import Lasso\nfrom utils_runOnce_classification import getEgillX, getEgillParameters\nfrom utils_runOnce_classification import significant_connected_areasBAitaSigX, getBAitaSigParameters, getBAitaParameters\nimport seaborn as sns\nfrom utils_joint import getNewestFolderDate, get_Xy\n\nimport pdb\n#{}\n#[]\n\n \n##############################################################################\ndef leaveKout_CV(X, y, n_scz_te, rep, perms, classifiers, parameters, count,\n freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig = None):\n \"\"\"\n Calculates the leave K out cross validation. \n\n Parameters\n ----------\n X : array of arrays\n Matrix containing a vector with all the features for each subject.\n Dimension (number of subjects)x(number of features).\n y : array\n A vector containing the class-information. \n Remember: 1 = healty controls, 0 = schizophrenic \n n_scz_te : int\n Desired number of schizophrenic patients in each test set.\n rep : integer\n The number of repition that has been used so far.\n perms : range(*)\n Range with desired number (*) of permutations. \n *=1 indicates no permutations.\n classifiers : dictionary\n Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}\n parameters : dictionary\n Dictionary containing parameters to the classifiers as in \"classifiers\"\n count : integer\n Used to know how many loops that have been made due to the pre \n allocated space for AUC.\n freq_bands : list of strings\n Either ['all'] or ['detla','theta','alpha','beta1','beta2','gamma'].\n x_size : integer\n The size each X has which changes depending on freq_bands.\n auc : dictionary\n Contains the auc-scores for each loop, either divided into bands or \n with the key \"all\".\n nz_coef_idx : dictionary\n Contains the non-zero coefficient indices for each loop, either \n divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the non-zero coefficient values (the weights) for each \n loop, either divided into bands or with the key \"all\".\n n_BAitaSig : list of integers, optional\n The number of connections in each band when BAitaSig is used. 
\n The default is None.\n Returns\n -------\n auc : dictionary\n Contains the updated auc-scores for each loop, either divided into \n bands or with the key \"all\".\n nz_coef_idx : dictionary\n Contains the updated non-zero coefficient indices for each loop, \n either divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the updated non-zero coefficient values (the weights) for \n each loop, either divided into bands or with the key \"all\".\n count : integer\n Used to know how many loops that have been made due to the pre \n allocated space for AUC.\n\n \"\"\"\n \n skf = StratifiedKFold(n_splits=int(sum(y==0)//n_scz_te),shuffle=True, random_state = rep)\n count_plt = 0\n fig, ax = plt.subplots(2,3 , figsize=(10,6.5))\n for tr_idx, te_idx in skf.split(X,y):\n # Compute test and train targets\n y_tr = np.ravel(y[tr_idx])\n y_te = np.ravel(y[te_idx])\n \n # Make gridsearch function\n clf_name = list(classifiers.keys())[0]\n count += 1\n sns.set(font_scale=1.5)\n for i in range(1): #range(len(freq_bands)):\n if count_plt == 6:\n plt.suptitle('Example of line search for the regularization parameter', fontsize= 18)\n plt.tight_layout()\n plt.subplots_adjust(top = 0.84, bottom = 0.15, hspace = 0.5, wspace = 0.45)\n fig.legend(['Train', 'Validation'], bbox_to_anchor = (0.5, 0.89), \n borderaxespad = 0., loc = 'upper center', ncol = 2)\n \n plt.show()\n fig.savefig('/share/FannyMaster/PythonNew/Figures/LineSearchEx.jpg', bbox_inches = 'tight')\n sns.reset_orig()\n raise NameError('This is just a dumb way of stopping the code after 6 iterations')\n \n i = 1\n clf = GridSearchCV(classifiers[clf_name], {'alpha' :parameters[freq_bands[i]]}, \n cv = StratifiedKFold(n_splits = int(sum(y_tr==0)//n_scz_te)), \n scoring = 'roc_auc', n_jobs = -1, return_train_score=True)\n # Compute test and train sets \n if n_BAitaSig == None:\n X_tr = X[tr_idx, x_size*i:x_size*(i+1)]\n X_te = X[te_idx, x_size*i:x_size*(i+1)]\n else:\n if x_size == sum(n_BAitaSig):\n X_tr = X[tr_idx, :]\n X_te = X[te_idx, :]\n else:\n n_temp = [0]\n n_temp.extend(np.cumsum(n_BAitaSig))\n X_tr = X[tr_idx, n_temp[i]:n_temp[i+1]]\n X_te = X[te_idx, n_temp[i]:n_temp[i+1]]\n \n \n # Standardize\n scaler_out = preprocessing.StandardScaler().fit(X_tr)\n X_tr = scaler_out.transform(X_tr)\n X_te = scaler_out.transform(X_te)\n\n # Fit data and save auc scores\n fit = clf.fit(X_tr, y_tr)\n auc[freq_bands[i]][count] = fit.score(X_te, y_te)\n \n # Make parameter plot\n #plot_grid_search(clf.cv_results_, 'score', parameters[freq_bands[i]], 'log($\\lambda$) ' + freq_bands[i])\n cv_results = clf.cv_results_\n metric = 'score'\n grid_param_1 = parameters[freq_bands[i]]\n \n scores_mean = cv_results[('mean_test_' + metric)]\n # scores_sd = cv_results[('std_test_' + metric)]\n scores_mean_tr = cv_results[('mean_train_' + metric)]\n \n # Set plot style\n #plt.style.use('seaborn')\n \n # Plot Grid search scores\n\n sns.set(font_scale=1.5)\n df1 = pd.DataFrame({'log($\\lambda$)':[math.log(i) for i in grid_param_1], 'CV Average AUC' : scores_mean_tr, 'type' : ['train']*len(scores_mean_tr)})\n df2 = pd.DataFrame({'log($\\lambda$)':[math.log(i) for i in grid_param_1], 'CV Average AUC' : scores_mean, 'type' : ['test']*len(scores_mean_tr)})\n sns.lineplot(x = 'log($\\lambda$)', y = 'CV Average AUC', style='type', legend = False, markers = \"o\", data = df1, ax = ax[count_plt//3][count_plt%3])\n sns.lineplot(x = 'log($\\lambda$)', y = 'CV Average AUC', style='type', legend = False, markers = \"o\", data = df2, ax = 
ax[count_plt//3][count_plt%3])\n\n ax[count_plt//3][count_plt%3].set_xlabel('log($\\lambda$)', fontsize=14)\n ax[count_plt//3][count_plt%3].set_ylabel('CV Average AUC' , fontsize=14) \n \n #pprint(clf.cv_results_)\n #pdb.set_trace() # Type \"exit\" to get out, type \"c\" to continue\n count_plt += 1\n if len(perms) == 1:\n coef_idx = np.nonzero(fit.best_estimator_.coef_)\n nz_coef_idx[freq_bands[i]].append(coef_idx)\n nz_coef_val[freq_bands[i]].append(fit.best_estimator_.coef_[coef_idx])\n\n return auc, nz_coef_idx, nz_coef_val, count\n\n##############################################################################\ndef CV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save, \n classifiers, parameters, n_BAitaSig = None):\n \"\"\"\n Parameters\n ----------\n X : np.array \n Matrix with dimension (subjects)x(feature vector).\n y : np.array\n Vector with classifications (0: healthy, 1: schizo).\n n_scz_te : int\n Desired number of schizophrenic patients in each test set.\n reps : range(*)\n Range with desired number (*) of extra times the code should run.\n separate_bands : boolean\n True = seperate data into frequency bands. False = don't separate.\n perms : range(*)\n Range with desired number (*) of permutations. \n *=1 indicates no permutations.\n dir_save : string\n Directory path to where the results should be saved.\n classifiers : dictionary\n Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}\n parameters : dictionary\n Dictionary containing parameters to the classifiers as in \"classifiers\"\n\n Notes\n -------\n Saves three different values in the dir_save path: \n auc : dictionary\n Contains the auc-scores for each loop, either divided into bands or \n with the key \"all\".\n nz_coef_idx : dictionary\n Contains the non-zero coefficient indices for each loop, either \n divided into bands or with the key \"all\".\n nz_coef_val : dictionary\n Contains the non-zero coefficient values (the weights) for each \n loop, either divided into bands or with the key \"all\".\n \n \"\"\" \n \n # Check if data should be seperated into bands or not:\n if separate_bands:\n freq_bands = ['delta', 'theta', 'alpha', 'beta1', 'beta2', 'gamma']\n else:\n freq_bands = ['all']\n \n if len(perms) > 1:\n y_org = y\n tqdm_perms = tqdm(perms)\n tqdm_reps = reps\n else: \n tqdm_perms = perms\n tqdm_reps = tqdm(reps)\n \n # Initialize space for values \n auc = {}\n nz_coef_idx= {}\n nz_coef_val= {}\n nb_loops = len(reps)*(sum(y==0)//n_scz_te)*len(perms)\n # Define the size of X\n x_size = int(X.shape[1]/len(freq_bands))\n for i in freq_bands:\n auc[i] = np.zeros(nb_loops) # e.g. 
auc = {'delta':[] , 'theta': [], 'alpha': [], ....}\n nz_coef_idx[i] = []\n nz_coef_val[i] = []\n \n count = -1\n for perm in tqdm_perms:\n if len(perms) > 1:\n y = shuffle(y_org, random_state=perm).reset_index(drop=True)\n \n for rep in tqdm_reps:\n auc, nz_coef_idx, nz_coef_val, count = leaveKout_CV(X, y, n_scz_te, rep, \n perms, classifiers, parameters, count, \n freq_bands, x_size, auc, nz_coef_idx, \n nz_coef_val, n_BAitaSig)\n\n\n\n#%%\ncon_type = 'lps'\nseparate_bands = True # False = All bands together\npartialData = True\n\natlas = 'BAita' # DKEgill, BAita, BAitaSig\n\nsns.set(font_scale=1.5)\nfreq_band_type = 'DiLorenzo'\n# Directories\ndir_folders = r'/share/FannyMaster/PythonNew/' + atlas + '_timeseries_'\nnewest_date = getNewestFolderDate(dir_folders)\ndir_features = dir_folders + newest_date + '/' + freq_band_type + '/Features' \ndir_y_ID = r'/share/FannyMaster/PythonNew/Age_Gender.csv'\nn_scz_te = 2\nreps = range(1)\nclassifiers = {'lasso' : Lasso(max_iter = 10000)} \ndir_save = dir_folders + newest_date + '/' + freq_band_type + '/classificationResults/' + con_type.capitalize() \nX,y = get_Xy(dir_features, dir_y_ID, con_type, partialData)\n\nif atlas == 'DKEgill':\n X = getEgillX(X)\n n_BAitaSig = None\n parameters = getEgillParameters(con_type, separate_bands)\nelif atlas == 'BAitaSig':\n X, n_BAitaSig = significant_connected_areasBAitaSigX(X)\n parameters = getBAitaSigParameters(con_type, separate_bands)\nelif atlas == 'BAita':\n parameters = getBAitaParameters(con_type, separate_bands)\n n_BAitaSig = None\n\nperms = range(1) # 1 = No permutations\nCV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save, \n classifiers, parameters)\n\n\n\n\n",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
## n.b. uses python 3 wordseg virtualenv (wordseg needs Py3)
# e.g. $ source ~/venvs/Py3/wordseg/bin/activate
## wordseg: see https://wordseg.readthedocs.io
from __future__ import division
import io, collections, os, glob, csv, re
from scipy.stats import entropy
from copy import deepcopy
# get username
import getpass
uname = getpass.getuser()
## get corpus stats
def process_corpus(lcount, text, language, corpus, child, utts, owus, pdict, bdict):
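    # Summarize the corpus seen so far: type/token counts and TTR, per-diphone
    # relative frequencies and boundary probabilities (written to the
    # *_diphone-system.txt file), entropy of the boundary and diphone
    # distributions, and a Zipf-Mandelbrot LNRE fit via lnre.R. Returns the
    # first part of one stats row as a list.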
owu = owus/utts
lineout1 = [language, corpus, child, utts, owu]
# corpus types, tokens
ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)
tokencount = sum(pdict.values())
lineout1.append(tokencount)
typecount = len(ordered)
lineout1.append(typecount)
ttr = typecount / tokencount
lineout1.append(ttr)
# diphone distributions
boundarydist = []
diphonedist = []
k=0
diphfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_diphone-system.txt'
with io.open(diphfile, 'w', encoding='utf8') as writefile:
writefile.write('k\tf\ttype\trel.freq\tboundary.prob\n') # only columns 1-3 are used by lnre.R
for diph, denom in ordered:
k+=1
if bdict[diph]:
num = bdict[diph]
else:
num = 0
boundprob = num / denom # boundary prob
boundarydist.append(boundprob)
relfreq = denom / tokencount # diphone prob
diphonedist.append(relfreq)
writefile.write('%i\t%i\t%s\t%.6f\t%.6f\n' % (k, denom, diph, relfreq, boundprob))
writefile.close()
# entropy calcs
boundaryH = entropy(boundarydist, qk=None, base=2)
lineout1.append(boundaryH)
diphoneH = entropy(diphonedist, qk=None, base=2)
lineout1.append(diphoneH)
# run Zipf LNRE fit (clear old file first)
tmplnre = '/Users/' + uname + '/tmp/lnre.txt'
cmd1 = 'rm '+ tmplnre
os.system(cmd1)
cmd2 = 'Rscript lnre.R '+ diphfile
os.system(cmd2)
if os.path.exists(tmplnre):
with open(tmplnre, 'r') as lnre:
for line in lnre:
lineout1.append(line.rstrip())
lnre.close()
else: # else 3 zeros
lineout1.append(0)
lineout1.append(0)
lineout1.append(0)
# get C_WALS stat (not in use)
#langcode = langcodes[lang]
return lineout1
## run wordseg
def word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount, wcount):
# start point is output of process_corpus()
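    # Writes the prepared/gold files with wordseg-prep, segments the corpus
    # with the requested algorithm, scores the result with wordseg-eval, and
    # appends the evaluation numbers to the stats row.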
lineout2 = deepcopy(lineout1)
meanlength = round(pcount/wcount, 6) # phones per word
pboundary = round(wcount/pcount, 6) # words per phone
lineout2.append(wcount)
lineout2.append(pcount)
lineout2.append(meanlength)
lineout2.append(pboundary)
# prepare filenames
tmpfile = '/Users/' + uname + '/tmp/tmp.txt'
goldfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_gold-for-wordseg.txt'
prepfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_prepared-for-wordseg.txt'
segfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_segmented-by_' + algo + '.txt'
evalfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_segmented-by_' + algo + '_eval.txt'
# write text so far to temporary file
tmp = open(tmpfile, 'w')
tmp.write(text)
tmp.close()
# prepare gold and input files for wordseg
os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' % (tmpfile, goldfile, prepfile)) # ignore punctuation
lineout2.append(algo)
# run wordseg command
if algo=='dibs': # DIBS-phrasal uses phrases (utterances) as chunks
os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile, algo, tmpfile, segfile))
elif algo=='utt_baseline': # utterance baseline
os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))
elif algo=='rand_baseline': # random baseline
os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile))
elif algo=='unit_baseline': # basic unit baseline
os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))
elif algo=='oracle': # oracle baseline: P(word|phone)
os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile, pboundary, segfile))
elif algo=='tp_ftp': # transitional prob: forwards
os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile, segfile))
    elif algo=='tp_btp': # transitional prob: backwards
os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile, segfile))
elif algo=='tp_mi': # transitional prob: mutual information
os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile, segfile))
else:
os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))
# evaluate
os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))
with open(evalfile, 'r') as eval:
for line in eval:
lineout2.append(re.sub('^[^\d]*', '', line.rstrip())) # strip from the start until first number encountered
eval.close()
print(lineout2)
return lineout2
## open results file
statsfile = '/Users/' + uname + '/Corpora/CHILDES/segmentation_experiment_stats.csv'
statsopen = open(statsfile,'wt')
statscsv = csv.writer(statsopen)
statscsv.writerow(('language', 'corpus', 'child', 'n.utterances', 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy', 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones', 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR', 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P', 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P', 'boundary.noedge.R', 'boundary.noedge.F'))
## input directory (the phonemized files)
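# 'thousand' matches utterance counts ending in 000 (checkpoints every 1000
# utterances); 'algos' lists the wordseg baselines and models compared at each
# checkpoint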
thousand = re.compile('000$')
algos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle', 'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']
directory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'
for filein in glob.glob(directory+'*_phonemes.txt', recursive=True):
print(filein)
# parse filename
(language, corpus, child) = filein.split('/')[-1].split('_')[0:3]
# read corpus
phondict = collections.Counter()
boundaries = collections.Counter()
phonecount = 0
wordcount = 0
with io.open(filein, 'r', encoding='utf8') as myfile:
linecount = 0
owucount = 0
inputsofar = ''
for line in myfile:
inputsofar += line
linecount += 1
ewords = line.count(';eword')
wordcount += ewords
if ewords==1:
owucount += 1
#print('utterance: %s' % (line.rstrip()))
phones = line.split() # split on whitespace
nphones = len(phones) - ewords
phonecount += nphones
for (i, phone) in enumerate(phones):
if i==0 or phones[i]==';eword' or phones[i-1]==';eword':
pass # ignore phone 1 in utterance or word and word delimiters
else:
diphone = phones[i-1] + phones[i]
phondict[diphone] += 1
if i==1 or phones[i+1]==';eword' or phones[i-2]==';eword':
#print('boundary diphone: %s' % (diphone))
boundaries[diphone] += 1
#print('count: %i' % (boundaries[diphone]))
# reached iteration point? (round 1000)
if thousand.search(str(linecount)):
csvline1 = process_corpus(linecount, inputsofar, language, corpus, child, linecount, owucount, phondict, boundaries)
for a in algos:
csvline2 = word_seg(linecount, inputsofar, a, csvline1, language, corpus, child, phonecount, wordcount)
statscsv.writerow((csvline2))
# run again at end of file, if not round 1000 line count
if not thousand.search(str(linecount)):
csvline1 = process_corpus(linecount, inputsofar, language, corpus, child, linecount, owucount, phondict, boundaries)
for a in algos:
csvline2 = word_seg(linecount, inputsofar, a, csvline1, language, corpus, child, phonecount, wordcount)
statscsv.writerow((csvline2))
myfile.close()
print('FINISHED')
print('see '+ statsfile)
|
normal
|
{
"blob_id": "4ba0affd3cbdc2652274213a8d410b541fb3edb4",
"index": 4584,
"step-1": "<mask token>\n\n\ndef process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,\n bdict):\n owu = owus / utts\n lineout1 = [language, corpus, child, utts, owu]\n ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)\n tokencount = sum(pdict.values())\n lineout1.append(tokencount)\n typecount = len(ordered)\n lineout1.append(typecount)\n ttr = typecount / tokencount\n lineout1.append(ttr)\n boundarydist = []\n diphonedist = []\n k = 0\n diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_diphone-system.txt')\n with io.open(diphfile, 'w', encoding='utf8') as writefile:\n writefile.write('k\\tf\\ttype\\trel.freq\\tboundary.prob\\n')\n for diph, denom in ordered:\n k += 1\n if bdict[diph]:\n num = bdict[diph]\n else:\n num = 0\n boundprob = num / denom\n boundarydist.append(boundprob)\n relfreq = denom / tokencount\n diphonedist.append(relfreq)\n writefile.write('%i\\t%i\\t%s\\t%.6f\\t%.6f\\n' % (k, denom, diph,\n relfreq, boundprob))\n writefile.close()\n boundaryH = entropy(boundarydist, qk=None, base=2)\n lineout1.append(boundaryH)\n diphoneH = entropy(diphonedist, qk=None, base=2)\n lineout1.append(diphoneH)\n tmplnre = '/Users/' + uname + '/tmp/lnre.txt'\n cmd1 = 'rm ' + tmplnre\n os.system(cmd1)\n cmd2 = 'Rscript lnre.R ' + diphfile\n os.system(cmd2)\n if os.path.exists(tmplnre):\n with open(tmplnre, 'r') as lnre:\n for line in lnre:\n lineout1.append(line.rstrip())\n lnre.close()\n else:\n lineout1.append(0)\n lineout1.append(0)\n lineout1.append(0)\n return lineout1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,\n bdict):\n owu = owus / utts\n lineout1 = [language, corpus, child, utts, owu]\n ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)\n tokencount = sum(pdict.values())\n lineout1.append(tokencount)\n typecount = len(ordered)\n lineout1.append(typecount)\n ttr = typecount / tokencount\n lineout1.append(ttr)\n boundarydist = []\n diphonedist = []\n k = 0\n diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_diphone-system.txt')\n with io.open(diphfile, 'w', encoding='utf8') as writefile:\n writefile.write('k\\tf\\ttype\\trel.freq\\tboundary.prob\\n')\n for diph, denom in ordered:\n k += 1\n if bdict[diph]:\n num = bdict[diph]\n else:\n num = 0\n boundprob = num / denom\n boundarydist.append(boundprob)\n relfreq = denom / tokencount\n diphonedist.append(relfreq)\n writefile.write('%i\\t%i\\t%s\\t%.6f\\t%.6f\\n' % (k, denom, diph,\n relfreq, boundprob))\n writefile.close()\n boundaryH = entropy(boundarydist, qk=None, base=2)\n lineout1.append(boundaryH)\n diphoneH = entropy(diphonedist, qk=None, base=2)\n lineout1.append(diphoneH)\n tmplnre = '/Users/' + uname + '/tmp/lnre.txt'\n cmd1 = 'rm ' + tmplnre\n os.system(cmd1)\n cmd2 = 'Rscript lnre.R ' + diphfile\n os.system(cmd2)\n if os.path.exists(tmplnre):\n with open(tmplnre, 'r') as lnre:\n for line in lnre:\n lineout1.append(line.rstrip())\n lnre.close()\n else:\n lineout1.append(0)\n lineout1.append(0)\n lineout1.append(0)\n return lineout1\n\n\ndef word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount,\n wcount):\n lineout2 = deepcopy(lineout1)\n meanlength = round(pcount / wcount, 6)\n pboundary = round(wcount / pcount, 6)\n lineout2.append(wcount)\n lineout2.append(pcount)\n lineout2.append(meanlength)\n lineout2.append(pboundary)\n tmpfile = '/Users/' + uname + '/tmp/tmp.txt'\n goldfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_gold-for-wordseg.txt')\n prepfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_prepared-for-wordseg.txt')\n segfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '.txt')\n evalfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '_eval.txt')\n tmp = open(tmpfile, 'w')\n tmp.write(text)\n tmp.close()\n os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' %\n (tmpfile, goldfile, prepfile))\n lineout2.append(algo)\n if algo == 'dibs':\n os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile,\n algo, tmpfile, segfile))\n elif algo == 'utt_baseline':\n os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))\n elif algo == 'rand_baseline':\n os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile)\n )\n elif algo == 'unit_baseline':\n os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))\n elif algo == 'oracle':\n os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile,\n pboundary, segfile))\n elif algo == 'tp_ftp':\n os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_btp':\n os.system('cat 
%s | wordseg-tp -d btp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_mi':\n os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile,\n segfile))\n else:\n os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))\n os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))\n with open(evalfile, 'r') as eval:\n for line in eval:\n lineout2.append(re.sub('^[^\\\\d]*', '', line.rstrip()))\n eval.close()\n print(lineout2)\n return lineout2\n\n\n<mask token>\nstatscsv.writerow(('language', 'corpus', 'child', 'n.utterances',\n 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy',\n 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones',\n 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR',\n 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P',\n 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P',\n 'boundary.noedge.R', 'boundary.noedge.F'))\n<mask token>\nfor filein in glob.glob(directory + '*_phonemes.txt', recursive=True):\n print(filein)\n language, corpus, child = filein.split('/')[-1].split('_')[0:3]\n phondict = collections.Counter()\n boundaries = collections.Counter()\n phonecount = 0\n wordcount = 0\n with io.open(filein, 'r', encoding='utf8') as myfile:\n linecount = 0\n owucount = 0\n inputsofar = ''\n for line in myfile:\n inputsofar += line\n linecount += 1\n ewords = line.count(';eword')\n wordcount += ewords\n if ewords == 1:\n owucount += 1\n phones = line.split()\n nphones = len(phones) - ewords\n phonecount += nphones\n for i, phone in enumerate(phones):\n if i == 0 or phones[i] == ';eword' or phones[i - 1\n ] == ';eword':\n pass\n else:\n diphone = phones[i - 1] + phones[i]\n phondict[diphone] += 1\n if i == 1 or phones[i + 1] == ';eword' or phones[i - 2\n ] == ';eword':\n boundaries[diphone] += 1\n if thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n if not thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n myfile.close()\nprint('FINISHED')\nprint('see ' + statsfile)\n",
"step-3": "<mask token>\nuname = getpass.getuser()\n\n\ndef process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,\n bdict):\n owu = owus / utts\n lineout1 = [language, corpus, child, utts, owu]\n ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)\n tokencount = sum(pdict.values())\n lineout1.append(tokencount)\n typecount = len(ordered)\n lineout1.append(typecount)\n ttr = typecount / tokencount\n lineout1.append(ttr)\n boundarydist = []\n diphonedist = []\n k = 0\n diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_diphone-system.txt')\n with io.open(diphfile, 'w', encoding='utf8') as writefile:\n writefile.write('k\\tf\\ttype\\trel.freq\\tboundary.prob\\n')\n for diph, denom in ordered:\n k += 1\n if bdict[diph]:\n num = bdict[diph]\n else:\n num = 0\n boundprob = num / denom\n boundarydist.append(boundprob)\n relfreq = denom / tokencount\n diphonedist.append(relfreq)\n writefile.write('%i\\t%i\\t%s\\t%.6f\\t%.6f\\n' % (k, denom, diph,\n relfreq, boundprob))\n writefile.close()\n boundaryH = entropy(boundarydist, qk=None, base=2)\n lineout1.append(boundaryH)\n diphoneH = entropy(diphonedist, qk=None, base=2)\n lineout1.append(diphoneH)\n tmplnre = '/Users/' + uname + '/tmp/lnre.txt'\n cmd1 = 'rm ' + tmplnre\n os.system(cmd1)\n cmd2 = 'Rscript lnre.R ' + diphfile\n os.system(cmd2)\n if os.path.exists(tmplnre):\n with open(tmplnre, 'r') as lnre:\n for line in lnre:\n lineout1.append(line.rstrip())\n lnre.close()\n else:\n lineout1.append(0)\n lineout1.append(0)\n lineout1.append(0)\n return lineout1\n\n\ndef word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount,\n wcount):\n lineout2 = deepcopy(lineout1)\n meanlength = round(pcount / wcount, 6)\n pboundary = round(wcount / pcount, 6)\n lineout2.append(wcount)\n lineout2.append(pcount)\n lineout2.append(meanlength)\n lineout2.append(pboundary)\n tmpfile = '/Users/' + uname + '/tmp/tmp.txt'\n goldfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_gold-for-wordseg.txt')\n prepfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_prepared-for-wordseg.txt')\n segfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '.txt')\n evalfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '_eval.txt')\n tmp = open(tmpfile, 'w')\n tmp.write(text)\n tmp.close()\n os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' %\n (tmpfile, goldfile, prepfile))\n lineout2.append(algo)\n if algo == 'dibs':\n os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile,\n algo, tmpfile, segfile))\n elif algo == 'utt_baseline':\n os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))\n elif algo == 'rand_baseline':\n os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile)\n )\n elif algo == 'unit_baseline':\n os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))\n elif algo == 'oracle':\n os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile,\n pboundary, segfile))\n elif algo == 'tp_ftp':\n os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 
'tp_btp':\n os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_mi':\n os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile,\n segfile))\n else:\n os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))\n os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))\n with open(evalfile, 'r') as eval:\n for line in eval:\n lineout2.append(re.sub('^[^\\\\d]*', '', line.rstrip()))\n eval.close()\n print(lineout2)\n return lineout2\n\n\nstatsfile = ('/Users/' + uname +\n '/Corpora/CHILDES/segmentation_experiment_stats.csv')\nstatsopen = open(statsfile, 'wt')\nstatscsv = csv.writer(statsopen)\nstatscsv.writerow(('language', 'corpus', 'child', 'n.utterances',\n 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy',\n 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones',\n 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR',\n 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P',\n 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P',\n 'boundary.noedge.R', 'boundary.noedge.F'))\nthousand = re.compile('000$')\nalgos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle',\n 'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']\ndirectory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'\nfor filein in glob.glob(directory + '*_phonemes.txt', recursive=True):\n print(filein)\n language, corpus, child = filein.split('/')[-1].split('_')[0:3]\n phondict = collections.Counter()\n boundaries = collections.Counter()\n phonecount = 0\n wordcount = 0\n with io.open(filein, 'r', encoding='utf8') as myfile:\n linecount = 0\n owucount = 0\n inputsofar = ''\n for line in myfile:\n inputsofar += line\n linecount += 1\n ewords = line.count(';eword')\n wordcount += ewords\n if ewords == 1:\n owucount += 1\n phones = line.split()\n nphones = len(phones) - ewords\n phonecount += nphones\n for i, phone in enumerate(phones):\n if i == 0 or phones[i] == ';eword' or phones[i - 1\n ] == ';eword':\n pass\n else:\n diphone = phones[i - 1] + phones[i]\n phondict[diphone] += 1\n if i == 1 or phones[i + 1] == ';eword' or phones[i - 2\n ] == ';eword':\n boundaries[diphone] += 1\n if thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n if not thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n myfile.close()\nprint('FINISHED')\nprint('see ' + statsfile)\n",
"step-4": "from __future__ import division\nimport io, collections, os, glob, csv, re\nfrom scipy.stats import entropy\nfrom copy import deepcopy\nimport getpass\nuname = getpass.getuser()\n\n\ndef process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,\n bdict):\n owu = owus / utts\n lineout1 = [language, corpus, child, utts, owu]\n ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)\n tokencount = sum(pdict.values())\n lineout1.append(tokencount)\n typecount = len(ordered)\n lineout1.append(typecount)\n ttr = typecount / tokencount\n lineout1.append(ttr)\n boundarydist = []\n diphonedist = []\n k = 0\n diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_diphone-system.txt')\n with io.open(diphfile, 'w', encoding='utf8') as writefile:\n writefile.write('k\\tf\\ttype\\trel.freq\\tboundary.prob\\n')\n for diph, denom in ordered:\n k += 1\n if bdict[diph]:\n num = bdict[diph]\n else:\n num = 0\n boundprob = num / denom\n boundarydist.append(boundprob)\n relfreq = denom / tokencount\n diphonedist.append(relfreq)\n writefile.write('%i\\t%i\\t%s\\t%.6f\\t%.6f\\n' % (k, denom, diph,\n relfreq, boundprob))\n writefile.close()\n boundaryH = entropy(boundarydist, qk=None, base=2)\n lineout1.append(boundaryH)\n diphoneH = entropy(diphonedist, qk=None, base=2)\n lineout1.append(diphoneH)\n tmplnre = '/Users/' + uname + '/tmp/lnre.txt'\n cmd1 = 'rm ' + tmplnre\n os.system(cmd1)\n cmd2 = 'Rscript lnre.R ' + diphfile\n os.system(cmd2)\n if os.path.exists(tmplnre):\n with open(tmplnre, 'r') as lnre:\n for line in lnre:\n lineout1.append(line.rstrip())\n lnre.close()\n else:\n lineout1.append(0)\n lineout1.append(0)\n lineout1.append(0)\n return lineout1\n\n\ndef word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount,\n wcount):\n lineout2 = deepcopy(lineout1)\n meanlength = round(pcount / wcount, 6)\n pboundary = round(wcount / pcount, 6)\n lineout2.append(wcount)\n lineout2.append(pcount)\n lineout2.append(meanlength)\n lineout2.append(pboundary)\n tmpfile = '/Users/' + uname + '/tmp/tmp.txt'\n goldfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_gold-for-wordseg.txt')\n prepfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_prepared-for-wordseg.txt')\n segfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '.txt')\n evalfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '_eval.txt')\n tmp = open(tmpfile, 'w')\n tmp.write(text)\n tmp.close()\n os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' %\n (tmpfile, goldfile, prepfile))\n lineout2.append(algo)\n if algo == 'dibs':\n os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile,\n algo, tmpfile, segfile))\n elif algo == 'utt_baseline':\n os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))\n elif algo == 'rand_baseline':\n os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile)\n )\n elif algo == 'unit_baseline':\n os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))\n elif algo == 'oracle':\n os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile,\n 
pboundary, segfile))\n elif algo == 'tp_ftp':\n os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_btp':\n os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_mi':\n os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile,\n segfile))\n else:\n os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))\n os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))\n with open(evalfile, 'r') as eval:\n for line in eval:\n lineout2.append(re.sub('^[^\\\\d]*', '', line.rstrip()))\n eval.close()\n print(lineout2)\n return lineout2\n\n\nstatsfile = ('/Users/' + uname +\n '/Corpora/CHILDES/segmentation_experiment_stats.csv')\nstatsopen = open(statsfile, 'wt')\nstatscsv = csv.writer(statsopen)\nstatscsv.writerow(('language', 'corpus', 'child', 'n.utterances',\n 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy',\n 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones',\n 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR',\n 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P',\n 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P',\n 'boundary.noedge.R', 'boundary.noedge.F'))\nthousand = re.compile('000$')\nalgos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle',\n 'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']\ndirectory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'\nfor filein in glob.glob(directory + '*_phonemes.txt', recursive=True):\n print(filein)\n language, corpus, child = filein.split('/')[-1].split('_')[0:3]\n phondict = collections.Counter()\n boundaries = collections.Counter()\n phonecount = 0\n wordcount = 0\n with io.open(filein, 'r', encoding='utf8') as myfile:\n linecount = 0\n owucount = 0\n inputsofar = ''\n for line in myfile:\n inputsofar += line\n linecount += 1\n ewords = line.count(';eword')\n wordcount += ewords\n if ewords == 1:\n owucount += 1\n phones = line.split()\n nphones = len(phones) - ewords\n phonecount += nphones\n for i, phone in enumerate(phones):\n if i == 0 or phones[i] == ';eword' or phones[i - 1\n ] == ';eword':\n pass\n else:\n diphone = phones[i - 1] + phones[i]\n phondict[diphone] += 1\n if i == 1 or phones[i + 1] == ';eword' or phones[i - 2\n ] == ';eword':\n boundaries[diphone] += 1\n if thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n if not thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n myfile.close()\nprint('FINISHED')\nprint('see ' + statsfile)\n",
"step-5": "## n.b. uses python 3 wordseg virtualenv (wordseg needs Py3)\r\n# e.g. $ source ~/venvs/Py3/wordseg/bin/activate\r\n\r\n## wordseg: see https://wordseg.readthedocs.io\r\nfrom __future__ import division\r\nimport io, collections, os, glob, csv, re\r\nfrom scipy.stats import entropy\r\nfrom copy import deepcopy\r\n\r\n# get username\r\nimport getpass\r\nuname = getpass.getuser()\r\n\r\n## get corpus stats\r\ndef process_corpus(lcount, text, language, corpus, child, utts, owus, pdict, bdict):\r\n owu = owus/utts\r\n lineout1 = [language, corpus, child, utts, owu]\r\n # corpus types, tokens\r\n ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)\r\n tokencount = sum(pdict.values())\r\n lineout1.append(tokencount)\r\n typecount = len(ordered)\r\n lineout1.append(typecount)\r\n ttr = typecount / tokencount\r\n lineout1.append(ttr)\r\n # diphone distributions\r\n boundarydist = []\r\n diphonedist = []\r\n k=0\r\n diphfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_diphone-system.txt'\r\n with io.open(diphfile, 'w', encoding='utf8') as writefile:\r\n writefile.write('k\\tf\\ttype\\trel.freq\\tboundary.prob\\n') # only columns 1-3 are used by lnre.R\r\n for diph, denom in ordered:\r\n k+=1\r\n if bdict[diph]:\r\n num = bdict[diph]\r\n else:\r\n num = 0\r\n boundprob = num / denom # boundary prob\r\n boundarydist.append(boundprob)\r\n relfreq = denom / tokencount # diphone prob\r\n diphonedist.append(relfreq)\r\n writefile.write('%i\\t%i\\t%s\\t%.6f\\t%.6f\\n' % (k, denom, diph, relfreq, boundprob))\r\n writefile.close()\r\n # entropy calcs\r\n boundaryH = entropy(boundarydist, qk=None, base=2)\r\n lineout1.append(boundaryH)\r\n diphoneH = entropy(diphonedist, qk=None, base=2)\r\n lineout1.append(diphoneH)\r\n # run Zipf LNRE fit (clear old file first)\r\n tmplnre = '/Users/' + uname + '/tmp/lnre.txt'\r\n cmd1 = 'rm '+ tmplnre\r\n os.system(cmd1)\r\n cmd2 = 'Rscript lnre.R '+ diphfile\r\n os.system(cmd2)\r\n if os.path.exists(tmplnre):\r\n with open(tmplnre, 'r') as lnre:\r\n for line in lnre:\r\n lineout1.append(line.rstrip())\r\n lnre.close()\r\n else: # else 3 zeros\r\n lineout1.append(0)\r\n lineout1.append(0)\r\n lineout1.append(0)\r\n # get C_WALS stat (not in use)\r\n #langcode = langcodes[lang]\r\n return lineout1\r\n\r\n## run wordseg\r\ndef word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount, wcount):\r\n # start point is output of process_corpus()\r\n lineout2 = deepcopy(lineout1)\r\n meanlength = round(pcount/wcount, 6) # phones per word\r\n pboundary = round(wcount/pcount, 6) # words per phone\r\n lineout2.append(wcount)\r\n lineout2.append(pcount)\r\n lineout2.append(meanlength)\r\n lineout2.append(pboundary)\r\n # prepare filenames\r\n tmpfile = '/Users/' + uname + '/tmp/tmp.txt'\r\n goldfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_gold-for-wordseg.txt'\r\n prepfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_prepared-for-wordseg.txt'\r\n segfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_segmented-by_' + algo + '.txt'\r\n evalfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_segmented-by_' + algo + '_eval.txt'\r\n # write text so far to 
temporary file\r\n tmp = open(tmpfile, 'w')\r\n tmp.write(text)\r\n tmp.close()\r\n # prepare gold and input files for wordseg\r\n os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' % (tmpfile, goldfile, prepfile)) # ignore punctuation\r\n lineout2.append(algo)\r\n # run wordseg command\r\n if algo=='dibs': # DIBS-phrasal uses phrases (utterances) as chunks\r\n os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile, algo, tmpfile, segfile))\r\n elif algo=='utt_baseline': # utterance baseline\r\n os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))\r\n elif algo=='rand_baseline': # random baseline\r\n os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile))\r\n elif algo=='unit_baseline': # basic unit baseline\r\n os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))\r\n elif algo=='oracle': # oracle baseline: P(word|phone)\r\n os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile, pboundary, segfile))\r\n elif algo=='tp_ftp': # transitional prob: forwards\r\n os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile, segfile))\r\n elif algo=='tp_btp': # transitional prob: forwards\r\n os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile, segfile))\r\n elif algo=='tp_mi': # transitional prob: mutual information\r\n os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile, segfile))\r\n else:\r\n os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))\r\n # evaluate\r\n os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))\r\n with open(evalfile, 'r') as eval:\r\n for line in eval:\r\n lineout2.append(re.sub('^[^\\d]*', '', line.rstrip())) # strip from the start until first number encountered\r\n eval.close()\r\n print(lineout2)\r\n return lineout2\r\n\r\n## open results file\r\nstatsfile = '/Users/' + uname + '/Corpora/CHILDES/segmentation_experiment_stats.csv'\r\nstatsopen = open(statsfile,'wt')\r\nstatscsv = csv.writer(statsopen)\r\nstatscsv.writerow(('language', 'corpus', 'child', 'n.utterances', 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy', 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones', 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR', 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P', 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P', 'boundary.noedge.R', 'boundary.noedge.F'))\r\n\r\n## input directory (the phonemized files)\r\nthousand = re.compile('000$')\r\nalgos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle', 'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']\r\ndirectory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'\r\nfor filein in glob.glob(directory+'*_phonemes.txt', recursive=True):\r\n print(filein)\r\n # parse filename\r\n (language, corpus, child) = filein.split('/')[-1].split('_')[0:3]\r\n # read corpus\r\n phondict = collections.Counter()\r\n boundaries = collections.Counter()\r\n phonecount = 0\r\n wordcount = 0\r\n with io.open(filein, 'r', encoding='utf8') as myfile:\r\n linecount = 0\r\n owucount = 0\r\n inputsofar = ''\r\n for line in myfile:\r\n inputsofar += line\r\n linecount += 1\r\n ewords = line.count(';eword')\r\n wordcount += ewords\r\n if ewords==1:\r\n owucount += 1\r\n #print('utterance: %s' % (line.rstrip()))\r\n phones = line.split() # split on whitespace\r\n nphones = len(phones) - ewords\r\n phonecount += nphones\r\n for (i, phone) in enumerate(phones):\r\n if i==0 or phones[i]==';eword' or 
phones[i-1]==';eword':\r\n pass # ignore phone 1 in utterance or word and word delimiters\r\n else:\r\n diphone = phones[i-1] + phones[i]\r\n phondict[diphone] += 1\r\n if i==1 or phones[i+1]==';eword' or phones[i-2]==';eword':\r\n #print('boundary diphone: %s' % (diphone))\r\n boundaries[diphone] += 1\r\n #print('count: %i' % (boundaries[diphone]))\r\n # reached iteration point? (round 1000)\r\n if thousand.search(str(linecount)):\r\n csvline1 = process_corpus(linecount, inputsofar, language, corpus, child, linecount, owucount, phondict, boundaries)\r\n for a in algos:\r\n csvline2 = word_seg(linecount, inputsofar, a, csvline1, language, corpus, child, phonecount, wordcount)\r\n statscsv.writerow((csvline2))\r\n # run again at end of file, if not round 1000 line count\r\n if not thousand.search(str(linecount)):\r\n csvline1 = process_corpus(linecount, inputsofar, language, corpus, child, linecount, owucount, phondict, boundaries)\r\n for a in algos:\r\n csvline2 = word_seg(linecount, inputsofar, a, csvline1, language, corpus, child, phonecount, wordcount)\r\n statscsv.writerow((csvline2))\r\n myfile.close()\r\n\r\nprint('FINISHED')\r\nprint('see '+ statsfile)\r\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from django import http
from django.utils import simplejson as json
import urllib2
import logging
from google.appengine.api import urlfetch
import cmath
import math
from ams.forthsquare import ForthSquare
from ams.twitter import Twitter
OAUTH_TOKEN='3NX4ATMVS35LKIP25ZOKIVBRGAHFREKGNHTAKQ5NPGMCWOE0'
DEFAULT_RADIUS = 500.0
DEFAULT_LIMIT = 5
forthsquare = ForthSquare()
twitter = Twitter()
#arts, education,events, food, night, outdoors, professional, residence, shop, travel
CATEGORIES = [('arts','4d4b7104d754a06370d81259'),\
('education','4d4b7105d754a06372d81259'),\
('events','4d4b7105d754a06373d81259'),\
('food','4d4b7105d754a06374d81259'),\
('night','4d4b7105d754a06376d81259'),\
('outdoors','4d4b7105d754a06377d81259'),\
('professional','4d4b7105d754a06375d81259'),\
('residence','4e67e38e036454776db1fb3a'),\
('shop','4d4b7105d754a06378d81259'),\
('travel','4d4b7105d754a06379d81259')]
def venues(request):
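    # GET /venues?ll=<lat,lng>[&categories=...][&alpha=&beta=&gamma=][&radius=]
    # Looks up nearby Foursquare venues, optionally restricted by category and,
    # when device orientation (alpha/beta/gamma) is supplied, to venues inside
    # the radar sector; responds with a JSON list and a CORS header.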
if not request.GET or not request.GET.get(u'll'):
return http.HttpResponseBadRequest('Wrong data')
pos = request.GET.get(u'll')
lat0 = float(pos.split(',')[0])
lng0 = float(pos.split(',')[1])
categories = []
param_categories = request.GET.get(u'categories')
print 'PARAM CATEGORIES: ' + str(param_categories)
if param_categories:
categories = param_categories.split(',')
print '0-point coord:', lat0, lng0
radius = '&radius=%s' %(DEFAULT_RADIUS)
limit = '&limit=%s' %(DEFAULT_LIMIT)
    filter_url_categories = '' # no category filter unless one is requested
    for categoryName in categories:
categoryId = getCategoryId(categoryName)
filter_url_categories = '&categoryId=%s' %(categoryId)
json_data = forthsquare.venues(pos, limit, radius, filter_url_categories)
# print 'data:', json_data
venues = json_data['response']['venues']
print "got some venues"
if request.GET.get('alpha') and request.GET.get('beta') and request.GET.get('gamma'):
print 'point2'
alpha = float(request.GET.get('alpha'))
beta = float(request.GET.get('beta'))
gamma = float(request.GET.get('gamma'))
        if request.GET.get('radius'):
            radius = float(request.GET.get('radius'))
        else:
            radius = DEFAULT_RADIUS
venuesInRadar = list()
for i in venues:
# print i
if 'url' in i:
print i['url']
if i and i.get('location') and i.get('location').get('lat') and i.get('location').get('lng') and i.get('location').get('distance'):
lat = float(i.get('location').get('lat'))
lng = float(i.get('location').get('lng'))
distance = float(i.get('location').get('distance'))
print 'lat:', lat
print 'lng:', lng
print 'distance:', distance
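                # venueInRadarRange() and DEFAULT_SPREAD_ANGLE are referenced
                # here but are not defined or imported in this module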
if venueInRadarRange(lat0, lng0, alpha, beta, gamma, radius, DEFAULT_SPREAD_ANGLE, lat, lng, distance):
# res = {'lat' : lat, 'lng': lng, 'id' : i['id'].encode("utf-8"), 'name' : i['name'].encode("utf-8") }
venuesInRadar.append(forthsquare.getVenueData(i))
print len(venuesInRadar)
print venuesInRadar
print 'point3'
else:
venuesInRadar = list()
for i in venues:
venuesInRadar.append(forthsquare.getVenueData(i))
'''
if len(venuesInRadar) == 1:
# return detailed information
response = http.HttpResponse(venuesInRadar[0],
content_type='application/json')
response["Access-Control-Allow-Origin"] = "*"
return response
'''
print venuesInRadar
print type(venuesInRadar)
print 'point4'
response = http.HttpResponse(json.dumps(venuesInRadar),
content_type='application/json')
response["Access-Control-Allow-Origin"] = "*"
return response
def getCategoryId(category):
for name,id in CATEGORIES:
if name == category:
return id
def comments(request):
# id = '4a688ba1f964a52088ca1fe3'
if not request.GET or not request.GET.get(u'id'):
return http.HttpResponseBadRequest('Wrong data')
id = request.GET.get(u'id')
foursquareRequest = 'https://api.foursquare.com/v2/venues/%s/tips?sort=recent&oauth_token=%s&v=20140517' % (id, OAUTH_TOKEN)
foursquareResponse = urllib2.urlopen(foursquareRequest)
json_raw = foursquareResponse.read()
# json_data = json.loads(json_raw)
response = http.HttpResponse(json.dumps(json_raw),
content_type='application/json')
response["Access-Control-Allow-Origin"] = "*"
return response
def getPhoto(id):
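    # Fetch the single most recent photo for a venue id from the Foursquare
    # API; returns the parsed JSON response, or None if the response was empty.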
foursquareRequest = 'https://api.foursquare.com/v2/venues/%s/photos?limit=1&sort=recent&oauth_token=%s&v=20140517' % (id, OAUTH_TOKEN)
foursquareResponse = urllib2.urlopen(foursquareRequest)
json_raw = foursquareResponse.read()
if not json_raw:
return None
json_data = json.loads(json_raw)
print json_data
return json_data
def photos(request):
# id = '4a688ba1f964a52088ca1fe3'
if not request.GET or not request.GET.get(u'id'):
return http.HttpResponseBadRequest('Wrong data')
id = request.GET.get(u'id')
foursquareRequest = 'https://api.foursquare.com/v2/venues/%s/photos?limit=1&sort=recent&oauth_token=%s&v=20140517' % (id, OAUTH_TOKEN)
foursquareResponse = urllib2.urlopen(foursquareRequest)
json_raw = foursquareResponse.read()
# json_data = json.loads(json_raw)
if not json_raw:
        return http.HttpResponseNotFound('photo does not exist')
response = http.HttpResponse(json.dumps(json_raw),
content_type='application/json')
response["Access-Control-Allow-Origin"] = "*"
return response
def tweets(request):
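    # GET /tweets?ll=<lat,lng>: return recent tweets within 0.02 km of the
    # point, as JSON with a CORS header.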
radius="0.02km"
if not request.GET or not request.GET.get(u'll'):
return http.HttpResponseBadRequest('Wrong data')
pos = request.GET.get(u'll')
result = twitter.tweets(radius, pos)
response = http.HttpResponse(result,
content_type='application/json')
response["Access-Control-Allow-Origin"] = "*"
return response
|
normal
|
{
"blob_id": "bd1fbdf70bae7d5853bac8fae83343dfa188ca19",
"index": 5391,
"step-1": "from django import http\nfrom django.utils import simplejson as json\nimport urllib2\nimport logging\nfrom google.appengine.api import urlfetch\nimport cmath\nimport math\nfrom ams.forthsquare import ForthSquare\nfrom ams.twitter import Twitter\n\n\nOAUTH_TOKEN='3NX4ATMVS35LKIP25ZOKIVBRGAHFREKGNHTAKQ5NPGMCWOE0'\n\nDEFAULT_RADIUS = 500.0\nDEFAULT_LIMIT = 5\n\nforthsquare = ForthSquare()\ntwitter = Twitter()\n\n#arts, education,events, food, night, outdoors, professional, residence, shop, travel\nCATEGORIES = [('arts','4d4b7104d754a06370d81259'),\\\n('education','4d4b7105d754a06372d81259'),\\\n('events','4d4b7105d754a06373d81259'),\\\n('food','4d4b7105d754a06374d81259'),\\\n('night','4d4b7105d754a06376d81259'),\\\n('outdoors','4d4b7105d754a06377d81259'),\\\n('professional','4d4b7105d754a06375d81259'),\\\n('residence','4e67e38e036454776db1fb3a'),\\\n('shop','4d4b7105d754a06378d81259'),\\\n('travel','4d4b7105d754a06379d81259')]\n\ndef venues(request):\n\n if not request.GET or not request.GET.get(u'll'):\n return http.HttpResponseBadRequest('Wrong data')\n\n pos = request.GET.get(u'll')\n lat0 = float(pos.split(',')[0])\n lng0 = float(pos.split(',')[1])\n \n categories = []\n param_categories = request.GET.get(u'categories')\n print 'PARAM CATEGORIES: ' + str(param_categories)\n if param_categories:\n categories = param_categories.split(',')\n\n print '0-point coord:', lat0, lng0\n\n radius = '&radius=%s' %(DEFAULT_RADIUS)\n limit = '&limit=%s' %(DEFAULT_LIMIT)\n\n for categoryName in categories:\n categoryId = getCategoryId(categoryName)\n filter_url_categories = '&categoryId=%s' %(categoryId)\n json_data = forthsquare.venues(pos, limit, radius, filter_url_categories) \n\n # print 'data:', json_data\n venues = json_data['response']['venues']\n print \"got some venues\"\n\n if request.GET.get('alpha') and request.GET.get('beta') and request.GET.get('gamma'):\n print 'point2'\n alpha = float(request.GET.get('alpha'))\n beta = float(request.GET.get('beta'))\n gamma = float(request.GET.get('gamma'))\n if request.GET.get('radius'):\n float(request.GET.get('radius'))\n else:\n radius = DEFAULT_RADIUS\n\n\n venuesInRadar = list()\n for i in venues:\n# print i\n if 'url' in i:\n print i['url']\n if i and i.get('location') and i.get('location').get('lat') and i.get('location').get('lng') and i.get('location').get('distance'):\n lat = float(i.get('location').get('lat'))\n lng = float(i.get('location').get('lng'))\n distance = float(i.get('location').get('distance'))\n print 'lat:', lat\n print 'lng:', lng\n print 'distance:', distance\n if venueInRadarRange(lat0, lng0, alpha, beta, gamma, radius, DEFAULT_SPREAD_ANGLE, lat, lng, distance):\n# res = {'lat' : lat, 'lng': lng, 'id' : i['id'].encode(\"utf-8\"), 'name' : i['name'].encode(\"utf-8\") }\n venuesInRadar.append(forthsquare.getVenueData(i))\n\n print len(venuesInRadar)\n print venuesInRadar\n print 'point3'\n else:\n venuesInRadar = list()\n for i in venues:\n venuesInRadar.append(forthsquare.getVenueData(i))\n\n '''\n if len(venuesInRadar) == 1:\n # return detailed information\n response = http.HttpResponse(venuesInRadar[0], \n content_type='application/json')\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n '''\n\n print venuesInRadar\n print type(venuesInRadar)\n print 'point4'\n response = http.HttpResponse(json.dumps(venuesInRadar), \n content_type='application/json')\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\ndef getCategoryId(category):\n for name,id in CATEGORIES:\n if name == 
category: \n return id\n\ndef comments(request):\n# id = '4a688ba1f964a52088ca1fe3'\n if not request.GET or not request.GET.get(u'id'):\n return http.HttpResponseBadRequest('Wrong data')\n\n id = request.GET.get(u'id')\n\n foursquareRequest = 'https://api.foursquare.com/v2/venues/%s/tips?sort=recent&oauth_token=%s&v=20140517' % (id, OAUTH_TOKEN)\n\n foursquareResponse = urllib2.urlopen(foursquareRequest)\n json_raw = foursquareResponse.read()\n# json_data = json.loads(json_raw)\n\n response = http.HttpResponse(json.dumps(json_raw), \n content_type='application/json')\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\ndef getPhoto(id):\n foursquareRequest = 'https://api.foursquare.com/v2/venues/%s/photos?limit=1&sort=recent&oauth_token=%s&v=20140517' % (id, OAUTH_TOKEN)\n\n foursquareResponse = urllib2.urlopen(foursquareRequest)\n json_raw = foursquareResponse.read()\n if not json_raw:\n return None\n\n json_data = json.loads(json_raw)\n print json_data\n\n return json_data\n\ndef photos(request):\n# id = '4a688ba1f964a52088ca1fe3'\n if not request.GET or not request.GET.get(u'id'):\n return http.HttpResponseBadRequest('Wrong data')\n\n id = request.GET.get(u'id')\n\n foursquareRequest = 'https://api.foursquare.com/v2/venues/%s/photos?limit=1&sort=recent&oauth_token=%s&v=20140517' % (id, OAUTH_TOKEN)\n\n foursquareResponse = urllib2.urlopen(foursquareRequest)\n json_raw = foursquareResponse.read()\n# json_data = json.loads(json_raw)\n\n if not json_raw:\n return http.DoesNotExist('photo does not exist')\n\n response = http.HttpResponse(json.dumps(json_raw), \n content_type='application/json')\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\ndef tweets(request):\n radius=\"0.02km\"\n if not request.GET or not request.GET.get(u'll'):\n return http.HttpResponseBadRequest('Wrong data')\n\n pos = request.GET.get(u'll')\n result = twitter.tweets(radius, pos)\n response = http.HttpResponse(result, \n content_type='application/json')\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, [email protected] and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class LogisticsPlanningTool(Document):
def autoname(self):
if self.customer:
self.name = "{0}-{1}-{2}".format(self.customer, self.territory, self.schedule_delivery_date)
else:
self.name = "{0}-{1}".format(self.territory, self.schedule_delivery_date)
@frappe.whitelist(True)
def get_atls(ps, pe, territory=None, customer=None, include_pending=None):
conds = ""
if territory and str(territory) != str("Nigeria"):
conds += ' AND territory = "%s" ' % territory
if customer:
conds += ' AND customer = "%s" ' % customer
if not include_pending:
conds += " AND delivery_date BETWEEN DATE('%s') AND DATE('%s') " % (ps, pe)
return frappe.db.sql(
"SELECT name as authority_to_load, IFNULL(delivery_date, transaction_date) as delivery_date , customer, territory from `tabAuthority to Load` WHERE name NOT IN (SELECT l.name FROM `tabLogistics Planning Tool` l INNER JOIN `tabLogistics Planning Tool Detail` c ON(l.name=c.parent) WHERE c.status != 'Delivered') %s ORDER BY territory " % (
conds), as_dict=1)
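# A minimal usage sketch (not part of the original file): because get_atls is
# whitelisted, it can be reached from the client via frappe.call with this module's
# dotted path, or called directly in a bench console. The date range, territory and
# field access below are made-up examples:
#
#   rows = get_atls("2018-06-01", "2018-06-30", territory="Lagos")
#   for row in rows:
#       print(row["authority_to_load"], row["delivery_date"], row["customer"])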
|
normal
|
{
"blob_id": "4cbb78234ef6e63b856099060ecaeea1779d6ac5",
"index": 8412,
"step-1": "<mask token>\n\n\nclass LogisticsPlanningTool(Document):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LogisticsPlanningTool(Document):\n\n def autoname(self):\n if self.customer:\n self.name = '{0}-{1}-{2}'.format(self.customer, self.territory,\n self.schedule_delivery_date)\n else:\n self.name = '{0}-{1}'.format(self.territory, self.\n schedule_delivery_date)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LogisticsPlanningTool(Document):\n\n def autoname(self):\n if self.customer:\n self.name = '{0}-{1}-{2}'.format(self.customer, self.territory,\n self.schedule_delivery_date)\n else:\n self.name = '{0}-{1}'.format(self.territory, self.\n schedule_delivery_date)\n\n\[email protected](True)\ndef get_atls(ps, pe, territory=None, customer=None, include_pending=None):\n conds = ''\n if territory and str(territory) != str('Nigeria'):\n conds += ' AND territory = \"%s\" ' % territory\n if customer:\n conds += ' AND customer = \"%s\" ' % customer\n if not include_pending:\n conds += \" AND delivery_date BETWEEN DATE('%s') AND DATE('%s') \" % (ps,\n pe)\n return frappe.db.sql(\n \"SELECT name as authority_to_load, IFNULL(delivery_date, transaction_date) as delivery_date , customer, territory from `tabAuthority to Load` WHERE name NOT IN (SELECT l.name FROM `tabLogistics Planning Tool` l INNER JOIN `tabLogistics Planning Tool Detail` c ON(l.name=c.parent) WHERE c.status != 'Delivered') %s ORDER BY territory \"\n % conds, as_dict=1)\n",
"step-4": "from __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\n\n\nclass LogisticsPlanningTool(Document):\n\n def autoname(self):\n if self.customer:\n self.name = '{0}-{1}-{2}'.format(self.customer, self.territory,\n self.schedule_delivery_date)\n else:\n self.name = '{0}-{1}'.format(self.territory, self.\n schedule_delivery_date)\n\n\[email protected](True)\ndef get_atls(ps, pe, territory=None, customer=None, include_pending=None):\n conds = ''\n if territory and str(territory) != str('Nigeria'):\n conds += ' AND territory = \"%s\" ' % territory\n if customer:\n conds += ' AND customer = \"%s\" ' % customer\n if not include_pending:\n conds += \" AND delivery_date BETWEEN DATE('%s') AND DATE('%s') \" % (ps,\n pe)\n return frappe.db.sql(\n \"SELECT name as authority_to_load, IFNULL(delivery_date, transaction_date) as delivery_date , customer, territory from `tabAuthority to Load` WHERE name NOT IN (SELECT l.name FROM `tabLogistics Planning Tool` l INNER JOIN `tabLogistics Planning Tool Detail` c ON(l.name=c.parent) WHERE c.status != 'Delivered') %s ORDER BY territory \"\n % conds, as_dict=1)\n",
"step-5": "# -*- coding: utf-8 -*-\n# Copyright (c) 2018, [email protected] and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\n\n\nclass LogisticsPlanningTool(Document):\n def autoname(self):\n if self.customer:\n self.name = \"{0}-{1}-{2}\".format(self.customer, self.territory, self.schedule_delivery_date)\n else:\n self.name = \"{0}-{1}\".format(self.territory, self.schedule_delivery_date)\n\n\[email protected](True)\ndef get_atls(ps, pe, territory=None, customer=None, include_pending=None):\n conds = \"\"\n\n if territory and str(territory) != str(\"Nigeria\"):\n conds += ' AND territory = \"%s\" ' % territory\n if customer:\n conds += ' AND customer = \"%s\" ' % customer\n\n if not include_pending:\n conds += \" AND delivery_date BETWEEN DATE('%s') AND DATE('%s') \" % (ps, pe)\n\n return frappe.db.sql(\n \"SELECT name as authority_to_load, IFNULL(delivery_date, transaction_date) as delivery_date , customer, territory from `tabAuthority to Load` WHERE name NOT IN (SELECT l.name FROM `tabLogistics Planning Tool` l INNER JOIN `tabLogistics Planning Tool Detail` c ON(l.name=c.parent) WHERE c.status != 'Delivered') %s ORDER BY territory \" % (\n conds), as_dict=1)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django import template
import random
register = template.Library()
@register.simple_tag
def random_quote():
"""Returns a random quote to be displayed on the community sandwich page"""
quotes = [
"Growth is never by mere chance; it is the result of forces working together.\n-James Cash Penney",
"We cannot accomplish all that we need to do without working together\n-Bill Richardson",
"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Gloria Macapagal Arroyo",
"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Jacqueline Novogratz",
"I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\n-Adrianne Palicki",
"Communism will win.\n-Slavoj Zizek",
]
return random.choice(quotes)
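# Because register.simple_tag returns the original function, the tag can be
# sanity-checked directly in Python; in a template it would be invoked as
# {% random_quote %} after loading this tag library (its load name depends on the
# file name, which is not shown here). For example:
#
#   print(random_quote())   # prints one of the six quotes at random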
|
normal
|
{
"blob_id": "6e73625adc10064cdb1b5f0546a4fc7320e9f5dc",
"index": 8366,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_tag\ndef random_quote():\n \"\"\"Returns a random quote to be displayed on the community sandwich page\"\"\"\n quotes = [\n \"\"\"Growth is never by mere chance; it is the result of forces working together.\n-James Cash Penney\"\"\"\n ,\n \"\"\"We cannot accomplish all that we need to do without working together\n-Bill Richardson\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Gloria Macapagal Arroyo\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Jacqueline Novogratz\"\"\"\n ,\n \"\"\"I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\n-Adrianne Palicki\"\"\"\n , \"\"\"Communism will win.\n-Slavoj Zizek\"\"\"]\n return random.choice(quotes)\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\[email protected]_tag\ndef random_quote():\n \"\"\"Returns a random quote to be displayed on the community sandwich page\"\"\"\n quotes = [\n \"\"\"Growth is never by mere chance; it is the result of forces working together.\n-James Cash Penney\"\"\"\n ,\n \"\"\"We cannot accomplish all that we need to do without working together\n-Bill Richardson\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Gloria Macapagal Arroyo\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Jacqueline Novogratz\"\"\"\n ,\n \"\"\"I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\n-Adrianne Palicki\"\"\"\n , \"\"\"Communism will win.\n-Slavoj Zizek\"\"\"]\n return random.choice(quotes)\n",
"step-4": "from django import template\nimport random\nregister = template.Library()\n\n\[email protected]_tag\ndef random_quote():\n \"\"\"Returns a random quote to be displayed on the community sandwich page\"\"\"\n quotes = [\n \"\"\"Growth is never by mere chance; it is the result of forces working together.\n-James Cash Penney\"\"\"\n ,\n \"\"\"We cannot accomplish all that we need to do without working together\n-Bill Richardson\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Gloria Macapagal Arroyo\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Jacqueline Novogratz\"\"\"\n ,\n \"\"\"I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\n-Adrianne Palicki\"\"\"\n , \"\"\"Communism will win.\n-Slavoj Zizek\"\"\"]\n return random.choice(quotes)\n",
"step-5": "from django import template\n\nimport random\n\nregister = template.Library()\n\n\[email protected]_tag\ndef random_quote():\n \"\"\"Returns a random quote to be displayed on the community sandwich page\"\"\"\n quotes = [\n \"Growth is never by mere chance; it is the result of forces working together.\\n-James Cash Penney\",\n \"We cannot accomplish all that we need to do without working together\\n-Bill Richardson\",\n \"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\\n-Gloria Macapagal Arroyo\",\n \"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\\n-Jacqueline Novogratz\",\n \"I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\\n-Adrianne Palicki\",\n \"Communism will win.\\n-Slavoj Zizek\",\n ]\n return random.choice(quotes)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# from https://web.archive.org/web/20121220025758/http://xkcd.com/actuary.py.txt
# script written by Randall Munroe. Most comments by Emily Cain (although there were a few brief ones explaining how the program worked before I looked at it)
# Summary of program (by Emily):
# this program takes inputs of current ages and genders to calculate the probability that any or all of those people will die in a certain time period.
# if you input a year (after the current year) or a number of years (less than the current year), the program will calculate the probability that anyone or everyone will die in that time period. Either way, the program also determines the number of years corresponding to certain probabilities of anyone or everyone dying.
# The program outputs these calculations in an easily readable form.
#!/usr/bin/python
import sys
import datetime
# The following description was written by Randall (the programmer).
# Calculates death probabilities based on Social Security
# actuarial tables for a given group of people.
# Run with a list of ages/genders and an optional timespan (or year in the future):
# python actuary.py 63m 80m 75f 73m 10
# or:
# python actuary.py 63m 80m 75f 73m 2022
# This will give statistics for that group, including
# various probabilities over 10 years. Years can be
# omitted and it will still give some statistics.
# If "Years" exceeds the current calendar year, it will be interpreted as a date.
#this is a list of lists. The outer list contains two inner lists, each of which is an actuarial table--one for men and one for women
bothtables=[[0.00756, 0.00052, 0.00035, 0.00025, 0.00020, 0.00018, 0.00017, 0.00016, 0.00014, 0.00011, 0.00009, 0.00010, 0.00015, 0.00027, 0.00043, 0.00061, 0.00078, 0.00094, 0.00107, 0.00119, 0.00131, 0.00142, 0.00149, 0.00151, 0.00148, 0.00143, 0.00140, 0.00138, 0.00137, 0.00139, 0.00141, 0.00143, 0.00147, 0.00152, 0.00158, 0.00165, 0.00174, 0.00186, 0.00202, 0.00221, 0.00243, 0.00267, 0.00291, 0.00317, 0.00344, 0.00373, 0.00405, 0.00441, 0.00480, 0.00524, 0.00573, 0.00623, 0.00671, 0.00714, 0.00756, 0.00800, 0.00853, 0.00917, 0.00995, 0.01086, 0.01190, 0.01301, 0.01413, 0.01522, 0.01635, 0.01760, 0.01906, 0.02073, 0.02265, 0.02482, 0.02729, 0.03001, 0.03289, 0.03592, 0.03918, 0.04292, 0.04715, 0.05173, 0.05665, 0.06206, 0.06821, 0.07522, 0.08302, 0.09163, 0.10119, 0.11183, 0.12367, 0.13679, 0.15124, 0.16702, 0.18414, 0.20255, 0.22224, 0.24314, 0.26520, 0.28709, 0.30846, 0.32891, 0.34803, 0.36544, 0.38371, 0.40289, 0.42304, 0.44419, 0.46640, 0.48972, 0.51421, 0.53992, 0.56691, 0.59526, 0.62502, 0.65628, 0.68909, 0.72354, 0.75972, 0.79771, 0.83759, 0.87947, 0.92345, 0.96962], [0.00615, 0.00041, 0.00025, 0.00018, 0.00015, 0.00014, 0.00014, 0.00013, 0.00012, 0.00011, 0.00010, 0.00010, 0.00012, 0.00016, 0.00021, 0.00028, 0.00034, 0.00039, 0.00042, 0.00043, 0.00045, 0.00047, 0.00048, 0.00049, 0.00050, 0.00051, 0.00052, 0.00053, 0.00056, 0.00059, 0.00063, 0.00068, 0.00073, 0.00078, 0.00084, 0.00091, 0.00098, 0.00108, 0.00118, 0.00130, 0.00144, 0.00158, 0.00173, 0.00189, 0.00206, 0.00225, 0.00244, 0.00264, 0.00285, 0.00306, 0.00329, 0.00355, 0.00382, 0.00409, 0.00437, 0.00468, 0.00505, 0.00549, 0.00603, 0.00665, 0.00736, 0.00813, 0.00890, 0.00967, 0.01047, 0.01136, 0.01239, 0.01357, 0.01491, 0.01641, 0.01816, 0.02008, 0.02210, 0.02418, 0.02641, 0.02902, 0.03206, 0.03538, 0.03899, 0.04301, 0.04766, 0.05307, 0.05922, 0.06618, 0.07403, 0.08285, 0.09270, 0.10365, 0.11574, 0.12899, 0.14343, 0.15907, 0.17591, 0.19393, 0.21312, 0.23254, 0.25193, 0.27097, 0.28933, 0.30670, 0.32510, 0.34460, 0.36528, 0.38720, 0.41043, 0.43505, 0.46116, 0.48883, 0.51816, 0.54925, 0.58220, 0.61714, 0.65416, 0.69341, 0.73502, 0.77912, 0.82587, 0.87542, 0.92345, 0.96962]]
def deathprob(age, years): # a formula to determine the probability a given person will die, with a number of years as input
#negative ages = female (this is Randall's comment)
act=[] #this is a list that will hold the relevant actuarial tables, male or female
if age<0: # if age is a negative number the person is female
act=bothtables[1] # use the second table (females)
age=-1*age # multiply age by -1 to make it positive
else:
act=bothtables[0] # use the first table (males)
    while(len(act)<int(age+years+2)): # slower/bloatier but keeps things clean (Randall's comment)
        act.append(act[-1]**0.5) # extends the table past its last age by appending the square root of the previous entry, so the death probability keeps rising toward 1
liveprob=1
i=0
iage=int(age) # age as integer
    fage=age%1 # fractional part of the age (e.g. 0.5 for age 63.5), used below to interpolate between table rows
while i<=years-1: #advance through this formula for each year between now and the date in question
        thisyear=(1-fage)*act[iage+i]+fage*act[iage+i+1] #probability of dying this year, linearly interpolated between the two surrounding whole-year ages
liveprob*=1-thisyear # multiply the overall probability they will survive by the probability they will survive this year
i+=1
if years%1: # Amortizes risk of dying over a partial year, which is (Randall's comment)
# 1-P(living last full year)^(year fraction) (Randall's comment)
lastyear=(1-fage)*act[iage+i]+fage*act[iage+i+1]
lastyearlive=1-lastyear
lastyearlive=lastyearlive**((years%1))
liveprob*=lastyearlive
    return 1-liveprob # return the probability they will die i.e. 1 - the probability they will live
def proballdie(ages, years): # probability everyone in the list will die by a certain year, given the list "ages" and the number of years
probsliving=[]
for i in ages:
probsliving.append(1-deathprob(i, years))
prod=1
for i in probsliving:
prod*=(1-i)
return prod
def probanydie(ages, years): #returns the probability that anyone in the list dies
probsliving=[]
for i in ages:
probsliving.append(1-deathprob(i, years))
prod=1
for i in probsliving:
prod*=i
return 1-prod
def calcexp(ages, prob, flag): #calculates life expectancy based on the ages list, the probability of dying (5, 50, 95%), and whether or not it is "flagged" as calculating the probability that all or any die
i=0
for interval in (10, 1, 0.1, 0.01): #loops through the numbers at left
probs=0
while(probs<prob): #while the variable "probs" is less than the input probability
i+=interval #increase i by 10, 1, .1 or .01
if flag==0: #if we want to know the probability that the entire group will die
probs=proballdie(ages, i)
else:
probs=probanydie(ages, i) #if we want to know the probability that any member of the group will die
i-=interval #subtract the current interval from i before returning to start the for loop again with the subtracted i
return i #returns a float
ages=[] # creates an empty list that will hold the ages of everyone you want to know about
# print sys.argv[1:]
for arg in sys.argv[1:]: #for each argument you have entered except the first one (which is the script name)
gender=1
years=1.0
if arg[-1]=='m' or arg[-1]=='M': #If the last character of the argument is M or m, then the person is male and we will use their age as a positive number
try:
ages.append(1*float(arg[:-1])) #try adding all but the last character of the argument to the ages table. The last character indicates gender, preceding characters indicate age.
except:
print "Error parsing argument", arg
elif arg[-1]=='f' or arg[-1]=='F': #if the last character of the argument is F or f, then the person is female and we will use their age as a negative number
try:
ages.append(-1*float(arg[:-1])) #try adding all but the last character of the argument, times -1 because female, to the ages table. The last character indicates gender, preceding characters indicate age.
except:
print "Error parsing argument", arg
else: #if the input appears to be neither a male or female person with the age, it is probably the time period we want to know about
try:
years=float(arg)
break
except:
print "Error parsing argument", arg
# shows user how to enter input correctly if they do it wrong
if not sys.argv[1:]:
print "The format is 'actuary.py 15m 80f 23', with a list of ages and a number of years to run the projections."
raise SystemExit
if not ages:
print "No ages specified. Format is 12m, 17f, etc."
raise SystemExit
# print "Ages:", ages
# print "Years:", years
(datetime.date.today()+datetime.timedelta(days=365.242191*1)).year #builds the date one year from today and takes its year; the result is discarded, so this line has no effect
someone_years=[calcexp(ages, 0.05, 1), # calcexp returns a float number of years; the values are converted to strings only when printed below
calcexp(ages, 0.5, 1),
calcexp(ages, 0.95, 1)]
someone_dates=[(datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[0])).year, # takes the above numbers and uses them to calculate a date based on today's date + total time.
(datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[1])).year,
(datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[2])).year]
print "There is a 5% chance of someone dying within", someone_years[0], "years (by", str(someone_dates[0])+")." #concatenates to avoid automatic space; must convert to string first.
print "There is a 50% chance of someone dying within", someone_years[1], "years (by", str(someone_dates[1])+")."
print "There is a 95% chance of someone dying within", someone_years[2], "years (by", str(someone_dates[2])+")."
print ""
if len(ages)>1: #only makes sense to do an everyone statement if there are multiple people.
everyone_years=[calcexp(ages, 0.05, 0),
calcexp(ages, 0.5, 0),
calcexp(ages, 0.95, 0)]
everyone_dates=[(datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[0])).year,
(datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[1])).year,
(datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[2])).year]
print "There is a 5% chance of everyone dying within", everyone_years[0], "years (by", str(everyone_dates[0])+")."
print "There is a 50% chance of everyone dying within", everyone_years[1], "years (by", str(everyone_dates[1])+")."
print "There is a 95% chance of everyone dying within", everyone_years[2], "years (by", str(everyone_dates[2])+")."
if years: # if the user has input year
yearword="years"
if years==1: # changes from plural to singular if "years" is 1, so it says "1 year" instead of "1 years"
yearword="year"
print ""
if years>datetime.date.today().year: # Program assumes years under current year are a number of years, and years over current year refer to the date. If input years is greater than the current year...
years=years-datetime.date.today().year #...recalculate the "years" variable to be the number of years in the future that year is
if len(ages)>1: #if there is more than one person being analyzed, we will look at the probability of everyone dying
p=100*proballdie(ages, years) # converts probability into a percentage by multiplying by 100
printable="" # the percentage we will print out
if p<0.001: # if the percentage is really low/almost impossible
printable="<0.001"
elif p>99.99: # if the percentage is really high/almost guaranteed
printable=">99.99"
else:
printable=str(p)[:5] # if the percentage is not at one of the above extremes we want to see the actual percentage in our output
print "Probability of all dying in", years, yearword+": ", printable+"%" #outputs the info in an easily readable format
p=100*probanydie(ages, years) #regardless of how many people there are we will want to know the probability anyone dies
printable="" # the percentage we will print out
if p<0.001:
printable="<0.001" # if the percentage is really low/almost impossible
elif p>99.99:
printable=">99.99" # if the percentage is really high/almost guaranteed
print p # I don't know why he is choosing to do this, it seems odd/inconsistent with rest of program
else:
printable=str(p)[:5] # convert p to a string and assign the first 5 characters to printable
print "Probability of a death within", years, yearword+":", printable+"%" #outputs the info in an easily readable format
raise SystemExit #leaves the program
|
normal
|
{
"blob_id": "f0702c8555ef07aac9e667c35b5b5fd85820ec54",
"index": 4355,
"step-1": "# from https://web.archive.org/web/20121220025758/http://xkcd.com/actuary.py.txt\n\n# script written by Randall Munroe. Most comments by Emily Cain (although there were a few brief ones explaining how the program worked before I looked at it)\n\n# Summary of program (by Emily):\n\n# this program takes inputs of current ages and genders to calculate the probability any or all of those people will die in a certain time period. \n\n# if you input a year (after the current year) or a number of years (less than the current year) the program will calculate the probability that anyone or everyone will die in that time period. Either way, the program also determines the number of years for certain probabilities of anyone or everyone dying. \n\n# The program outputs these calculations in an easily readable form. \n\n\n#!/usr/bin/python\nimport sys\nimport datetime\n\n# The following description was written by Randall (the programmer). \n\n# Calculates death probabilities based on Social Security\n# actuarial tables for a given group of people.\n\n# Run with a list of ages/genders and an optional timespan (or year in the future):\n\n# python actuary.py 63m 80m 75f 73m 10\n\n# or:\n\n# python actuary.py 63m 80m 75f 73m 2022\n\n# This will give statistics for that group, including\n# various probabilities over 10 years. Years can be\n# ommitted and it will still give some statistics.\n# If \"Years\" exceeds the current calendar year, it will be interpreted as a date.\n\n\n#this is a list of lists. The outer list contains two inner lists, each of which is an actuarial table--one for men and one for women\nbothtables=[[0.00756, 0.00052, 0.00035, 0.00025, 0.00020, 0.00018, 0.00017, 0.00016, 0.00014, 0.00011, 0.00009, 0.00010, 0.00015, 0.00027, 0.00043, 0.00061, 0.00078, 0.00094, 0.00107, 0.00119, 0.00131, 0.00142, 0.00149, 0.00151, 0.00148, 0.00143, 0.00140, 0.00138, 0.00137, 0.00139, 0.00141, 0.00143, 0.00147, 0.00152, 0.00158, 0.00165, 0.00174, 0.00186, 0.00202, 0.00221, 0.00243, 0.00267, 0.00291, 0.00317, 0.00344, 0.00373, 0.00405, 0.00441, 0.00480, 0.00524, 0.00573, 0.00623, 0.00671, 0.00714, 0.00756, 0.00800, 0.00853, 0.00917, 0.00995, 0.01086, 0.01190, 0.01301, 0.01413, 0.01522, 0.01635, 0.01760, 0.01906, 0.02073, 0.02265, 0.02482, 0.02729, 0.03001, 0.03289, 0.03592, 0.03918, 0.04292, 0.04715, 0.05173, 0.05665, 0.06206, 0.06821, 0.07522, 0.08302, 0.09163, 0.10119, 0.11183, 0.12367, 0.13679, 0.15124, 0.16702, 0.18414, 0.20255, 0.22224, 0.24314, 0.26520, 0.28709, 0.30846, 0.32891, 0.34803, 0.36544, 0.38371, 0.40289, 0.42304, 0.44419, 0.46640, 0.48972, 0.51421, 0.53992, 0.56691, 0.59526, 0.62502, 0.65628, 0.68909, 0.72354, 0.75972, 0.79771, 0.83759, 0.87947, 0.92345, 0.96962], [0.00615, 0.00041, 0.00025, 0.00018, 0.00015, 0.00014, 0.00014, 0.00013, 0.00012, 0.00011, 0.00010, 0.00010, 0.00012, 0.00016, 0.00021, 0.00028, 0.00034, 0.00039, 0.00042, 0.00043, 0.00045, 0.00047, 0.00048, 0.00049, 0.00050, 0.00051, 0.00052, 0.00053, 0.00056, 0.00059, 0.00063, 0.00068, 0.00073, 0.00078, 0.00084, 0.00091, 0.00098, 0.00108, 0.00118, 0.00130, 0.00144, 0.00158, 0.00173, 0.00189, 0.00206, 0.00225, 0.00244, 0.00264, 0.00285, 0.00306, 0.00329, 0.00355, 0.00382, 0.00409, 0.00437, 0.00468, 0.00505, 0.00549, 0.00603, 0.00665, 0.00736, 0.00813, 0.00890, 0.00967, 0.01047, 0.01136, 0.01239, 0.01357, 0.01491, 0.01641, 0.01816, 0.02008, 0.02210, 0.02418, 0.02641, 0.02902, 0.03206, 0.03538, 0.03899, 0.04301, 0.04766, 0.05307, 0.05922, 0.06618, 0.07403, 0.08285, 0.09270, 0.10365, 0.11574, 0.12899, 0.14343, 0.15907, 
0.17591, 0.19393, 0.21312, 0.23254, 0.25193, 0.27097, 0.28933, 0.30670, 0.32510, 0.34460, 0.36528, 0.38720, 0.41043, 0.43505, 0.46116, 0.48883, 0.51816, 0.54925, 0.58220, 0.61714, 0.65416, 0.69341, 0.73502, 0.77912, 0.82587, 0.87542, 0.92345, 0.96962]]\n\ndef deathprob(age, years): # a formula to determine the probability a given person will die, with a number of years as input \n #negative ages = female (this is Randall's comment)\n act=[] #this is a list that will hold the relevant actuarial tables, male or female \n if age<0: # if age is a negative number the person is female \n act=bothtables[1] # use the second table (females)\n age=-1*age # multiply age by -1 to make it positive\n else:\n act=bothtables[0] # use the first table (males)\n while(len(act)<int(age+years+2)): # slower/bloaiter but keeps things clean (Randall's comment)\n act.append(act[-1]**0.5) # I'm not sure what this does \n liveprob=1 \n i=0\n iage=int(age) # age as integer \n fage=age%1 # fraction after age if it's a mixed number? maybe? \n while i<=years-1: #advance through this formula for each year between now and the date in question \n thisyear=(1-fage)*act[iage+i]+fage*act[iage+i+1] #the probability they will die this year is equal to this formula\n liveprob*=1-thisyear # multiply the overall probability they will survive by the probability they will survive this year \n i+=1\n if years%1: # Amortizes risk of dying over a partial year, which is (Randall's comment)\n # 1-P(living last full year)^(year fraction) (Randall's comment)\n lastyear=(1-fage)*act[iage+i]+fage*act[iage+i+1] \n lastyearlive=1-lastyear\n lastyearlive=lastyearlive**((years%1))\n liveprob*=lastyearlive\n return 1-liveprob # return the probability they will die i.e. 1 - the probability they wil live \n\ndef proballdie(ages, years): # probability everyone in the list will die by a certain year, given the list \"ages\" and the number of years \n probsliving=[]\n for i in ages:\n probsliving.append(1-deathprob(i, years))\n prod=1\n for i in probsliving:\n prod*=(1-i)\n return prod\n\ndef probanydie(ages, years): #returns the probability that anyone in the list dies\n probsliving=[]\n for i in ages: \n probsliving.append(1-deathprob(i, years))\n prod=1\n for i in probsliving:\n prod*=i\n return 1-prod\n\ndef calcexp(ages, prob, flag): #calculates life expectancy based on the ages list, the probability of dying (5, 50, 95%), and whether or not it is \"flagged\" as calculating the probability that all or any die \n i=0\n for interval in (10, 1, 0.1, 0.01): #loops through the numbers at left\n probs=0\n while(probs<prob): #while the variable \"probs\" is less than the input probability\n i+=interval #increase i by 10, 1, .1 or .01\n if flag==0: #if we want to know the probability that the entire group will die \n probs=proballdie(ages, i)\n else:\n probs=probanydie(ages, i) #if we want to know the probability that any member of the group will die\n i-=interval #subtract the current interval from i before returning to start the for loop again with the subtracted i \n return i #returns a float \n\nages=[] # creates an empty list that will hold the ages of everyone you want to know about\n# print sys.argv[1:]\nfor arg in sys.argv[1:]: #for each argument you have entered except the first one (which is the script name)\n gender=1\n years=1.0\n if arg[-1]=='m' or arg[-1]=='M': #If the last character of the argument is M or m, then the person is male and we will use their age as a positive number\n try:\n ages.append(1*float(arg[:-1])) #try adding all but 
the last character of the argument to the ages table. The last character indicates gender, preceding characters indicate age. \n except:\n print \"Error parsing argument\", arg\n elif arg[-1]=='f' or arg[-1]=='F': #if the last character of the argument is F or f, then the person is female and we will use their age as a negative number \n try:\n ages.append(-1*float(arg[:-1])) #try adding all but the last character of the argument, times -1 because female, to the ages table. The last character indicates gender, preceding characters indicate age. \n except:\n print \"Error parsing argument\", arg\n else: #if the input appears to be neither a male or female person with the age, it is probably the time period we want to know about\n try:\n years=float(arg)\n break\n except:\n print \"Error parsing argument\", arg\n\n# shows user how to enter input correctly if they do it wrong\nif not sys.argv[1:]:\n print \"The format is 'actuary.py 15m 80f 23', with a list of ages and a number of years to run the projections.\"\n raise SystemExit\nif not ages:\n print \"No ages specified. Format is 12m, 17f, etc.\"\n raise SystemExit\n\n# print \"Ages:\", ages\n# print \"Years:\", years\n\n(datetime.date.today()+datetime.timedelta(days=365.242191*1)).year #adding date object to a timedelta object to get a date object. finds its year. does ??? with it \nsomeone_years=[calcexp(ages, 0.05, 1), # this returns a list of floats, probably. Or strings???? used as strings below\n calcexp(ages, 0.5, 1),\n calcexp(ages, 0.95, 1)]\nsomeone_dates=[(datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[0])).year, # takes the above numbers and uses them to calculate a date based on today's date + total time. \n (datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[1])).year,\n (datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[2])).year]\nprint \"There is a 5% chance of someone dying within\", someone_years[0], \"years (by\", str(someone_dates[0])+\").\" #concatenates to avoid automatic space; must convert to string first. \nprint \"There is a 50% chance of someone dying within\", someone_years[1], \"years (by\", str(someone_dates[1])+\").\"\nprint \"There is a 95% chance of someone dying within\", someone_years[2], \"years (by\", str(someone_dates[2])+\").\"\nprint \"\"\n\nif len(ages)>1: #only makes sense to do an everyone statement if there are multiple people. \n everyone_years=[calcexp(ages, 0.05, 0),\n calcexp(ages, 0.5, 0),\n calcexp(ages, 0.95, 0)]\n everyone_dates=[(datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[0])).year,\n (datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[1])).year,\n (datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[2])).year]\n print \"There is a 5% chance of everyone dying within\", everyone_years[0], \"years (by\", str(everyone_dates[0])+\").\"\n print \"There is a 50% chance of everyone dying within\", everyone_years[1], \"years (by\", str(everyone_dates[1])+\").\"\n print \"There is a 95% chance of everyone dying within\", everyone_years[2], \"years (by\", str(everyone_dates[2])+\").\"\n\n\nif years: # if the user has input year \n yearword=\"years\" \n if years==1: # changes from plural to singular if \"years\" is 1, so it says \"1 year\" instead of \"1 years\"\n yearword=\"year\"\n\n print \"\"\n if years>datetime.date.today().year: # Program assumes years under current year are a number of years, and years over current year refer to the date. 
If input years is greater than the current year...\n years=years-datetime.date.today().year #...recalculate the \"years\" variable to be the number of years in the future that year is\n if len(ages)>1: #if there is more than one person being analyzed, we will look at the probability of everyone dying \n p=100*proballdie(ages, years) # converts probability into a percentage by multiplying by 100 \n printable=\"\" # the percentage we will print out \n if p<0.001: # if the percentage is really low/almost impossible \n printable=\"<0.001\"\n elif p>99.99: # if the percentage is really high/almost guaranteed \n printable=\">99.99\" \n else:\n printable=str(p)[:5] # if the percentage is not at one of the above extremes we want to see the actual percentage in our output\n print \"Probability of all dying in\", years, yearword+\": \", printable+\"%\" #outputs the info in an easily readable format\n p=100*probanydie(ages, years) #regardless of how many people there are we will want to know the probability anyone dies \n printable=\"\" # the percentage we will print out \n if p<0.001:\n printable=\"<0.001\" # if the percentage is really low/almost impossible\n elif p>99.99:\n printable=\">99.99\" # if the percentage is really high/almost guaranteed \n print p # I don't know why he is choosing to do this, it seems odd/inconsistent with rest of program \n else:\n printable=str(p)[:5] # convert p to a string and assign the first 5 characters to printable \n print \"Probability of a death within\", years, yearword+\":\", printable+\"%\" #outputs the info in an easily readable format\nraise SystemExit #leaves the program \n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
num=int(input())
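# note: the value read above is never used; the loop below always prints 10 down to 1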
i=10
while i>=1:
print(i,end=" ")
i-=1
|
normal
|
{
"blob_id": "ec0113dbd79e936e614bb7ee7e48d29aa616d511",
"index": 7389,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i >= 1:\n print(i, end=' ')\n i -= 1\n",
"step-3": "num = int(input())\ni = 10\nwhile i >= 1:\n print(i, end=' ')\n i -= 1\n",
"step-4": "num=int(input())\r\ni=10\r\nwhile i>=1:\r\n print(i,end=\" \")\r\n i-=1\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import sys
import errno
# read first line from stdin and discard it
first_line = sys.stdin.readline()
# print all other lines
for line in sys.stdin:
try:
print line,
except IOError, e:
if e.errno == errno.EPIPE:
exit(0)
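# Example (file name here is an assumption; the script itself targets Python 2):
#   $ seq 1 5 | python skip_first_line.py
#   2
#   3
#   4
#   5
# The EPIPE check lets the script exit quietly when the downstream reader closes
# early, e.g. when its output is piped into `head`.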
|
normal
|
{
"blob_id": "bd06b04666ade1e7591b02f8211bc9b62fd08936",
"index": 791,
"step-1": "#!/usr/bin/env python\nimport sys\nimport errno\n\n# read first line from stdin and discard it\nfirst_line = sys.stdin.readline()\n\n# print all other lines\nfor line in sys.stdin:\n try:\n print line,\n except IOError, e:\n if e.errno == errno.EPIPE:\n exit(0)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import math
def math_builtins():
assert abs(-123) == 123
assert abs(-123.456) == 123.456
assert abs(2+3j) == math.sqrt(2**2 + 3**2)
assert divmod(5, 2) == (2, 1)
assert max(1, 2, 3, 4) == 4
assert min(1, 2, 3, 4) == 1
a = 2
b = 3
c = 7
assert pow(a, b) == a ** b
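    # the three-argument form computes (a ** b) % c via efficient modular exponentiation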
assert pow(a, b, c) == a ** b % c
assert round(123.05) == 123
assert round(123.65) == 124
assert round(-123.05) == -123
assert round(-123.65) == -124
assert round(123.65, 1) == 123.7
assert round(-123.65, 1) == -123.7
lst = [1, 2, 3]
assert sum(lst) == 6
def math_module_constants():
assert math.pi == 3.141592653589793
assert math.tau == 6.283185307179586
assert math.e == 2.718281828459045
x = float('NaN')
assert math.isnan(x)
x = float('inf')
assert math.isinf(x)
x = math.inf
assert math.isinf(x)
x = -math.inf
assert math.isinf(x)
def math_module():
x = -1.23
assert math.fabs(x) == 1.23
if __name__ == "__main__":
math_builtins()
math_module_constants()
math_module()
|
normal
|
{
"blob_id": "c77db71844c65eb96946ac0cc384de43ad49ca99",
"index": 6007,
"step-1": "<mask token>\n\n\ndef math_builtins():\n assert abs(-123) == 123\n assert abs(-123.456) == 123.456\n assert abs(2 + 3.0j) == math.sqrt(2 ** 2 + 3 ** 2)\n assert divmod(5, 2) == (2, 1)\n assert max(1, 2, 3, 4) == 4\n assert min(1, 2, 3, 4) == 1\n a = 2\n b = 3\n c = 7\n assert pow(a, b) == a ** b\n assert pow(a, b, c) == a ** b % c\n assert round(123.05) == 123\n assert round(123.65) == 124\n assert round(-123.05) == -123\n assert round(-123.65) == -124\n assert round(123.65, 1) == 123.7\n assert round(-123.65, 1) == -123.7\n lst = [1, 2, 3]\n assert sum(lst) == 6\n\n\ndef math_module_constants():\n assert math.pi == 3.141592653589793\n assert math.tau == 6.283185307179586\n assert math.e == 2.718281828459045\n x = float('NaN')\n assert math.isnan(x)\n x = float('inf')\n assert math.isinf(x)\n x = math.inf\n assert math.isinf(x)\n x = -math.inf\n assert math.isinf(x)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef math_builtins():\n assert abs(-123) == 123\n assert abs(-123.456) == 123.456\n assert abs(2 + 3.0j) == math.sqrt(2 ** 2 + 3 ** 2)\n assert divmod(5, 2) == (2, 1)\n assert max(1, 2, 3, 4) == 4\n assert min(1, 2, 3, 4) == 1\n a = 2\n b = 3\n c = 7\n assert pow(a, b) == a ** b\n assert pow(a, b, c) == a ** b % c\n assert round(123.05) == 123\n assert round(123.65) == 124\n assert round(-123.05) == -123\n assert round(-123.65) == -124\n assert round(123.65, 1) == 123.7\n assert round(-123.65, 1) == -123.7\n lst = [1, 2, 3]\n assert sum(lst) == 6\n\n\ndef math_module_constants():\n assert math.pi == 3.141592653589793\n assert math.tau == 6.283185307179586\n assert math.e == 2.718281828459045\n x = float('NaN')\n assert math.isnan(x)\n x = float('inf')\n assert math.isinf(x)\n x = math.inf\n assert math.isinf(x)\n x = -math.inf\n assert math.isinf(x)\n\n\ndef math_module():\n x = -1.23\n assert math.fabs(x) == 1.23\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef math_builtins():\n assert abs(-123) == 123\n assert abs(-123.456) == 123.456\n assert abs(2 + 3.0j) == math.sqrt(2 ** 2 + 3 ** 2)\n assert divmod(5, 2) == (2, 1)\n assert max(1, 2, 3, 4) == 4\n assert min(1, 2, 3, 4) == 1\n a = 2\n b = 3\n c = 7\n assert pow(a, b) == a ** b\n assert pow(a, b, c) == a ** b % c\n assert round(123.05) == 123\n assert round(123.65) == 124\n assert round(-123.05) == -123\n assert round(-123.65) == -124\n assert round(123.65, 1) == 123.7\n assert round(-123.65, 1) == -123.7\n lst = [1, 2, 3]\n assert sum(lst) == 6\n\n\ndef math_module_constants():\n assert math.pi == 3.141592653589793\n assert math.tau == 6.283185307179586\n assert math.e == 2.718281828459045\n x = float('NaN')\n assert math.isnan(x)\n x = float('inf')\n assert math.isinf(x)\n x = math.inf\n assert math.isinf(x)\n x = -math.inf\n assert math.isinf(x)\n\n\ndef math_module():\n x = -1.23\n assert math.fabs(x) == 1.23\n\n\nif __name__ == '__main__':\n math_builtins()\n math_module_constants()\n math_module()\n",
"step-4": "import math\n\n\ndef math_builtins():\n assert abs(-123) == 123\n assert abs(-123.456) == 123.456\n assert abs(2 + 3.0j) == math.sqrt(2 ** 2 + 3 ** 2)\n assert divmod(5, 2) == (2, 1)\n assert max(1, 2, 3, 4) == 4\n assert min(1, 2, 3, 4) == 1\n a = 2\n b = 3\n c = 7\n assert pow(a, b) == a ** b\n assert pow(a, b, c) == a ** b % c\n assert round(123.05) == 123\n assert round(123.65) == 124\n assert round(-123.05) == -123\n assert round(-123.65) == -124\n assert round(123.65, 1) == 123.7\n assert round(-123.65, 1) == -123.7\n lst = [1, 2, 3]\n assert sum(lst) == 6\n\n\ndef math_module_constants():\n assert math.pi == 3.141592653589793\n assert math.tau == 6.283185307179586\n assert math.e == 2.718281828459045\n x = float('NaN')\n assert math.isnan(x)\n x = float('inf')\n assert math.isinf(x)\n x = math.inf\n assert math.isinf(x)\n x = -math.inf\n assert math.isinf(x)\n\n\ndef math_module():\n x = -1.23\n assert math.fabs(x) == 1.23\n\n\nif __name__ == '__main__':\n math_builtins()\n math_module_constants()\n math_module()\n",
"step-5": "import math\n\n\ndef math_builtins():\n assert abs(-123) == 123\n assert abs(-123.456) == 123.456\n assert abs(2+3j) == math.sqrt(2**2 + 3**2)\n\n assert divmod(5, 2) == (2, 1)\n assert max(1, 2, 3, 4) == 4\n assert min(1, 2, 3, 4) == 1\n\n a = 2\n b = 3\n c = 7\n assert pow(a, b) == a ** b\n assert pow(a, b, c) == a ** b % c\n\n assert round(123.05) == 123\n assert round(123.65) == 124\n\n assert round(-123.05) == -123\n assert round(-123.65) == -124\n\n assert round(123.65, 1) == 123.7\n assert round(-123.65, 1) == -123.7\n\n lst = [1, 2, 3]\n assert sum(lst) == 6\n\n\ndef math_module_constants():\n assert math.pi == 3.141592653589793\n assert math.tau == 6.283185307179586\n assert math.e == 2.718281828459045\n\n x = float('NaN')\n assert math.isnan(x)\n\n x = float('inf')\n assert math.isinf(x)\n x = math.inf\n assert math.isinf(x)\n x = -math.inf\n assert math.isinf(x)\n\n\ndef math_module():\n x = -1.23\n assert math.fabs(x) == 1.23\n\n\nif __name__ == \"__main__\":\n math_builtins()\n math_module_constants()\n math_module()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.test import TestCase
from .models import Post, Category, Tag
# Create your tests here.
class TestPost(TestCase):
def test_str(self):
my_title = Post(title='This is a basic title for a basic test case')
self.assertEquals(str(my_title), 'This is a basic title for a basic test case')
class TestCategory(TestCase):
def test_str(self):
category = Category(name='Test Category')
self.assertEquals(str(category), 'Test Category')
class TestTag(TestCase):
def test_str(self):
tag = Tag(name='Test Tag')
self.assertEquals(str(tag), 'Test Tag')
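# These tests can be run with Django's standard test runner, e.g.
# `python manage.py test <app_label>` from the project root (the app label is not
# shown in this file).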
|
normal
|
{
"blob_id": "825c9510b055c0fa570f577b1c9616e8bde9c98b",
"index": 7653,
"step-1": "<mask token>\n\n\nclass TestCategory(TestCase):\n\n def test_str(self):\n category = Category(name='Test Category')\n self.assertEquals(str(category), 'Test Category')\n\n\nclass TestTag(TestCase):\n\n def test_str(self):\n tag = Tag(name='Test Tag')\n self.assertEquals(str(tag), 'Test Tag')\n",
"step-2": "<mask token>\n\n\nclass TestPost(TestCase):\n <mask token>\n\n\nclass TestCategory(TestCase):\n\n def test_str(self):\n category = Category(name='Test Category')\n self.assertEquals(str(category), 'Test Category')\n\n\nclass TestTag(TestCase):\n\n def test_str(self):\n tag = Tag(name='Test Tag')\n self.assertEquals(str(tag), 'Test Tag')\n",
"step-3": "<mask token>\n\n\nclass TestPost(TestCase):\n\n def test_str(self):\n my_title = Post(title='This is a basic title for a basic test case')\n self.assertEquals(str(my_title),\n 'This is a basic title for a basic test case')\n\n\nclass TestCategory(TestCase):\n\n def test_str(self):\n category = Category(name='Test Category')\n self.assertEquals(str(category), 'Test Category')\n\n\nclass TestTag(TestCase):\n\n def test_str(self):\n tag = Tag(name='Test Tag')\n self.assertEquals(str(tag), 'Test Tag')\n",
"step-4": "from django.test import TestCase\nfrom .models import Post, Category, Tag\n\n\nclass TestPost(TestCase):\n\n def test_str(self):\n my_title = Post(title='This is a basic title for a basic test case')\n self.assertEquals(str(my_title),\n 'This is a basic title for a basic test case')\n\n\nclass TestCategory(TestCase):\n\n def test_str(self):\n category = Category(name='Test Category')\n self.assertEquals(str(category), 'Test Category')\n\n\nclass TestTag(TestCase):\n\n def test_str(self):\n tag = Tag(name='Test Tag')\n self.assertEquals(str(tag), 'Test Tag')\n",
"step-5": "from django.test import TestCase\n\nfrom .models import Post, Category, Tag\n\n# Create your tests here.\n\nclass TestPost(TestCase):\n\n def test_str(self):\n my_title = Post(title='This is a basic title for a basic test case')\n self.assertEquals(str(my_title), 'This is a basic title for a basic test case')\n\nclass TestCategory(TestCase):\n\n def test_str(self):\n category = Category(name='Test Category')\n self.assertEquals(str(category), 'Test Category')\n\nclass TestTag(TestCase):\n\n def test_str(self):\n tag = Tag(name='Test Tag')\n self.assertEquals(str(tag), 'Test Tag')\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.preprocessing import normalize
def blackbox_function(x, y=None, sim=False):
if sim:
if y is None:
return -x ** 2 + 6
else:
return -(x+y) ** 2 + 6
# Reading the magnitude of the N170 data
filename = 'Output.txt'
lines = open(filename).read().splitlines()
try:
latency = float(lines[-1])
except ValueError:
print('Failed to convert value to float')
wait = input("PRESS ENTER TO CONTINUE.")
lines = open(filename).read().splitlines()
latency = float(lines[-1])
except IndexError:
print('The latent file is empty')
wait = input("PRESS ENTER TO CONTINUE.")
lines = open(filename).read().splitlines()
latency = float(lines[-1])
return latency
def obtain_confidence(sim=False):
if sim:
noise = np.random.normal(0, 0.60, size=1)[0]
return noise
# Reading the Confidence levels of the target value
filename = 'Confidence.txt'
lines = open(filename).read().splitlines()
try:
confidence = float(lines[-1])
except ValueError:
print('Failed to convert confidence value to float')
wait = input("PRESS ENTER TO CONTINUE.")
lines = open(filename).read().splitlines()
confidence = float(lines[-1])
except IndexError:
print('The confidence file is empty')
wait = input("PRESS ENTER TO CONTINUE.")
lines = open(filename).read().splitlines()
confidence = float(lines[-1])
return confidence
def posterior(optimizer, x_obs, y_obs, grid):
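    # refit the optimizer's internal Gaussian process on the observed points and
    # return its posterior mean and standard deviation over the plotting grid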
optimizer._gp.fit(x_obs, y_obs)
mu, sigma = optimizer._gp.predict(grid, return_std=True)
return mu, sigma
def plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):
fig = plt.figure(figsize=(16, 10))
steps = len(optimizer.res)
fig.suptitle(
'Gaussian Process and Utility Function After {} Steps'.format(steps),
fontdict={'size': 30}
)
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
# x_obs = np.array([[res["params"]["x"]] for res in optimizer.res])
# y_obs = np.array([res["target"] for res in optimizer.res])
x_obs = np.array([[res["params"]["x"]] for res in optimizer.res])
y_obs = np.array([res["target"] for res in optimizer.res])
y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)
y_obs = y_obs.flatten()
mu, sigma = posterior(optimizer, x_obs, y_obs, x)
utility = utility_function.utility(x, optimizer._gp, y_obs.max())
# Unnormalize data
mu = mu*norm
sigma = sigma*norm
y_obs = y_obs*norm
if y is not None:
axis.plot(x, y, linewidth=3, label='Target')
axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=u'Observations', color='r')
axis.plot(x, mu, '--', color='k', label='Prediction')
axis.fill(np.concatenate([x, x[::-1]]),
np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),
alpha=.6, fc='c', ec='None', label='95% confidence interval')
# if(bounds == "large"):
# axis.set_xlim((-1, 1))
# else:
# axis.set_xlim((0, 1))
# axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size': 20})
axis.set_xlabel('x', fontdict={'size': 20})
# utility = utility_function.utility(x, optimizer._gp, 0)
acq.plot(x, utility, label='Utility Function', color='purple')
acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
# if (bounds == "large"):
# acq.set_xlim((-1, 1))
# else:
# acq.set_xlim((0, 1))
# acq.set_ylim((0, np.max(utility) + 0.5))
acq.set_ylabel('Utility', fontdict={'size': 20})
acq.set_xlabel('x', fontdict={'size': 20})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
fig.savefig(logpath+'/fig_{}'.format(i))
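
# --- A minimal usage sketch, not part of the original file. It assumes the
# `bayes_opt` package (BayesianOptimization / UtilityFunction), whose internals
# (optimizer.res, optimizer._gp) the helpers above already rely on, and it runs in
# the simulated mode so no Output.txt / Confidence.txt files are needed. ---
if __name__ == "__main__":
    from bayes_opt import BayesianOptimization, UtilityFunction

    grid = np.linspace(-1, 1, 500).reshape(-1, 1)        # x grid used for plotting
    optimizer = BayesianOptimization(f=None, pbounds={'x': (-1, 1)}, verbose=0)
    utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)

    for step in range(5):
        probe = optimizer.suggest(utility)               # next x to evaluate
        target = blackbox_function(probe['x'], sim=True) + obtain_confidence(sim=True)
        optimizer.register(params=probe, target=target)  # feed the observation back
        plot_gp(optimizer, logpath='.', i=step, utility_function=utility,
                bounds="large", x=grid, y=blackbox_function(grid, sim=True))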
|
normal
|
{
"blob_id": "6defbe25fc17e53df2fc4d32886bba1cb141bdfd",
"index": 7018,
"step-1": "<mask token>\n\n\ndef obtain_confidence(sim=False):\n if sim:\n noise = np.random.normal(0, 0.6, size=1)[0]\n return noise\n filename = 'Confidence.txt'\n lines = open(filename).read().splitlines()\n try:\n confidence = float(lines[-1])\n except ValueError:\n print('Failed to convert confidence value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n except IndexError:\n print('The confidence file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n return confidence\n\n\n<mask token>\n\n\ndef plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):\n fig = plt.figure(figsize=(16, 10))\n steps = len(optimizer.res)\n fig.suptitle('Gaussian Process and Utility Function After {} Steps'.\n format(steps), fontdict={'size': 30})\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n x_obs = np.array([[res['params']['x']] for res in optimizer.res])\n y_obs = np.array([res['target'] for res in optimizer.res])\n y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)\n y_obs = y_obs.flatten()\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n utility = utility_function.utility(x, optimizer._gp, y_obs.max())\n mu = mu * norm\n sigma = sigma * norm\n y_obs = y_obs * norm\n if y is not None:\n axis.plot(x, y, linewidth=3, label='Target')\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=\n u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n axis.fill(np.concatenate([x, x[::-1]]), np.concatenate([mu - 1.96 *\n sigma, (mu + 1.96 * sigma)[::-1]]), alpha=0.6, fc='c', ec='None',\n label='95% confidence interval')\n axis.set_ylabel('f(x)', fontdict={'size': 20})\n axis.set_xlabel('x', fontdict={'size': 20})\n acq.plot(x, utility, label='Utility Function', color='purple')\n acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,\n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor=\n 'k', markeredgewidth=1)\n acq.set_ylabel('Utility', fontdict={'size': 20})\n acq.set_xlabel('x', fontdict={'size': 20})\n axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n fig.savefig(logpath + '/fig_{}'.format(i))\n",
"step-2": "<mask token>\n\n\ndef blackbox_function(x, y=None, sim=False):\n if sim:\n if y is None:\n return -x ** 2 + 6\n else:\n return -(x + y) ** 2 + 6\n filename = 'Output.txt'\n lines = open(filename).read().splitlines()\n try:\n latency = float(lines[-1])\n except ValueError:\n print('Failed to convert value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n except IndexError:\n print('The latent file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n return latency\n\n\ndef obtain_confidence(sim=False):\n if sim:\n noise = np.random.normal(0, 0.6, size=1)[0]\n return noise\n filename = 'Confidence.txt'\n lines = open(filename).read().splitlines()\n try:\n confidence = float(lines[-1])\n except ValueError:\n print('Failed to convert confidence value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n except IndexError:\n print('The confidence file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n return confidence\n\n\n<mask token>\n\n\ndef plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):\n fig = plt.figure(figsize=(16, 10))\n steps = len(optimizer.res)\n fig.suptitle('Gaussian Process and Utility Function After {} Steps'.\n format(steps), fontdict={'size': 30})\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n x_obs = np.array([[res['params']['x']] for res in optimizer.res])\n y_obs = np.array([res['target'] for res in optimizer.res])\n y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)\n y_obs = y_obs.flatten()\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n utility = utility_function.utility(x, optimizer._gp, y_obs.max())\n mu = mu * norm\n sigma = sigma * norm\n y_obs = y_obs * norm\n if y is not None:\n axis.plot(x, y, linewidth=3, label='Target')\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=\n u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n axis.fill(np.concatenate([x, x[::-1]]), np.concatenate([mu - 1.96 *\n sigma, (mu + 1.96 * sigma)[::-1]]), alpha=0.6, fc='c', ec='None',\n label='95% confidence interval')\n axis.set_ylabel('f(x)', fontdict={'size': 20})\n axis.set_xlabel('x', fontdict={'size': 20})\n acq.plot(x, utility, label='Utility Function', color='purple')\n acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,\n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor=\n 'k', markeredgewidth=1)\n acq.set_ylabel('Utility', fontdict={'size': 20})\n acq.set_xlabel('x', fontdict={'size': 20})\n axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n fig.savefig(logpath + '/fig_{}'.format(i))\n",
"step-3": "<mask token>\n\n\ndef blackbox_function(x, y=None, sim=False):\n if sim:\n if y is None:\n return -x ** 2 + 6\n else:\n return -(x + y) ** 2 + 6\n filename = 'Output.txt'\n lines = open(filename).read().splitlines()\n try:\n latency = float(lines[-1])\n except ValueError:\n print('Failed to convert value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n except IndexError:\n print('The latent file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n return latency\n\n\ndef obtain_confidence(sim=False):\n if sim:\n noise = np.random.normal(0, 0.6, size=1)[0]\n return noise\n filename = 'Confidence.txt'\n lines = open(filename).read().splitlines()\n try:\n confidence = float(lines[-1])\n except ValueError:\n print('Failed to convert confidence value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n except IndexError:\n print('The confidence file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n return confidence\n\n\ndef posterior(optimizer, x_obs, y_obs, grid):\n optimizer._gp.fit(x_obs, y_obs)\n mu, sigma = optimizer._gp.predict(grid, return_std=True)\n return mu, sigma\n\n\ndef plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):\n fig = plt.figure(figsize=(16, 10))\n steps = len(optimizer.res)\n fig.suptitle('Gaussian Process and Utility Function After {} Steps'.\n format(steps), fontdict={'size': 30})\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n x_obs = np.array([[res['params']['x']] for res in optimizer.res])\n y_obs = np.array([res['target'] for res in optimizer.res])\n y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)\n y_obs = y_obs.flatten()\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n utility = utility_function.utility(x, optimizer._gp, y_obs.max())\n mu = mu * norm\n sigma = sigma * norm\n y_obs = y_obs * norm\n if y is not None:\n axis.plot(x, y, linewidth=3, label='Target')\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=\n u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n axis.fill(np.concatenate([x, x[::-1]]), np.concatenate([mu - 1.96 *\n sigma, (mu + 1.96 * sigma)[::-1]]), alpha=0.6, fc='c', ec='None',\n label='95% confidence interval')\n axis.set_ylabel('f(x)', fontdict={'size': 20})\n axis.set_xlabel('x', fontdict={'size': 20})\n acq.plot(x, utility, label='Utility Function', color='purple')\n acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,\n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor=\n 'k', markeredgewidth=1)\n acq.set_ylabel('Utility', fontdict={'size': 20})\n acq.set_xlabel('x', fontdict={'size': 20})\n axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n fig.savefig(logpath + '/fig_{}'.format(i))\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom sklearn.preprocessing import normalize\n\n\ndef blackbox_function(x, y=None, sim=False):\n if sim:\n if y is None:\n return -x ** 2 + 6\n else:\n return -(x + y) ** 2 + 6\n filename = 'Output.txt'\n lines = open(filename).read().splitlines()\n try:\n latency = float(lines[-1])\n except ValueError:\n print('Failed to convert value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n except IndexError:\n print('The latent file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n return latency\n\n\ndef obtain_confidence(sim=False):\n if sim:\n noise = np.random.normal(0, 0.6, size=1)[0]\n return noise\n filename = 'Confidence.txt'\n lines = open(filename).read().splitlines()\n try:\n confidence = float(lines[-1])\n except ValueError:\n print('Failed to convert confidence value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n except IndexError:\n print('The confidence file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n return confidence\n\n\ndef posterior(optimizer, x_obs, y_obs, grid):\n optimizer._gp.fit(x_obs, y_obs)\n mu, sigma = optimizer._gp.predict(grid, return_std=True)\n return mu, sigma\n\n\ndef plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):\n fig = plt.figure(figsize=(16, 10))\n steps = len(optimizer.res)\n fig.suptitle('Gaussian Process and Utility Function After {} Steps'.\n format(steps), fontdict={'size': 30})\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n x_obs = np.array([[res['params']['x']] for res in optimizer.res])\n y_obs = np.array([res['target'] for res in optimizer.res])\n y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)\n y_obs = y_obs.flatten()\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n utility = utility_function.utility(x, optimizer._gp, y_obs.max())\n mu = mu * norm\n sigma = sigma * norm\n y_obs = y_obs * norm\n if y is not None:\n axis.plot(x, y, linewidth=3, label='Target')\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=\n u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n axis.fill(np.concatenate([x, x[::-1]]), np.concatenate([mu - 1.96 *\n sigma, (mu + 1.96 * sigma)[::-1]]), alpha=0.6, fc='c', ec='None',\n label='95% confidence interval')\n axis.set_ylabel('f(x)', fontdict={'size': 20})\n axis.set_xlabel('x', fontdict={'size': 20})\n acq.plot(x, utility, label='Utility Function', color='purple')\n acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,\n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor=\n 'k', markeredgewidth=1)\n acq.set_ylabel('Utility', fontdict={'size': 20})\n acq.set_xlabel('x', fontdict={'size': 20})\n axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n fig.savefig(logpath + '/fig_{}'.format(i))\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom sklearn.preprocessing import normalize\n\ndef blackbox_function(x, y=None, sim=False):\n if sim:\n if y is None:\n return -x ** 2 + 6\n else:\n return -(x+y) ** 2 + 6\n\n # Reading the magnitude of the N170 data\n filename = 'Output.txt'\n lines = open(filename).read().splitlines()\n\n try:\n latency = float(lines[-1])\n except ValueError:\n print('Failed to convert value to float')\n wait = input(\"PRESS ENTER TO CONTINUE.\")\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n except IndexError:\n print('The latent file is empty')\n wait = input(\"PRESS ENTER TO CONTINUE.\")\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n return latency\n\n\ndef obtain_confidence(sim=False):\n if sim:\n noise = np.random.normal(0, 0.60, size=1)[0]\n return noise\n\n # Reading the Confidence levels of the target value\n filename = 'Confidence.txt'\n lines = open(filename).read().splitlines()\n\n try:\n confidence = float(lines[-1])\n except ValueError:\n print('Failed to convert confidence value to float')\n wait = input(\"PRESS ENTER TO CONTINUE.\")\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n except IndexError:\n print('The confidence file is empty')\n wait = input(\"PRESS ENTER TO CONTINUE.\")\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n return confidence\n\n\n\ndef posterior(optimizer, x_obs, y_obs, grid):\n\n optimizer._gp.fit(x_obs, y_obs)\n\n mu, sigma = optimizer._gp.predict(grid, return_std=True)\n return mu, sigma\n\n\ndef plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):\n\n fig = plt.figure(figsize=(16, 10))\n steps = len(optimizer.res)\n fig.suptitle(\n 'Gaussian Process and Utility Function After {} Steps'.format(steps),\n fontdict={'size': 30}\n )\n\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n\n # x_obs = np.array([[res[\"params\"][\"x\"]] for res in optimizer.res])\n # y_obs = np.array([res[\"target\"] for res in optimizer.res])\n\n x_obs = np.array([[res[\"params\"][\"x\"]] for res in optimizer.res])\n y_obs = np.array([res[\"target\"] for res in optimizer.res])\n\n y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)\n y_obs = y_obs.flatten()\n\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n utility = utility_function.utility(x, optimizer._gp, y_obs.max())\n\n # Unnormalize data\n mu = mu*norm\n sigma = sigma*norm\n y_obs = y_obs*norm\n\n if y is not None:\n axis.plot(x, y, linewidth=3, label='Target')\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n\n axis.fill(np.concatenate([x, x[::-1]]),\n np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),\n alpha=.6, fc='c', ec='None', label='95% confidence interval')\n # if(bounds == \"large\"):\n # axis.set_xlim((-1, 1))\n # else:\n # axis.set_xlim((0, 1))\n # axis.set_ylim((None, None))\n axis.set_ylabel('f(x)', fontdict={'size': 20})\n axis.set_xlabel('x', fontdict={'size': 20})\n\n # utility = utility_function.utility(x, optimizer._gp, 0)\n acq.plot(x, utility, label='Utility Function', color='purple')\n acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,\n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)\n\n # if (bounds == \"large\"):\n # acq.set_xlim((-1, 
1))\n # else:\n # acq.set_xlim((0, 1))\n # acq.set_ylim((0, np.max(utility) + 0.5))\n acq.set_ylabel('Utility', fontdict={'size': 20})\n acq.set_xlabel('x', fontdict={'size': 20})\n\n axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)\n acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)\n\n fig.savefig(logpath+'/fig_{}'.format(i))\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from dataclasses import dataclass
from datetime import date
@dataclass
class Book:
id: int
title: str
author: str
genre: str
published: date
status: str = 'Available'
def __str__(self):
return f'{self.id}: {self.title} by {self.author}'
def get_more_information(self):
return f"Gatunek: {self.genre}\nData publikacji: {self.published}\nStatus: {self.status}"
|
normal
|
{
"blob_id": "dc13ca17bff8e2a5254c7758bd7274926bafd454",
"index": 5312,
"step-1": "<mask token>\n\n\n@dataclass\nclass Book:\n id: int\n title: str\n author: str\n genre: str\n published: date\n status: str = 'Available'\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass Book:\n id: int\n title: str\n author: str\n genre: str\n published: date\n status: str = 'Available'\n <mask token>\n\n def get_more_information(self):\n return (\n f'Gatunek: {self.genre}\\nData publikacji: {self.published}\\nStatus: {self.status}'\n )\n",
"step-3": "<mask token>\n\n\n@dataclass\nclass Book:\n id: int\n title: str\n author: str\n genre: str\n published: date\n status: str = 'Available'\n\n def __str__(self):\n return f'{self.id}: {self.title} by {self.author}'\n\n def get_more_information(self):\n return (\n f'Gatunek: {self.genre}\\nData publikacji: {self.published}\\nStatus: {self.status}'\n )\n",
"step-4": "from dataclasses import dataclass\nfrom datetime import date\n\n\n@dataclass\nclass Book:\n id: int\n title: str\n author: str\n genre: str\n published: date\n status: str = 'Available'\n\n def __str__(self):\n return f'{self.id}: {self.title} by {self.author}'\n\n def get_more_information(self):\n return (\n f'Gatunek: {self.genre}\\nData publikacji: {self.published}\\nStatus: {self.status}'\n )\n",
"step-5": "from dataclasses import dataclass\nfrom datetime import date\n\n\n@dataclass\nclass Book:\n id: int\n title: str\n author: str\n genre: str\n published: date\n status: str = 'Available'\n\n def __str__(self):\n return f'{self.id}: {self.title} by {self.author}'\n\n def get_more_information(self):\n return f\"Gatunek: {self.genre}\\nData publikacji: {self.published}\\nStatus: {self.status}\"\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from mf_app import db
from mf_app.models import User
db.create_all()
# test input data
admin = User('admin', '[email protected]', 'admin')
guest = User('guest', '[email protected]', 'guest')
db.session.add(admin)
db.session.add(guest)
db.session.commit()
users = User.query.all()
print(users)
|
normal
|
{
"blob_id": "99c2bd56deccc327faf659e91fc1fd0f6ff7a219",
"index": 3932,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.create_all()\n<mask token>\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.commit()\n<mask token>\nprint(users)\n",
"step-3": "<mask token>\ndb.create_all()\nadmin = User('admin', '[email protected]', 'admin')\nguest = User('guest', '[email protected]', 'guest')\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.commit()\nusers = User.query.all()\nprint(users)\n",
"step-4": "from mf_app import db\nfrom mf_app.models import User\ndb.create_all()\nadmin = User('admin', '[email protected]', 'admin')\nguest = User('guest', '[email protected]', 'guest')\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.commit()\nusers = User.query.all()\nprint(users)\n",
"step-5": "from mf_app import db\nfrom mf_app.models import User\n\ndb.create_all()\n\n#test input data\nadmin = User('admin', '[email protected]', 'admin')\nguest = User('guest', '[email protected]', 'guest')\n\ndb.session.add(admin)\ndb.session.add(guest)\n\ndb.session.commit()\n\nusers = User.query.all()\nprint(users)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# Creates a new task from a given task definition json and starts on
# all instances in the given cluster name
# USAGE:
# python ecs-tasker.py <task_definition_json_filename> <cluster_name>
# EXAMPLE:
# python ecs-tasker.py ecs-task-stage.json cops-cluster
import boto3
import json
import sys
import time
from pprint import pprint
fname = sys.argv[1]
cluster_name = sys.argv[2]
service_name = 'fhid-service-prod'
target_group_arn = 'arn:aws:elasticloadbalancing:us-east-1:188894168332:targetgroup/tg-fhid-prod/97843ffd9cf6b6c0'
container_name = 'fhid-prod'
container_port = 8090
desired_count = 2
sleeptime = 10
role_arn = 'arn:aws:iam::188894168332:role/ecrAccess'
fmt_logs_uri = "https://us-east-1.console.aws.amazon.com/cloudwatch/home?region=us-east-1#logEventViewer:group=awslogs-ecs;stream=awslogs-fhid-prod/fhid-prod/{0}"
with open(fname,'rb') as f:
task = json.load(f)
s = boto3.session.Session()
c = s.client('ecs', region_name='us-east-1')
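# Create the ECS service for the given task definition behind the existing target
# group, retrying a few times with a short sleep if the create_service call fails.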
def create_service(task_definition):
tries = 0
max_tries = 3
print("Attempt %d of %d..." % (tries, max_tries))
while 1:
if tries > max_tries:
print("Max tries exceeded, exiting with failure....")
sys.exit(1)
try:
response = c.create_service(
cluster=cluster_name,
serviceName=service_name,
taskDefinition=task_definition,
loadBalancers=[
{
'targetGroupArn': target_group_arn,
'containerName': container_name,
'containerPort': container_port
},
],
desiredCount=desired_count,
role=role_arn,
deploymentConfiguration={
'maximumPercent': 200,
'minimumHealthyPercent': 100
},
placementConstraints=[],
placementStrategy=[{
"field": "memory",
"type": "binpack"
}
]
)
            print(response)
break
except Exception as e:
print("Exception creating service: '%s'" % str(e))
tries += 1
print("Sleeping...")
time.sleep(5)
container_instances = c.list_container_instances(cluster=cluster_name).get('containerInstanceArns')
response = c.register_task_definition(containerDefinitions=task.get('containerDefinitions'),
networkMode=task.get('networkMode'),
taskRoleArn=task.get('taskRoleArn'),
family=task.get('family'))
definition = response.get('taskDefinition').get('taskDefinitionArn')
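# Start the task on the first container instance, retrying up to max_retries times
# when the start fails for lack of memory, then return the ARNs of the tasks that
# are running this task definition.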
def task_tester():
retries = 1
max_retries = 5
tasks = []
while 1:
print("Attempt %d of %d..." % (retries, max_retries))
if retries > max_retries:
print("Too many task start failures")
sys.exit(1)
tasker = c.start_task(taskDefinition=definition,
cluster=cluster_name,
containerInstances=[container_instances[0]]) # max of 10 instances
print("Sleeping %d seconds to wait for tasks to start..." % sleeptime)
time.sleep(sleeptime)
print("Number of tasks started: %d" % len(tasker.get('tasks')))
if len(tasker.get('failures')) > 0:
print("Number of failed tasks: %d" % len(tasker.get('failures')))
for failure in tasker.get('failures'):
print(failure)
if failure.get('reason') == "RESOURCE:MEMORY":
retries += 1
else:
break
all_tasks = c.list_tasks(cluster=cluster_name)
all_tasks_arns = all_tasks.get('taskArns')
for task_arn in c.describe_tasks(cluster=cluster_name, tasks=all_tasks_arns).get('tasks'):
if task_arn.get('taskDefinitionArn') == definition:
tasks.append(task_arn.get('taskArn'))
status = c.describe_tasks(cluster=cluster_name,
tasks=tasks)
return tasks
tasks = task_tester()
# check on status of tasks and exit with failure if
# containers don't stay running
count = 0
maxCount = 10
FAILED = False
RUNNING = False
runningCount = 0
task_definition_arn = ""
task_arn = ""
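# Once a task is observed RUNNING, a few more poll cycles trigger create_service();
# the temporary task is then stopped because the service manages its own tasks.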
while 1:
count += 1
status = c.describe_tasks(cluster=cluster_name,
tasks=tasks)
for task in status.get('tasks'):
if task.get('lastStatus') == "STOPPED":
print("CONTAINER FAILED:")
pprint(status)
FAILED = True
try:
guid = task.get('taskArn').split('/')[-1]
print("LOGS URL: %s" % fmt_logs_uri.format(guid))
except:
pass
break
if task.get('lastStatus') == "PENDING":
print("Task still PENDING...sleeping")
else:
pprint(status)
task_definition_arn = task.get('taskDefinitionArn')
task_arn = task.get("taskArn")
RUNNING = True
break
if count > maxCount:
print("Too many iterations, exiting status failed.")
FAILED = True
if FAILED:
break
if RUNNING:
runningCount += 1
if runningCount > 3:
create_service(task_definition_arn)
c.stop_task(cluster=cluster_name,
task=task_arn,
reason="Temporary task for pipeline build")
break
time.sleep(5)
if FAILED:
sys.exit(1)
else:
sys.exit(0)
|
normal
|
{
"blob_id": "3b613ec75088d6d9a645443df2bbc2f33b80000b",
"index": 6984,
"step-1": "#!/usr/bin/env python\n# Creates a new task from a given task definition json and starts on\n# all instances in the given cluster name\n# USAGE:\n# python ecs-tasker.py <task_definition_json_filename> <cluster_name>\n# EXAMPLE:\n# python ecs-tasker.py ecs-task-stage.json cops-cluster\n\nimport boto3\nimport json\nimport sys\nimport time\nfrom pprint import pprint\n\nfname = sys.argv[1]\ncluster_name = sys.argv[2]\nservice_name = 'fhid-service-prod'\ntarget_group_arn = 'arn:aws:elasticloadbalancing:us-east-1:188894168332:targetgroup/tg-fhid-prod/97843ffd9cf6b6c0'\ncontainer_name = 'fhid-prod'\ncontainer_port = 8090\ndesired_count = 2\nsleeptime = 10\nrole_arn = 'arn:aws:iam::188894168332:role/ecrAccess'\n\nfmt_logs_uri = \"https://us-east-1.console.aws.amazon.com/cloudwatch/home?region=us-east-1#logEventViewer:group=awslogs-ecs;stream=awslogs-fhid-prod/fhid-prod/{0}\"\n\nwith open(fname,'rb') as f:\n task = json.load(f)\n\ns = boto3.session.Session()\nc = s.client('ecs', region_name='us-east-1')\n\ndef create_service(task_definition):\n tries = 0\n max_tries = 3\n print(\"Attempt %d of %d...\" % (tries, max_tries))\n while 1:\n if tries > max_tries:\n print(\"Max tries exceeded, exiting with failure....\")\n sys.exit(1)\n try:\n response = c.create_service(\n cluster=cluster_name,\n serviceName=service_name,\n taskDefinition=task_definition,\n loadBalancers=[\n {\n 'targetGroupArn': target_group_arn,\n 'containerName': container_name,\n 'containerPort': container_port\n },\n ],\n desiredCount=desired_count,\n role=role_arn,\n deploymentConfiguration={\n 'maximumPercent': 200,\n 'minimumHealthyPercent': 100\n },\n placementConstraints=[],\n placementStrategy=[{\n \"field\": \"memory\",\n \"type\": \"binpack\"\n }\n ]\n )\n\n print response\n break\n except Exception as e:\n print(\"Exception creating service: '%s'\" % str(e))\n tries += 1\n print(\"Sleeping...\")\n time.sleep(5)\ncontainer_instances = c.list_container_instances(cluster=cluster_name).get('containerInstanceArns')\n\nresponse = c.register_task_definition(containerDefinitions=task.get('containerDefinitions'),\n networkMode=task.get('networkMode'),\n taskRoleArn=task.get('taskRoleArn'),\n family=task.get('family'))\n\ndefinition = response.get('taskDefinition').get('taskDefinitionArn')\n\n\ndef task_tester():\n retries = 1\n max_retries = 5\n tasks = []\n while 1:\n print(\"Attempt %d of %d...\" % (retries, max_retries))\n if retries > max_retries:\n print(\"Too many task start failures\")\n sys.exit(1)\n tasker = c.start_task(taskDefinition=definition,\n cluster=cluster_name,\n containerInstances=[container_instances[0]]) # max of 10 instances\n\n print(\"Sleeping %d seconds to wait for tasks to start...\" % sleeptime)\n time.sleep(sleeptime)\n print(\"Number of tasks started: %d\" % len(tasker.get('tasks')))\n if len(tasker.get('failures')) > 0:\n print(\"Number of failed tasks: %d\" % len(tasker.get('failures')))\n for failure in tasker.get('failures'):\n print(failure)\n if failure.get('reason') == \"RESOURCE:MEMORY\":\n retries += 1\n else:\n break\n\n all_tasks = c.list_tasks(cluster=cluster_name)\n all_tasks_arns = all_tasks.get('taskArns')\n for task_arn in c.describe_tasks(cluster=cluster_name, tasks=all_tasks_arns).get('tasks'):\n if task_arn.get('taskDefinitionArn') == definition:\n tasks.append(task_arn.get('taskArn'))\n\n status = c.describe_tasks(cluster=cluster_name,\n tasks=tasks)\n return tasks\n\ntasks = task_tester()\n# check on status of tasks and exit with failure if \n# containers don't stay 
running\ncount = 0\nmaxCount = 10\nFAILED = False\nRUNNING = False\nrunningCount = 0\ntask_definition_arn = \"\"\ntask_arn = \"\"\nwhile 1:\n count += 1\n status = c.describe_tasks(cluster=cluster_name,\n tasks=tasks)\n for task in status.get('tasks'):\n\n if task.get('lastStatus') == \"STOPPED\":\n print(\"CONTAINER FAILED:\")\n pprint(status)\n FAILED = True\n try:\n guid = task.get('taskArn').split('/')[-1]\n print(\"LOGS URL: %s\" % fmt_logs_uri.format(guid))\n except:\n pass\n break\n if task.get('lastStatus') == \"PENDING\":\n print(\"Task still PENDING...sleeping\")\n else:\n pprint(status)\n task_definition_arn = task.get('taskDefinitionArn')\n task_arn = task.get(\"taskArn\")\n RUNNING = True\n break\n if count > maxCount:\n print(\"Too many iterations, exiting status failed.\")\n FAILED = True\n if FAILED:\n break\n if RUNNING:\n runningCount += 1\n if runningCount > 3:\n create_service(task_definition_arn)\n c.stop_task(cluster=cluster_name,\n task=task_arn,\n reason=\"Temporary task for pipeline build\")\n break\n time.sleep(5)\n\nif FAILED:\n sys.exit(1)\nelse:\n sys.exit(0)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes,
kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes,
kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
# print(out.shape)
out = self.bn2(self.conv2(out))
# print(out.shape)
out += self.shortcut(x)
# print(out.shape)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
# print(out.shape)
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.fc1 = nn.Sequential(
nn.Linear(512 * block.expansion, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True)
)
self.fc2 = nn.Linear(hidden_dim, out_dim)
self.img_output_dim = None
self.drop_path_prob = 0.0
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
# print(strides)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
# print(nn.Sequential(*layers))
return nn.Sequential(*layers)
def extract_feature(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
        out = F.adaptive_avg_pool2d(out, 1)  # pool to 1x1 so the flattened size matches fc1's input
out = out.view(out.size(0), -1)
# print(out.shape)
return out
def sub_forward(self, x):
x = self.extract_feature(x)
# print(x.shape)
x = self.fc1(x)
# print(x.shape)
x = torch.sigmoid(x)
# print(x.shape)
return x
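    # Siamese forward pass: both inputs share the same backbone, and the absolute
    # difference of their embeddings is scored by fc2 into one value per pair.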
def forward(self, x0, x1):
x0 = self.sub_forward(x0)
if self.img_output_dim is None:
self.img_output_dim = x0.shape[1]
x1 = self.sub_forward(x1)
diff = torch.abs(x0 - x1)
scores = self.fc2(diff)
scores = torch.reshape(scores, (-1,))
# print(scores.shape)
return scores
class MLP_classifier(nn.Module):
def __init__(self, in_dim, hidden_dim=512, out_dim=10):
super(MLP_classifier, self).__init__()
self.fc1 = nn.Sequential(
nn.Linear(in_dim, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True)
)
self.fc2 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True)
)
self.fc3 = nn.Linear(hidden_dim, out_dim)
def forward(self, x):
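        # detach() blocks gradients here, so the classifier head trains on frozen features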
x = x.detach()
out = self.fc1(x)
out = self.fc2(out)
out = self.fc3(out)
return out
def ResNet18(in_plane):
    return ResNet(in_plane, BasicBlock, [2, 2, 2, 2])
def ResNet34(in_plane):
return ResNet(in_plane, BasicBlock, [3, 4, 6, 3])
def ResNet50(in_plane):
return ResNet(in_plane, Bottleneck, [3, 4, 6, 3])
def ResNet101(in_plane):
return ResNet(in_plane, Bottleneck, [3, 4, 23, 3])
def ResNet152(in_plane):
return ResNet(in_plane, Bottleneck, [3, 8, 36, 3])
if __name__ == '__main__':
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
x0 = torch.rand(128, 1, 64, 64).to(device)
net = ResNet34(1).to(device)
out = net(x0, x0)
print(out)
|
normal
|
{
"blob_id": "d3f42f329246164cdb6113df3da0eb2d3203b2a9",
"index": 7114,
"step-1": "<mask token>\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size\n =1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,\n hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.img_output_dim = None\n self.drop_path_prob = 0.0\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def extract_feature(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n return out\n\n def sub_forward(self, x):\n x = self.extract_feature(x)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n return x\n\n def forward(self, x0, x1):\n x0 = self.sub_forward(x0)\n if self.img_output_dim is None:\n self.img_output_dim = x0.shape[1]\n x1 = self.sub_forward(x1)\n diff = torch.abs(x0 - x1)\n scores = self.fc2(diff)\n scores = torch.reshape(scores, (-1,))\n return scores\n\n\nclass MLP_classifier(nn.Module):\n\n def __init__(self, in_dim, hidden_dim=512, out_dim=10):\n super(MLP_classifier, self).__init__()\n self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc3 = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n x = 
x.detach()\n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BasicBlock(nn.Module):\n <mask token>\n <mask token>\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size\n =1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,\n hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.img_output_dim = None\n self.drop_path_prob = 0.0\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def extract_feature(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n return out\n\n def sub_forward(self, x):\n x = self.extract_feature(x)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n return x\n\n def forward(self, x0, x1):\n x0 = self.sub_forward(x0)\n if self.img_output_dim is None:\n self.img_output_dim = x0.shape[1]\n x1 = self.sub_forward(x1)\n diff = torch.abs(x0 - x1)\n scores = self.fc2(diff)\n scores = torch.reshape(scores, (-1,))\n return scores\n\n\nclass MLP_classifier(nn.Module):\n\n def __init__(self, in_dim, hidden_dim=512, out_dim=10):\n super(MLP_classifier, self).__init__()\n self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.\n 
BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc3 = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n x = x.detach()\n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=\n stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size\n =1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,\n hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.img_output_dim = None\n self.drop_path_prob = 0.0\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def extract_feature(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n return out\n\n 
def sub_forward(self, x):\n x = self.extract_feature(x)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n return x\n\n def forward(self, x0, x1):\n x0 = self.sub_forward(x0)\n if self.img_output_dim is None:\n self.img_output_dim = x0.shape[1]\n x1 = self.sub_forward(x1)\n diff = torch.abs(x0 - x1)\n scores = self.fc2(diff)\n scores = torch.reshape(scores, (-1,))\n return scores\n\n\nclass MLP_classifier(nn.Module):\n\n def __init__(self, in_dim, hidden_dim=512, out_dim=10):\n super(MLP_classifier, self).__init__()\n self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc3 = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n x = x.detach()\n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n\n<mask token>\n\n\ndef ResNet152(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 8, 36, 3])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=\n stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size\n =1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,\n hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.img_output_dim = None\n self.drop_path_prob = 0.0\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def extract_feature(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n return out\n\n 
def sub_forward(self, x):\n x = self.extract_feature(x)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n return x\n\n def forward(self, x0, x1):\n x0 = self.sub_forward(x0)\n if self.img_output_dim is None:\n self.img_output_dim = x0.shape[1]\n x1 = self.sub_forward(x1)\n diff = torch.abs(x0 - x1)\n scores = self.fc2(diff)\n scores = torch.reshape(scores, (-1,))\n return scores\n\n\nclass MLP_classifier(nn.Module):\n\n def __init__(self, in_dim, hidden_dim=512, out_dim=10):\n super(MLP_classifier, self).__init__()\n self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc3 = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n x = x.detach()\n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n\ndef ResNet18():\n return ResNet(BasicBlock, [2, 2, 2, 2])\n\n\ndef ResNet34(in_plane):\n return ResNet(in_plane, BasicBlock, [3, 4, 6, 3])\n\n\ndef ResNet50(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 4, 6, 3])\n\n\ndef ResNet101(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 4, 23, 3])\n\n\ndef ResNet152(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 8, 36, 3])\n\n\n<mask token>\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes,\n kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n\n self.conv2 = nn.Conv2d(planes, planes,\n kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n # print(out.shape)\n out = self.bn2(self.conv2(out))\n # print(out.shape)\n out += self.shortcut(x)\n # print(out.shape)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n # print(out.shape)\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc1 = nn.Sequential(\n nn.Linear(512 * block.expansion, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(inplace=True)\n )\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.img_output_dim = None\n self.drop_path_prob = 0.0\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n # print(strides)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n # print(nn.Sequential(*layers))\n return nn.Sequential(*layers)\n\n def extract_feature(self, x):\n 
out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n # print(out.shape)\n return out\n\n def sub_forward(self, x):\n x = self.extract_feature(x)\n # print(x.shape)\n x = self.fc1(x)\n # print(x.shape)\n x = torch.sigmoid(x)\n # print(x.shape)\n return x\n\n def forward(self, x0, x1):\n x0 = self.sub_forward(x0)\n\n if self.img_output_dim is None:\n self.img_output_dim = x0.shape[1]\n\n x1 = self.sub_forward(x1)\n diff = torch.abs(x0 - x1)\n scores = self.fc2(diff)\n scores = torch.reshape(scores, (-1,))\n # print(scores.shape)\n return scores\n\n\nclass MLP_classifier(nn.Module):\n def __init__(self, in_dim, hidden_dim=512, out_dim=10):\n super(MLP_classifier, self).__init__()\n self.fc1 = nn.Sequential(\n nn.Linear(in_dim, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(inplace=True)\n )\n self.fc2 = nn.Sequential(\n nn.Linear(hidden_dim, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(inplace=True)\n )\n self.fc3 = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n x = x.detach()\n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n\n\ndef ResNet18():\n return ResNet(BasicBlock, [2, 2, 2, 2])\n\n\ndef ResNet34(in_plane):\n return ResNet(in_plane, BasicBlock, [3, 4, 6, 3])\n\n\ndef ResNet50(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 4, 6, 3])\n\n\ndef ResNet101(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 4, 23, 3])\n\n\ndef ResNet152(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 8, 36, 3])\n\nif __name__ == '__main__':\n device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n device = torch.device(device)\n x0 = torch.rand(128, 1, 64, 64).to(device)\n net = ResNet34(1).to(device)\n out = net(x0, x0)\n print(out)",
"step-ids": [
13,
15,
18,
22,
25
]
}
|
[
13,
15,
18,
22,
25
] |
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# driver = webdriver.Chrome('C:/automation/chromedriver')
# wait = WebDriverWait(driver, 15)
class Methodos(object):
def __init__(self,driver):
self.driver=driver
self.wait=WebDriverWait(self.driver, 15)
def SendText(self, _id, text):
        e = self.wait.until(EC.element_to_be_clickable((By.ID, _id)))
e.clear()
e.send_keys(text)
self.driver.implicitly_wait(5)
def Click(self, id):
e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))
e.click()
def GetElementId(self,idtext):
        return self.wait.until(EC.element_to_be_clickable((By.ID, idtext)))
# def SendText(driver,wait,_id,text):
# e= wait.until(EC.element_to_be_clickable(By.ID,_id))
# e.clear()
# e.send_keys(text)
# driver.implicitly_wait(5)
# def Click(driver,wait,id):
# e=wait.until(EC.element_to_be_clickable((By.ID,id)))
# e.click()
|
normal
|
{
"blob_id": "0a23b16329d8b599a4ee533604d316bdfe4b579a",
"index": 4832,
"step-1": "<mask token>\n\n\nclass Methodos(object):\n\n def __init__(self, driver):\n self.driver = driver\n self.wait = WebDriverWait(self.driver, 15)\n <mask token>\n\n def Click(self, id):\n e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))\n e.click()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Methodos(object):\n\n def __init__(self, driver):\n self.driver = driver\n self.wait = WebDriverWait(self.driver, 15)\n\n def SendText(self, _id, text):\n e = self.wait.until(EC.element_to_be_clickable(By.ID, _id))\n e.clear()\n e.send_keys(text)\n self.driver.implicitly_wait(5)\n\n def Click(self, id):\n e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))\n e.click()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Methodos(object):\n\n def __init__(self, driver):\n self.driver = driver\n self.wait = WebDriverWait(self.driver, 15)\n\n def SendText(self, _id, text):\n e = self.wait.until(EC.element_to_be_clickable(By.ID, _id))\n e.clear()\n e.send_keys(text)\n self.driver.implicitly_wait(5)\n\n def Click(self, id):\n e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))\n e.click()\n\n def GetElementId(self, idtext):\n return self.wait.until(EC.element_to_be_clickable(By.ID, idtext))\n",
"step-4": "from selenium.webdriver.common.keys import Keys\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass Methodos(object):\n\n def __init__(self, driver):\n self.driver = driver\n self.wait = WebDriverWait(self.driver, 15)\n\n def SendText(self, _id, text):\n e = self.wait.until(EC.element_to_be_clickable(By.ID, _id))\n e.clear()\n e.send_keys(text)\n self.driver.implicitly_wait(5)\n\n def Click(self, id):\n e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))\n e.click()\n\n def GetElementId(self, idtext):\n return self.wait.until(EC.element_to_be_clickable(By.ID, idtext))\n",
"step-5": "from selenium.webdriver.common.keys import Keys\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n\r\n# driver = webdriver.Chrome('C:/automation/chromedriver')\r\n# wait = WebDriverWait(driver, 15)\r\nclass Methodos(object):\r\n def __init__(self,driver):\r\n self.driver=driver\r\n self.wait=WebDriverWait(self.driver, 15)\r\n\r\n def SendText(self, _id, text):\r\n e = self.wait.until(EC.element_to_be_clickable(By.ID, _id))\r\n e.clear()\r\n e.send_keys(text)\r\n self.driver.implicitly_wait(5)\r\n\r\n def Click(self, id):\r\n e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))\r\n e.click()\r\n\r\n\r\n def GetElementId(self,idtext):\r\n return self.wait.until(EC.element_to_be_clickable(By.ID,idtext))\r\n\r\n# def SendText(driver,wait,_id,text):\r\n# e= wait.until(EC.element_to_be_clickable(By.ID,_id))\r\n# e.clear()\r\n# e.send_keys(text)\r\n# driver.implicitly_wait(5)\r\n\r\n\r\n\r\n# def Click(driver,wait,id):\r\n# e=wait.until(EC.element_to_be_clickable((By.ID,id)))\r\n# e.click()\r\n\r\n\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import sqlite3
# cur.execute('CREATE TABLE admin(username TEXT,password TEXT)')
# conn.commit()
# cur.execute("INSERT INTO admin VALUES('nilesh','nilesh')")
# conn.commit()
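# Compare the supplied password with the one stored for this admin username;
# any lookup or connection error is treated as a failed login.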
def verif_admin(username, password):
try:
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
print(username)
print(password)
data = cur.execute('SELECT password FROM admin WHERE username = "{}"'.format(username)).fetchall()[0][0]
conn.close()
if password == data:
return True
else:
return False
except:
return False
def add_product(id_, name, quantity, cost):
if id_ == '' and name == '' and quantity == '' and cost == '':
return False, " You Cannot Leave It Empty "
try:
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
print(id_, name, quantity, cost)
try:
quantity = int(quantity)
cost = int(cost)
print(id_, name, quantity, cost)
print(type(id_), type(name), type(quantity), type(cost))
check = cur.execute(f"SELECT * FROM products WHERE id = '{id_}'").fetchall()
if len(check) > 0:
return False, " This Product Already Exist Try Updating "
else:
cur.execute('INSERT INTO products VALUES("{}","{}",{},{})'.format(id_, name, quantity, cost))
conn.commit()
conn.close()
return True, " Product Added Successfully "
except:
return False, " Quantity and Cost are Integers "
except:
return False, " Failed Connecting Database "
def get_product_detail(prod_id):
if prod_id == '':
return False, " Enter Product Id "
else:
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
data = cur.execute(f"SELECT rowid,* FROM products where id='{prod_id}'").fetchall()
conn.close()
if len(data) == 0:
return False, " Product Don't Exist "
return True, data
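# Update or delete the product row identified by rowid, depending on whether
# qry is 'update' or 'delete'.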
def update_delete_product(rowid, id_, name, quantity, cost, qry):
if id_ == '' and name == '' and quantity == '' and cost == '':
return False, " You Cannot Leave It Empty "
try:
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
try:
quantity = int(quantity)
cost = int(cost)
if qry == 'update':
cur.execute(
f"UPDATE products SET id = '{id_}',name='{name}',quantity = {quantity},cost={cost} WHERE rowid = {rowid}")
conn.commit()
return True, " Product Updated Successfully "
if qry == "delete":
cur.execute(f"DELETE FROM products WHERE rowid={rowid} ")
conn.commit()
return True, " Product Deleted Successfully "
conn.commit()
conn.close()
except:
return False, " Quantity and Cost are Integers "
except:
return False, " Failed Connecting Database "
def showProducts_all():
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
data = cur.execute("SELECT * FROM products").fetchall()
return True, data
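# "add" moves one unit of the product from the products table into the cart
# (creating the cart table on first use); "remove" returns the cart quantity
# to stock and deletes the cart row.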
def added_to_cart(prod_id, qry):
if prod_id == '':
return False, " Please Enter Product Id ",1
else:
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
if qry == "add":
try:
cur.execute("""CREATE TABLE cart(
id TEXT,
name TEXT,
quantity INTEGER,
cost INTEGER) """)
except:
pass
data = cur.execute(f"""SELECT * FROM products WHERE id = '{prod_id}'""").fetchall()
cart_check = cur.execute(f"""SELECT * FROM cart WHERE id = '{prod_id}' """).fetchall()
if len(cart_check) == 0:
cur.execute(f"""INSERT INTO cart VALUES('{data[0][0]}','{data[0][1]}',1,{data[0][3]})""")
conn.commit()
cur.execute(f"""UPDATE products SET quantity = {(data[0][2] - 1)} WHERE id ='{prod_id}'""")
conn.commit()
all_prods = cur.execute("SELECT * FROM cart").fetchall()
return True, " Product Added To Cart Successfully ",all_prods
elif len(cart_check) > 0:
cur.execute(
f"""UPDATE cart SET quantity = {(cart_check[0][2] + 1)},cost={(cart_check[0][3] + data[0][3])} WHERE id ='{prod_id}'""")
conn.commit()
cur.execute(f"""UPDATE products SET quantity = {(data[0][2] - 1)} WHERE id ='{prod_id}'""")
conn.commit()
all_prods = cur.execute("SELECT * FROM cart").fetchall()
return True, " Product Added To Cart Successfully ",all_prods
if qry == "remove":
cart_check = cur.execute(f"""SELECT * FROM cart WHERE id = '{prod_id}' """).fetchall()
if len(cart_check) == 0:
all_prods = cur.execute("SELECT * FROM cart").fetchall()
return True," Product Doesn't Exist ",all_prods
elif len(cart_check) > 0:
data = cur.execute(f"""SELECT * FROM products WHERE id = '{prod_id}'""").fetchall()
cur.execute(f"UPDATE products SET quantity = {(data[0][2]+cart_check[0][2])} WHERE id ='{prod_id}'")
conn.commit()
cur.execute(f"DELETE FROM cart WHERE id = '{prod_id}'")
conn.commit()
all_prods = cur.execute("SELECT * FROM cart").fetchall()
return True," Product Deleted Successfully ",all_prods
conn.close()
def get_cost():
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
data = cur.execute("SELECT * FROM cart").fetchall()
cost = 0
for i in data:
cost = cost+i[3]
return cost
def done_Drp():
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
cur.execute("DROP TABLE cart")
conn.commit()
|
normal
|
{
"blob_id": "88d0ced41a8f176a8a12bba6406b4162ea6dfc52",
"index": 9308,
"step-1": "<mask token>\n\n\ndef update_delete_product(rowid, id_, name, quantity, cost, qry):\n if id_ == '' and name == '' and quantity == '' and cost == '':\n return False, ' You Cannot Leave It Empty '\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n try:\n quantity = int(quantity)\n cost = int(cost)\n if qry == 'update':\n cur.execute(\n f\"UPDATE products SET id = '{id_}',name='{name}',quantity = {quantity},cost={cost} WHERE rowid = {rowid}\"\n )\n conn.commit()\n return True, ' Product Updated Successfully '\n if qry == 'delete':\n cur.execute(f'DELETE FROM products WHERE rowid={rowid} ')\n conn.commit()\n return True, ' Product Deleted Successfully '\n conn.commit()\n conn.close()\n except:\n return False, ' Quantity and Cost are Integers '\n except:\n return False, ' Failed Connecting Database '\n\n\n<mask token>\n\n\ndef added_to_cart(prod_id, qry):\n if prod_id == '':\n return False, ' Please Enter Product Id ', 1\n else:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n if qry == 'add':\n try:\n cur.execute(\n \"\"\"CREATE TABLE cart(\n id TEXT,\n name TEXT,\n quantity INTEGER,\n cost INTEGER) \"\"\"\n )\n except:\n pass\n data = cur.execute(f\"SELECT * FROM products WHERE id = '{prod_id}'\"\n ).fetchall()\n cart_check = cur.execute(\n f\"SELECT * FROM cart WHERE id = '{prod_id}' \").fetchall()\n if len(cart_check) == 0:\n cur.execute(\n f\"INSERT INTO cart VALUES('{data[0][0]}','{data[0][1]}',1,{data[0][3]})\"\n )\n conn.commit()\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] - 1} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Added To Cart Successfully ', all_prods\n elif len(cart_check) > 0:\n cur.execute(\n f\"UPDATE cart SET quantity = {cart_check[0][2] + 1},cost={cart_check[0][3] + data[0][3]} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] - 1} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Added To Cart Successfully ', all_prods\n if qry == 'remove':\n cart_check = cur.execute(\n f\"SELECT * FROM cart WHERE id = '{prod_id}' \").fetchall()\n if len(cart_check) == 0:\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, \" Product Doesn't Exist \", all_prods\n elif len(cart_check) > 0:\n data = cur.execute(\n f\"SELECT * FROM products WHERE id = '{prod_id}'\").fetchall(\n )\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] + cart_check[0][2]} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n cur.execute(f\"DELETE FROM cart WHERE id = '{prod_id}'\")\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Deleted Successfully ', all_prods\n conn.close()\n\n\n<mask token>\n\n\ndef done_Drp():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n cur.execute('DROP TABLE cart')\n conn.commit()\n",
"step-2": "<mask token>\n\n\ndef add_product(id_, name, quantity, cost):\n if id_ == '' and name == '' and quantity == '' and cost == '':\n return False, ' You Cannot Leave It Empty '\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n print(id_, name, quantity, cost)\n try:\n quantity = int(quantity)\n cost = int(cost)\n print(id_, name, quantity, cost)\n print(type(id_), type(name), type(quantity), type(cost))\n check = cur.execute(f\"SELECT * FROM products WHERE id = '{id_}'\"\n ).fetchall()\n if len(check) > 0:\n return False, ' This Product Already Exist Try Updating '\n else:\n cur.execute('INSERT INTO products VALUES(\"{}\",\"{}\",{},{})'.\n format(id_, name, quantity, cost))\n conn.commit()\n conn.close()\n return True, ' Product Added Successfully '\n except:\n return False, ' Quantity and Cost are Integers '\n except:\n return False, ' Failed Connecting Database '\n\n\ndef get_product_detail(prod_id):\n if prod_id == '':\n return False, ' Enter Product Id '\n else:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n data = cur.execute(f\"SELECT rowid,* FROM products where id='{prod_id}'\"\n ).fetchall()\n conn.close()\n if len(data) == 0:\n return False, \" Product Don't Exist \"\n return True, data\n\n\ndef update_delete_product(rowid, id_, name, quantity, cost, qry):\n if id_ == '' and name == '' and quantity == '' and cost == '':\n return False, ' You Cannot Leave It Empty '\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n try:\n quantity = int(quantity)\n cost = int(cost)\n if qry == 'update':\n cur.execute(\n f\"UPDATE products SET id = '{id_}',name='{name}',quantity = {quantity},cost={cost} WHERE rowid = {rowid}\"\n )\n conn.commit()\n return True, ' Product Updated Successfully '\n if qry == 'delete':\n cur.execute(f'DELETE FROM products WHERE rowid={rowid} ')\n conn.commit()\n return True, ' Product Deleted Successfully '\n conn.commit()\n conn.close()\n except:\n return False, ' Quantity and Cost are Integers '\n except:\n return False, ' Failed Connecting Database '\n\n\ndef showProducts_all():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n data = cur.execute('SELECT * FROM products').fetchall()\n return True, data\n\n\ndef added_to_cart(prod_id, qry):\n if prod_id == '':\n return False, ' Please Enter Product Id ', 1\n else:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n if qry == 'add':\n try:\n cur.execute(\n \"\"\"CREATE TABLE cart(\n id TEXT,\n name TEXT,\n quantity INTEGER,\n cost INTEGER) \"\"\"\n )\n except:\n pass\n data = cur.execute(f\"SELECT * FROM products WHERE id = '{prod_id}'\"\n ).fetchall()\n cart_check = cur.execute(\n f\"SELECT * FROM cart WHERE id = '{prod_id}' \").fetchall()\n if len(cart_check) == 0:\n cur.execute(\n f\"INSERT INTO cart VALUES('{data[0][0]}','{data[0][1]}',1,{data[0][3]})\"\n )\n conn.commit()\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] - 1} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Added To Cart Successfully ', all_prods\n elif len(cart_check) > 0:\n cur.execute(\n f\"UPDATE cart SET quantity = {cart_check[0][2] + 1},cost={cart_check[0][3] + data[0][3]} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] - 1} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Added To Cart Successfully ', 
all_prods\n if qry == 'remove':\n cart_check = cur.execute(\n f\"SELECT * FROM cart WHERE id = '{prod_id}' \").fetchall()\n if len(cart_check) == 0:\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, \" Product Doesn't Exist \", all_prods\n elif len(cart_check) > 0:\n data = cur.execute(\n f\"SELECT * FROM products WHERE id = '{prod_id}'\").fetchall(\n )\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] + cart_check[0][2]} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n cur.execute(f\"DELETE FROM cart WHERE id = '{prod_id}'\")\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Deleted Successfully ', all_prods\n conn.close()\n\n\n<mask token>\n\n\ndef done_Drp():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n cur.execute('DROP TABLE cart')\n conn.commit()\n",
"step-3": "<mask token>\n\n\ndef verif_admin(username, password):\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n print(username)\n print(password)\n data = cur.execute('SELECT password FROM admin WHERE username = \"{}\"'\n .format(username)).fetchall()[0][0]\n conn.close()\n if password == data:\n return True\n else:\n return False\n except:\n return False\n\n\ndef add_product(id_, name, quantity, cost):\n if id_ == '' and name == '' and quantity == '' and cost == '':\n return False, ' You Cannot Leave It Empty '\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n print(id_, name, quantity, cost)\n try:\n quantity = int(quantity)\n cost = int(cost)\n print(id_, name, quantity, cost)\n print(type(id_), type(name), type(quantity), type(cost))\n check = cur.execute(f\"SELECT * FROM products WHERE id = '{id_}'\"\n ).fetchall()\n if len(check) > 0:\n return False, ' This Product Already Exist Try Updating '\n else:\n cur.execute('INSERT INTO products VALUES(\"{}\",\"{}\",{},{})'.\n format(id_, name, quantity, cost))\n conn.commit()\n conn.close()\n return True, ' Product Added Successfully '\n except:\n return False, ' Quantity and Cost are Integers '\n except:\n return False, ' Failed Connecting Database '\n\n\ndef get_product_detail(prod_id):\n if prod_id == '':\n return False, ' Enter Product Id '\n else:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n data = cur.execute(f\"SELECT rowid,* FROM products where id='{prod_id}'\"\n ).fetchall()\n conn.close()\n if len(data) == 0:\n return False, \" Product Don't Exist \"\n return True, data\n\n\ndef update_delete_product(rowid, id_, name, quantity, cost, qry):\n if id_ == '' and name == '' and quantity == '' and cost == '':\n return False, ' You Cannot Leave It Empty '\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n try:\n quantity = int(quantity)\n cost = int(cost)\n if qry == 'update':\n cur.execute(\n f\"UPDATE products SET id = '{id_}',name='{name}',quantity = {quantity},cost={cost} WHERE rowid = {rowid}\"\n )\n conn.commit()\n return True, ' Product Updated Successfully '\n if qry == 'delete':\n cur.execute(f'DELETE FROM products WHERE rowid={rowid} ')\n conn.commit()\n return True, ' Product Deleted Successfully '\n conn.commit()\n conn.close()\n except:\n return False, ' Quantity and Cost are Integers '\n except:\n return False, ' Failed Connecting Database '\n\n\ndef showProducts_all():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n data = cur.execute('SELECT * FROM products').fetchall()\n return True, data\n\n\ndef added_to_cart(prod_id, qry):\n if prod_id == '':\n return False, ' Please Enter Product Id ', 1\n else:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n if qry == 'add':\n try:\n cur.execute(\n \"\"\"CREATE TABLE cart(\n id TEXT,\n name TEXT,\n quantity INTEGER,\n cost INTEGER) \"\"\"\n )\n except:\n pass\n data = cur.execute(f\"SELECT * FROM products WHERE id = '{prod_id}'\"\n ).fetchall()\n cart_check = cur.execute(\n f\"SELECT * FROM cart WHERE id = '{prod_id}' \").fetchall()\n if len(cart_check) == 0:\n cur.execute(\n f\"INSERT INTO cart VALUES('{data[0][0]}','{data[0][1]}',1,{data[0][3]})\"\n )\n conn.commit()\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] - 1} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Added To Cart Successfully ', all_prods\n elif len(cart_check) > 0:\n cur.execute(\n 
f\"UPDATE cart SET quantity = {cart_check[0][2] + 1},cost={cart_check[0][3] + data[0][3]} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] - 1} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Added To Cart Successfully ', all_prods\n if qry == 'remove':\n cart_check = cur.execute(\n f\"SELECT * FROM cart WHERE id = '{prod_id}' \").fetchall()\n if len(cart_check) == 0:\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, \" Product Doesn't Exist \", all_prods\n elif len(cart_check) > 0:\n data = cur.execute(\n f\"SELECT * FROM products WHERE id = '{prod_id}'\").fetchall(\n )\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] + cart_check[0][2]} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n cur.execute(f\"DELETE FROM cart WHERE id = '{prod_id}'\")\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Deleted Successfully ', all_prods\n conn.close()\n\n\n<mask token>\n\n\ndef done_Drp():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n cur.execute('DROP TABLE cart')\n conn.commit()\n",
"step-4": "import sqlite3\n\n\ndef verif_admin(username, password):\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n print(username)\n print(password)\n data = cur.execute('SELECT password FROM admin WHERE username = \"{}\"'\n .format(username)).fetchall()[0][0]\n conn.close()\n if password == data:\n return True\n else:\n return False\n except:\n return False\n\n\ndef add_product(id_, name, quantity, cost):\n if id_ == '' and name == '' and quantity == '' and cost == '':\n return False, ' You Cannot Leave It Empty '\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n print(id_, name, quantity, cost)\n try:\n quantity = int(quantity)\n cost = int(cost)\n print(id_, name, quantity, cost)\n print(type(id_), type(name), type(quantity), type(cost))\n check = cur.execute(f\"SELECT * FROM products WHERE id = '{id_}'\"\n ).fetchall()\n if len(check) > 0:\n return False, ' This Product Already Exist Try Updating '\n else:\n cur.execute('INSERT INTO products VALUES(\"{}\",\"{}\",{},{})'.\n format(id_, name, quantity, cost))\n conn.commit()\n conn.close()\n return True, ' Product Added Successfully '\n except:\n return False, ' Quantity and Cost are Integers '\n except:\n return False, ' Failed Connecting Database '\n\n\ndef get_product_detail(prod_id):\n if prod_id == '':\n return False, ' Enter Product Id '\n else:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n data = cur.execute(f\"SELECT rowid,* FROM products where id='{prod_id}'\"\n ).fetchall()\n conn.close()\n if len(data) == 0:\n return False, \" Product Don't Exist \"\n return True, data\n\n\ndef update_delete_product(rowid, id_, name, quantity, cost, qry):\n if id_ == '' and name == '' and quantity == '' and cost == '':\n return False, ' You Cannot Leave It Empty '\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n try:\n quantity = int(quantity)\n cost = int(cost)\n if qry == 'update':\n cur.execute(\n f\"UPDATE products SET id = '{id_}',name='{name}',quantity = {quantity},cost={cost} WHERE rowid = {rowid}\"\n )\n conn.commit()\n return True, ' Product Updated Successfully '\n if qry == 'delete':\n cur.execute(f'DELETE FROM products WHERE rowid={rowid} ')\n conn.commit()\n return True, ' Product Deleted Successfully '\n conn.commit()\n conn.close()\n except:\n return False, ' Quantity and Cost are Integers '\n except:\n return False, ' Failed Connecting Database '\n\n\ndef showProducts_all():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n data = cur.execute('SELECT * FROM products').fetchall()\n return True, data\n\n\ndef added_to_cart(prod_id, qry):\n if prod_id == '':\n return False, ' Please Enter Product Id ', 1\n else:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n if qry == 'add':\n try:\n cur.execute(\n \"\"\"CREATE TABLE cart(\n id TEXT,\n name TEXT,\n quantity INTEGER,\n cost INTEGER) \"\"\"\n )\n except:\n pass\n data = cur.execute(f\"SELECT * FROM products WHERE id = '{prod_id}'\"\n ).fetchall()\n cart_check = cur.execute(\n f\"SELECT * FROM cart WHERE id = '{prod_id}' \").fetchall()\n if len(cart_check) == 0:\n cur.execute(\n f\"INSERT INTO cart VALUES('{data[0][0]}','{data[0][1]}',1,{data[0][3]})\"\n )\n conn.commit()\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] - 1} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Added To Cart Successfully ', all_prods\n elif len(cart_check) > 0:\n cur.execute(\n 
f\"UPDATE cart SET quantity = {cart_check[0][2] + 1},cost={cart_check[0][3] + data[0][3]} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] - 1} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Added To Cart Successfully ', all_prods\n if qry == 'remove':\n cart_check = cur.execute(\n f\"SELECT * FROM cart WHERE id = '{prod_id}' \").fetchall()\n if len(cart_check) == 0:\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, \" Product Doesn't Exist \", all_prods\n elif len(cart_check) > 0:\n data = cur.execute(\n f\"SELECT * FROM products WHERE id = '{prod_id}'\").fetchall(\n )\n cur.execute(\n f\"UPDATE products SET quantity = {data[0][2] + cart_check[0][2]} WHERE id ='{prod_id}'\"\n )\n conn.commit()\n cur.execute(f\"DELETE FROM cart WHERE id = '{prod_id}'\")\n conn.commit()\n all_prods = cur.execute('SELECT * FROM cart').fetchall()\n return True, ' Product Deleted Successfully ', all_prods\n conn.close()\n\n\ndef get_cost():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n data = cur.execute('SELECT * FROM cart').fetchall()\n cost = 0\n for i in data:\n cost = cost + i[3]\n return cost\n\n\ndef done_Drp():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n cur.execute('DROP TABLE cart')\n conn.commit()\n",
"step-5": "import sqlite3\n\n\n# cur.execute('CREATE TABLE admin(username TEXT,password TEXT)')\n# conn.commit()\n# cur.execute(\"INSERT INTO admin VALUES('nilesh','nilesh')\")\n# conn.commit()\n\ndef verif_admin(username, password):\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n print(username)\n print(password)\n data = cur.execute('SELECT password FROM admin WHERE username = \"{}\"'.format(username)).fetchall()[0][0]\n\n conn.close()\n if password == data:\n return True\n else:\n return False\n except:\n return False\n\n\ndef add_product(id_, name, quantity, cost):\n if id_ == '' and name == '' and quantity == '' and cost == '':\n return False, \" You Cannot Leave It Empty \"\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n print(id_, name, quantity, cost)\n try:\n quantity = int(quantity)\n cost = int(cost)\n print(id_, name, quantity, cost)\n print(type(id_), type(name), type(quantity), type(cost))\n check = cur.execute(f\"SELECT * FROM products WHERE id = '{id_}'\").fetchall()\n if len(check) > 0:\n return False, \" This Product Already Exist Try Updating \"\n else:\n cur.execute('INSERT INTO products VALUES(\"{}\",\"{}\",{},{})'.format(id_, name, quantity, cost))\n conn.commit()\n conn.close()\n return True, \" Product Added Successfully \"\n except:\n\n return False, \" Quantity and Cost are Integers \"\n\n except:\n\n return False, \" Failed Connecting Database \"\n\n\ndef get_product_detail(prod_id):\n if prod_id == '':\n return False, \" Enter Product Id \"\n else:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n data = cur.execute(f\"SELECT rowid,* FROM products where id='{prod_id}'\").fetchall()\n conn.close()\n if len(data) == 0:\n return False, \" Product Don't Exist \"\n return True, data\n\n\ndef update_delete_product(rowid, id_, name, quantity, cost, qry):\n if id_ == '' and name == '' and quantity == '' and cost == '':\n return False, \" You Cannot Leave It Empty \"\n try:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n try:\n quantity = int(quantity)\n cost = int(cost)\n if qry == 'update':\n cur.execute(\n f\"UPDATE products SET id = '{id_}',name='{name}',quantity = {quantity},cost={cost} WHERE rowid = {rowid}\")\n conn.commit()\n return True, \" Product Updated Successfully \"\n if qry == \"delete\":\n cur.execute(f\"DELETE FROM products WHERE rowid={rowid} \")\n conn.commit()\n return True, \" Product Deleted Successfully \"\n conn.commit()\n conn.close()\n\n except:\n\n return False, \" Quantity and Cost are Integers \"\n except:\n return False, \" Failed Connecting Database \"\n\n\ndef showProducts_all():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n data = cur.execute(\"SELECT * FROM products\").fetchall()\n return True, data\n\n\ndef added_to_cart(prod_id, qry):\n if prod_id == '':\n return False, \" Please Enter Product Id \",1\n else:\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n if qry == \"add\":\n try:\n cur.execute(\"\"\"CREATE TABLE cart(\n id TEXT,\n name TEXT,\n quantity INTEGER,\n cost INTEGER) \"\"\")\n except:\n pass\n\n data = cur.execute(f\"\"\"SELECT * FROM products WHERE id = '{prod_id}'\"\"\").fetchall()\n cart_check = cur.execute(f\"\"\"SELECT * FROM cart WHERE id = '{prod_id}' \"\"\").fetchall()\n if len(cart_check) == 0:\n cur.execute(f\"\"\"INSERT INTO cart VALUES('{data[0][0]}','{data[0][1]}',1,{data[0][3]})\"\"\")\n conn.commit()\n cur.execute(f\"\"\"UPDATE products SET quantity = {(data[0][2] - 1)} WHERE id 
='{prod_id}'\"\"\")\n conn.commit()\n all_prods = cur.execute(\"SELECT * FROM cart\").fetchall()\n return True, \" Product Added To Cart Successfully \",all_prods\n\n elif len(cart_check) > 0:\n cur.execute(\n f\"\"\"UPDATE cart SET quantity = {(cart_check[0][2] + 1)},cost={(cart_check[0][3] + data[0][3])} WHERE id ='{prod_id}'\"\"\")\n conn.commit()\n cur.execute(f\"\"\"UPDATE products SET quantity = {(data[0][2] - 1)} WHERE id ='{prod_id}'\"\"\")\n conn.commit()\n all_prods = cur.execute(\"SELECT * FROM cart\").fetchall()\n return True, \" Product Added To Cart Successfully \",all_prods\n\n\n if qry == \"remove\":\n\n cart_check = cur.execute(f\"\"\"SELECT * FROM cart WHERE id = '{prod_id}' \"\"\").fetchall()\n if len(cart_check) == 0:\n all_prods = cur.execute(\"SELECT * FROM cart\").fetchall()\n return True,\" Product Doesn't Exist \",all_prods\n elif len(cart_check) > 0:\n data = cur.execute(f\"\"\"SELECT * FROM products WHERE id = '{prod_id}'\"\"\").fetchall()\n cur.execute(f\"UPDATE products SET quantity = {(data[0][2]+cart_check[0][2])} WHERE id ='{prod_id}'\")\n conn.commit()\n cur.execute(f\"DELETE FROM cart WHERE id = '{prod_id}'\")\n conn.commit()\n all_prods = cur.execute(\"SELECT * FROM cart\").fetchall()\n return True,\" Product Deleted Successfully \",all_prods\n\n conn.close()\n\n\ndef get_cost():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n data = cur.execute(\"SELECT * FROM cart\").fetchall()\n cost = 0\n for i in data:\n cost = cost+i[3]\n return cost\n\n\ndef done_Drp():\n conn = sqlite3.connect('SuperMarket.db')\n cur = conn.cursor()\n cur.execute(\"DROP TABLE cart\")\n conn.commit()\n\n",
"step-ids": [
3,
6,
7,
9,
10
]
}
|
[
3,
6,
7,
9,
10
] |
from flask import Blueprint
views = Blueprint('views', __name__)
from . import routes
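The trailing "from . import routes" is placed after the Blueprint is created on purpose: routes.py can then import views back from this package and register its view functions on the already-existing blueprint without a circular-import error. A sketch of what such a routes.py might contain (its contents are not shown here and are assumed purely for illustration) is:

# routes.py (assumed contents, for illustration only)
from . import views

@views.route('/')
def index():
    return 'hello from the views blueprint'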
|
normal
|
{
"blob_id": "139ccdaf7acb2a2d74649f0c32217d1fe71a954a",
"index": 4800,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nviews = Blueprint('views', __name__)\n<mask token>\n",
"step-3": "from flask import Blueprint\nviews = Blueprint('views', __name__)\nfrom . import routes\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding: utf-8
"""
Knetik Platform API Documentation latest
This is the spec for the Knetik API. Use this in conjunction with the documentation found at https://knetikcloud.com.
OpenAPI spec version: latest
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class GamificationLeaderboardsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_leaderboard(self, context_type, context_id, **kwargs):
"""
Retrieves leaderboard details and paginated entries
The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. Sorting is based on the fields of LeaderboardEntryResource rather than the returned LeaderboardResource. <br><br><b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard(context_type, context_id, async=True)
>>> result = thread.get()
:param async bool
:param str context_type: The context type for the leaderboard (required)
:param str context_id: The context id for the leaderboard (required)
:param int size: The number of objects returned per page
:param int page: The number of the page returned, starting with 1
:param str order: A comma separated list of sorting requirements in priority order, each entry matching PROPERTY_NAME:[ASC|DESC]
:return: LeaderboardResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_leaderboard_with_http_info(context_type, context_id, **kwargs)
else:
(data) = self.get_leaderboard_with_http_info(context_type, context_id, **kwargs)
return data
def get_leaderboard_with_http_info(self, context_type, context_id, **kwargs):
"""
Retrieves leaderboard details and paginated entries
The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. Sorting is based on the fields of LeaderboardEntryResource rather than the returned LeaderboardResource. <br><br><b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard_with_http_info(context_type, context_id, async=True)
>>> result = thread.get()
:param async bool
:param str context_type: The context type for the leaderboard (required)
:param str context_id: The context id for the leaderboard (required)
:param int size: The number of objects returned per page
:param int page: The number of the page returned, starting with 1
:param str order: A comma separated list of sorting requirements in priority order, each entry matching PROPERTY_NAME:[ASC|DESC]
:return: LeaderboardResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_type', 'context_id', 'size', 'page', 'order']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_leaderboard" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_type' is set
if ('context_type' not in params) or (params['context_type'] is None):
raise ValueError("Missing the required parameter `context_type` when calling `get_leaderboard`")
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `get_leaderboard`")
collection_formats = {}
path_params = {}
if 'context_type' in params:
path_params['context_type'] = params['context_type']
if 'context_id' in params:
path_params['context_id'] = params['context_id']
query_params = []
if 'size' in params:
query_params.append(('size', params['size']))
if 'page' in params:
query_params.append(('page', params['page']))
if 'order' in params:
query_params.append(('order', params['order']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']
return self.api_client.call_api('/leaderboards/{context_type}/{context_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LeaderboardResource',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_leaderboard_rank(self, context_type, context_id, id, **kwargs):
"""
Retrieves a specific user entry with rank
The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. <br><br><b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard_rank(context_type, context_id, id, async=True)
>>> result = thread.get()
:param async bool
:param str context_type: The context type for the leaderboard (required)
:param str context_id: The context id for the leaderboard (required)
:param str id: The id of a user (required)
:return: LeaderboardEntryResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_leaderboard_rank_with_http_info(context_type, context_id, id, **kwargs)
else:
(data) = self.get_leaderboard_rank_with_http_info(context_type, context_id, id, **kwargs)
return data
def get_leaderboard_rank_with_http_info(self, context_type, context_id, id, **kwargs):
"""
Retrieves a specific user entry with rank
The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. <br><br><b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard_rank_with_http_info(context_type, context_id, id, async=True)
>>> result = thread.get()
:param async bool
:param str context_type: The context type for the leaderboard (required)
:param str context_id: The context id for the leaderboard (required)
:param str id: The id of a user (required)
:return: LeaderboardEntryResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_type', 'context_id', 'id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_leaderboard_rank" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_type' is set
if ('context_type' not in params) or (params['context_type'] is None):
raise ValueError("Missing the required parameter `context_type` when calling `get_leaderboard_rank`")
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `get_leaderboard_rank`")
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_leaderboard_rank`")
collection_formats = {}
path_params = {}
if 'context_type' in params:
path_params['context_type'] = params['context_type']
if 'context_id' in params:
path_params['context_id'] = params['context_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']
return self.api_client.call_api('/leaderboards/{context_type}/{context_id}/users/{id}/rank', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LeaderboardEntryResource',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_leaderboard_strategies(self, **kwargs):
"""
Get a list of available leaderboard strategy names
<b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard_strategies(async=True)
>>> result = thread.get()
:param async bool
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_leaderboard_strategies_with_http_info(**kwargs)
else:
(data) = self.get_leaderboard_strategies_with_http_info(**kwargs)
return data
def get_leaderboard_strategies_with_http_info(self, **kwargs):
"""
Get a list of available leaderboard strategy names
<b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard_strategies_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_leaderboard_strategies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']
return self.api_client.call_api('/leaderboards/strategies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[str]',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
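As a rough usage sketch, assuming calling code inside the same package and a default ApiClient that is already configured with the Knetik host and OAuth credentials, the three endpoints above could be exercised as follows; the context id, user id and sort property are invented values:

api = GamificationLeaderboardsApi()  # falls back to a default ApiClient(), as in __init__ above
board = api.get_leaderboard('activity', 'activity_1234', size=10, page=1, order='score:DESC')
entry = api.get_leaderboard_rank('activity', 'activity_1234', 'user_42')
strategies = api.get_leaderboard_strategies()
print(board, entry, strategies)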
|
normal
|
{
"blob_id": "05aec07b94f3363e07d8740b102262d817e08e71",
"index": 1253,
"step-1": "# coding: utf-8\n\n\"\"\"\n Knetik Platform API Documentation latest \n\n This is the spec for the Knetik API. Use this in conjunction with the documentation found at https://knetikcloud.com.\n\n OpenAPI spec version: latest \n Contact: [email protected]\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport sys\nimport os\nimport re\n\n# python 2 and python 3 compatibility library\nfrom six import iteritems\n\nfrom ..api_client import ApiClient\n\n\nclass GamificationLeaderboardsApi(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def get_leaderboard(self, context_type, context_id, **kwargs):\n \"\"\"\n Retrieves leaderboard details and paginated entries\n The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. Sorting is based on the fields of LeaderboardEntryResource rather than the returned LeaderboardResource. <br><br><b>Permissions Needed:</b> ANY\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async=True\n >>> thread = api.get_leaderboard(context_type, context_id, async=True)\n >>> result = thread.get()\n\n :param async bool\n :param str context_type: The context type for the leaderboard (required)\n :param str context_id: The context id for the leaderboard (required)\n :param int size: The number of objects returned per page\n :param int page: The number of the page returned, starting with 1\n :param str order: A comma separated list of sorting requirements in priority order, each entry matching PROPERTY_NAME:[ASC|DESC]\n :return: LeaderboardResource\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async'):\n return self.get_leaderboard_with_http_info(context_type, context_id, **kwargs)\n else:\n (data) = self.get_leaderboard_with_http_info(context_type, context_id, **kwargs)\n return data\n\n def get_leaderboard_with_http_info(self, context_type, context_id, **kwargs):\n \"\"\"\n Retrieves leaderboard details and paginated entries\n The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. Sorting is based on the fields of LeaderboardEntryResource rather than the returned LeaderboardResource. <br><br><b>Permissions Needed:</b> ANY\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async=True\n >>> thread = api.get_leaderboard_with_http_info(context_type, context_id, async=True)\n >>> result = thread.get()\n\n :param async bool\n :param str context_type: The context type for the leaderboard (required)\n :param str context_id: The context id for the leaderboard (required)\n :param int size: The number of objects returned per page\n :param int page: The number of the page returned, starting with 1\n :param str order: A comma separated list of sorting requirements in priority order, each entry matching PROPERTY_NAME:[ASC|DESC]\n :return: LeaderboardResource\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['context_type', 'context_id', 'size', 'page', 'order']\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_leaderboard\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'context_type' is set\n if ('context_type' not in params) or (params['context_type'] is None):\n raise ValueError(\"Missing the required parameter `context_type` when calling `get_leaderboard`\")\n # verify the required parameter 'context_id' is set\n if ('context_id' not in params) or (params['context_id'] is None):\n raise ValueError(\"Missing the required parameter `context_id` when calling `get_leaderboard`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'context_type' in params:\n path_params['context_type'] = params['context_type']\n if 'context_id' in params:\n path_params['context_id'] = params['context_id']\n\n query_params = []\n if 'size' in params:\n query_params.append(('size', params['size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n if 'order' in params:\n query_params.append(('order', params['order']))\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']\n\n return self.api_client.call_api('/leaderboards/{context_type}/{context_id}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='LeaderboardResource',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_leaderboard_rank(self, context_type, context_id, id, **kwargs):\n \"\"\"\n Retrieves a specific user entry with rank\n The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. <br><br><b>Permissions Needed:</b> ANY\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async=True\n >>> thread = api.get_leaderboard_rank(context_type, context_id, id, async=True)\n >>> result = thread.get()\n\n :param async bool\n :param str context_type: The context type for the leaderboard (required)\n :param str context_id: The context id for the leaderboard (required)\n :param str id: The id of a user (required)\n :return: LeaderboardEntryResource\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async'):\n return self.get_leaderboard_rank_with_http_info(context_type, context_id, id, **kwargs)\n else:\n (data) = self.get_leaderboard_rank_with_http_info(context_type, context_id, id, **kwargs)\n return data\n\n def get_leaderboard_rank_with_http_info(self, context_type, context_id, id, **kwargs):\n \"\"\"\n Retrieves a specific user entry with rank\n The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. <br><br><b>Permissions Needed:</b> ANY\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async=True\n >>> thread = api.get_leaderboard_rank_with_http_info(context_type, context_id, id, async=True)\n >>> result = thread.get()\n\n :param async bool\n :param str context_type: The context type for the leaderboard (required)\n :param str context_id: The context id for the leaderboard (required)\n :param str id: The id of a user (required)\n :return: LeaderboardEntryResource\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['context_type', 'context_id', 'id']\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_leaderboard_rank\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'context_type' is set\n if ('context_type' not in params) or (params['context_type'] is None):\n raise ValueError(\"Missing the required parameter `context_type` when calling `get_leaderboard_rank`\")\n # verify the required parameter 'context_id' is set\n if ('context_id' not in params) or (params['context_id'] is None):\n raise ValueError(\"Missing the required parameter `context_id` when calling `get_leaderboard_rank`\")\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_leaderboard_rank`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'context_type' in params:\n path_params['context_type'] = params['context_type']\n if 'context_id' in params:\n path_params['context_id'] = params['context_id']\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']\n\n return self.api_client.call_api('/leaderboards/{context_type}/{context_id}/users/{id}/rank', 'GET',\n 
path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='LeaderboardEntryResource',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_leaderboard_strategies(self, **kwargs):\n \"\"\"\n Get a list of available leaderboard strategy names\n <b>Permissions Needed:</b> ANY\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async=True\n >>> thread = api.get_leaderboard_strategies(async=True)\n >>> result = thread.get()\n\n :param async bool\n :return: list[str]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async'):\n return self.get_leaderboard_strategies_with_http_info(**kwargs)\n else:\n (data) = self.get_leaderboard_strategies_with_http_info(**kwargs)\n return data\n\n def get_leaderboard_strategies_with_http_info(self, **kwargs):\n \"\"\"\n Get a list of available leaderboard strategy names\n <b>Permissions Needed:</b> ANY\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async=True\n >>> thread = api.get_leaderboard_strategies_with_http_info(async=True)\n >>> result = thread.get()\n\n :param async bool\n :return: list[str]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = []\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_leaderboard_strategies\" % key\n )\n params[key] = val\n del params['kwargs']\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']\n\n return self.api_client.call_api('/leaderboards/strategies', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[str]',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `feat` package."""
from feat.detector import Detector
from feat.data import Fex
from feat.utils import get_resource_path
from .utils import get_test_data_path
import pandas as pd
import feat
import os
import wget
# def test_models():
# print("Downloading FEX emotion model.")
# fex_emotion_model = "https://github.com/cosanlab/feat/releases/download/v0.1/fer_aug_model.h5"
# wget.download(fex_emotion_model, get_resource_path())
# if os.path.exists(os.path.join(get_resource_path(), "fer_aug_model.h5")):
# print("\nFEX emotion model downloaded successfully.\n")
# else:
# print("Something went wrong. Model not found in directory.")
# print("Downloading landmark detection model.")
# lbfmodel = "https://github.com/cosanlab/feat/releases/download/v0.1/lbfmodel.yaml"
# wget.download(lbfmodel, get_resource_path())
# if os.path.exists(os.path.join(get_resource_path(), "lbfmodel.yaml")):
# print("\nLandmark detection model downloaded successfully.\n")
# else:
# print("Something went wrong. Model not found in directory.")
# emotion_model = "fer_aug_model.h5"
# emotion_model_path = os.path.join(get_resource_path(), emotion_model)
# print("PATH TO EMOTION MODEL",emotion_model_path)
# assert os.path.exists(emotion_model_path)==True
# landmark_model = "lbfmodel.yaml"
# landmark_model_path = os.path.join(get_resource_path(), landmark_model)
# assert os.path.exists(landmark_model_path)==True
def test_detector():
detector = Detector(n_jobs=1)
assert detector['n_jobs']==1
assert type(detector)==Detector
# Test detect image
inputFname = os.path.join(get_test_data_path(), "input.jpg")
out = detector.detect_image(inputFname = inputFname)
assert type(out) == Fex
assert len(out) == 1
assert out.happiness.values[0] > 0
outputFname = os.path.join(get_test_data_path(), "output.csv")
out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values[0] > 0
# Test detect video
inputFname = os.path.join(get_test_data_path(), "input.mp4")
out = detector.detect_video(inputFname=inputFname)
assert len(out)==72
outputFname = os.path.join(get_test_data_path(), "output.csv")
out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values.max() > 0
|
normal
|
{
"blob_id": "753bdbf080e7a8652c39e40beeae51f74382d606",
"index": 1300,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_detector():\n detector = Detector(n_jobs=1)\n assert detector['n_jobs'] == 1\n assert type(detector) == Detector\n inputFname = os.path.join(get_test_data_path(), 'input.jpg')\n out = detector.detect_image(inputFname=inputFname)\n assert type(out) == Fex\n assert len(out) == 1\n assert out.happiness.values[0] > 0\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values[0] > 0\n inputFname = os.path.join(get_test_data_path(), 'input.mp4')\n out = detector.detect_video(inputFname=inputFname)\n assert len(out) == 72\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values.max() > 0\n",
"step-3": "<mask token>\nfrom feat.detector import Detector\nfrom feat.data import Fex\nfrom feat.utils import get_resource_path\nfrom .utils import get_test_data_path\nimport pandas as pd\nimport feat\nimport os\nimport wget\n\n\ndef test_detector():\n detector = Detector(n_jobs=1)\n assert detector['n_jobs'] == 1\n assert type(detector) == Detector\n inputFname = os.path.join(get_test_data_path(), 'input.jpg')\n out = detector.detect_image(inputFname=inputFname)\n assert type(out) == Fex\n assert len(out) == 1\n assert out.happiness.values[0] > 0\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values[0] > 0\n inputFname = os.path.join(get_test_data_path(), 'input.mp4')\n out = detector.detect_video(inputFname=inputFname)\n assert len(out) == 72\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values.max() > 0\n",
"step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests for `feat` package.\"\"\"\n\nfrom feat.detector import Detector\nfrom feat.data import Fex\nfrom feat.utils import get_resource_path\nfrom .utils import get_test_data_path\nimport pandas as pd\nimport feat\nimport os\nimport wget\n\n# def test_models():\n# print(\"Downloading FEX emotion model.\")\n# fex_emotion_model = \"https://github.com/cosanlab/feat/releases/download/v0.1/fer_aug_model.h5\"\n# wget.download(fex_emotion_model, get_resource_path())\n\n# if os.path.exists(os.path.join(get_resource_path(), \"fer_aug_model.h5\")):\n# print(\"\\nFEX emotion model downloaded successfully.\\n\")\n# else:\n# print(\"Something went wrong. Model not found in directory.\")\n\n# print(\"Downloading landmark detection model.\")\n# lbfmodel = \"https://github.com/cosanlab/feat/releases/download/v0.1/lbfmodel.yaml\"\n# wget.download(lbfmodel, get_resource_path())\n\n# if os.path.exists(os.path.join(get_resource_path(), \"lbfmodel.yaml\")):\n# print(\"\\nLandmark detection model downloaded successfully.\\n\")\n# else:\n# print(\"Something went wrong. Model not found in directory.\")\n\n# emotion_model = \"fer_aug_model.h5\"\n# emotion_model_path = os.path.join(get_resource_path(), emotion_model)\n# print(\"PATH TO EMOTION MODEL\",emotion_model_path)\n# assert os.path.exists(emotion_model_path)==True\n\n# landmark_model = \"lbfmodel.yaml\"\n# landmark_model_path = os.path.join(get_resource_path(), landmark_model)\n# assert os.path.exists(landmark_model_path)==True\n\ndef test_detector():\n detector = Detector(n_jobs=1)\n assert detector['n_jobs']==1\n assert type(detector)==Detector\n\n # Test detect image\n inputFname = os.path.join(get_test_data_path(), \"input.jpg\")\n out = detector.detect_image(inputFname = inputFname)\n assert type(out) == Fex\n assert len(out) == 1\n assert out.happiness.values[0] > 0 \n\n outputFname = os.path.join(get_test_data_path(), \"output.csv\")\n out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values[0] > 0 \n\n # Test detect video\n inputFname = os.path.join(get_test_data_path(), \"input.mp4\")\n out = detector.detect_video(inputFname=inputFname)\n assert len(out)==72\n\n outputFname = os.path.join(get_test_data_path(), \"output.csv\")\n out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values.max() > 0",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
import numpy as np
import math
from sklearn.datasets import load_digits, load_iris, load_boston, load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import pairwise_distances
class KMeans():
def __init__(self, k = 5, max_iters = 100, random_seed = 42):
self.k = k
self.max_iters = max_iters
# Set random seed
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
    def _euclidean_distance(self, x):
        # Squared euclidean distance from one point to every centroid (not used by fit/predict,
        # which rely on sklearn's pairwise_distances instead)
        return np.sum((x - self.centroids) ** 2, axis=1)
def _assign_clusters(self, X):
cluster_distances = pairwise_distances(X, self.centroids, metric = 'euclidean')
cluster_labels = np.argmin(cluster_distances, axis = 1)
return cluster_labels
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
# Get all data points of a cluster
X_cluster = X[cluster_labels == cluster]
# Update the cluster's centroid
cluster_mean = np.mean(X_cluster, axis = 0)
self.centroids[cluster] = cluster_mean
def fit(self, X):
# Initialise random centroids
self._initialise_centroids(X)
iterations = 0
while iterations <= self.max_iters:
iterations += 1
# Assign clusters to data
cluster_labels = self._assign_clusters(X)
# Update centroids
self._update_centroids(X, cluster_labels)
def predict(self, X):
return self._assign_clusters(X)
# Load data
data = load_breast_cancer()
X, y = data.data, data.target
X_train, X_test = train_test_split(X, test_size = 0.1)
# Fit model
model = KMeans(k = 5)
model.fit(X_train)
# Predict
y_pred = model.predict(X_test)
print(y_pred)
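A quick sanity check on the fit above, not part of the original script and purely illustrative, is to compute the inertia of the training data against the learned model.centroids, i.e. the sum of squared distances from each point to its nearest centroid (this reuses the numpy and pairwise_distances imports already in the script):

# Illustrative inertia check for the fitted model above
distances = pairwise_distances(X_train, model.centroids, metric='euclidean')
inertia = np.sum(np.min(distances, axis=1) ** 2)
print('inertia over', X_train.shape[0], 'training points:', inertia)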
|
normal
|
{
"blob_id": "d267c8cbe51fb1bacc9404a1385f1daa4a0db7f2",
"index": 884,
"step-1": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n <mask token>\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n <mask token>\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n <mask token>\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n self._initialise_centroids(X)\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n cluster_labels = self._assign_clusters(X)\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n\n def _assign_clusters(self, X):\n cluster_distances = pairwise_distances(X, self.centroids, metric=\n 'euclidean')\n cluster_labels = np.argmin(cluster_distances, axis=1)\n return cluster_labels\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n self._initialise_centroids(X)\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n cluster_labels = self._assign_clusters(X)\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n\n def _assign_clusters(self, X):\n cluster_distances = pairwise_distances(X, self.centroids, metric=\n 'euclidean')\n cluster_labels = np.argmin(cluster_distances, axis=1)\n return cluster_labels\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n self._initialise_centroids(X)\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n cluster_labels = self._assign_clusters(X)\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\ndata = load_breast_cancer()\nX, y = data.data, data.target\nX_train, X_test = train_test_split(X, test_size=0.1)\nmodel = KMeans(k=5)\nmodel.fit(X_train)\ny_pred = model.predict(X_test)\nprint(y_pred)\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport math\nfrom sklearn.datasets import load_digits, load_iris, load_boston, load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import pairwise_distances\n\n\nclass KMeans():\n\n def __init__(self, k = 5, max_iters = 100, random_seed = 42):\n self.k = k\n self.max_iters = max_iters\n\n # Set random seed\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids)**2, axis = 1)\n\n def _assign_clusters(self, X):\n cluster_distances = pairwise_distances(X, self.centroids, metric = 'euclidean')\n cluster_labels = np.argmin(cluster_distances, axis = 1)\n return cluster_labels\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n\n # Get all data points of a cluster\n X_cluster = X[cluster_labels == cluster]\n\n # Update the cluster's centroid\n cluster_mean = np.mean(X_cluster, axis = 0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n\n # Initialise random centroids\n self._initialise_centroids(X)\n\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n\n # Assign clusters to data\n cluster_labels = self._assign_clusters(X)\n\n # Update centroids\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n# Load data\ndata = load_breast_cancer()\nX, y = data.data, data.target\nX_train, X_test = train_test_split(X, test_size = 0.1)\n\n# Fit model\nmodel = KMeans(k = 5)\nmodel.fit(X_train)\n\n# Predict\ny_pred = model.predict(X_test)\nprint(y_pred)\n",
"step-ids": [
6,
7,
8,
10,
12
]
}
|
[
6,
7,
8,
10,
12
] |
from utils import create_data_lists
if __name__ == '__main__':
create_data_lists(ICDAR_path=
'../ICDAR_Dataset/0325updated.task1train(626p)', output_folder=
'../ICDAR_Dataset/0325updated.task1train(626p)')
|
normal
|
{
"blob_id": "6334a8a052d72b0f13395b301bd5a766acf4399b",
"index": 3437,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n create_data_lists(ICDAR_path=\n '../ICDAR_Dataset/0325updated.task1train(626p)', output_folder=\n '../ICDAR_Dataset/0325updated.task1train(626p)')\n",
"step-3": "from utils import create_data_lists\nif __name__ == '__main__':\n create_data_lists(ICDAR_path=\n '../ICDAR_Dataset/0325updated.task1train(626p)', output_folder=\n '../ICDAR_Dataset/0325updated.task1train(626p)')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import requests
import os
from jpmesh import parse_mesh_code
from tqdm import tqdm
url_login='https://platform.openquake.org/account/login/'
client = requests.session()
client.get(url_login)
# Identification for openquake platform
login_data = {'username':'###','password':'###'}
r1=client.post(url_login,data=login_data)
def scrap_expo():
dir_names=os.listdir('Site Effects/')
for name in dir_names:
fcode = name.split('-')[-1]
mesh = parse_mesh_code(fcode)
sw = mesh.south_west
ne = sw+ mesh.size
lng1 = str(sw.lon.degree)
lng2 = str(ne.lon.degree)
lat1 = str(ne.lat.degree)
lat2 = str(sw.lat.degree)
for occ in ['residential', 'non-residential']:
url_add_run='http://platform.openquake.org/exposure/export_exposure?output_type=csv&sr_id=113&occupancy_filter='+occ+'&lng1='+lng1+'&lat1='+lat1+'&lng2='+lng2+'&lat2='+lat2
output = open('Exposure/'+occ+'/'+fcode+'.csv', 'wb')
print(fcode)
r2=client.get(url_add_run, stream=True)
for data in tqdm(r2.iter_content()):
output.write(data)
output.close()
print(r2.status_code)
def scrap_consequences():
eq_code = str(134)
url_add_run = 'https://platform.openquake.org/ecd/eventoverview/' + eq_code + '?&zoomtoextent=True&f_b=False&f_c=False&f_i=False&f_p=False&f_s=False&all=True'
file_name = 'Consequences/' + eq_code + '.txt'
output = open(file_name, 'wb')
    print(client)
r2 = client.get(url_add_run, stream=True)
    print(r2.status_code)
for data in tqdm(r2.iter_content()):
        print(data)
output.write(data)
output.close()
data = open(file_name).readlines()
    print(data)
# scrap_consequences()
|
normal
|
{
"blob_id": "63a40282f16a7f27c118594f1a9468749682594f",
"index": 420,
"step-1": "import requests\nimport os\nfrom jpmesh import parse_mesh_code\nfrom tqdm import tqdm\n\nurl_login='https://platform.openquake.org/account/login/'\nclient = requests.session()\nclient.get(url_login)\n# Identification for openquake platform\nlogin_data = {'username':'###','password':'###'}\nr1=client.post(url_login,data=login_data)\ndef scrap_expo():\n dir_names=os.listdir('Site Effects/')\n for name in dir_names:\n fcode = name.split('-')[-1]\n mesh = parse_mesh_code(fcode)\n sw = mesh.south_west\n ne = sw+ mesh.size\n lng1 = str(sw.lon.degree)\n lng2 = str(ne.lon.degree)\n lat1 = str(ne.lat.degree)\n lat2 = str(sw.lat.degree)\n for occ in ['residential', 'non-residential']:\n url_add_run='http://platform.openquake.org/exposure/export_exposure?output_type=csv&sr_id=113&occupancy_filter='+occ+'&lng1='+lng1+'&lat1='+lat1+'&lng2='+lng2+'&lat2='+lat2\n output = open('Exposure/'+occ+'/'+fcode+'.csv', 'wb')\n print(fcode)\n r2=client.get(url_add_run, stream=True)\n for data in tqdm(r2.iter_content()):\n output.write(data)\n output.close()\n print(r2.status_code)\n\n\ndef scrap_consequences():\n eq_code = str(134)\n url_add_run = 'https://platform.openquake.org/ecd/eventoverview/' + eq_code + '?&zoomtoextent=True&f_b=False&f_c=False&f_i=False&f_p=False&f_s=False&all=True'\n file_name = 'Consequences/' + eq_code + '.txt'\n output = open(file_name, 'wb')\n print client\n r2 = client.get(url_add_run, stream=True)\n print r2.status_code\n for data in tqdm(r2.iter_content()):\n print data\n output.write(data)\n output.close()\n data = open(file_name).readlines()\n print data.split('')\n\n# scrap_consequences()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# test CurlypivSetup
"""
Notes about program
"""
# 1.0 import modules
import numpy as np
from skimage import io
import glob
from os.path import join
import matplotlib.pyplot as plt
from curlypiv.utils.calibrateCamera import measureIlluminationDistributionXY, calculate_depth_of_correlation, calculate_darkfield, plot_field_depth
# 2.0 define class
class CurlypivTestSetup(object):
def __init__(self, name, chip, optics, fluid_handling_system):
"""
All the "settings" used in the experimental setup:
1. chip (class)
1.1 solid material (class) (e.g. SiO2)
1.1.1 transparency
1.1.2 fluorescence spectral characteristics
1.1.3 surface charge density
1.1.4 %/vol (here would be 100%)
1.2 channel (class)
1.2.1 height
1.2.2 width
1.2.3 length
1.3 reservoir volume
1.4 electrode configuration (class)
1.4.1 material
1.4.2 separation distance
1.4.3 distance to channel entrance
2. test solution (class)
2.1 liquid material (class) (e.g. electrolyte)
2.1.1 chemical species (e.g. KCl)
2.1.2 concentration
2.1.3 measurable quantity (class) (e.g. conductivity)
2.1.3.1 theoretical
2.1.3.2 measured
2.1.3.2.1 measured conductivity
2.1.3.2.1 measured date
2.1.4 measurable quantity (class) (e.g. pH)
2.1.4.1 theoretical
2.1.4.2 measured
2.1.4.2.1 measured conductivity
2.1.4.2.1 measured date
2.2 fluorescent particles (class)
2.2.0 diameter
2.2.. measurable quantity (class) (e.g. zeta)
2.2.. measurable quantity (class) (e.g electrophoretic mobility)
2.2.. spectral characteristics
2.2.1 solid materials (class) (e.g. polystyrene)
2.2.1.1 %/vol
2.2.2 liquid materials (class) (e.g. DI water)
2.2.3 liquid materials (Class) (e.g. sodium azide)
2.2.3.1 conductivity
2.2.3.2 concentration
3. illumination (class)
3.1 source (class)
3.1.1 type (e.g. Hg lamp)
3.1.2 intensity
3.1.3 emission spectra
3.2 optical element (class) (e.g. excitation filter)
3.3 optical element (class) (e.g. emission filter)
3.4 optical element (class) (e.g. dichroic mirror)
4. microscope
4.1 type (Olympus iX 73)
4.2 objective (class)
                4.2.1 numerical aperture (e.g. 0.3)
4.2.2 magnification (e.g. 20X)
4.2.3 field of view (e.g. 500 x 500 um)
                4.2.4 depth of focus (e.g. 4.1 microns)
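
        A minimal usage sketch (with assumed, illustrative values) is appended at the end of this file.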
"""
self.name = name
self.chip = chip
self.optics = optics
self.fluid_handling_system = fluid_handling_system
class chip(object):
def __init__(self, channel=None, bpe=None, reservoir=None, electrodes=None, fluid_handling_system=None,
material_in_optical_path=None, thickness_in_optical_path=None):
"""
Everything important about the chip
"""
#self.material = material # deprecated so the channel class can hold this information
self.channel = channel
self.bpe = bpe
self.electrodes = electrodes
self.fluid_handling_system = fluid_handling_system
self.material_in_optical_path = material_in_optical_path
self.thickness_in_optical_path = thickness_in_optical_path
class channel(object):
def __init__(self, length=None, width=None, height=None,
material_bottom_wall_surface=None, material_top_wall_surface=None, material_fluid=None):
"""
        Everything important about the channel
"""
self.length = length
self.width = width
self.height = height
self.material_bottom_wall_surface = material_bottom_wall_surface # material should only hold relevant electrokinetic data
self.material_top_wall_surface = material_top_wall_surface # material should only hold relevant elect
self.material_fluid = material_fluid # could be a mixture of liquid materials + fluorescent particles
class bpe(object):
def __init__(self, length=None, width=None, height=None, material=None, adhesion_material=None,
dielectric_coating=None):
"""
        Everything important about the bipolar electrode (BPE)
"""
self.length = length
self.linspace_x = np.linspace(-length/2, length/2, num=100)
self.width = width
self.height = height
self.material = material
if self.material.thickness:
if self.material.thickness != self.height:
raise ValueError("BPE height must equal BPE material thickness")
# adhesion layer used for thin metal film BPE
self.adhesion_material = adhesion_material
# dielectric coating on top of BPE
if dielectric_coating:
self.dielectric_coating = dielectric_coating
else:
self.dielectric_coating = material_solid(name='no_dielectric', permittivity=1, thickness=1e-12, Ka=6, Kb=2, reaction_site_density=5)
class optics(object):
def __init__(self, microscope, fluorescent_particles=None, calibration_grid=None, pixel_to_micron_scaling=None):
self.microscope = microscope
self.fluorescent_particles = fluorescent_particles
self.calibration_grid = calibration_grid
if self.microscope.objective.magnification == 50:
self.pixel_to_micron_scaling = 0.60 # (microns/pixels)
elif self.microscope.objective.magnification == 20:
self.pixel_to_micron_scaling = 1.55 # (microns/pixels)
else:
raise ValueError("Unable to determine microns/pixels scaling because objective magnification not 50X or 20X")
if pixel_to_micron_scaling is not None:
print("Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.".format(self.pixel_to_micron_scaling, self.microscope.objective.magnification))
"""
--- I THINK THIS SECTION IS DEPRECATED ---
Notes: deprecated because calculating the scaling factor or entering it manually is too confusing. I have
permanently figured out the correct scaling.
if microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is None:
self.pixel_to_micron = microscope.objective.pixel_to_micron
elif microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is not None and microscope.objective.pixel_to_micron != pixel_to_micron_scaling:
raise ValueError("Conflicting scaling factors: microscope.objective={}, optics={}".format(microscope.objective.pixel_to_micron, pixel_to_micron_scaling))
elif microscope.objective.pixel_to_micron is None and pixel_to_micron_scaling is not None:
self.pixel_to_micron = pixel_to_micron_scaling
"""
class illumination(object):
def __init__(self, basePath=None, source=None, excitation=None, emission=None, dichroic=None, illumination_distribution=None,
calculate_illumination_distribution=False,
illumPath=None, illumSavePath=None, illumSaveName=None, showIllumPlot=False, save_txt=False, save_plot=False, save_image=False):
"""
details about the optical setup
:param source:
:param excitation:
:param emission:
:param dichroic:
"""
self.basePath = basePath # this should come from CurlypivTestCollection
self.source = source
self.excitation_wavelength = excitation
self.emission_wavelength = emission
self.dichroic = dichroic
if illumination_distribution is not None:
self.illumination_distribution = illumination_distribution
elif illumPath is not None:
flatfield = io.imread(illumPath, plugin='tifffile')
if len(np.shape(flatfield)) > 2:
flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)), dtype='uint16')
self.illumination_distribution = flatfield
elif calculate_illumination_distribution and illumination_distribution is None:
self.illumination_distribution = measureIlluminationDistributionXY(basePath=self.basePath, illumPath=illumPath,
show_image=showIllumPlot, save_image=save_image, save_img_type='.tif',
save_txt=save_txt, show_plot=showIllumPlot, save_plot=save_plot,
savePath=illumSavePath, savename=illumSaveName)
else:
self.illumination_distribution = illumination_distribution
self.flatfield = self.illumination_distribution
if self.flatfield is not None:
self.flatfield_mean = np.mean(self.flatfield)
self.flatfield_std = np.std(self.flatfield)
class darkfield(object):
def __init__(self, basePath, darkframePath=None, flip_image_across_axis=None, show_image=False, save_image=False, save_img_type='.tif',
savePath=None, savename=None, save_plot=False):
"""
details about dark field image
"""
self.basePath = basePath
img, mean, std = calculate_darkfield(self.basePath, darkframePath=darkframePath, flip_image_axes=flip_image_across_axis, show_image=show_image, save_image=save_image, save_img_type=save_img_type,
savePath=savePath, savename=savename, save_plot=save_plot)
self.img = img
self.mean = mean
self.std = std
class microscope(object):
def __init__(self, type, objective, illumination, ccd):
"""
        describes the microscope setup
:param type:
:param objective:
"""
self.type = type # e.g. Olympus iX73
self.objective = objective
self.illumination = illumination
self.ccd = ccd
class ccd(object):
def __init__(self, exposure_time, img_acq_rate, EM_gain, name='iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=None,
vertical_pixel_shift_speed=0.5e-6, horizontal_pixel_shift_speed=0.1e-6, horizontal_pixel_shift_rate_bits=14,
frame_transfer=True, crop_mode=False, acquisition_mode='kinetic', triggering='internal', readout_mode='image',
pixels=512, pixel_size=16e-6):
"""
describe the CCD class
"""
self.name = name
self.img_acq_type = img_acq_type
self.exposure_time = exposure_time
self.img_acq_rate = img_acq_rate
self.em_gain = EM_gain
self.darkfield = darkfield
self.binning = binning
# supporting camera acquisition settings
self.vpss = vertical_pixel_shift_speed
self.hpss = horizontal_pixel_shift_speed
self.hpss_bits = horizontal_pixel_shift_rate_bits
self.frame_transfer = frame_transfer
self.crop_mode = crop_mode
self.acquisition_mode = acquisition_mode
self.triggering = triggering
self.readout_mode = readout_mode
if isinstance(pixels, int):
self.pixels = (pixels, pixels)
else:
self.pixels = pixels
self.pixel_size = pixel_size
self.image_area = (self.pixels[0]*pixel_size, self.pixels[1]*pixel_size)
class objective(object):
def __init__(self, fluoro_particle, name=None, numerical_aperture=None, magnification=None, basePath=None, channel_height=None, illumination=None, wavelength=None, microgrid=None, auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None, field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):
"""
Objectives in the Pennathur Lab Dark Room uScope:
20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]
magnification: 20
numerical_aperture: 0.45
field_number: 26.5
working distance: 7.4 - 8.3 mm
transmittance: 90% @ 425 - 670 nm
correction collar: 0 - 1.2 mm
microns per pixel: 1.55
50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]
magnification: 50
numerical aperture: 0.7
field number: 26.5
working distance: 2.2 - 3 mm
transmittance: 90% @ 425 - 650 nm
correction collar: 0 - 1.2 mm
microns per pixel: 0.6
Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428
"""
# if name is entered, then pull all the terms directly
self.name = name
if name == 'LCPLFLN20xLCD':
self.magnification = 20
self.numerical_aperture = 0.45
self.field_number = 26.5
self.transmittance = 0.9
self.pixel_to_micron = 1.55
elif name == 'LCPLFLN50xLCD':
self.magnification = 50
self.numerical_aperture = 0.7
self.field_number = 26.5
self.transmittance = 0.9
self.pixel_to_micron = 0.6
else:
self.numerical_aperture = numerical_aperture
self.magnification = magnification
self.field_number = field_number
# general terms
self._illumination = illumination
if self._illumination is not None:
self._wavelength = self._illumination.emission_wavelength
elif wavelength is not None:
self._wavelength = wavelength
else:
raise ValueError("A wavelength is required via the <illumination> class or <wavelength> input parameter")
self._pd = fluoro_particle.diameter
self._n0 = n0
self.calculate_depth_of_field()
self.calculate_depth_of_correlation()
if field_number:
self.calculate_field_of_view()
if show_depth_plot or save_depth_plot:
plot_field_depth(depth_of_corr=self.depth_of_correlation, depth_of_field=self.depth_of_field, show_depth_plot=show_depth_plot, save_depth_plot=save_depth_plot,
basePath=basePath, savename=None, channel_height=channel_height, objective=self.magnification)
# grids and scaling factors
if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:
self.microgrid = microgrid
self.calculate_pixel_to_micron_scaling()
def calculate_field_of_view(self):
self.field_of_view = self.field_number / self.magnification
def calculate_depth_of_field(self, e=16e-6, n=1):
"""
e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)
"""
self.depth_of_field = self._wavelength*n/self.numerical_aperture**2+e*n/(self.magnification*self.numerical_aperture)
def calculate_depth_of_correlation(self, eps=0.01):
# step 0: define
n = self._n0
dp = self._pd
NA = self.numerical_aperture
M = self.magnification
lmbda = self._wavelength
# step 1: calculate the depth of correlation for the optical setup
depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA, dp=dp, n=n, lmbda=lmbda, eps=eps)
self.depth_of_correlation = depth_of_correlation
def calculate_pixel_to_micron_scaling(self):
if self.microgrid is None:
raise ValueError("Need objective.microgrid property in order to calculate scaling factor")
# script to calculate scaling factor from grid
# would go here
@property
def NA(self):
return self.numerical_aperture
@property
def M(self):
return self.magnification
class microgrid(object):
def __init__(self, gridPath=None, center_to_center_spacing=None, feature_width=None, grid_type='grid', show_grid=False):
"""
this class holds images for the microgrid and performs pixel to micron scaling calculations
"""
if gridPath is not None:
self.gridPath = gridPath
self.spacing = center_to_center_spacing
self.width = feature_width
self.grid_type = grid_type
# find files in directory
file_list = glob.glob(join(self.gridPath, 'grid*.tif'))
if len(file_list) < 1:
raise ValueError("No grid*.tif files found in {}".format(self.gridPath))
img_grid = np.zeros(shape=(512,512))
for f in file_list:
img = io.imread(f, plugin='tifffile')
if len(np.shape(img)) > 2:
img = np.mean(img, axis=0)
img_grid += img
img_grid = img_grid / len(file_list)
self.img_grid = img_grid
if show_grid is True:
fig, ax = plt.subplots()
ax.imshow(img_grid, cmap='gray')
ax.set_xlabel('pixels')
ax.set_ylabel('pixels')
plt.title('grid: 10 um Lines; 50 um Spacing')
plt.show()
class fluorescent_particles(object):
def __init__(self, name=None, materials=None,diameter=None,fluorescence_spectra=None, concentration=None,
electrophoretic_mobility=None, zeta=None):
"""
        the details of the fluorescent particles used
:param materials:
:param diameter:
:param fluorescence_spectra:
:param concentration:
:param electrophoretic_mobility:
:param zeta:
"""
self.name = name
self.materials=materials
self.concentration=concentration
self.electrophoretic_mobility=electrophoretic_mobility
self.zeta=zeta
self.diameter=diameter
if diameter:
k_b = 1.3806e-23
T=298
mu=0.001
self.diffusivity = k_b*T/(6*np.pi*mu*diameter/2)
self.fluorescence_spectra=fluorescence_spectra
class reservoir(object):
def __init__(self, diameter, height, height_of_reservoir=None, material=None):
"""
        describes the fluid reservoir geometry
        :param diameter:
        :param height:
"""
g = 9.81 # m/s**2
self.material = material
self.diameter = diameter
self.height = height
        self.volume = np.pi*self.diameter**2/4*self.height  # cylindrical volume (cross-sectional area x height)
self.height_of_reservoir = height_of_reservoir
if material and height_of_reservoir:
self.hydrostatic_pressure = material.density*g*self.height_of_reservoir
class fluid_handling_system(object):
def __init__(self, fluid_reservoir=None, all_tubing=None, onchip_reservoir=None):
"""
describes the fluid handling system
"""
self.fluid_reservoir=fluid_reservoir
self.all_tubing = all_tubing
self.onchip_reservoir = onchip_reservoir
class tubing(object):
def __init__(self, inner_diameter=None, length=None, material=None):
"""
describes each segment of tubing
"""
self.inner_diameter = inner_diameter
self.length = length
self.material = material
class optical_element(object):
def __init__(self, passing_wavelengths=None, reflectivity=None):
"""
this class describes the optical characteristics of any material or element
        :param passing_wavelengths:
        :param reflectivity:
"""
self.passing_wavelengths=passing_wavelengths
self.reflectivity=reflectivity
class measurable_quantity(object):
def __init__(self, reference_value=None, measured_value=None):
"""
what value was measured and when
"""
self.reference_value = reference_value
self.measured_value = measured_value
class measurement(object):
def __init__(self, value=None, date=None):
"""
Object for storing measurements
:param value:
:param date:
"""
self.value = value
self.date = date
class electrode_configuration(object):
def __init__(self, material=None, length=None, entrance_length=None):
"""
Object for holding electrode configuration details
:param material:
:param length:
:param entrance_length:
"""
self.material = material
self.length = length
self.entrance_length = entrance_length
class material_solid(object):
def __init__(self, name=None, zeta=None, concentration=None, index_of_refraction=None, transparency=None, fluorescence_spectra=None,
permittivity=None, conductivity=None, thickness=None, youngs_modulus=None, poissons_ratio=None,
density=None, dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=None, width=None, length=None):
"""
everything about a material
:param transparency:
:param fluorescence_spectra:
:param zeta:
"""
# identity
self.name = name
# geometry
self.length = length
self.width = width
self.thickness = thickness
# mechanical
self.density = density
self.concentration = concentration # For a solid, this is % by volume.
self.youngs_modulus = youngs_modulus
self.poissons_ratio = poissons_ratio
# optical
self.index_of_refraction = index_of_refraction
self.fluorescence_spectra = fluorescence_spectra
self.transparency = transparency
if self.transparency:
            self.reflectivity = 1 - self.transparency  # assumes negligible absorption
# electrochemical
self.conductivity = conductivity
if permittivity:
self.permittivity = permittivity
self.zeta = zeta
self.dielectric_strength = dielectric_strength
if reaction_site_density:
self.reaction_site_density = reaction_site_density*1e18 # (#/nm2) surface density of reaction sites: accepts nm2 and converts to m2 (see Squires)
self.Ka = Ka # reaction equilibrium constant - upper bound
self.Kb = Kb # reaction equilibrium constant - lower bound
class material_liquid(object):
def __init__(self, name=None, species=None, concentration=None, conductivity=None, pH=None, density=None, viscosity=None,
permittivity=None, temperature=None, valence=1.0):
"""
everything about a liquid
:param species:
:param concentration:
:param conductivity:
:param pH:
"""
# identity
self.name = name
# electro/chemical
self.species = species
self.concentration = concentration # (mmol) = (mmol/L) = (mol/m3)
self.conductivity = conductivity
if permittivity:
self.permittivity = permittivity
if pH:
self.pH = pH
            self.c_H = 10**-pH * 1e3  # (mmol/L) = (mol/m3); concentration of hydrogen ions (H+)
self.valence = valence
# mechanical
self.density = density
self.viscosity = viscosity
self.temperature = temperature
self.diffusivity = 2e-9 # (m^2/s) Diffusivity of KCl in DI water [Soni]
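

# --- Minimal usage sketch (not part of the original module) ---
# The block below is an illustrative, hedged example of how the classes above
# could be composed into a CurlypivTestSetup, mirroring the hierarchy described
# in the CurlypivTestSetup docstring. Every numeric value (geometry, wavelength,
# camera setting, material property) is an assumed placeholder rather than a
# measured quantity, and the block only runs when this file is executed directly.
if __name__ == '__main__':
    # materials (placeholder properties)
    sio2 = material_solid(name='SiO2', transparency=0.92, index_of_refraction=1.46, zeta=-0.08)
    kcl = material_liquid(name='KCl', species='KCl', concentration=0.1, conductivity=18.6e-4,
                          pH=6.3, density=1000, viscosity=0.001, temperature=298)
    particles = fluorescent_particles(name='FluoSpheres', diameter=500e-9, concentration=2e-5,
                                      electrophoretic_mobility=-3.7e-8, zeta=-0.042)

    # chip: channel + reservoir + electrodes + fluid handling
    test_channel = channel(length=25e-3, width=500e-6, height=20e-6,
                           material_bottom_wall_surface=sio2, material_top_wall_surface=sio2,
                           material_fluid=kcl)
    test_reservoir = reservoir(diameter=2e-3, height=2e-3, height_of_reservoir=5e-3, material=kcl)
    fhs = fluid_handling_system(fluid_reservoir=test_reservoir)
    electrode_config = electrode_configuration(material='Pt', length=25e-3, entrance_length=1e-3)
    test_chip = chip(channel=test_channel, electrodes=electrode_config, fluid_handling_system=fhs,
                     material_in_optical_path=sio2, thickness_in_optical_path=500e-6)

    # optics: illumination + CCD + objective -> microscope -> optics
    illum = illumination(source='Hg lamp', excitation=490e-9, emission=525e-9)
    camera = ccd(exposure_time=0.05, img_acq_rate=20, EM_gain=25)
    lens = objective(fluoro_particle=particles, name='LCPLFLN20xLCD', illumination=illum,
                     channel_height=20e-6)
    scope = microscope(type='Olympus iX73', objective=lens, illumination=illum, ccd=camera)
    setup_optics = optics(microscope=scope, fluorescent_particles=particles)

    # assemble the full test setup and report the derived optical depths
    test_setup = CurlypivTestSetup(name='example_setup', chip=test_chip, optics=setup_optics,
                                   fluid_handling_system=fhs)
    print(test_setup.name, lens.depth_of_field, lens.depth_of_correlation)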
|
normal
|
{
"blob_id": "6ca7b896cc20220f790c06d4ba08fef7bda8400f",
"index": 3301,
"step-1": "<mask token>\n\n\nclass illumination(object):\n <mask token>\n\n\nclass darkfield(object):\n\n def __init__(self, basePath, darkframePath=None, flip_image_across_axis\n =None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, savename=None, save_plot=False):\n \"\"\"\n details about dark field image\n\n \"\"\"\n self.basePath = basePath\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=\n darkframePath, flip_image_axes=flip_image_across_axis,\n show_image=show_image, save_image=save_image, save_img_type=\n save_img_type, savePath=savePath, savename=savename, save_plot=\n save_plot)\n self.img = img\n self.mean = mean\n self.std = std\n\n\nclass microscope(object):\n\n def __init__(self, type, objective, illumination, ccd):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n self.type = type\n self.objective = objective\n self.illumination = illumination\n self.ccd = ccd\n\n\nclass ccd(object):\n\n def __init__(self, exposure_time, img_acq_rate, EM_gain, name=\n 'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=\n None, vertical_pixel_shift_speed=5e-07,\n horizontal_pixel_shift_speed=1e-07,\n horizontal_pixel_shift_rate_bits=14, frame_transfer=True, crop_mode\n =False, acquisition_mode='kinetic', triggering='internal',\n readout_mode='image', pixels=512, pixel_size=1.6e-05):\n \"\"\"\n describe the CCD class\n \"\"\"\n self.name = name\n self.img_acq_type = img_acq_type\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n if isinstance(pixels, int):\n self.pixels = pixels, pixels\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = self.pixels[0] * pixel_size, self.pixels[1\n ] * pixel_size\n\n\nclass objective(object):\n\n def __init__(self, fluoro_particle, name=None, numerical_aperture=None,\n magnification=None, basePath=None, channel_height=None,\n illumination=None, wavelength=None, microgrid=None,\n auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,\n field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):\n \"\"\"\n\n Objectives in the Pennathur Lab Dark Room uScope:\n\n 20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]\n magnification: 20\n numerical_aperture: 0.45\n field_number: 26.5\n working distance: 7.4 - 8.3 mm\n transmittance: 90% @ 425 - 670 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 1.55\n 50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]\n magnification: 50\n numerical aperture: 0.7\n field number: 26.5\n working distance: 2.2 - 3 mm\n transmittance: 90% @ 425 - 650 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 0.6\n\n Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428\n \"\"\"\n self.name = name\n if name == 'LCPLFLN20xLCD':\n self.magnification = 20\n self.numerical_aperture = 0.45\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 1.55\n elif name == 'LCPLFLN50xLCD':\n self.magnification = 50\n self.numerical_aperture = 0.7\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 0.6\n else:\n 
self.numerical_aperture = numerical_aperture\n self.magnification = magnification\n self.field_number = field_number\n self._illumination = illumination\n if self._illumination is not None:\n self._wavelength = self._illumination.emission_wavelength\n elif wavelength is not None:\n self._wavelength = wavelength\n else:\n raise ValueError(\n 'A wavelength is required via the <illumination> class or <wavelength> input parameter'\n )\n self._pd = fluoro_particle.diameter\n self._n0 = n0\n self.calculate_depth_of_field()\n self.calculate_depth_of_correlation()\n if field_number:\n self.calculate_field_of_view()\n if show_depth_plot or save_depth_plot:\n plot_field_depth(depth_of_corr=self.depth_of_correlation,\n depth_of_field=self.depth_of_field, show_depth_plot=\n show_depth_plot, save_depth_plot=save_depth_plot, basePath=\n basePath, savename=None, channel_height=channel_height,\n objective=self.magnification)\n if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:\n self.microgrid = microgrid\n self.calculate_pixel_to_micron_scaling()\n\n def calculate_field_of_view(self):\n self.field_of_view = self.field_number / self.magnification\n\n def calculate_depth_of_field(self, e=1.6e-05, n=1):\n \"\"\"\n e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)\n \"\"\"\n self.depth_of_field = (self._wavelength * n / self.\n numerical_aperture ** 2 + e * n / (self.magnification * self.\n numerical_aperture))\n\n def calculate_depth_of_correlation(self, eps=0.01):\n n = self._n0\n dp = self._pd\n NA = self.numerical_aperture\n M = self.magnification\n lmbda = self._wavelength\n depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,\n dp=dp, n=n, lmbda=lmbda, eps=eps)\n self.depth_of_correlation = depth_of_correlation\n\n def calculate_pixel_to_micron_scaling(self):\n if self.microgrid is None:\n raise ValueError(\n 'Need objective.microgrid property in order to calculate scaling factor'\n )\n\n @property\n def NA(self):\n return self.numerical_aperture\n\n @property\n def M(self):\n return self.magnification\n\n\nclass microgrid(object):\n\n def __init__(self, gridPath=None, center_to_center_spacing=None,\n feature_width=None, grid_type='grid', show_grid=False):\n \"\"\"\n this class holds images for the microgrid and performs pixel to micron scaling calculations\n \"\"\"\n if gridPath is not None:\n self.gridPath = gridPath\n self.spacing = center_to_center_spacing\n self.width = feature_width\n self.grid_type = grid_type\n file_list = glob.glob(join(self.gridPath, 'grid*.tif'))\n if len(file_list) < 1:\n raise ValueError('No grid*.tif files found in {}'.format(\n self.gridPath))\n img_grid = np.zeros(shape=(512, 512))\n for f in file_list:\n img = io.imread(f, plugin='tifffile')\n if len(np.shape(img)) > 2:\n img = np.mean(img, axis=0)\n img_grid += img\n img_grid = img_grid / len(file_list)\n self.img_grid = img_grid\n if show_grid is True:\n fig, ax = plt.subplots()\n ax.imshow(img_grid, cmap='gray')\n ax.set_xlabel('pixels')\n ax.set_ylabel('pixels')\n plt.title('grid: 10 um Lines; 50 um Spacing')\n plt.show()\n\n\nclass fluorescent_particles(object):\n\n def __init__(self, name=None, materials=None, diameter=None,\n fluorescence_spectra=None, concentration=None,\n electrophoretic_mobility=None, zeta=None):\n \"\"\"\n the details of the fluroescent particles used\n :param materials:\n :param diameter:\n :param fluorescence_spectra:\n :param concentration:\n :param electrophoretic_mobility:\n :param zeta:\n \"\"\"\n self.name = name\n self.materials 
= materials\n self.concentration = concentration\n self.electrophoretic_mobility = electrophoretic_mobility\n self.zeta = zeta\n self.diameter = diameter\n if diameter:\n k_b = 1.3806e-23\n T = 298\n mu = 0.001\n self.diffusivity = k_b * T / (6 * np.pi * mu * diameter / 2)\n self.fluorescence_spectra = fluorescence_spectra\n\n\nclass reservoir(object):\n\n def __init__(self, diameter, height, height_of_reservoir=None, material\n =None):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n g = 9.81\n self.material = material\n self.diameter = diameter\n self.height = height\n self.volume = np.pi * self.diameter ** 2 / 4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = (material.density * g * self.\n height_of_reservoir)\n\n\nclass fluid_handling_system(object):\n\n def __init__(self, fluid_reservoir=None, all_tubing=None,\n onchip_reservoir=None):\n \"\"\"\n describes the fluid handling system\n \"\"\"\n self.fluid_reservoir = fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir\n\n\nclass tubing(object):\n\n def __init__(self, inner_diameter=None, length=None, material=None):\n \"\"\"\n describes each segment of tubing\n\n \"\"\"\n self.inner_diameter = inner_diameter\n self.length = length\n self.material = material\n\n\nclass optical_element(object):\n\n def __init__(self, passing_wavelengths=None, reflectivity=None):\n \"\"\"\n this class describes the optical characteristics of any material or element\n :param wavelength_bandpass:\n \"\"\"\n self.passing_wavelengths = passing_wavelengths\n self.reflectivity = reflectivity\n\n\nclass measurable_quantity(object):\n\n def __init__(self, reference_value=None, measured_value=None):\n \"\"\"\n what value was measured and when\n \"\"\"\n self.reference_value = reference_value\n self.measured_value = measured_value\n\n\nclass measurement(object):\n\n def __init__(self, value=None, date=None):\n \"\"\"\n Object for storing measurements\n :param value:\n :param date:\n \"\"\"\n self.value = value\n self.date = date\n\n\nclass electrode_configuration(object):\n\n def __init__(self, material=None, length=None, entrance_length=None):\n \"\"\"\n Object for holding electrode configuration details\n :param material:\n :param length:\n :param entrance_length:\n \"\"\"\n self.material = material\n self.length = length\n self.entrance_length = entrance_length\n\n\nclass material_solid(object):\n\n def __init__(self, name=None, zeta=None, concentration=None,\n index_of_refraction=None, transparency=None, fluorescence_spectra=\n None, permittivity=None, conductivity=None, thickness=None,\n youngs_modulus=None, poissons_ratio=None, density=None,\n dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=\n None, width=None, length=None):\n \"\"\"\n everything about a material\n :param transparency:\n :param fluorescence_spectra:\n :param zeta:\n \"\"\"\n self.name = name\n self.length = length\n self.width = width\n self.thickness = thickness\n self.density = density\n self.concentration = concentration\n self.youngs_modulus = youngs_modulus\n self.poissons_ratio = poissons_ratio\n self.index_of_refraction = index_of_refraction\n self.fluorescence_spectra = fluorescence_spectra\n self.transparency = transparency\n if self.transparency:\n self.reflectivity = 1 / self.transparency\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n self.zeta = zeta\n self.dielectric_strength 
= dielectric_strength\n if reaction_site_density:\n self.reaction_site_density = reaction_site_density * 1e+18\n self.Ka = Ka\n self.Kb = Kb\n\n\nclass material_liquid(object):\n\n def __init__(self, name=None, species=None, concentration=None,\n conductivity=None, pH=None, density=None, viscosity=None,\n permittivity=None, temperature=None, valence=1.0):\n \"\"\"\n everything about a liquid\n :param species:\n :param concentration:\n :param conductivity:\n :param pH:\n \"\"\"\n self.name = name\n self.species = species\n self.concentration = concentration\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n if pH:\n self.pH = pH\n self.c_H = 10 ** -pH * 1000.0\n self.valence = valence\n self.density = density\n self.viscosity = viscosity\n self.temperature = temperature\n self.diffusivity = 2e-09\n",
"step-2": "<mask token>\n\n\nclass bpe(object):\n <mask token>\n\n\nclass optics(object):\n\n def __init__(self, microscope, fluorescent_particles=None,\n calibration_grid=None, pixel_to_micron_scaling=None):\n self.microscope = microscope\n self.fluorescent_particles = fluorescent_particles\n self.calibration_grid = calibration_grid\n if self.microscope.objective.magnification == 50:\n self.pixel_to_micron_scaling = 0.6\n elif self.microscope.objective.magnification == 20:\n self.pixel_to_micron_scaling = 1.55\n else:\n raise ValueError(\n 'Unable to determine microns/pixels scaling because objective magnification not 50X or 20X'\n )\n if pixel_to_micron_scaling is not None:\n print(\n 'Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.'\n .format(self.pixel_to_micron_scaling, self.microscope.\n objective.magnification))\n \"\"\"\n --- I THINK THIS SECTION IS DEPRECATED ---\n Notes: deprecated because calculating the scaling factor or entering it manually is too confusing. I have\n permanently figured out the correct scaling.\n \n if microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is None:\n self.pixel_to_micron = microscope.objective.pixel_to_micron\n elif microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is not None and microscope.objective.pixel_to_micron != pixel_to_micron_scaling:\n raise ValueError(\"Conflicting scaling factors: microscope.objective={}, optics={}\".format(microscope.objective.pixel_to_micron, pixel_to_micron_scaling))\n elif microscope.objective.pixel_to_micron is None and pixel_to_micron_scaling is not None:\n self.pixel_to_micron = pixel_to_micron_scaling\n \"\"\"\n\n\nclass illumination(object):\n\n def __init__(self, basePath=None, source=None, excitation=None,\n emission=None, dichroic=None, illumination_distribution=None,\n calculate_illumination_distribution=False, illumPath=None,\n illumSavePath=None, illumSaveName=None, showIllumPlot=False,\n save_txt=False, save_plot=False, save_image=False):\n \"\"\"\n details about the optical setup\n :param source:\n :param excitation:\n :param emission:\n :param dichroic:\n \"\"\"\n self.basePath = basePath\n self.source = source\n self.excitation_wavelength = excitation\n self.emission_wavelength = emission\n self.dichroic = dichroic\n if illumination_distribution is not None:\n self.illumination_distribution = illumination_distribution\n elif illumPath is not None:\n flatfield = io.imread(illumPath, plugin='tifffile')\n if len(np.shape(flatfield)) > 2:\n flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)),\n dtype='uint16')\n self.illumination_distribution = flatfield\n elif calculate_illumination_distribution and illumination_distribution is None:\n self.illumination_distribution = measureIlluminationDistributionXY(\n basePath=self.basePath, illumPath=illumPath, show_image=\n showIllumPlot, save_image=save_image, save_img_type='.tif',\n save_txt=save_txt, show_plot=showIllumPlot, save_plot=\n save_plot, savePath=illumSavePath, savename=illumSaveName)\n else:\n self.illumination_distribution = illumination_distribution\n self.flatfield = self.illumination_distribution\n if self.flatfield is not None:\n self.flatfield_mean = np.mean(self.flatfield)\n self.flatfield_std = np.std(self.flatfield)\n\n\nclass darkfield(object):\n\n def __init__(self, basePath, darkframePath=None, flip_image_across_axis\n =None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, 
savename=None, save_plot=False):\n \"\"\"\n details about dark field image\n\n \"\"\"\n self.basePath = basePath\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=\n darkframePath, flip_image_axes=flip_image_across_axis,\n show_image=show_image, save_image=save_image, save_img_type=\n save_img_type, savePath=savePath, savename=savename, save_plot=\n save_plot)\n self.img = img\n self.mean = mean\n self.std = std\n\n\nclass microscope(object):\n\n def __init__(self, type, objective, illumination, ccd):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n self.type = type\n self.objective = objective\n self.illumination = illumination\n self.ccd = ccd\n\n\nclass ccd(object):\n\n def __init__(self, exposure_time, img_acq_rate, EM_gain, name=\n 'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=\n None, vertical_pixel_shift_speed=5e-07,\n horizontal_pixel_shift_speed=1e-07,\n horizontal_pixel_shift_rate_bits=14, frame_transfer=True, crop_mode\n =False, acquisition_mode='kinetic', triggering='internal',\n readout_mode='image', pixels=512, pixel_size=1.6e-05):\n \"\"\"\n describe the CCD class\n \"\"\"\n self.name = name\n self.img_acq_type = img_acq_type\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n if isinstance(pixels, int):\n self.pixels = pixels, pixels\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = self.pixels[0] * pixel_size, self.pixels[1\n ] * pixel_size\n\n\nclass objective(object):\n\n def __init__(self, fluoro_particle, name=None, numerical_aperture=None,\n magnification=None, basePath=None, channel_height=None,\n illumination=None, wavelength=None, microgrid=None,\n auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,\n field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):\n \"\"\"\n\n Objectives in the Pennathur Lab Dark Room uScope:\n\n 20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]\n magnification: 20\n numerical_aperture: 0.45\n field_number: 26.5\n working distance: 7.4 - 8.3 mm\n transmittance: 90% @ 425 - 670 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 1.55\n 50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]\n magnification: 50\n numerical aperture: 0.7\n field number: 26.5\n working distance: 2.2 - 3 mm\n transmittance: 90% @ 425 - 650 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 0.6\n\n Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428\n \"\"\"\n self.name = name\n if name == 'LCPLFLN20xLCD':\n self.magnification = 20\n self.numerical_aperture = 0.45\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 1.55\n elif name == 'LCPLFLN50xLCD':\n self.magnification = 50\n self.numerical_aperture = 0.7\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 0.6\n else:\n self.numerical_aperture = numerical_aperture\n self.magnification = magnification\n self.field_number = field_number\n self._illumination = illumination\n if self._illumination is not None:\n self._wavelength = self._illumination.emission_wavelength\n elif wavelength 
is not None:\n self._wavelength = wavelength\n else:\n raise ValueError(\n 'A wavelength is required via the <illumination> class or <wavelength> input parameter'\n )\n self._pd = fluoro_particle.diameter\n self._n0 = n0\n self.calculate_depth_of_field()\n self.calculate_depth_of_correlation()\n if field_number:\n self.calculate_field_of_view()\n if show_depth_plot or save_depth_plot:\n plot_field_depth(depth_of_corr=self.depth_of_correlation,\n depth_of_field=self.depth_of_field, show_depth_plot=\n show_depth_plot, save_depth_plot=save_depth_plot, basePath=\n basePath, savename=None, channel_height=channel_height,\n objective=self.magnification)\n if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:\n self.microgrid = microgrid\n self.calculate_pixel_to_micron_scaling()\n\n def calculate_field_of_view(self):\n self.field_of_view = self.field_number / self.magnification\n\n def calculate_depth_of_field(self, e=1.6e-05, n=1):\n \"\"\"\n e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)\n \"\"\"\n self.depth_of_field = (self._wavelength * n / self.\n numerical_aperture ** 2 + e * n / (self.magnification * self.\n numerical_aperture))\n\n def calculate_depth_of_correlation(self, eps=0.01):\n n = self._n0\n dp = self._pd\n NA = self.numerical_aperture\n M = self.magnification\n lmbda = self._wavelength\n depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,\n dp=dp, n=n, lmbda=lmbda, eps=eps)\n self.depth_of_correlation = depth_of_correlation\n\n def calculate_pixel_to_micron_scaling(self):\n if self.microgrid is None:\n raise ValueError(\n 'Need objective.microgrid property in order to calculate scaling factor'\n )\n\n @property\n def NA(self):\n return self.numerical_aperture\n\n @property\n def M(self):\n return self.magnification\n\n\nclass microgrid(object):\n\n def __init__(self, gridPath=None, center_to_center_spacing=None,\n feature_width=None, grid_type='grid', show_grid=False):\n \"\"\"\n this class holds images for the microgrid and performs pixel to micron scaling calculations\n \"\"\"\n if gridPath is not None:\n self.gridPath = gridPath\n self.spacing = center_to_center_spacing\n self.width = feature_width\n self.grid_type = grid_type\n file_list = glob.glob(join(self.gridPath, 'grid*.tif'))\n if len(file_list) < 1:\n raise ValueError('No grid*.tif files found in {}'.format(\n self.gridPath))\n img_grid = np.zeros(shape=(512, 512))\n for f in file_list:\n img = io.imread(f, plugin='tifffile')\n if len(np.shape(img)) > 2:\n img = np.mean(img, axis=0)\n img_grid += img\n img_grid = img_grid / len(file_list)\n self.img_grid = img_grid\n if show_grid is True:\n fig, ax = plt.subplots()\n ax.imshow(img_grid, cmap='gray')\n ax.set_xlabel('pixels')\n ax.set_ylabel('pixels')\n plt.title('grid: 10 um Lines; 50 um Spacing')\n plt.show()\n\n\nclass fluorescent_particles(object):\n\n def __init__(self, name=None, materials=None, diameter=None,\n fluorescence_spectra=None, concentration=None,\n electrophoretic_mobility=None, zeta=None):\n \"\"\"\n the details of the fluroescent particles used\n :param materials:\n :param diameter:\n :param fluorescence_spectra:\n :param concentration:\n :param electrophoretic_mobility:\n :param zeta:\n \"\"\"\n self.name = name\n self.materials = materials\n self.concentration = concentration\n self.electrophoretic_mobility = electrophoretic_mobility\n self.zeta = zeta\n self.diameter = diameter\n if diameter:\n k_b = 1.3806e-23\n T = 298\n mu = 0.001\n self.diffusivity = k_b * T / (6 * np.pi * mu * diameter 
/ 2)\n self.fluorescence_spectra = fluorescence_spectra\n\n\nclass reservoir(object):\n\n def __init__(self, diameter, height, height_of_reservoir=None, material\n =None):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n g = 9.81\n self.material = material\n self.diameter = diameter\n self.height = height\n self.volume = np.pi * self.diameter ** 2 / 4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = (material.density * g * self.\n height_of_reservoir)\n\n\nclass fluid_handling_system(object):\n\n def __init__(self, fluid_reservoir=None, all_tubing=None,\n onchip_reservoir=None):\n \"\"\"\n describes the fluid handling system\n \"\"\"\n self.fluid_reservoir = fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir\n\n\nclass tubing(object):\n\n def __init__(self, inner_diameter=None, length=None, material=None):\n \"\"\"\n describes each segment of tubing\n\n \"\"\"\n self.inner_diameter = inner_diameter\n self.length = length\n self.material = material\n\n\nclass optical_element(object):\n\n def __init__(self, passing_wavelengths=None, reflectivity=None):\n \"\"\"\n this class describes the optical characteristics of any material or element\n :param wavelength_bandpass:\n \"\"\"\n self.passing_wavelengths = passing_wavelengths\n self.reflectivity = reflectivity\n\n\nclass measurable_quantity(object):\n\n def __init__(self, reference_value=None, measured_value=None):\n \"\"\"\n what value was measured and when\n \"\"\"\n self.reference_value = reference_value\n self.measured_value = measured_value\n\n\nclass measurement(object):\n\n def __init__(self, value=None, date=None):\n \"\"\"\n Object for storing measurements\n :param value:\n :param date:\n \"\"\"\n self.value = value\n self.date = date\n\n\nclass electrode_configuration(object):\n\n def __init__(self, material=None, length=None, entrance_length=None):\n \"\"\"\n Object for holding electrode configuration details\n :param material:\n :param length:\n :param entrance_length:\n \"\"\"\n self.material = material\n self.length = length\n self.entrance_length = entrance_length\n\n\nclass material_solid(object):\n\n def __init__(self, name=None, zeta=None, concentration=None,\n index_of_refraction=None, transparency=None, fluorescence_spectra=\n None, permittivity=None, conductivity=None, thickness=None,\n youngs_modulus=None, poissons_ratio=None, density=None,\n dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=\n None, width=None, length=None):\n \"\"\"\n everything about a material\n :param transparency:\n :param fluorescence_spectra:\n :param zeta:\n \"\"\"\n self.name = name\n self.length = length\n self.width = width\n self.thickness = thickness\n self.density = density\n self.concentration = concentration\n self.youngs_modulus = youngs_modulus\n self.poissons_ratio = poissons_ratio\n self.index_of_refraction = index_of_refraction\n self.fluorescence_spectra = fluorescence_spectra\n self.transparency = transparency\n if self.transparency:\n self.reflectivity = 1 / self.transparency\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n self.zeta = zeta\n self.dielectric_strength = dielectric_strength\n if reaction_site_density:\n self.reaction_site_density = reaction_site_density * 1e+18\n self.Ka = Ka\n self.Kb = Kb\n\n\nclass material_liquid(object):\n\n def __init__(self, name=None, species=None, concentration=None,\n conductivity=None, 
pH=None, density=None, viscosity=None,\n permittivity=None, temperature=None, valence=1.0):\n \"\"\"\n everything about a liquid\n :param species:\n :param concentration:\n :param conductivity:\n :param pH:\n \"\"\"\n self.name = name\n self.species = species\n self.concentration = concentration\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n if pH:\n self.pH = pH\n self.c_H = 10 ** -pH * 1000.0\n self.valence = valence\n self.density = density\n self.viscosity = viscosity\n self.temperature = temperature\n self.diffusivity = 2e-09\n",
"step-3": "<mask token>\n\n\nclass chip(object):\n <mask token>\n\n\nclass channel(object):\n\n def __init__(self, length=None, width=None, height=None,\n material_bottom_wall_surface=None, material_top_wall_surface=None,\n material_fluid=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.width = width\n self.height = height\n self.material_bottom_wall_surface = material_bottom_wall_surface\n self.material_top_wall_surface = material_top_wall_surface\n self.material_fluid = material_fluid\n\n\nclass bpe(object):\n\n def __init__(self, length=None, width=None, height=None, material=None,\n adhesion_material=None, dielectric_coating=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.linspace_x = np.linspace(-length / 2, length / 2, num=100)\n self.width = width\n self.height = height\n self.material = material\n if self.material.thickness:\n if self.material.thickness != self.height:\n raise ValueError('BPE height must equal BPE material thickness'\n )\n self.adhesion_material = adhesion_material\n if dielectric_coating:\n self.dielectric_coating = dielectric_coating\n else:\n self.dielectric_coating = material_solid(name='no_dielectric',\n permittivity=1, thickness=1e-12, Ka=6, Kb=2,\n reaction_site_density=5)\n\n\nclass optics(object):\n\n def __init__(self, microscope, fluorescent_particles=None,\n calibration_grid=None, pixel_to_micron_scaling=None):\n self.microscope = microscope\n self.fluorescent_particles = fluorescent_particles\n self.calibration_grid = calibration_grid\n if self.microscope.objective.magnification == 50:\n self.pixel_to_micron_scaling = 0.6\n elif self.microscope.objective.magnification == 20:\n self.pixel_to_micron_scaling = 1.55\n else:\n raise ValueError(\n 'Unable to determine microns/pixels scaling because objective magnification not 50X or 20X'\n )\n if pixel_to_micron_scaling is not None:\n print(\n 'Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.'\n .format(self.pixel_to_micron_scaling, self.microscope.\n objective.magnification))\n \"\"\"\n --- I THINK THIS SECTION IS DEPRECATED ---\n Notes: deprecated because calculating the scaling factor or entering it manually is too confusing. 
I have\n permanently figured out the correct scaling.\n \n if microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is None:\n self.pixel_to_micron = microscope.objective.pixel_to_micron\n elif microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is not None and microscope.objective.pixel_to_micron != pixel_to_micron_scaling:\n raise ValueError(\"Conflicting scaling factors: microscope.objective={}, optics={}\".format(microscope.objective.pixel_to_micron, pixel_to_micron_scaling))\n elif microscope.objective.pixel_to_micron is None and pixel_to_micron_scaling is not None:\n self.pixel_to_micron = pixel_to_micron_scaling\n \"\"\"\n\n\nclass illumination(object):\n\n def __init__(self, basePath=None, source=None, excitation=None,\n emission=None, dichroic=None, illumination_distribution=None,\n calculate_illumination_distribution=False, illumPath=None,\n illumSavePath=None, illumSaveName=None, showIllumPlot=False,\n save_txt=False, save_plot=False, save_image=False):\n \"\"\"\n details about the optical setup\n :param source:\n :param excitation:\n :param emission:\n :param dichroic:\n \"\"\"\n self.basePath = basePath\n self.source = source\n self.excitation_wavelength = excitation\n self.emission_wavelength = emission\n self.dichroic = dichroic\n if illumination_distribution is not None:\n self.illumination_distribution = illumination_distribution\n elif illumPath is not None:\n flatfield = io.imread(illumPath, plugin='tifffile')\n if len(np.shape(flatfield)) > 2:\n flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)),\n dtype='uint16')\n self.illumination_distribution = flatfield\n elif calculate_illumination_distribution and illumination_distribution is None:\n self.illumination_distribution = measureIlluminationDistributionXY(\n basePath=self.basePath, illumPath=illumPath, show_image=\n showIllumPlot, save_image=save_image, save_img_type='.tif',\n save_txt=save_txt, show_plot=showIllumPlot, save_plot=\n save_plot, savePath=illumSavePath, savename=illumSaveName)\n else:\n self.illumination_distribution = illumination_distribution\n self.flatfield = self.illumination_distribution\n if self.flatfield is not None:\n self.flatfield_mean = np.mean(self.flatfield)\n self.flatfield_std = np.std(self.flatfield)\n\n\nclass darkfield(object):\n\n def __init__(self, basePath, darkframePath=None, flip_image_across_axis\n =None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, savename=None, save_plot=False):\n \"\"\"\n details about dark field image\n\n \"\"\"\n self.basePath = basePath\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=\n darkframePath, flip_image_axes=flip_image_across_axis,\n show_image=show_image, save_image=save_image, save_img_type=\n save_img_type, savePath=savePath, savename=savename, save_plot=\n save_plot)\n self.img = img\n self.mean = mean\n self.std = std\n\n\nclass microscope(object):\n\n def __init__(self, type, objective, illumination, ccd):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n self.type = type\n self.objective = objective\n self.illumination = illumination\n self.ccd = ccd\n\n\nclass ccd(object):\n\n def __init__(self, exposure_time, img_acq_rate, EM_gain, name=\n 'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=\n None, vertical_pixel_shift_speed=5e-07,\n horizontal_pixel_shift_speed=1e-07,\n horizontal_pixel_shift_rate_bits=14, frame_transfer=True, crop_mode\n =False, acquisition_mode='kinetic', 
triggering='internal',\n readout_mode='image', pixels=512, pixel_size=1.6e-05):\n \"\"\"\n describe the CCD class\n \"\"\"\n self.name = name\n self.img_acq_type = img_acq_type\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n if isinstance(pixels, int):\n self.pixels = pixels, pixels\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = self.pixels[0] * pixel_size, self.pixels[1\n ] * pixel_size\n\n\nclass objective(object):\n\n def __init__(self, fluoro_particle, name=None, numerical_aperture=None,\n magnification=None, basePath=None, channel_height=None,\n illumination=None, wavelength=None, microgrid=None,\n auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,\n field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):\n \"\"\"\n\n Objectives in the Pennathur Lab Dark Room uScope:\n\n 20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]\n magnification: 20\n numerical_aperture: 0.45\n field_number: 26.5\n working distance: 7.4 - 8.3 mm\n transmittance: 90% @ 425 - 670 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 1.55\n 50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]\n magnification: 50\n numerical aperture: 0.7\n field number: 26.5\n working distance: 2.2 - 3 mm\n transmittance: 90% @ 425 - 650 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 0.6\n\n Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428\n \"\"\"\n self.name = name\n if name == 'LCPLFLN20xLCD':\n self.magnification = 20\n self.numerical_aperture = 0.45\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 1.55\n elif name == 'LCPLFLN50xLCD':\n self.magnification = 50\n self.numerical_aperture = 0.7\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 0.6\n else:\n self.numerical_aperture = numerical_aperture\n self.magnification = magnification\n self.field_number = field_number\n self._illumination = illumination\n if self._illumination is not None:\n self._wavelength = self._illumination.emission_wavelength\n elif wavelength is not None:\n self._wavelength = wavelength\n else:\n raise ValueError(\n 'A wavelength is required via the <illumination> class or <wavelength> input parameter'\n )\n self._pd = fluoro_particle.diameter\n self._n0 = n0\n self.calculate_depth_of_field()\n self.calculate_depth_of_correlation()\n if field_number:\n self.calculate_field_of_view()\n if show_depth_plot or save_depth_plot:\n plot_field_depth(depth_of_corr=self.depth_of_correlation,\n depth_of_field=self.depth_of_field, show_depth_plot=\n show_depth_plot, save_depth_plot=save_depth_plot, basePath=\n basePath, savename=None, channel_height=channel_height,\n objective=self.magnification)\n if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:\n self.microgrid = microgrid\n self.calculate_pixel_to_micron_scaling()\n\n def calculate_field_of_view(self):\n self.field_of_view = self.field_number / self.magnification\n\n def calculate_depth_of_field(self, e=1.6e-05, n=1):\n \"\"\"\n e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)\n \"\"\"\n 
self.depth_of_field = (self._wavelength * n / self.\n numerical_aperture ** 2 + e * n / (self.magnification * self.\n numerical_aperture))\n\n def calculate_depth_of_correlation(self, eps=0.01):\n n = self._n0\n dp = self._pd\n NA = self.numerical_aperture\n M = self.magnification\n lmbda = self._wavelength\n depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,\n dp=dp, n=n, lmbda=lmbda, eps=eps)\n self.depth_of_correlation = depth_of_correlation\n\n def calculate_pixel_to_micron_scaling(self):\n if self.microgrid is None:\n raise ValueError(\n 'Need objective.microgrid property in order to calculate scaling factor'\n )\n\n @property\n def NA(self):\n return self.numerical_aperture\n\n @property\n def M(self):\n return self.magnification\n\n\nclass microgrid(object):\n\n def __init__(self, gridPath=None, center_to_center_spacing=None,\n feature_width=None, grid_type='grid', show_grid=False):\n \"\"\"\n this class holds images for the microgrid and performs pixel to micron scaling calculations\n \"\"\"\n if gridPath is not None:\n self.gridPath = gridPath\n self.spacing = center_to_center_spacing\n self.width = feature_width\n self.grid_type = grid_type\n file_list = glob.glob(join(self.gridPath, 'grid*.tif'))\n if len(file_list) < 1:\n raise ValueError('No grid*.tif files found in {}'.format(\n self.gridPath))\n img_grid = np.zeros(shape=(512, 512))\n for f in file_list:\n img = io.imread(f, plugin='tifffile')\n if len(np.shape(img)) > 2:\n img = np.mean(img, axis=0)\n img_grid += img\n img_grid = img_grid / len(file_list)\n self.img_grid = img_grid\n if show_grid is True:\n fig, ax = plt.subplots()\n ax.imshow(img_grid, cmap='gray')\n ax.set_xlabel('pixels')\n ax.set_ylabel('pixels')\n plt.title('grid: 10 um Lines; 50 um Spacing')\n plt.show()\n\n\nclass fluorescent_particles(object):\n\n def __init__(self, name=None, materials=None, diameter=None,\n fluorescence_spectra=None, concentration=None,\n electrophoretic_mobility=None, zeta=None):\n \"\"\"\n the details of the fluroescent particles used\n :param materials:\n :param diameter:\n :param fluorescence_spectra:\n :param concentration:\n :param electrophoretic_mobility:\n :param zeta:\n \"\"\"\n self.name = name\n self.materials = materials\n self.concentration = concentration\n self.electrophoretic_mobility = electrophoretic_mobility\n self.zeta = zeta\n self.diameter = diameter\n if diameter:\n k_b = 1.3806e-23\n T = 298\n mu = 0.001\n self.diffusivity = k_b * T / (6 * np.pi * mu * diameter / 2)\n self.fluorescence_spectra = fluorescence_spectra\n\n\nclass reservoir(object):\n\n def __init__(self, diameter, height, height_of_reservoir=None, material\n =None):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n g = 9.81\n self.material = material\n self.diameter = diameter\n self.height = height\n self.volume = np.pi * self.diameter ** 2 / 4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = (material.density * g * self.\n height_of_reservoir)\n\n\nclass fluid_handling_system(object):\n\n def __init__(self, fluid_reservoir=None, all_tubing=None,\n onchip_reservoir=None):\n \"\"\"\n describes the fluid handling system\n \"\"\"\n self.fluid_reservoir = fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir\n\n\nclass tubing(object):\n\n def __init__(self, inner_diameter=None, length=None, material=None):\n \"\"\"\n describes each segment of tubing\n\n \"\"\"\n self.inner_diameter = 
inner_diameter\n self.length = length\n self.material = material\n\n\nclass optical_element(object):\n\n def __init__(self, passing_wavelengths=None, reflectivity=None):\n \"\"\"\n this class describes the optical characteristics of any material or element\n :param wavelength_bandpass:\n \"\"\"\n self.passing_wavelengths = passing_wavelengths\n self.reflectivity = reflectivity\n\n\nclass measurable_quantity(object):\n\n def __init__(self, reference_value=None, measured_value=None):\n \"\"\"\n what value was measured and when\n \"\"\"\n self.reference_value = reference_value\n self.measured_value = measured_value\n\n\nclass measurement(object):\n\n def __init__(self, value=None, date=None):\n \"\"\"\n Object for storing measurements\n :param value:\n :param date:\n \"\"\"\n self.value = value\n self.date = date\n\n\nclass electrode_configuration(object):\n\n def __init__(self, material=None, length=None, entrance_length=None):\n \"\"\"\n Object for holding electrode configuration details\n :param material:\n :param length:\n :param entrance_length:\n \"\"\"\n self.material = material\n self.length = length\n self.entrance_length = entrance_length\n\n\nclass material_solid(object):\n\n def __init__(self, name=None, zeta=None, concentration=None,\n index_of_refraction=None, transparency=None, fluorescence_spectra=\n None, permittivity=None, conductivity=None, thickness=None,\n youngs_modulus=None, poissons_ratio=None, density=None,\n dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=\n None, width=None, length=None):\n \"\"\"\n everything about a material\n :param transparency:\n :param fluorescence_spectra:\n :param zeta:\n \"\"\"\n self.name = name\n self.length = length\n self.width = width\n self.thickness = thickness\n self.density = density\n self.concentration = concentration\n self.youngs_modulus = youngs_modulus\n self.poissons_ratio = poissons_ratio\n self.index_of_refraction = index_of_refraction\n self.fluorescence_spectra = fluorescence_spectra\n self.transparency = transparency\n if self.transparency:\n self.reflectivity = 1 / self.transparency\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n self.zeta = zeta\n self.dielectric_strength = dielectric_strength\n if reaction_site_density:\n self.reaction_site_density = reaction_site_density * 1e+18\n self.Ka = Ka\n self.Kb = Kb\n\n\nclass material_liquid(object):\n\n def __init__(self, name=None, species=None, concentration=None,\n conductivity=None, pH=None, density=None, viscosity=None,\n permittivity=None, temperature=None, valence=1.0):\n \"\"\"\n everything about a liquid\n :param species:\n :param concentration:\n :param conductivity:\n :param pH:\n \"\"\"\n self.name = name\n self.species = species\n self.concentration = concentration\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n if pH:\n self.pH = pH\n self.c_H = 10 ** -pH * 1000.0\n self.valence = valence\n self.density = density\n self.viscosity = viscosity\n self.temperature = temperature\n self.diffusivity = 2e-09\n",
"step-4": "<mask token>\n\n\nclass CurlypivTestSetup(object):\n\n def __init__(self, name, chip, optics, fluid_handling_system):\n \"\"\"\n All the \"settings\" used in the experimental setup:\n 1. chip (class)\n 1.1 solid material (class) (e.g. SiO2)\n 1.1.1 transparency\n 1.1.2 fluorescence spectral characteristics\n 1.1.3 surface charge density\n 1.1.4 %/vol (here would be 100%)\n 1.2 channel (class)\n 1.2.1 height\n 1.2.2 width\n 1.2.3 length\n 1.3 reservoir volume\n 1.4 electrode configuration (class)\n 1.4.1 material\n 1.4.2 separation distance\n 1.4.3 distance to channel entrance\n 2. test solution (class)\n 2.1 liquid material (class) (e.g. electrolyte)\n 2.1.1 chemical species (e.g. KCl)\n 2.1.2 concentration\n 2.1.3 measurable quantity (class) (e.g. conductivity)\n 2.1.3.1 theoretical\n 2.1.3.2 measured\n 2.1.3.2.1 measured conductivity\n 2.1.3.2.1 measured date\n 2.1.4 measurable quantity (class) (e.g. pH)\n 2.1.4.1 theoretical\n 2.1.4.2 measured\n 2.1.4.2.1 measured conductivity\n 2.1.4.2.1 measured date\n 2.2 fluorescent particles (class)\n 2.2.0 diameter\n 2.2.. measurable quantity (class) (e.g. zeta)\n 2.2.. measurable quantity (class) (e.g electrophoretic mobility)\n 2.2.. spectral characteristics\n 2.2.1 solid materials (class) (e.g. polystyrene)\n 2.2.1.1 %/vol\n 2.2.2 liquid materials (class) (e.g. DI water)\n 2.2.3 liquid materials (Class) (e.g. sodium azide)\n 2.2.3.1 conductivity\n 2.2.3.2 concentration\n 3. illumination (class)\n 3.1 source (class)\n 3.1.1 type (e.g. Hg lamp)\n 3.1.2 intensity\n 3.1.3 emission spectra\n 3.2 optical element (class) (e.g. excitation filter)\n 3.3 optical element (class) (e.g. emission filter)\n 3.4 optical element (class) (e.g. dichroic mirror)\n 4. microscope\n 4.1 type (Olympus iX 73)\n 4.2 objective (class)\n 4.2.1 numerical aperature (e.g. 0.3)\n 4.2.2 magnification (e.g. 20X)\n 4.2.3 field of view (e.g. 
500 x 500 um)\n 4.2.4 depth of focus (e.g 4.1 microns)\n \"\"\"\n self.name = name\n self.chip = chip\n self.optics = optics\n self.fluid_handling_system = fluid_handling_system\n\n\nclass chip(object):\n\n def __init__(self, channel=None, bpe=None, reservoir=None, electrodes=\n None, fluid_handling_system=None, material_in_optical_path=None,\n thickness_in_optical_path=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.channel = channel\n self.bpe = bpe\n self.electrodes = electrodes\n self.fluid_handling_system = fluid_handling_system\n self.material_in_optical_path = material_in_optical_path\n self.thickness_in_optical_path = thickness_in_optical_path\n\n\nclass channel(object):\n\n def __init__(self, length=None, width=None, height=None,\n material_bottom_wall_surface=None, material_top_wall_surface=None,\n material_fluid=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.width = width\n self.height = height\n self.material_bottom_wall_surface = material_bottom_wall_surface\n self.material_top_wall_surface = material_top_wall_surface\n self.material_fluid = material_fluid\n\n\nclass bpe(object):\n\n def __init__(self, length=None, width=None, height=None, material=None,\n adhesion_material=None, dielectric_coating=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.linspace_x = np.linspace(-length / 2, length / 2, num=100)\n self.width = width\n self.height = height\n self.material = material\n if self.material.thickness:\n if self.material.thickness != self.height:\n raise ValueError('BPE height must equal BPE material thickness'\n )\n self.adhesion_material = adhesion_material\n if dielectric_coating:\n self.dielectric_coating = dielectric_coating\n else:\n self.dielectric_coating = material_solid(name='no_dielectric',\n permittivity=1, thickness=1e-12, Ka=6, Kb=2,\n reaction_site_density=5)\n\n\nclass optics(object):\n\n def __init__(self, microscope, fluorescent_particles=None,\n calibration_grid=None, pixel_to_micron_scaling=None):\n self.microscope = microscope\n self.fluorescent_particles = fluorescent_particles\n self.calibration_grid = calibration_grid\n if self.microscope.objective.magnification == 50:\n self.pixel_to_micron_scaling = 0.6\n elif self.microscope.objective.magnification == 20:\n self.pixel_to_micron_scaling = 1.55\n else:\n raise ValueError(\n 'Unable to determine microns/pixels scaling because objective magnification not 50X or 20X'\n )\n if pixel_to_micron_scaling is not None:\n print(\n 'Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.'\n .format(self.pixel_to_micron_scaling, self.microscope.\n objective.magnification))\n \"\"\"\n --- I THINK THIS SECTION IS DEPRECATED ---\n Notes: deprecated because calculating the scaling factor or entering it manually is too confusing. 
I have\n permanently figured out the correct scaling.\n \n if microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is None:\n self.pixel_to_micron = microscope.objective.pixel_to_micron\n elif microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is not None and microscope.objective.pixel_to_micron != pixel_to_micron_scaling:\n raise ValueError(\"Conflicting scaling factors: microscope.objective={}, optics={}\".format(microscope.objective.pixel_to_micron, pixel_to_micron_scaling))\n elif microscope.objective.pixel_to_micron is None and pixel_to_micron_scaling is not None:\n self.pixel_to_micron = pixel_to_micron_scaling\n \"\"\"\n\n\nclass illumination(object):\n\n def __init__(self, basePath=None, source=None, excitation=None,\n emission=None, dichroic=None, illumination_distribution=None,\n calculate_illumination_distribution=False, illumPath=None,\n illumSavePath=None, illumSaveName=None, showIllumPlot=False,\n save_txt=False, save_plot=False, save_image=False):\n \"\"\"\n details about the optical setup\n :param source:\n :param excitation:\n :param emission:\n :param dichroic:\n \"\"\"\n self.basePath = basePath\n self.source = source\n self.excitation_wavelength = excitation\n self.emission_wavelength = emission\n self.dichroic = dichroic\n if illumination_distribution is not None:\n self.illumination_distribution = illumination_distribution\n elif illumPath is not None:\n flatfield = io.imread(illumPath, plugin='tifffile')\n if len(np.shape(flatfield)) > 2:\n flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)),\n dtype='uint16')\n self.illumination_distribution = flatfield\n elif calculate_illumination_distribution and illumination_distribution is None:\n self.illumination_distribution = measureIlluminationDistributionXY(\n basePath=self.basePath, illumPath=illumPath, show_image=\n showIllumPlot, save_image=save_image, save_img_type='.tif',\n save_txt=save_txt, show_plot=showIllumPlot, save_plot=\n save_plot, savePath=illumSavePath, savename=illumSaveName)\n else:\n self.illumination_distribution = illumination_distribution\n self.flatfield = self.illumination_distribution\n if self.flatfield is not None:\n self.flatfield_mean = np.mean(self.flatfield)\n self.flatfield_std = np.std(self.flatfield)\n\n\nclass darkfield(object):\n\n def __init__(self, basePath, darkframePath=None, flip_image_across_axis\n =None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, savename=None, save_plot=False):\n \"\"\"\n details about dark field image\n\n \"\"\"\n self.basePath = basePath\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=\n darkframePath, flip_image_axes=flip_image_across_axis,\n show_image=show_image, save_image=save_image, save_img_type=\n save_img_type, savePath=savePath, savename=savename, save_plot=\n save_plot)\n self.img = img\n self.mean = mean\n self.std = std\n\n\nclass microscope(object):\n\n def __init__(self, type, objective, illumination, ccd):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n self.type = type\n self.objective = objective\n self.illumination = illumination\n self.ccd = ccd\n\n\nclass ccd(object):\n\n def __init__(self, exposure_time, img_acq_rate, EM_gain, name=\n 'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=\n None, vertical_pixel_shift_speed=5e-07,\n horizontal_pixel_shift_speed=1e-07,\n horizontal_pixel_shift_rate_bits=14, frame_transfer=True, crop_mode\n =False, acquisition_mode='kinetic', 
triggering='internal',\n readout_mode='image', pixels=512, pixel_size=1.6e-05):\n \"\"\"\n describe the CCD class\n \"\"\"\n self.name = name\n self.img_acq_type = img_acq_type\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n if isinstance(pixels, int):\n self.pixels = pixels, pixels\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = self.pixels[0] * pixel_size, self.pixels[1\n ] * pixel_size\n\n\nclass objective(object):\n\n def __init__(self, fluoro_particle, name=None, numerical_aperture=None,\n magnification=None, basePath=None, channel_height=None,\n illumination=None, wavelength=None, microgrid=None,\n auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,\n field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):\n \"\"\"\n\n Objectives in the Pennathur Lab Dark Room uScope:\n\n 20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]\n magnification: 20\n numerical_aperture: 0.45\n field_number: 26.5\n working distance: 7.4 - 8.3 mm\n transmittance: 90% @ 425 - 670 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 1.55\n 50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]\n magnification: 50\n numerical aperture: 0.7\n field number: 26.5\n working distance: 2.2 - 3 mm\n transmittance: 90% @ 425 - 650 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 0.6\n\n Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428\n \"\"\"\n self.name = name\n if name == 'LCPLFLN20xLCD':\n self.magnification = 20\n self.numerical_aperture = 0.45\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 1.55\n elif name == 'LCPLFLN50xLCD':\n self.magnification = 50\n self.numerical_aperture = 0.7\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 0.6\n else:\n self.numerical_aperture = numerical_aperture\n self.magnification = magnification\n self.field_number = field_number\n self._illumination = illumination\n if self._illumination is not None:\n self._wavelength = self._illumination.emission_wavelength\n elif wavelength is not None:\n self._wavelength = wavelength\n else:\n raise ValueError(\n 'A wavelength is required via the <illumination> class or <wavelength> input parameter'\n )\n self._pd = fluoro_particle.diameter\n self._n0 = n0\n self.calculate_depth_of_field()\n self.calculate_depth_of_correlation()\n if field_number:\n self.calculate_field_of_view()\n if show_depth_plot or save_depth_plot:\n plot_field_depth(depth_of_corr=self.depth_of_correlation,\n depth_of_field=self.depth_of_field, show_depth_plot=\n show_depth_plot, save_depth_plot=save_depth_plot, basePath=\n basePath, savename=None, channel_height=channel_height,\n objective=self.magnification)\n if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:\n self.microgrid = microgrid\n self.calculate_pixel_to_micron_scaling()\n\n def calculate_field_of_view(self):\n self.field_of_view = self.field_number / self.magnification\n\n def calculate_depth_of_field(self, e=1.6e-05, n=1):\n \"\"\"\n e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)\n \"\"\"\n 
self.depth_of_field = (self._wavelength * n / self.\n numerical_aperture ** 2 + e * n / (self.magnification * self.\n numerical_aperture))\n\n def calculate_depth_of_correlation(self, eps=0.01):\n n = self._n0\n dp = self._pd\n NA = self.numerical_aperture\n M = self.magnification\n lmbda = self._wavelength\n depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,\n dp=dp, n=n, lmbda=lmbda, eps=eps)\n self.depth_of_correlation = depth_of_correlation\n\n def calculate_pixel_to_micron_scaling(self):\n if self.microgrid is None:\n raise ValueError(\n 'Need objective.microgrid property in order to calculate scaling factor'\n )\n\n @property\n def NA(self):\n return self.numerical_aperture\n\n @property\n def M(self):\n return self.magnification\n\n\nclass microgrid(object):\n\n def __init__(self, gridPath=None, center_to_center_spacing=None,\n feature_width=None, grid_type='grid', show_grid=False):\n \"\"\"\n this class holds images for the microgrid and performs pixel to micron scaling calculations\n \"\"\"\n if gridPath is not None:\n self.gridPath = gridPath\n self.spacing = center_to_center_spacing\n self.width = feature_width\n self.grid_type = grid_type\n file_list = glob.glob(join(self.gridPath, 'grid*.tif'))\n if len(file_list) < 1:\n raise ValueError('No grid*.tif files found in {}'.format(\n self.gridPath))\n img_grid = np.zeros(shape=(512, 512))\n for f in file_list:\n img = io.imread(f, plugin='tifffile')\n if len(np.shape(img)) > 2:\n img = np.mean(img, axis=0)\n img_grid += img\n img_grid = img_grid / len(file_list)\n self.img_grid = img_grid\n if show_grid is True:\n fig, ax = plt.subplots()\n ax.imshow(img_grid, cmap='gray')\n ax.set_xlabel('pixels')\n ax.set_ylabel('pixels')\n plt.title('grid: 10 um Lines; 50 um Spacing')\n plt.show()\n\n\nclass fluorescent_particles(object):\n\n def __init__(self, name=None, materials=None, diameter=None,\n fluorescence_spectra=None, concentration=None,\n electrophoretic_mobility=None, zeta=None):\n \"\"\"\n the details of the fluroescent particles used\n :param materials:\n :param diameter:\n :param fluorescence_spectra:\n :param concentration:\n :param electrophoretic_mobility:\n :param zeta:\n \"\"\"\n self.name = name\n self.materials = materials\n self.concentration = concentration\n self.electrophoretic_mobility = electrophoretic_mobility\n self.zeta = zeta\n self.diameter = diameter\n if diameter:\n k_b = 1.3806e-23\n T = 298\n mu = 0.001\n self.diffusivity = k_b * T / (6 * np.pi * mu * diameter / 2)\n self.fluorescence_spectra = fluorescence_spectra\n\n\nclass reservoir(object):\n\n def __init__(self, diameter, height, height_of_reservoir=None, material\n =None):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n g = 9.81\n self.material = material\n self.diameter = diameter\n self.height = height\n self.volume = np.pi * self.diameter ** 2 / 4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = (material.density * g * self.\n height_of_reservoir)\n\n\nclass fluid_handling_system(object):\n\n def __init__(self, fluid_reservoir=None, all_tubing=None,\n onchip_reservoir=None):\n \"\"\"\n describes the fluid handling system\n \"\"\"\n self.fluid_reservoir = fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir\n\n\nclass tubing(object):\n\n def __init__(self, inner_diameter=None, length=None, material=None):\n \"\"\"\n describes each segment of tubing\n\n \"\"\"\n self.inner_diameter = 
inner_diameter\n self.length = length\n self.material = material\n\n\nclass optical_element(object):\n\n def __init__(self, passing_wavelengths=None, reflectivity=None):\n \"\"\"\n this class describes the optical characteristics of any material or element\n :param wavelength_bandpass:\n \"\"\"\n self.passing_wavelengths = passing_wavelengths\n self.reflectivity = reflectivity\n\n\nclass measurable_quantity(object):\n\n def __init__(self, reference_value=None, measured_value=None):\n \"\"\"\n what value was measured and when\n \"\"\"\n self.reference_value = reference_value\n self.measured_value = measured_value\n\n\nclass measurement(object):\n\n def __init__(self, value=None, date=None):\n \"\"\"\n Object for storing measurements\n :param value:\n :param date:\n \"\"\"\n self.value = value\n self.date = date\n\n\nclass electrode_configuration(object):\n\n def __init__(self, material=None, length=None, entrance_length=None):\n \"\"\"\n Object for holding electrode configuration details\n :param material:\n :param length:\n :param entrance_length:\n \"\"\"\n self.material = material\n self.length = length\n self.entrance_length = entrance_length\n\n\nclass material_solid(object):\n\n def __init__(self, name=None, zeta=None, concentration=None,\n index_of_refraction=None, transparency=None, fluorescence_spectra=\n None, permittivity=None, conductivity=None, thickness=None,\n youngs_modulus=None, poissons_ratio=None, density=None,\n dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=\n None, width=None, length=None):\n \"\"\"\n everything about a material\n :param transparency:\n :param fluorescence_spectra:\n :param zeta:\n \"\"\"\n self.name = name\n self.length = length\n self.width = width\n self.thickness = thickness\n self.density = density\n self.concentration = concentration\n self.youngs_modulus = youngs_modulus\n self.poissons_ratio = poissons_ratio\n self.index_of_refraction = index_of_refraction\n self.fluorescence_spectra = fluorescence_spectra\n self.transparency = transparency\n if self.transparency:\n self.reflectivity = 1 / self.transparency\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n self.zeta = zeta\n self.dielectric_strength = dielectric_strength\n if reaction_site_density:\n self.reaction_site_density = reaction_site_density * 1e+18\n self.Ka = Ka\n self.Kb = Kb\n\n\nclass material_liquid(object):\n\n def __init__(self, name=None, species=None, concentration=None,\n conductivity=None, pH=None, density=None, viscosity=None,\n permittivity=None, temperature=None, valence=1.0):\n \"\"\"\n everything about a liquid\n :param species:\n :param concentration:\n :param conductivity:\n :param pH:\n \"\"\"\n self.name = name\n self.species = species\n self.concentration = concentration\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n if pH:\n self.pH = pH\n self.c_H = 10 ** -pH * 1000.0\n self.valence = valence\n self.density = density\n self.viscosity = viscosity\n self.temperature = temperature\n self.diffusivity = 2e-09\n",
"step-5": "# test CurlypivSetup\n\"\"\"\nNotes about program\n\"\"\"\n\n# 1.0 import modules\nimport numpy as np\nfrom skimage import io\nimport glob\nfrom os.path import join\nimport matplotlib.pyplot as plt\nfrom curlypiv.utils.calibrateCamera import measureIlluminationDistributionXY, calculate_depth_of_correlation, calculate_darkfield, plot_field_depth\n\n# 2.0 define class\n\nclass CurlypivTestSetup(object):\n\n def __init__(self, name, chip, optics, fluid_handling_system):\n \"\"\"\n All the \"settings\" used in the experimental setup:\n 1. chip (class)\n 1.1 solid material (class) (e.g. SiO2)\n 1.1.1 transparency\n 1.1.2 fluorescence spectral characteristics\n 1.1.3 surface charge density\n 1.1.4 %/vol (here would be 100%)\n 1.2 channel (class)\n 1.2.1 height\n 1.2.2 width\n 1.2.3 length\n 1.3 reservoir volume\n 1.4 electrode configuration (class)\n 1.4.1 material\n 1.4.2 separation distance\n 1.4.3 distance to channel entrance\n 2. test solution (class)\n 2.1 liquid material (class) (e.g. electrolyte)\n 2.1.1 chemical species (e.g. KCl)\n 2.1.2 concentration\n 2.1.3 measurable quantity (class) (e.g. conductivity)\n 2.1.3.1 theoretical\n 2.1.3.2 measured\n 2.1.3.2.1 measured conductivity\n 2.1.3.2.1 measured date\n 2.1.4 measurable quantity (class) (e.g. pH)\n 2.1.4.1 theoretical\n 2.1.4.2 measured\n 2.1.4.2.1 measured conductivity\n 2.1.4.2.1 measured date\n 2.2 fluorescent particles (class)\n 2.2.0 diameter\n 2.2.. measurable quantity (class) (e.g. zeta)\n 2.2.. measurable quantity (class) (e.g electrophoretic mobility)\n 2.2.. spectral characteristics\n 2.2.1 solid materials (class) (e.g. polystyrene)\n 2.2.1.1 %/vol\n 2.2.2 liquid materials (class) (e.g. DI water)\n 2.2.3 liquid materials (Class) (e.g. sodium azide)\n 2.2.3.1 conductivity\n 2.2.3.2 concentration\n 3. illumination (class)\n 3.1 source (class)\n 3.1.1 type (e.g. Hg lamp)\n 3.1.2 intensity\n 3.1.3 emission spectra\n 3.2 optical element (class) (e.g. excitation filter)\n 3.3 optical element (class) (e.g. emission filter)\n 3.4 optical element (class) (e.g. dichroic mirror)\n 4. microscope\n 4.1 type (Olympus iX 73)\n 4.2 objective (class)\n 4.2.1 numerical aperature (e.g. 0.3)\n 4.2.2 magnification (e.g. 20X)\n 4.2.3 field of view (e.g. 
500 x 500 um)\n 4.2.4 depth of focus (e.g 4.1 microns)\n \"\"\"\n self.name = name\n self.chip = chip\n self.optics = optics\n self.fluid_handling_system = fluid_handling_system\n\nclass chip(object):\n\n def __init__(self, channel=None, bpe=None, reservoir=None, electrodes=None, fluid_handling_system=None,\n material_in_optical_path=None, thickness_in_optical_path=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n #self.material = material # deprecated so the channel class can hold this information\n self.channel = channel\n self.bpe = bpe\n self.electrodes = electrodes\n self.fluid_handling_system = fluid_handling_system\n self.material_in_optical_path = material_in_optical_path\n self.thickness_in_optical_path = thickness_in_optical_path\n\nclass channel(object):\n\n def __init__(self, length=None, width=None, height=None,\n material_bottom_wall_surface=None, material_top_wall_surface=None, material_fluid=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.width = width\n self.height = height\n self.material_bottom_wall_surface = material_bottom_wall_surface # material should only hold relevant electrokinetic data\n self.material_top_wall_surface = material_top_wall_surface # material should only hold relevant elect\n self.material_fluid = material_fluid # could be a mixture of liquid materials + fluorescent particles\n\nclass bpe(object):\n\n def __init__(self, length=None, width=None, height=None, material=None, adhesion_material=None,\n dielectric_coating=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.linspace_x = np.linspace(-length/2, length/2, num=100)\n self.width = width\n self.height = height\n self.material = material\n\n if self.material.thickness:\n if self.material.thickness != self.height:\n raise ValueError(\"BPE height must equal BPE material thickness\")\n\n # adhesion layer used for thin metal film BPE\n self.adhesion_material = adhesion_material\n\n # dielectric coating on top of BPE\n if dielectric_coating:\n self.dielectric_coating = dielectric_coating\n else:\n self.dielectric_coating = material_solid(name='no_dielectric', permittivity=1, thickness=1e-12, Ka=6, Kb=2, reaction_site_density=5)\n\nclass optics(object):\n def __init__(self, microscope, fluorescent_particles=None, calibration_grid=None, pixel_to_micron_scaling=None):\n\n self.microscope = microscope\n self.fluorescent_particles = fluorescent_particles\n self.calibration_grid = calibration_grid\n\n if self.microscope.objective.magnification == 50:\n self.pixel_to_micron_scaling = 0.60 # (microns/pixels)\n elif self.microscope.objective.magnification == 20:\n self.pixel_to_micron_scaling = 1.55 # (microns/pixels)\n else:\n raise ValueError(\"Unable to determine microns/pixels scaling because objective magnification not 50X or 20X\")\n\n if pixel_to_micron_scaling is not None:\n print(\"Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.\".format(self.pixel_to_micron_scaling, self.microscope.objective.magnification))\n \"\"\"\n --- I THINK THIS SECTION IS DEPRECATED ---\n Notes: deprecated because calculating the scaling factor or entering it manually is too confusing. 
I have\n permanently figured out the correct scaling.\n \n if microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is None:\n self.pixel_to_micron = microscope.objective.pixel_to_micron\n elif microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is not None and microscope.objective.pixel_to_micron != pixel_to_micron_scaling:\n raise ValueError(\"Conflicting scaling factors: microscope.objective={}, optics={}\".format(microscope.objective.pixel_to_micron, pixel_to_micron_scaling))\n elif microscope.objective.pixel_to_micron is None and pixel_to_micron_scaling is not None:\n self.pixel_to_micron = pixel_to_micron_scaling\n \"\"\"\n\nclass illumination(object):\n\n def __init__(self, basePath=None, source=None, excitation=None, emission=None, dichroic=None, illumination_distribution=None,\n calculate_illumination_distribution=False,\n illumPath=None, illumSavePath=None, illumSaveName=None, showIllumPlot=False, save_txt=False, save_plot=False, save_image=False):\n \"\"\"\n details about the optical setup\n :param source:\n :param excitation:\n :param emission:\n :param dichroic:\n \"\"\"\n self.basePath = basePath # this should come from CurlypivTestCollection\n self.source = source\n self.excitation_wavelength = excitation\n self.emission_wavelength = emission\n self.dichroic = dichroic\n\n if illumination_distribution is not None:\n self.illumination_distribution = illumination_distribution\n elif illumPath is not None:\n flatfield = io.imread(illumPath, plugin='tifffile')\n if len(np.shape(flatfield)) > 2:\n flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)), dtype='uint16')\n self.illumination_distribution = flatfield\n elif calculate_illumination_distribution and illumination_distribution is None:\n self.illumination_distribution = measureIlluminationDistributionXY(basePath=self.basePath, illumPath=illumPath,\n show_image=showIllumPlot, save_image=save_image, save_img_type='.tif',\n save_txt=save_txt, show_plot=showIllumPlot, save_plot=save_plot,\n savePath=illumSavePath, savename=illumSaveName)\n else:\n self.illumination_distribution = illumination_distribution\n\n self.flatfield = self.illumination_distribution\n\n if self.flatfield is not None:\n self.flatfield_mean = np.mean(self.flatfield)\n self.flatfield_std = np.std(self.flatfield)\n\nclass darkfield(object):\n\n def __init__(self, basePath, darkframePath=None, flip_image_across_axis=None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, savename=None, save_plot=False):\n \"\"\"\n details about dark field image\n\n \"\"\"\n self.basePath = basePath\n\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=darkframePath, flip_image_axes=flip_image_across_axis, show_image=show_image, save_image=save_image, save_img_type=save_img_type,\n savePath=savePath, savename=savename, save_plot=save_plot)\n\n self.img = img\n self.mean = mean\n self.std = std\n\nclass microscope(object):\n\n def __init__(self, type, objective, illumination, ccd):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n self.type = type # e.g. 
Olympus iX73\n self.objective = objective\n self.illumination = illumination\n self.ccd = ccd\n\nclass ccd(object):\n\n def __init__(self, exposure_time, img_acq_rate, EM_gain, name='iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=None,\n vertical_pixel_shift_speed=0.5e-6, horizontal_pixel_shift_speed=0.1e-6, horizontal_pixel_shift_rate_bits=14,\n frame_transfer=True, crop_mode=False, acquisition_mode='kinetic', triggering='internal', readout_mode='image',\n pixels=512, pixel_size=16e-6):\n \"\"\"\n describe the CCD class\n \"\"\"\n self.name = name\n self.img_acq_type = img_acq_type\n\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n\n # supporting camera acquisition settings\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n\n if isinstance(pixels, int):\n self.pixels = (pixels, pixels)\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = (self.pixels[0]*pixel_size, self.pixels[1]*pixel_size)\n\n\nclass objective(object):\n\n def __init__(self, fluoro_particle, name=None, numerical_aperture=None, magnification=None, basePath=None, channel_height=None, illumination=None, wavelength=None, microgrid=None, auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None, field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):\n \"\"\"\n\n Objectives in the Pennathur Lab Dark Room uScope:\n\n 20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]\n magnification: 20\n numerical_aperture: 0.45\n field_number: 26.5\n working distance: 7.4 - 8.3 mm\n transmittance: 90% @ 425 - 670 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 1.55\n 50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]\n magnification: 50\n numerical aperture: 0.7\n field number: 26.5\n working distance: 2.2 - 3 mm\n transmittance: 90% @ 425 - 650 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 0.6\n\n Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428\n \"\"\"\n\n # if name is entered, then pull all the terms directly\n self.name = name\n\n if name == 'LCPLFLN20xLCD':\n self.magnification = 20\n self.numerical_aperture = 0.45\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 1.55\n elif name == 'LCPLFLN50xLCD':\n self.magnification = 50\n self.numerical_aperture = 0.7\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 0.6\n else:\n self.numerical_aperture = numerical_aperture\n self.magnification = magnification\n self.field_number = field_number\n\n # general terms\n self._illumination = illumination\n if self._illumination is not None:\n self._wavelength = self._illumination.emission_wavelength\n elif wavelength is not None:\n self._wavelength = wavelength\n else:\n raise ValueError(\"A wavelength is required via the <illumination> class or <wavelength> input parameter\")\n self._pd = fluoro_particle.diameter\n self._n0 = n0\n self.calculate_depth_of_field()\n self.calculate_depth_of_correlation()\n\n if field_number:\n self.calculate_field_of_view()\n\n if show_depth_plot or save_depth_plot:\n plot_field_depth(depth_of_corr=self.depth_of_correlation, depth_of_field=self.depth_of_field, 
show_depth_plot=show_depth_plot, save_depth_plot=save_depth_plot,\n basePath=basePath, savename=None, channel_height=channel_height, objective=self.magnification)\n\n # grids and scaling factors\n if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:\n self.microgrid = microgrid\n self.calculate_pixel_to_micron_scaling()\n\n\n def calculate_field_of_view(self):\n self.field_of_view = self.field_number / self.magnification\n\n def calculate_depth_of_field(self, e=16e-6, n=1):\n \"\"\"\n e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)\n \"\"\"\n self.depth_of_field = self._wavelength*n/self.numerical_aperture**2+e*n/(self.magnification*self.numerical_aperture)\n\n def calculate_depth_of_correlation(self, eps=0.01):\n # step 0: define\n n = self._n0\n dp = self._pd\n NA = self.numerical_aperture\n M = self.magnification\n lmbda = self._wavelength\n\n # step 1: calculate the depth of correlation for the optical setup\n depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA, dp=dp, n=n, lmbda=lmbda, eps=eps)\n\n self.depth_of_correlation = depth_of_correlation\n\n def calculate_pixel_to_micron_scaling(self):\n if self.microgrid is None:\n raise ValueError(\"Need objective.microgrid property in order to calculate scaling factor\")\n # script to calculate scaling factor from grid\n # would go here\n\n @property\n def NA(self):\n return self.numerical_aperture\n\n @property\n def M(self):\n return self.magnification\n\nclass microgrid(object):\n\n def __init__(self, gridPath=None, center_to_center_spacing=None, feature_width=None, grid_type='grid', show_grid=False):\n \"\"\"\n this class holds images for the microgrid and performs pixel to micron scaling calculations\n \"\"\"\n if gridPath is not None:\n self.gridPath = gridPath\n self.spacing = center_to_center_spacing\n self.width = feature_width\n self.grid_type = grid_type\n\n # find files in directory\n file_list = glob.glob(join(self.gridPath, 'grid*.tif'))\n\n if len(file_list) < 1:\n raise ValueError(\"No grid*.tif files found in {}\".format(self.gridPath))\n\n img_grid = np.zeros(shape=(512,512))\n for f in file_list:\n img = io.imread(f, plugin='tifffile')\n if len(np.shape(img)) > 2:\n img = np.mean(img, axis=0)\n img_grid += img\n\n img_grid = img_grid / len(file_list)\n\n self.img_grid = img_grid\n\n if show_grid is True:\n fig, ax = plt.subplots()\n ax.imshow(img_grid, cmap='gray')\n\n ax.set_xlabel('pixels')\n ax.set_ylabel('pixels')\n plt.title('grid: 10 um Lines; 50 um Spacing')\n plt.show()\n\n\nclass fluorescent_particles(object):\n\n def __init__(self, name=None, materials=None,diameter=None,fluorescence_spectra=None, concentration=None,\n electrophoretic_mobility=None, zeta=None):\n \"\"\"\n the details of the fluroescent particles used\n :param materials:\n :param diameter:\n :param fluorescence_spectra:\n :param concentration:\n :param electrophoretic_mobility:\n :param zeta:\n \"\"\"\n\n self.name = name\n self.materials=materials\n self.concentration=concentration\n self.electrophoretic_mobility=electrophoretic_mobility\n self.zeta=zeta\n self.diameter=diameter\n if diameter:\n k_b = 1.3806e-23\n T=298\n mu=0.001\n self.diffusivity = k_b*T/(6*np.pi*mu*diameter/2)\n\n self.fluorescence_spectra=fluorescence_spectra\n\n\nclass reservoir(object):\n\n def __init__(self, diameter, height, height_of_reservoir=None, material=None):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n g = 9.81 # m/s**2\n\n self.material = material\n self.diameter = 
diameter\n self.height = height\n self.volume = np.pi*self.diameter**2/4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = material.density*g*self.height_of_reservoir\n\nclass fluid_handling_system(object):\n\n def __init__(self, fluid_reservoir=None, all_tubing=None, onchip_reservoir=None):\n \"\"\"\n describes the fluid handling system\n \"\"\"\n self.fluid_reservoir=fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir\n\nclass tubing(object):\n\n def __init__(self, inner_diameter=None, length=None, material=None):\n \"\"\"\n describes each segment of tubing\n\n \"\"\"\n self.inner_diameter = inner_diameter\n self.length = length\n self.material = material\n\nclass optical_element(object):\n\n def __init__(self, passing_wavelengths=None, reflectivity=None):\n \"\"\"\n this class describes the optical characteristics of any material or element\n :param wavelength_bandpass:\n \"\"\"\n self.passing_wavelengths=passing_wavelengths\n self.reflectivity=reflectivity\n\nclass measurable_quantity(object):\n\n def __init__(self, reference_value=None, measured_value=None):\n \"\"\"\n what value was measured and when\n \"\"\"\n self.reference_value = reference_value\n self.measured_value = measured_value\n\nclass measurement(object):\n\n def __init__(self, value=None, date=None):\n \"\"\"\n Object for storing measurements\n :param value:\n :param date:\n \"\"\"\n self.value = value\n self.date = date\n\nclass electrode_configuration(object):\n\n def __init__(self, material=None, length=None, entrance_length=None):\n \"\"\"\n Object for holding electrode configuration details\n :param material:\n :param length:\n :param entrance_length:\n \"\"\"\n self.material = material\n self.length = length\n self.entrance_length = entrance_length\n\nclass material_solid(object):\n\n def __init__(self, name=None, zeta=None, concentration=None, index_of_refraction=None, transparency=None, fluorescence_spectra=None,\n permittivity=None, conductivity=None, thickness=None, youngs_modulus=None, poissons_ratio=None,\n density=None, dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=None, width=None, length=None):\n \"\"\"\n everything about a material\n :param transparency:\n :param fluorescence_spectra:\n :param zeta:\n \"\"\"\n # identity\n self.name = name\n\n # geometry\n self.length = length\n self.width = width\n self.thickness = thickness\n\n # mechanical\n self.density = density\n self.concentration = concentration # For a solid, this is % by volume.\n self.youngs_modulus = youngs_modulus\n self.poissons_ratio = poissons_ratio\n\n # optical\n self.index_of_refraction = index_of_refraction\n self.fluorescence_spectra = fluorescence_spectra\n self.transparency = transparency\n if self.transparency:\n self.reflectivity = 1 / self.transparency\n\n # electrochemical\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n self.zeta = zeta\n self.dielectric_strength = dielectric_strength\n if reaction_site_density:\n self.reaction_site_density = reaction_site_density*1e18 # (#/nm2) surface density of reaction sites: accepts nm2 and converts to m2 (see Squires)\n self.Ka = Ka # reaction equilibrium constant - upper bound\n self.Kb = Kb # reaction equilibrium constant - lower bound\n\nclass material_liquid(object):\n\n def __init__(self, name=None, species=None, concentration=None, conductivity=None, pH=None, density=None, viscosity=None,\n permittivity=None, 
temperature=None, valence=1.0):\n \"\"\"\n everything about a liquid\n :param species:\n :param concentration:\n :param conductivity:\n :param pH:\n \"\"\"\n # identity\n self.name = name\n\n # electro/chemical\n self.species = species\n self.concentration = concentration # (mmol) = (mmol/L) = (mol/m3)\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n if pH:\n self.pH = pH\n self.c_H = 10**-pH * 1e3 # (mmol) = (mmol/L) = (mol/m3); (concentration of Hydrogen ions (H+)\n self.valence = valence\n\n # mechanical\n self.density = density\n self.viscosity = viscosity\n self.temperature = temperature\n self.diffusivity = 2e-9 # (m^2/s) Diffusivity of KCl in DI water [Soni]",
"step-ids": [
37,
41,
45,
48,
50
]
}
|
[
37,
41,
45,
48,
50
] |
import time
# Decorator that measures the wall-clock execution time of the wrapped function
def measure_time_of_func(func):
def wrapper_func(n):
start_time = time.time()
fib_seq = func(n)
end_time = time.time()
return (fib_seq, end_time - start_time)
return wrapper_func
# Returns a list with the first n numbers of the Fibonacci sequence.
@measure_time_of_func
def fib(n):
sequence = [1, 1]
for i in range(2, n, 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence
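# Minimal usage sketch (not part of the original snippet): because of the
# decorator, fib returns a (sequence, elapsed_seconds) tuple rather than the
# bare list; the values printed below are for illustration only.
if __name__ == "__main__":
    sequence, elapsed = fib(10)
    print(sequence)                     # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    print(f"computed in {elapsed:.6f} s")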
|
normal
|
{
"blob_id": "2c39660da8fe839c4634cd73ce069acc7b1b29b4",
"index": 51,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n",
"step-3": "<mask token>\n\n\ndef measure_time_of_func(func):\n\n def wrapper_func(n):\n start_time = time.time()\n fib_seq = func(n)\n end_time = time.time()\n return fib_seq, end_time - start_time\n return wrapper_func\n\n\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n",
"step-4": "import time\n\n\ndef measure_time_of_func(func):\n\n def wrapper_func(n):\n start_time = time.time()\n fib_seq = func(n)\n end_time = time.time()\n return fib_seq, end_time - start_time\n return wrapper_func\n\n\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n",
"step-5": "import time\n\n\n# Decorator\ndef measure_time_of_func(func):\n def wrapper_func(n):\n start_time = time.time()\n fib_seq = func(n)\n end_time = time.time()\n return (fib_seq, end_time - start_time)\n\n return wrapper_func\n\n\n# Returns a list with first n numbers of fibonacci sequence.\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source("PoolSource", fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root' ] );
secFiles.extend( [
] )
"step-5": "import FWCore.ParameterSet.Config as cms\n\nmaxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\nreadFiles = cms.untracked.vstring()\nsecFiles = cms.untracked.vstring() \nsource = cms.Source (\"PoolSource\",fileNames = readFiles, secondaryFileNames = secFiles)\nreadFiles.extend( [\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root' ] );\n\n\nsecFiles.extend( [\n ] )\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from sqlalchemy import create_engine, Column, Integer, Float, \
String, Text, DateTime, Boolean, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from flask_sqlalchemy import SQLAlchemy
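# check_same_thread=False relaxes SQLite's default single-thread restriction so the
# engine/session below can be used from Flask's request-handling threads.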
engine = create_engine('sqlite:///app/databases/fays-web-dev.db', connect_args={'check_same_thread':False})
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()
|
normal
|
{
"blob_id": "3d2b8730953e9c2801eebc23b6fb56a1b5a55e3c",
"index": 6156,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nengine = create_engine('sqlite:///app/databases/fays-web-dev.db',\n connect_args={'check_same_thread': False})\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n",
"step-3": "from sqlalchemy import create_engine, Column, Integer, Float, String, Text, DateTime, Boolean, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom flask_sqlalchemy import SQLAlchemy\nengine = create_engine('sqlite:///app/databases/fays-web-dev.db',\n connect_args={'check_same_thread': False})\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n",
"step-4": "from sqlalchemy import create_engine, Column, Integer, Float, \\\n String, Text, DateTime, Boolean, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom flask_sqlalchemy import SQLAlchemy\n\nengine = create_engine('sqlite:///app/databases/fays-web-dev.db', connect_args={'check_same_thread':False})\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 10:28:04 2020
@author: Maxi
"""
import numpy as np
from ase.io import read
from RDF_3D import pairCorrelationFunction_3D
import matplotlib.pyplot as plt
filename = r"C:\Users\Maxi\Desktop\t\Ag_HfO2_cat_3.125_222_t.cif"
crystal = read(filename)
corrdinates = crystal.get_positions()
cell_length = crystal.get_cell_lengths_and_angles()
cell_length = cell_length[0:3] # only select the cell length
dr = 0.01  # spherical shell radius dr
min_length_cell = min(cell_length)  # select the smallest length in the cell
rmax = min_length_cell / 10
x = corrdinates[:, 0] # split the 2d array into x, y, z coordinates
y = corrdinates[:, 1]
z = corrdinates[:, 2]
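# compute the radial distribution function g(r) in spherical shells of width dr, out to rmax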
g_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr)
plt.figure()
plt.plot(r, g_r, color='black')
plt.xlabel('r')
plt.ylabel('g(r)')
plt.xlim( (0, rmax) )
plt.ylim( (0, 1.05 * g_r.max()) )
plt.show()
|
normal
|
{
"blob_id": "516d9790f40c021d45302948b7fba0cf3e00da0a",
"index": 6322,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim((0, rmax))\nplt.ylim((0, 1.05 * g_r.max()))\nplt.show()\n",
"step-3": "<mask token>\nfilename = 'C:\\\\Users\\\\Maxi\\\\Desktop\\\\t\\\\Ag_HfO2_cat_3.125_222_t.cif'\ncrystal = read(filename)\ncorrdinates = crystal.get_positions()\ncell_length = crystal.get_cell_lengths_and_angles()\ncell_length = cell_length[0:3]\ndr = 0.01\nmin_length_cell = min(cell_length)\nrmax = min_length_cell / 10\nx = corrdinates[:, 0]\ny = corrdinates[:, 1]\nz = corrdinates[:, 2]\ng_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr\n )\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim((0, rmax))\nplt.ylim((0, 1.05 * g_r.max()))\nplt.show()\n",
"step-4": "<mask token>\nimport numpy as np\nfrom ase.io import read\nfrom RDF_3D import pairCorrelationFunction_3D\nimport matplotlib.pyplot as plt\nfilename = 'C:\\\\Users\\\\Maxi\\\\Desktop\\\\t\\\\Ag_HfO2_cat_3.125_222_t.cif'\ncrystal = read(filename)\ncorrdinates = crystal.get_positions()\ncell_length = crystal.get_cell_lengths_and_angles()\ncell_length = cell_length[0:3]\ndr = 0.01\nmin_length_cell = min(cell_length)\nrmax = min_length_cell / 10\nx = corrdinates[:, 0]\ny = corrdinates[:, 1]\nz = corrdinates[:, 2]\ng_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr\n )\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim((0, rmax))\nplt.ylim((0, 1.05 * g_r.max()))\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 15 10:28:04 2020\n\n@author: Maxi\n\"\"\"\nimport numpy as np\nfrom ase.io import read\nfrom RDF_3D import pairCorrelationFunction_3D\nimport matplotlib.pyplot as plt\n \n\nfilename = r\"C:\\Users\\Maxi\\Desktop\\t\\Ag_HfO2_cat_3.125_222_t.cif\"\ncrystal = read(filename)\ncorrdinates = crystal.get_positions()\ncell_length = crystal.get_cell_lengths_and_angles()\ncell_length = cell_length[0:3] # only select the cell length\n\ndr = 0.01 # shperical shell radius dr\nmin_length_cell = min(cell_length) # select the smalles length in cell\nrmax = min_length_cell / 10\nx = corrdinates[:, 0] # split the 2d array into x, y, z coordinates\ny = corrdinates[:, 1]\nz = corrdinates[:, 2]\n\ng_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr)\n\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim( (0, rmax) )\nplt.ylim( (0, 1.05 * g_r.max()) )\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^stats/$', views.get_stats, name='stats'),
url(r'^follow/me/$', views.follow_me, name='follow_me'),
url(r'^follower/confirm/$', views.confirm_follower, name='follower_confirm'),
url(r'^execute/', views.execute, name='executed'),
url(r'^output/', views.update_output, name='output'),
url(r'^lead/', views.lead_nodes, name='lead'),
]
|
normal
|
{
"blob_id": "33b68246dd3da9561c1d4adb5a3403cba656dcee",
"index": 9175,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^stats/$', views.get_stats, name='stats'), url(\n '^follow/me/$', views.follow_me, name='follow_me'), url(\n '^follower/confirm/$', views.confirm_follower, name='follower_confirm'),\n url('^execute/', views.execute, name='executed'), url('^output/', views\n .update_output, name='output'), url('^lead/', views.lead_nodes, name=\n 'lead')]\n",
"step-3": "from django.conf.urls import url\nfrom . import views\nurlpatterns = [url('^stats/$', views.get_stats, name='stats'), url(\n '^follow/me/$', views.follow_me, name='follow_me'), url(\n '^follower/confirm/$', views.confirm_follower, name='follower_confirm'),\n url('^execute/', views.execute, name='executed'), url('^output/', views\n .update_output, name='output'), url('^lead/', views.lead_nodes, name=\n 'lead')]\n",
"step-4": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^stats/$', views.get_stats, name='stats'),\n url(r'^follow/me/$', views.follow_me, name='follow_me'),\n url(r'^follower/confirm/$', views.confirm_follower, name='follower_confirm'),\n url(r'^execute/', views.execute, name='executed'),\n url(r'^output/', views.update_output, name='output'),\n url(r'^lead/', views.lead_nodes, name='lead'),\n\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
import math
import json
import html
import bs4
import re
import dateparser
from bs4 import BeautifulSoup
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, List, Dict, ClassVar, Union
from urllib.parse import urlparse
from .markdown import MarkdownData, MarkdownDocument
Url = str
@dataclass
class Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
    - cast and validate data so that it knows how to read data fields from
      markdown and dataframes
    - output actions for dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
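    # meta fields are rendered as attributes on the <table> tag (not as rows) when
    # serializing to markdown, and are read back from those attributes in create_from_md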
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def listify(self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
        # make sure each source is either a url or an html link tag <a>
        for source in self.sources:
            assert (
                BeautifulSoup(source, "html.parser").a is not None
                or urlparse(source).netloc != ""
            ), f"'{source}' is invalid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) -> str:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
)
elif field == "sources":
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
    This class is a collection of actions. It is used for four primary
    use cases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) -> "Actions":
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(fr'<div id="{cls.action_id}">+[\s\S]+<\/div>', md_doc)
assert len(md_data) == 1, f"multiple divs with id={cls.action_id} were found"
md_data = md_data[0]
soup = BeautifulSoup(md_data, "html.parser")
tables = soup.div.find_all("table")
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) -> "Actions":
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions
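

# A hypothetical usage sketch (not part of the original module); the field values
# below are illustrative only. It builds one Action, collects it in an Actions
# container, and renders the markdown/HTML table. Because of the relative import
# at the top, run it as a module (python -m <package>.<module>), not as a script.
if __name__ == "__main__":
    example = Action(
        date="2020-01-15",
        sources=["https://example.com/article"],
        action="strike",
        struggles=["pay_and_benefits"],
        description="Illustrative entry used only to exercise the API.",
    )
    actions = Actions()
    actions.append(example)
    actions.sort()
    print(actions.to_md())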
|
normal
|
{
"blob_id": "4d0f612c74dc175766f489580fc4a492e1bfd085",
"index": 4345,
"step-1": "<mask token>\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. \"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass Action:\n <mask token>\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n _meta_fields: ClassVar = ['author']\n _valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',\n 'working_conditions', 'discrimination', 'unfair_labor_practices',\n 'job_security']\n _valid_actions: ClassVar = ['strike', 'protest', 'open_letter',\n 'legal_action', 'union_drive', 'union_representation']\n\n @staticmethod\n def is_none(field: Any) ->bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == 'none':\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def to_md(self, field: str, td: bs4.element.Tag) ->str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert field in self.__dataclass_fields__, f'Cannot serialize {field}. Not a valid field in Action.'\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n td.string = str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags']:\n td.string = str(value).strip('[').strip(']').replace(\"'\", ''\n ).replace('\"', '')\n elif field == 'sources':\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(', '.join(ret)),\n 'html.parser'))\n else:\n td.string = value\n return td\n <mask token>\n <mask token>\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. 
\"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-3": "<mask token>\n\n\n@dataclass\nclass Action:\n <mask token>\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n _meta_fields: ClassVar = ['author']\n _valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',\n 'working_conditions', 'discrimination', 'unfair_labor_practices',\n 'job_security']\n _valid_actions: ClassVar = ['strike', 'protest', 'open_letter',\n 'legal_action', 'union_drive', 'union_representation']\n\n @staticmethod\n def is_none(field: Any) ->bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == 'none':\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n <mask token>\n\n def __post_init__(self):\n \"\"\" Used to validate fields. \"\"\"\n self.date = dateparser.parse(self.date).date()\n self.sources = self.listify(self.sources)\n self.struggles = self.listify(self.struggles)\n self.action = self.action.strip().lower()\n self.companies = self.listify(self.companies)\n self.tags = self.listify(self.tags)\n self.locations = self.listify(self.locations)\n self.workers = None if self.is_none(self.workers) else int(self.workers\n )\n assert self.action in self._valid_actions, f\"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}\"\n for struggle in self.struggles:\n assert struggle in self._valid_struggles, f\"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}\"\n for source in self.sources:\n assert BeautifulSoup(source, 'html.parser'\n ).a is not None or urlparse(source\n ).netloc is not '', f\"'{source}' is in valid. source must be a valid url or an html link tag element\"\n self.sources = [(BeautifulSoup(source, 'html.parser').a['href'] if \n 'href' in source else source) for source in self.sources]\n\n def __lt__(self, other):\n \"\"\" Used to make Actions sortable. \"\"\"\n return self.date < other.date\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Action):\n return self.__dict__.items() == other.__dict__.items()\n return False\n\n def to_df(self) ->Dict[str, Any]:\n \"\"\" Return dict of all fields serialized to string \"\"\"\n return {key: self.render_df(key) for key, value in self.__dict__.\n items()}\n\n def render_df(self, field: str) ->str:\n \"\"\" Return the value of the field rendered for df. \"\"\"\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n return str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags', 'sources'\n ]:\n return str(value).strip('[').strip(']').replace(\"'\", '').replace(\n '\"', '')\n else:\n return value\n\n def to_md(self, field: str, td: bs4.element.Tag) ->str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert field in self.__dataclass_fields__, f'Cannot serialize {field}. 
Not a valid field in Action.'\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n td.string = str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags']:\n td.string = str(value).strip('[').strip(']').replace(\"'\", ''\n ).replace('\"', '')\n elif field == 'sources':\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(', '.join(ret)),\n 'html.parser'))\n else:\n td.string = value\n return td\n <mask token>\n\n @classmethod\n def create_from_row(cls, row: pd.Series) ->'Action':\n \"\"\" Create an Action instance from a dataframe row. \"\"\"\n fields = [key for key, value in cls.__dataclass_fields__.items() if\n value.type != ClassVar]\n d = {key: value for key, value in row.to_dict().items() if key in\n fields}\n return cls(**d)\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. \"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. 
\"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-4": "<mask token>\nUrl = str\n\n\n@dataclass\nclass Action:\n \"\"\" The class for an action we want to track.\n\n This class is used to manage the data of an individual Action. It is used\n to perform the following:\n - set mandatory/optional fields\n - set meta fields\n - cast an validate data so that it knows how to read datafields from\n markdown and dataframes\n - output actions as for dataframes and markdown\n - create and populate action instances from markdown and dataframes\n \"\"\"\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n _meta_fields: ClassVar = ['author']\n _valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',\n 'working_conditions', 'discrimination', 'unfair_labor_practices',\n 'job_security']\n _valid_actions: ClassVar = ['strike', 'protest', 'open_letter',\n 'legal_action', 'union_drive', 'union_representation']\n\n @staticmethod\n def is_none(field: Any) ->bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == 'none':\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n\n def listify(self, field: Union[List[Any], Any]) ->List[Any]:\n if self.is_none(field):\n return None\n elif isinstance(field, (list,)):\n return field\n else:\n return [s.strip().lower() for s in field.split(',')]\n\n def __post_init__(self):\n \"\"\" Used to validate fields. \"\"\"\n self.date = dateparser.parse(self.date).date()\n self.sources = self.listify(self.sources)\n self.struggles = self.listify(self.struggles)\n self.action = self.action.strip().lower()\n self.companies = self.listify(self.companies)\n self.tags = self.listify(self.tags)\n self.locations = self.listify(self.locations)\n self.workers = None if self.is_none(self.workers) else int(self.workers\n )\n assert self.action in self._valid_actions, f\"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}\"\n for struggle in self.struggles:\n assert struggle in self._valid_struggles, f\"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}\"\n for source in self.sources:\n assert BeautifulSoup(source, 'html.parser'\n ).a is not None or urlparse(source\n ).netloc is not '', f\"'{source}' is in valid. source must be a valid url or an html link tag element\"\n self.sources = [(BeautifulSoup(source, 'html.parser').a['href'] if \n 'href' in source else source) for source in self.sources]\n\n def __lt__(self, other):\n \"\"\" Used to make Actions sortable. \"\"\"\n return self.date < other.date\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Action):\n return self.__dict__.items() == other.__dict__.items()\n return False\n\n def to_df(self) ->Dict[str, Any]:\n \"\"\" Return dict of all fields serialized to string \"\"\"\n return {key: self.render_df(key) for key, value in self.__dict__.\n items()}\n\n def render_df(self, field: str) ->str:\n \"\"\" Return the value of the field rendered for df. 
\"\"\"\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n return str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags', 'sources'\n ]:\n return str(value).strip('[').strip(']').replace(\"'\", '').replace(\n '\"', '')\n else:\n return value\n\n def to_md(self, field: str, td: bs4.element.Tag) ->str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert field in self.__dataclass_fields__, f'Cannot serialize {field}. Not a valid field in Action.'\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n td.string = str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags']:\n td.string = str(value).strip('[').strip(']').replace(\"'\", ''\n ).replace('\"', '')\n elif field == 'sources':\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(', '.join(ret)),\n 'html.parser'))\n else:\n td.string = value\n return td\n\n @classmethod\n def create_from_md(cls, table: bs4.element.Tag) ->'Action':\n \"\"\" Create an Action instance from a md table. \"\"\"\n a = {}\n trs = table.find_all('tr')\n for key, val in table.attrs.items():\n if key != 'class':\n a[key] = val\n for i, tr in enumerate(trs):\n td_key = tr.find('td', class_='field-key')\n td_val = tr.find('td', class_='field-value')\n val = ''.join(str(e) for e in td_val.contents).strip()\n key = ''.join(str(e) for e in td_key.contents).strip()\n a[key] = val\n return cls(**a)\n\n @classmethod\n def create_from_row(cls, row: pd.Series) ->'Action':\n \"\"\" Create an Action instance from a dataframe row. \"\"\"\n fields = [key for key, value in cls.__dataclass_fields__.items() if\n value.type != ClassVar]\n d = {key: value for key, value in row.to_dict().items() if key in\n fields}\n return cls(**d)\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. 
\"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-5": "import pandas as pd\nimport math\nimport json\nimport html\nimport bs4\nimport re\nimport dateparser\nfrom bs4 import BeautifulSoup\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom typing import Any, List, Dict, ClassVar, Union\nfrom urllib.parse import urlparse\nfrom .markdown import MarkdownData, MarkdownDocument\n\nUrl = str\n\n\n@dataclass\nclass Action:\n \"\"\" The class for an action we want to track.\n\n This class is used to manage the data of an individual Action. It is used\n to perform the following:\n - set mandatory/optional fields\n - set meta fields\n - cast an validate data so that it knows how to read datafields from\n markdown and dataframes\n - output actions as for dataframes and markdown\n - create and populate action instances from markdown and dataframes\n \"\"\"\n\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n\n _meta_fields: ClassVar = [\"author\"]\n\n _valid_struggles: ClassVar = [\n \"ethics\",\n \"pay_and_benefits\",\n \"working_conditions\",\n \"discrimination\",\n \"unfair_labor_practices\",\n \"job_security\",\n ]\n\n _valid_actions: ClassVar = [\n \"strike\",\n \"protest\",\n \"open_letter\",\n \"legal_action\",\n \"union_drive\",\n \"union_representation\",\n ]\n\n @staticmethod\n def is_none(field: Any) -> bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == \"none\":\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n\n def listify(self, field: Union[List[Any], Any]) -> List[Any]:\n if self.is_none(field):\n return None\n else:\n if isinstance(field, (list,)):\n return field\n else:\n return [s.strip().lower() for s in field.split(\",\")]\n\n def __post_init__(self):\n \"\"\" Used to validate fields. \"\"\"\n # self.date = datetime.strptime(self.date, \"%Y-%m-%d\").date()\n self.date = dateparser.parse(self.date).date()\n self.sources = self.listify(self.sources)\n self.struggles = self.listify(self.struggles)\n self.action = self.action.strip().lower()\n\n self.companies = self.listify(self.companies)\n self.tags = self.listify(self.tags)\n self.locations = self.listify(self.locations)\n\n self.workers = None if self.is_none(self.workers) else int(self.workers)\n\n # make sure action is a valid action\n assert (\n self.action in self._valid_actions\n ), f\"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}\"\n\n # make sure all struggles are valid struggles\n for struggle in self.struggles:\n assert (\n struggle in self._valid_struggles\n ), f\"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}\"\n\n # make sure source is either a url or a html link tag <a>\n for source in self.sources:\n assert (\n BeautifulSoup(source, \"html.parser\").a is not None\n or urlparse(source).netloc is not \"\"\n ), f\"'{source}' is in valid. source must be a valid url or an html link tag element\"\n\n # if html, extract only href from sources\n self.sources = [\n BeautifulSoup(source, \"html.parser\").a[\"href\"]\n if \"href\" in source\n else source\n for source in self.sources\n ]\n\n def __lt__(self, other):\n \"\"\" Used to make Actions sortable. 
\"\"\"\n return self.date < other.date\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Action):\n return self.__dict__.items() == other.__dict__.items()\n return False\n\n def to_df(self) -> Dict[str, Any]:\n \"\"\" Return dict of all fields serialized to string \"\"\"\n return {key: self.render_df(key) for key, value in self.__dict__.items()}\n\n def render_df(self, field: str) -> str:\n \"\"\" Return the value of the field rendered for df. \"\"\"\n value = self.__getattribute__(field)\n if field in [\"date\", \"workers\"]:\n return str(value)\n elif field in [\"locations\", \"struggles\", \"companies\", \"tags\", \"sources\"]:\n return str(value).strip(\"[\").strip(\"]\").replace(\"'\", \"\").replace('\"', \"\")\n else:\n return value\n\n def to_md(self, field: str, td: bs4.element.Tag) -> str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert (\n field in self.__dataclass_fields__\n ), f\"Cannot serialize {field}. Not a valid field in Action.\"\n\n value = self.__getattribute__(field)\n\n if field in [\"date\", \"workers\"]:\n td.string = str(value)\n elif field in [\"locations\", \"struggles\", \"companies\", \"tags\"]:\n td.string = (\n str(value).strip(\"[\").strip(\"]\").replace(\"'\", \"\").replace('\"', \"\")\n )\n elif field == \"sources\":\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(\", \".join(ret)), \"html.parser\"))\n else:\n td.string = value\n\n return td\n\n @classmethod\n def create_from_md(cls, table: bs4.element.Tag) -> \"Action\":\n \"\"\" Create an Action instance from a md table. \"\"\"\n a = {}\n trs = table.find_all(\"tr\")\n for key, val in table.attrs.items():\n if key != \"class\":\n a[key] = val\n for i, tr in enumerate(trs):\n td_key = tr.find(\"td\", class_=\"field-key\")\n td_val = tr.find(\"td\", class_=\"field-value\")\n val = \"\".join(str(e) for e in td_val.contents).strip()\n key = \"\".join(str(e) for e in td_key.contents).strip()\n a[key] = val\n return cls(**a)\n\n @classmethod\n def create_from_row(cls, row: pd.Series) -> \"Action\":\n \"\"\" Create an Action instance from a dataframe row. \"\"\"\n fields = [\n key\n for key, value in cls.__dataclass_fields__.items()\n if value.type != ClassVar\n ]\n d = {key: value for key, value in row.to_dict().items() if key in fields}\n return cls(**d)\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n\n action_id: ClassVar = \"actions\"\n actions: List[Action] = field(default_factory=lambda: [])\n fields: List[str] = field(\n default_factory=lambda: [\n key\n for key, value in Action.__dataclass_fields__.items()\n if value.type != ClassVar\n ]\n )\n\n def __len__(self) -> int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. 
\"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) -> \"Actions\":\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) -> pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient=\"list\")\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. \"\"\"\n soup = BeautifulSoup(f\"<div id={self.action_id}></div>\", \"html.parser\")\n for action in self.actions:\n table = soup.new_tag(\"table\")\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag(\"tr\")\n td_key = soup.new_tag(\"td\", attrs={\"class\": \"field-key\"})\n td_val = soup.new_tag(\"td\", attrs={\"class\": \"field-value\"})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) -> \"Actions\":\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(fr'<div id=\"{cls.action_id}\">+[\\s\\S]+<\\/div>', md_doc)\n assert len(md_data) == 1, f\"multiple divs with id={cls.action_id} were found\"\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, \"html.parser\")\n tables = soup.div.find_all(\"table\")\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) -> \"Actions\":\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-ids": [
10,
13,
19,
23,
25
]
}
|
[
10,
13,
19,
23,
25
] |
import itertools
n = int(input())
a = [list(map(int, input().split(" "))) for i in range(n)]
ans = 0
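# Every unordered pair of points is adjacent in 2*(n-1)! of the n! visiting
# orders, so the expected total path length over a uniformly random order is
# (sum of pairwise distances) * 2 / n; the loop below accumulates that numerator.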
for [ix,iy], [jx, jy] in itertools.combinations(a, 2):
ans += ((jx-ix)**2+(jy-iy)**2)**0.5*2
print(ans/n)
|
normal
|
{
"blob_id": "a210a015284130f23bfec99898f2f21163a33a67",
"index": 9897,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor [ix, iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2\nprint(ans / n)\n",
"step-3": "<mask token>\nn = int(input())\na = [list(map(int, input().split(' '))) for i in range(n)]\nans = 0\nfor [ix, iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2\nprint(ans / n)\n",
"step-4": "import itertools\nn = int(input())\na = [list(map(int, input().split(' '))) for i in range(n)]\nans = 0\nfor [ix, iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2\nprint(ans / n)\n",
"step-5": "import itertools\nn = int(input())\na = [list(map(int, input().split(\" \"))) for i in range(n)]\nans = 0\nfor [ix,iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx-ix)**2+(jy-iy)**2)**0.5*2\nprint(ans/n)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import unittest
import os
import tempfile
import numpy as np
from keras_piecewise.backend import keras
from keras_piecewise import Piecewise2D
from .util import MaxPool2D
class TestPool2D(unittest.TestCase):
@staticmethod
def _build_model(input_shape, layer, row_num, col_num, pos_type=Piecewise2D.POS_TYPE_SEGMENTS):
data_input = keras.layers.Input(shape=input_shape)
row_input = keras.layers.Input(shape=(row_num,))
col_input = keras.layers.Input(shape=(col_num,))
pool_layer = Piecewise2D(
layer=layer,
pos_type=pos_type,
)([data_input, row_input, col_input])
model = keras.models.Model(inputs=[data_input, row_input, col_input], outputs=pool_layer)
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.mean_squared_error)
model.summary()
return model
def test_max_2d(self):
data = [
[
[1, 3, 5, 2],
[2, 5, 6, 1],
[7, 1, 5, 3],
[7, 2, 2, 4],
],
[
[1, 3, 5, 2],
[2, 5, 6, 1],
[7, 1, 5, 3],
[7, 2, 2, 4],
],
]
rows = [
[2, 4],
[3, 4],
]
cols = [
[1, 2, 4],
[1, 3, 4],
]
model = self._build_model(
input_shape=(None, None),
layer=MaxPool2D(),
row_num=len(rows[0]),
col_num=len(cols[0]),
)
predicts = model.predict([np.asarray(data), np.asarray(rows), np.asarray(cols)]).tolist()
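        # With POS_TYPE_SEGMENTS, rows [2, 4] appear to split each 4x4 input
        # into row bands [0:2) and [2:4), and cols [1, 2, 4] into column bands
        # [0:1), [1:2) and [2:4); each expected entry below is the max over
        # one row band x column band.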
expected = [
[
[2.0, 5.0, 6.0],
[7.0, 2.0, 5.0],
],
[
[7.0, 6.0, 3.0],
[7.0, 2.0, 4.0],
],
]
self.assertEqual(expected, predicts)
cols = [
[1, 2, 0, 4],
[1, 3, 2, 4],
]
model = self._build_model(
input_shape=(None, None),
layer=MaxPool2D(),
row_num=len(rows[0]),
col_num=len(cols[0]),
pos_type=Piecewise2D.POS_TYPE_PAIRS,
)
model_path = os.path.join(tempfile.gettempdir(), 'keras_piece_test_save_load_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={
'Piecewise2D': Piecewise2D,
'MaxPool2D': MaxPool2D,
})
predicts = model.predict([np.asarray(data), np.asarray(rows), np.asarray(cols)]).tolist()
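        # With POS_TYPE_PAIRS, consecutive values appear to form explicit
        # (start, end) pairs: rows (2, 4) / (3, 4) and cols (1, 2), (0, 4) /
        # (1, 3), (2, 4), so each sample yields a single row of two maxima.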
expected = [
[[2.0, 7.0]],
[[2.0, 4.0]],
]
self.assertEqual(expected, predicts)
def test_pos_type_not_implemented(self):
with self.assertRaises(NotImplementedError):
self._build_model(
input_shape=(None,),
layer=MaxPool2D(),
row_num=13,
col_num=17,
pos_type='whatever',
)
|
normal
|
{
"blob_id": "1af9fb91e69ea78709c47fca6b12e4f7a6fd17a8",
"index": 7392,
"step-1": "<mask token>\n\n\nclass TestPool2D(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestPool2D(unittest.TestCase):\n\n @staticmethod\n def _build_model(input_shape, layer, row_num, col_num, pos_type=\n Piecewise2D.POS_TYPE_SEGMENTS):\n data_input = keras.layers.Input(shape=input_shape)\n row_input = keras.layers.Input(shape=(row_num,))\n col_input = keras.layers.Input(shape=(col_num,))\n pool_layer = Piecewise2D(layer=layer, pos_type=pos_type)([\n data_input, row_input, col_input])\n model = keras.models.Model(inputs=[data_input, row_input, col_input\n ], outputs=pool_layer)\n model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.\n mean_squared_error)\n model.summary()\n return model\n\n def test_max_2d(self):\n data = [[[1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]], [\n [1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]]]\n rows = [[2, 4], [3, 4]]\n cols = [[1, 2, 4], [1, 3, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]))\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 5.0, 6.0], [7.0, 2.0, 5.0]], [[7.0, 6.0, 3.0], [\n 7.0, 2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n cols = [[1, 2, 0, 4], [1, 3, 2, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]), pos_type=\n Piecewise2D.POS_TYPE_PAIRS)\n model_path = os.path.join(tempfile.gettempdir(), \n 'keras_piece_test_save_load_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path, custom_objects={\n 'Piecewise2D': Piecewise2D, 'MaxPool2D': MaxPool2D})\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 7.0]], [[2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestPool2D(unittest.TestCase):\n\n @staticmethod\n def _build_model(input_shape, layer, row_num, col_num, pos_type=\n Piecewise2D.POS_TYPE_SEGMENTS):\n data_input = keras.layers.Input(shape=input_shape)\n row_input = keras.layers.Input(shape=(row_num,))\n col_input = keras.layers.Input(shape=(col_num,))\n pool_layer = Piecewise2D(layer=layer, pos_type=pos_type)([\n data_input, row_input, col_input])\n model = keras.models.Model(inputs=[data_input, row_input, col_input\n ], outputs=pool_layer)\n model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.\n mean_squared_error)\n model.summary()\n return model\n\n def test_max_2d(self):\n data = [[[1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]], [\n [1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]]]\n rows = [[2, 4], [3, 4]]\n cols = [[1, 2, 4], [1, 3, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]))\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 5.0, 6.0], [7.0, 2.0, 5.0]], [[7.0, 6.0, 3.0], [\n 7.0, 2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n cols = [[1, 2, 0, 4], [1, 3, 2, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]), pos_type=\n Piecewise2D.POS_TYPE_PAIRS)\n model_path = os.path.join(tempfile.gettempdir(), \n 'keras_piece_test_save_load_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path, custom_objects={\n 'Piecewise2D': Piecewise2D, 'MaxPool2D': MaxPool2D})\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 7.0]], [[2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n\n def test_pos_type_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n self._build_model(input_shape=(None,), layer=MaxPool2D(),\n row_num=13, col_num=17, pos_type='whatever')\n",
"step-4": "import unittest\nimport os\nimport tempfile\nimport numpy as np\nfrom keras_piecewise.backend import keras\nfrom keras_piecewise import Piecewise2D\nfrom .util import MaxPool2D\n\n\nclass TestPool2D(unittest.TestCase):\n\n @staticmethod\n def _build_model(input_shape, layer, row_num, col_num, pos_type=\n Piecewise2D.POS_TYPE_SEGMENTS):\n data_input = keras.layers.Input(shape=input_shape)\n row_input = keras.layers.Input(shape=(row_num,))\n col_input = keras.layers.Input(shape=(col_num,))\n pool_layer = Piecewise2D(layer=layer, pos_type=pos_type)([\n data_input, row_input, col_input])\n model = keras.models.Model(inputs=[data_input, row_input, col_input\n ], outputs=pool_layer)\n model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.\n mean_squared_error)\n model.summary()\n return model\n\n def test_max_2d(self):\n data = [[[1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]], [\n [1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]]]\n rows = [[2, 4], [3, 4]]\n cols = [[1, 2, 4], [1, 3, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]))\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 5.0, 6.0], [7.0, 2.0, 5.0]], [[7.0, 6.0, 3.0], [\n 7.0, 2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n cols = [[1, 2, 0, 4], [1, 3, 2, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]), pos_type=\n Piecewise2D.POS_TYPE_PAIRS)\n model_path = os.path.join(tempfile.gettempdir(), \n 'keras_piece_test_save_load_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path, custom_objects={\n 'Piecewise2D': Piecewise2D, 'MaxPool2D': MaxPool2D})\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 7.0]], [[2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n\n def test_pos_type_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n self._build_model(input_shape=(None,), layer=MaxPool2D(),\n row_num=13, col_num=17, pos_type='whatever')\n",
"step-5": "import unittest\nimport os\nimport tempfile\nimport numpy as np\nfrom keras_piecewise.backend import keras\nfrom keras_piecewise import Piecewise2D\nfrom .util import MaxPool2D\n\n\nclass TestPool2D(unittest.TestCase):\n\n @staticmethod\n def _build_model(input_shape, layer, row_num, col_num, pos_type=Piecewise2D.POS_TYPE_SEGMENTS):\n data_input = keras.layers.Input(shape=input_shape)\n row_input = keras.layers.Input(shape=(row_num,))\n col_input = keras.layers.Input(shape=(col_num,))\n pool_layer = Piecewise2D(\n layer=layer,\n pos_type=pos_type,\n )([data_input, row_input, col_input])\n model = keras.models.Model(inputs=[data_input, row_input, col_input], outputs=pool_layer)\n model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.mean_squared_error)\n model.summary()\n return model\n\n def test_max_2d(self):\n data = [\n [\n [1, 3, 5, 2],\n [2, 5, 6, 1],\n [7, 1, 5, 3],\n [7, 2, 2, 4],\n ],\n [\n [1, 3, 5, 2],\n [2, 5, 6, 1],\n [7, 1, 5, 3],\n [7, 2, 2, 4],\n ],\n ]\n rows = [\n [2, 4],\n [3, 4],\n ]\n cols = [\n [1, 2, 4],\n [1, 3, 4],\n ]\n model = self._build_model(\n input_shape=(None, None),\n layer=MaxPool2D(),\n row_num=len(rows[0]),\n col_num=len(cols[0]),\n )\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.asarray(cols)]).tolist()\n expected = [\n [\n [2.0, 5.0, 6.0],\n [7.0, 2.0, 5.0],\n ],\n [\n [7.0, 6.0, 3.0],\n [7.0, 2.0, 4.0],\n ],\n ]\n self.assertEqual(expected, predicts)\n cols = [\n [1, 2, 0, 4],\n [1, 3, 2, 4],\n ]\n model = self._build_model(\n input_shape=(None, None),\n layer=MaxPool2D(),\n row_num=len(rows[0]),\n col_num=len(cols[0]),\n pos_type=Piecewise2D.POS_TYPE_PAIRS,\n )\n model_path = os.path.join(tempfile.gettempdir(), 'keras_piece_test_save_load_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path, custom_objects={\n 'Piecewise2D': Piecewise2D,\n 'MaxPool2D': MaxPool2D,\n })\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.asarray(cols)]).tolist()\n expected = [\n [[2.0, 7.0]],\n [[2.0, 4.0]],\n ]\n self.assertEqual(expected, predicts)\n\n def test_pos_type_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n self._build_model(\n input_shape=(None,),\n layer=MaxPool2D(),\n row_num=13,\n col_num=17,\n pos_type='whatever',\n )\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import os
import sys
from shutil import copyfile
def buildDocumentation():
"""
Build eMonitor Documentation with sphinx
:param sys.argv:
* html: build html documentation in directory */docs/output/html*
* pdf: build pdf documentation in directory */docs/output/pdf*
"""
helptext = 'usage: build_doc.py <output format> <type of documentation>' \
'\n - html: for html output' \
'\n - pdf: for pdf output' \
'\n\n - all: complete documentation' \
'\n - dev: only developer documentation' \
'\n - user: only user documentation'
    if len(sys.argv) != 3:
        print(helptext)
        sys.exit(1)
    if sys.argv[1] not in ['pdf', 'html']:
        print(helptext)
        sys.exit(1)
    if sys.argv[2] not in ['all', 'dev', 'user']:
        print(helptext)
        sys.exit(1)
copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory
os.system('sphinx-build -b %s -c docs -D master_doc=index . docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))
os.remove('index.rst') # delete config file from root directory
if __name__ == '__main__':
buildDocumentation()
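# Example invocation (hypothetical; assumes this script is saved as
# build_doc.py in the repository root, next to the docs/ directory):
#     python build_doc.py html user   # user documentation as HTML
#     python build_doc.py pdf all     # complete documentation as PDF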
|
normal
|
{
"blob_id": "e60c3a6aececd97ec08ae32b552bcda795375b3b",
"index": 779,
"step-1": "import os\nimport sys\nfrom shutil import copyfile\n\n\ndef buildDocumentation():\n \"\"\"\n Build eMonitor Documentation with sphinx\n\n :param sys.argv:\n\n * html: build html documentation in directory */docs/output/html*\n * pdf: build pdf documentation in directory */docs/output/pdf*\n\n \"\"\"\n helptext = 'usage: build_doc.py <output format> <type of documentation>' \\\n '\\n - html: for html output' \\\n '\\n - pdf: for pdf output' \\\n '\\n\\n - all: complete documentation' \\\n '\\n - dev: only developer documentation' \\\n '\\n - user: only user documentation'\n if len(sys.argv) != 3:\n print helptext\n sys.exit(1)\n\n if sys.argv[1] not in ['pdf', 'html']:\n print helptext\n sys.exit(1)\n if sys.argv[2] not in ['all', 'dev', 'user']:\n print helptext\n sys.exit(1)\n\n copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory\n os.system('sphinx-build -b %s -c docs -D master_doc=index . docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))\n os.remove('index.rst') # delete config file from root directory\n\nif __name__ == '__main__':\n buildDocumentation()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""After seeing how great the lmfit package, I was inspired to create my own
object using it. This acts as a fitting template.
"""
##-------------------------------PREAMBLE-----------------------------------##
import numpy as np
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, fit_report
import logging
##-------------------------------CLASS DEFINITION-----------------------------------##
class FitTemplate():
def __init__(self, fit_function, log_dir = None):
self.fit_function = fit_function
self.parameters = Parameters()
self.fit_result = None
#setup logging. warning level is standard and is sent to stdout. info is requested by log_dir argument,
#and is printed to log file
if log_dir is not None:
logging.basicConfig(filename=log_dir +'log.log', level=logging.INFO)
else:
logging.basicConfig(level=logging.CRITICAL)
def residuals_wrapper(self, parameters, x, data,weights,**kwargs):
model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
return ((model_values - data)*weights)**2
def do_minimisation(self, x, data, weights = 1, **kwargs):
self.fit_result = minimize(self.residuals_wrapper, self.parameters, args = (x, data, weights), kws = kwargs)
logging.info('Fit Result')
logging.info('==========')
return self.fit_result
def get_opt_parameters(self):
if self.fit_result is None:
raise ValueError("No fit result! Do a fit before asking for")
return self.fit_result.params.valuesdict()
def print_parameters(self):
self.parameters.pretty_print()
def print_fit_result(self):
logging.info((fit_report(self.fit_result)))
print(fit_report(self.fit_result))
def plot_fit(self, x, y, xlabel = None, ylabel = None, title = None, errorbars = None, label = None, ax = None, c = None, colour_index = None, **kwargs):
if ax is None:
_, ax = plt.subplots(1 ,1, constrained_layout=True, figsize=(18, 9))
plt.rcParams.update({'font.size': 16})
colours = ['b','m','c','r','tab:orange', 'tab:pink']
#decide colour
if c is not None:
color = c
elif colour_index is not None:
color = colours[colour_index]
else:
color = colours[0]
#scatter plot
ax.scatter(x, y, color = color)
#plot errors
if errorbars is not None:
ax.errorbar(x, y, errorbars, ls = 'none', c = color, capsize = 3)
#plot model
fitdomain = np.linspace(x[0], x[-1], 1000)
ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.params.valuesdict(), **kwargs), c = color, label = label)
plt.legend()
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
plt.grid()
return ax
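# A minimal usage sketch (not part of the original module; the helper name and
# synthetic data below are illustrative): fit a straight line y = m*x + c.
def _example_linear_fit():
    def line(x, p):
        # p is the dict produced by Parameters.valuesdict()
        return p['m'] * x + p['c']

    x = np.linspace(0, 10, 50)
    y = 2.5 * x + 1.0 + np.random.normal(scale=0.5, size=x.size)

    fitter = FitTemplate(line)
    fitter.parameters.add('m', value=1.0)
    fitter.parameters.add('c', value=0.0)
    fitter.do_minimisation(x, y)
    fitter.print_fit_result()
    return fitter.plot_fit(x, y, xlabel='x', ylabel='y', label='linear fit')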
|
normal
|
{
"blob_id": "9e16921d83a5f62aad694b26a92b57b97ccda461",
"index": 1651,
"step-1": "<mask token>\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n <mask token>\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n <mask token>\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-2": "<mask token>\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n <mask token>\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print()\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-3": "<mask token>\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n\n def do_minimisation(self, x, data, weights=1, **kwargs):\n self.fit_result = minimize(self.residuals_wrapper, self.parameters,\n args=(x, data, weights), kws=kwargs)\n logging.info('Fit Result')\n logging.info('==========')\n return self.fit_result\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print()\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom lmfit import minimize, Parameters, fit_report\nimport logging\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n\n def do_minimisation(self, x, data, weights=1, **kwargs):\n self.fit_result = minimize(self.residuals_wrapper, self.parameters,\n args=(x, data, weights), kws=kwargs)\n logging.info('Fit Result')\n logging.info('==========')\n return self.fit_result\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print()\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-5": "\"\"\"After seeing how great the lmfit package, I was inspired to create my own\nobject using it. This acts as a fitting template. \n\"\"\"\n##-------------------------------PREAMBLE-----------------------------------##\nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom lmfit import minimize, Parameters, fit_report \nimport logging \n\n##-------------------------------CLASS DEFINITION-----------------------------------##\n\nclass FitTemplate(): \n def __init__(self, fit_function, log_dir = None):\n self.fit_function = fit_function \n self.parameters = Parameters()\n self.fit_result = None\n\n #setup logging. warning level is standard and is sent to stdout. info is requested by log_dir argument,\n #and is printed to log file\n if log_dir is not None: \n logging.basicConfig(filename=log_dir +'log.log', level=logging.INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n \n\n def residuals_wrapper(self, parameters, x, data,weights,**kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data)*weights)**2\n \n def do_minimisation(self, x, data, weights = 1, **kwargs):\n self.fit_result = minimize(self.residuals_wrapper, self.parameters, args = (x, data, weights), kws = kwargs)\n logging.info('Fit Result')\n logging.info('==========')\n return self.fit_result\n\n def get_opt_parameters(self):\n if self.fit_result is None: \n raise ValueError(\"No fit result! Do a fit before asking for\")\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print() \n \n def print_fit_result(self):\n logging.info((fit_report(self.fit_result)))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel = None, ylabel = None, title = None, errorbars = None, label = None, ax = None, c = None, colour_index = None, **kwargs): \n\n if ax is None:\n _, ax = plt.subplots(1\t,1, constrained_layout=True, figsize=(18, 9))\n plt.rcParams.update({'font.size': 16}) \n colours = ['b','m','c','r','tab:orange', 'tab:pink']\n\n #decide colour \n if c is not None: \n color = c \n elif colour_index is not None: \n color = colours[colour_index]\n else: \n color = colours[0]\n\n #scatter plot\n ax.scatter(x, y, color = color)\n #plot errors\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls = 'none', c = color, capsize = 3)\n #plot model\n fitdomain = np.linspace(x[0], x[-1], 1000)\t\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.params.valuesdict(), **kwargs), c = color, label = label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax \n \n \t\t\n \n \n \n\n\n\n\n\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
import numpy as np
from load_data import load_entity, load_candidates2, load_train_data
def predict_batch(test_data, model, batch_size=None):
result = model.predict(test_data, batch_size=batch_size)
return result
def predict_data(test_data, entity_path, model, predict_path, score_path, test_path, dataset):
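    """Score all candidate entities for every mention, write the per-candidate
    scores and the wrongly linked mentions to disk, and return the linking
    accuracy (post-processed for the 'clef' dataset)."""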
entity_dict, id_map = load_entity(entity_path)
acc_cnt, total_cnt = 0, 0
w_l = ''
all_score = ''
for data, labels, raw_data in test_data:
total_cnt += 1
groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]
raw_entity_list = data['entity_name']
pred_result = predict_batch(data, model, batch_size=len(labels))
pred_result = [j for r in pred_result for j in r]
pred_index = np.argmax(pred_result)
pred_label = labels[pred_index]
pred_entity_name = raw_entity_list[pred_index]
#all score
all_score += doc_id + '\t' + mention
for index, score in enumerate(pred_result):
entity_id = labels[index]
entity_name = raw_entity_list[index]
all_score += '\t' + entity_id + '\t' + entity_name + '\t' + str(round(score, 4))
all_score += '\n'
if pred_label == groud_truth:
acc_cnt += 1
else:
# write wrong results down
if groud_truth in id_map:
groud_truth = id_map[groud_truth]
ground_name = ''
if '+' in groud_truth:
ground_name = groud_truth
else:
if groud_truth not in entity_dict:
ground_name = ground_name
else:
ground_name = entity_dict[groud_truth][0]
w_l += doc_id + '\t' + mention + '\t' + groud_truth + '\t' + \
ground_name + '\t' + pred_label + '\t' + pred_entity_name + '\n'
accuracy = 1.0 * acc_cnt / (total_cnt+1)
with open(predict_path, 'w', encoding='utf8')as f:
f.write(w_l)
with open(score_path, 'w', encoding='utf8')as f:
f.write(all_score)
if dataset == 'clef':
return post_predict(test_path, score_path, entity_path)
else:
return accuracy
def post_predict(test_path, score_path, entity_path, alpha=0.75):
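    """Pick the highest-scoring candidate for each mention, falling back to
    'cui-less' when the best score is below alpha, and return the accuracy
    over the test data."""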
candidate_dict = load_candidates2(score_path)
test_data, all_data = load_train_data(test_path)
entity_dict, _ = load_entity(entity_path)
acc_cnt, w_l = 0, ''
predict_dict = dict()
for mention, candidates in candidate_dict.items():
if len(candidates) == 1:
predict_dict[mention] = (candidates[0][0], candidates[0][1])
continue
max_score, max_can = candidates[0][2], candidates[0]
for e_id, e_name, e_score in candidates:
if e_score > max_score:
max_score = e_score
max_can = (e_id, e_name, e_score)
e_id, e_name, e_score = max_can
if e_score < alpha:
e_id, e_name = 'cui-less', 'cui-less'
predict_dict[mention] = (e_id, e_name)
for doc_id, mention, label in all_data:
if str.lower(label) == 'cui-less':
label = 'cui-less'
pred_label, pred_entity_name = predict_dict[mention]
if pred_label == label:
acc_cnt += 1
else:
entity_name = 'None'
if label in entity_dict:
entity_name = entity_dict[label][0]
w_l += doc_id + '\t' + mention + '\t' + label + '\t' + \
entity_name + '\t' + pred_label + '\t' + pred_entity_name + '\n'
with open('../checkpoints/post_predict_result.txt', 'w')as f:
f.write(w_l)
total_cnt = len(all_data)
accuracy = 1.0 * acc_cnt / (total_cnt)
return accuracy
if __name__ == '__main__':
flag = 1
|
normal
|
{
"blob_id": "a19616d448da057d5be0af841467a25baaacf5b3",
"index": 9299,
"step-1": "<mask token>\n\n\ndef predict_batch(test_data, model, batch_size=None):\n result = model.predict(test_data, batch_size=batch_size)\n return result\n\n\n<mask token>\n\n\ndef post_predict(test_path, score_path, entity_path, alpha=0.75):\n candidate_dict = load_candidates2(score_path)\n test_data, all_data = load_train_data(test_path)\n entity_dict, _ = load_entity(entity_path)\n acc_cnt, w_l = 0, ''\n predict_dict = dict()\n for mention, candidates in candidate_dict.items():\n if len(candidates) == 1:\n predict_dict[mention] = candidates[0][0], candidates[0][1]\n continue\n max_score, max_can = candidates[0][2], candidates[0]\n for e_id, e_name, e_score in candidates:\n if e_score > max_score:\n max_score = e_score\n max_can = e_id, e_name, e_score\n e_id, e_name, e_score = max_can\n if e_score < alpha:\n e_id, e_name = 'cui-less', 'cui-less'\n predict_dict[mention] = e_id, e_name\n for doc_id, mention, label in all_data:\n if str.lower(label) == 'cui-less':\n label = 'cui-less'\n pred_label, pred_entity_name = predict_dict[mention]\n if pred_label == label:\n acc_cnt += 1\n else:\n entity_name = 'None'\n if label in entity_dict:\n entity_name = entity_dict[label][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + label + '\\t' +\n entity_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n with open('../checkpoints/post_predict_result.txt', 'w') as f:\n f.write(w_l)\n total_cnt = len(all_data)\n accuracy = 1.0 * acc_cnt / total_cnt\n return accuracy\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef predict_batch(test_data, model, batch_size=None):\n result = model.predict(test_data, batch_size=batch_size)\n return result\n\n\ndef predict_data(test_data, entity_path, model, predict_path, score_path,\n test_path, dataset):\n entity_dict, id_map = load_entity(entity_path)\n acc_cnt, total_cnt = 0, 0\n w_l = ''\n all_score = ''\n for data, labels, raw_data in test_data:\n total_cnt += 1\n groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]\n raw_entity_list = data['entity_name']\n pred_result = predict_batch(data, model, batch_size=len(labels))\n pred_result = [j for r in pred_result for j in r]\n pred_index = np.argmax(pred_result)\n pred_label = labels[pred_index]\n pred_entity_name = raw_entity_list[pred_index]\n all_score += doc_id + '\\t' + mention\n for index, score in enumerate(pred_result):\n entity_id = labels[index]\n entity_name = raw_entity_list[index]\n all_score += '\\t' + entity_id + '\\t' + entity_name + '\\t' + str(\n round(score, 4))\n all_score += '\\n'\n if pred_label == groud_truth:\n acc_cnt += 1\n else:\n if groud_truth in id_map:\n groud_truth = id_map[groud_truth]\n ground_name = ''\n if '+' in groud_truth:\n ground_name = groud_truth\n elif groud_truth not in entity_dict:\n ground_name = ground_name\n else:\n ground_name = entity_dict[groud_truth][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + groud_truth + '\\t' +\n ground_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n accuracy = 1.0 * acc_cnt / (total_cnt + 1)\n with open(predict_path, 'w', encoding='utf8') as f:\n f.write(w_l)\n with open(score_path, 'w', encoding='utf8') as f:\n f.write(all_score)\n if dataset == 'clef':\n return post_predict(test_path, score_path, entity_path)\n else:\n return accuracy\n\n\ndef post_predict(test_path, score_path, entity_path, alpha=0.75):\n candidate_dict = load_candidates2(score_path)\n test_data, all_data = load_train_data(test_path)\n entity_dict, _ = load_entity(entity_path)\n acc_cnt, w_l = 0, ''\n predict_dict = dict()\n for mention, candidates in candidate_dict.items():\n if len(candidates) == 1:\n predict_dict[mention] = candidates[0][0], candidates[0][1]\n continue\n max_score, max_can = candidates[0][2], candidates[0]\n for e_id, e_name, e_score in candidates:\n if e_score > max_score:\n max_score = e_score\n max_can = e_id, e_name, e_score\n e_id, e_name, e_score = max_can\n if e_score < alpha:\n e_id, e_name = 'cui-less', 'cui-less'\n predict_dict[mention] = e_id, e_name\n for doc_id, mention, label in all_data:\n if str.lower(label) == 'cui-less':\n label = 'cui-less'\n pred_label, pred_entity_name = predict_dict[mention]\n if pred_label == label:\n acc_cnt += 1\n else:\n entity_name = 'None'\n if label in entity_dict:\n entity_name = entity_dict[label][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + label + '\\t' +\n entity_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n with open('../checkpoints/post_predict_result.txt', 'w') as f:\n f.write(w_l)\n total_cnt = len(all_data)\n accuracy = 1.0 * acc_cnt / total_cnt\n return accuracy\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef predict_batch(test_data, model, batch_size=None):\n result = model.predict(test_data, batch_size=batch_size)\n return result\n\n\ndef predict_data(test_data, entity_path, model, predict_path, score_path,\n test_path, dataset):\n entity_dict, id_map = load_entity(entity_path)\n acc_cnt, total_cnt = 0, 0\n w_l = ''\n all_score = ''\n for data, labels, raw_data in test_data:\n total_cnt += 1\n groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]\n raw_entity_list = data['entity_name']\n pred_result = predict_batch(data, model, batch_size=len(labels))\n pred_result = [j for r in pred_result for j in r]\n pred_index = np.argmax(pred_result)\n pred_label = labels[pred_index]\n pred_entity_name = raw_entity_list[pred_index]\n all_score += doc_id + '\\t' + mention\n for index, score in enumerate(pred_result):\n entity_id = labels[index]\n entity_name = raw_entity_list[index]\n all_score += '\\t' + entity_id + '\\t' + entity_name + '\\t' + str(\n round(score, 4))\n all_score += '\\n'\n if pred_label == groud_truth:\n acc_cnt += 1\n else:\n if groud_truth in id_map:\n groud_truth = id_map[groud_truth]\n ground_name = ''\n if '+' in groud_truth:\n ground_name = groud_truth\n elif groud_truth not in entity_dict:\n ground_name = ground_name\n else:\n ground_name = entity_dict[groud_truth][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + groud_truth + '\\t' +\n ground_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n accuracy = 1.0 * acc_cnt / (total_cnt + 1)\n with open(predict_path, 'w', encoding='utf8') as f:\n f.write(w_l)\n with open(score_path, 'w', encoding='utf8') as f:\n f.write(all_score)\n if dataset == 'clef':\n return post_predict(test_path, score_path, entity_path)\n else:\n return accuracy\n\n\ndef post_predict(test_path, score_path, entity_path, alpha=0.75):\n candidate_dict = load_candidates2(score_path)\n test_data, all_data = load_train_data(test_path)\n entity_dict, _ = load_entity(entity_path)\n acc_cnt, w_l = 0, ''\n predict_dict = dict()\n for mention, candidates in candidate_dict.items():\n if len(candidates) == 1:\n predict_dict[mention] = candidates[0][0], candidates[0][1]\n continue\n max_score, max_can = candidates[0][2], candidates[0]\n for e_id, e_name, e_score in candidates:\n if e_score > max_score:\n max_score = e_score\n max_can = e_id, e_name, e_score\n e_id, e_name, e_score = max_can\n if e_score < alpha:\n e_id, e_name = 'cui-less', 'cui-less'\n predict_dict[mention] = e_id, e_name\n for doc_id, mention, label in all_data:\n if str.lower(label) == 'cui-less':\n label = 'cui-less'\n pred_label, pred_entity_name = predict_dict[mention]\n if pred_label == label:\n acc_cnt += 1\n else:\n entity_name = 'None'\n if label in entity_dict:\n entity_name = entity_dict[label][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + label + '\\t' +\n entity_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n with open('../checkpoints/post_predict_result.txt', 'w') as f:\n f.write(w_l)\n total_cnt = len(all_data)\n accuracy = 1.0 * acc_cnt / total_cnt\n return accuracy\n\n\nif __name__ == '__main__':\n flag = 1\n",
"step-4": "import numpy as np\nfrom load_data import load_entity, load_candidates2, load_train_data\n\n\ndef predict_batch(test_data, model, batch_size=None):\n result = model.predict(test_data, batch_size=batch_size)\n return result\n\n\ndef predict_data(test_data, entity_path, model, predict_path, score_path,\n test_path, dataset):\n entity_dict, id_map = load_entity(entity_path)\n acc_cnt, total_cnt = 0, 0\n w_l = ''\n all_score = ''\n for data, labels, raw_data in test_data:\n total_cnt += 1\n groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]\n raw_entity_list = data['entity_name']\n pred_result = predict_batch(data, model, batch_size=len(labels))\n pred_result = [j for r in pred_result for j in r]\n pred_index = np.argmax(pred_result)\n pred_label = labels[pred_index]\n pred_entity_name = raw_entity_list[pred_index]\n all_score += doc_id + '\\t' + mention\n for index, score in enumerate(pred_result):\n entity_id = labels[index]\n entity_name = raw_entity_list[index]\n all_score += '\\t' + entity_id + '\\t' + entity_name + '\\t' + str(\n round(score, 4))\n all_score += '\\n'\n if pred_label == groud_truth:\n acc_cnt += 1\n else:\n if groud_truth in id_map:\n groud_truth = id_map[groud_truth]\n ground_name = ''\n if '+' in groud_truth:\n ground_name = groud_truth\n elif groud_truth not in entity_dict:\n ground_name = ground_name\n else:\n ground_name = entity_dict[groud_truth][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + groud_truth + '\\t' +\n ground_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n accuracy = 1.0 * acc_cnt / (total_cnt + 1)\n with open(predict_path, 'w', encoding='utf8') as f:\n f.write(w_l)\n with open(score_path, 'w', encoding='utf8') as f:\n f.write(all_score)\n if dataset == 'clef':\n return post_predict(test_path, score_path, entity_path)\n else:\n return accuracy\n\n\ndef post_predict(test_path, score_path, entity_path, alpha=0.75):\n candidate_dict = load_candidates2(score_path)\n test_data, all_data = load_train_data(test_path)\n entity_dict, _ = load_entity(entity_path)\n acc_cnt, w_l = 0, ''\n predict_dict = dict()\n for mention, candidates in candidate_dict.items():\n if len(candidates) == 1:\n predict_dict[mention] = candidates[0][0], candidates[0][1]\n continue\n max_score, max_can = candidates[0][2], candidates[0]\n for e_id, e_name, e_score in candidates:\n if e_score > max_score:\n max_score = e_score\n max_can = e_id, e_name, e_score\n e_id, e_name, e_score = max_can\n if e_score < alpha:\n e_id, e_name = 'cui-less', 'cui-less'\n predict_dict[mention] = e_id, e_name\n for doc_id, mention, label in all_data:\n if str.lower(label) == 'cui-less':\n label = 'cui-less'\n pred_label, pred_entity_name = predict_dict[mention]\n if pred_label == label:\n acc_cnt += 1\n else:\n entity_name = 'None'\n if label in entity_dict:\n entity_name = entity_dict[label][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + label + '\\t' +\n entity_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n with open('../checkpoints/post_predict_result.txt', 'w') as f:\n f.write(w_l)\n total_cnt = len(all_data)\n accuracy = 1.0 * acc_cnt / total_cnt\n return accuracy\n\n\nif __name__ == '__main__':\n flag = 1\n",
"step-5": "import numpy as np\nfrom load_data import load_entity, load_candidates2, load_train_data\n\n\ndef predict_batch(test_data, model, batch_size=None):\n result = model.predict(test_data, batch_size=batch_size)\n return result\n\n\ndef predict_data(test_data, entity_path, model, predict_path, score_path, test_path, dataset):\n entity_dict, id_map = load_entity(entity_path)\n acc_cnt, total_cnt = 0, 0\n w_l = ''\n all_score = ''\n for data, labels, raw_data in test_data:\n total_cnt += 1\n groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]\n\n raw_entity_list = data['entity_name']\n pred_result = predict_batch(data, model, batch_size=len(labels))\n pred_result = [j for r in pred_result for j in r]\n pred_index = np.argmax(pred_result)\n pred_label = labels[pred_index]\n pred_entity_name = raw_entity_list[pred_index]\n\n #all score\n all_score += doc_id + '\\t' + mention\n for index, score in enumerate(pred_result):\n entity_id = labels[index]\n entity_name = raw_entity_list[index]\n all_score += '\\t' + entity_id + '\\t' + entity_name + '\\t' + str(round(score, 4))\n all_score += '\\n'\n\n if pred_label == groud_truth:\n acc_cnt += 1\n else:\n # write wrong results down\n if groud_truth in id_map:\n groud_truth = id_map[groud_truth]\n\n ground_name = ''\n if '+' in groud_truth:\n ground_name = groud_truth\n else:\n if groud_truth not in entity_dict:\n ground_name = ground_name\n else:\n ground_name = entity_dict[groud_truth][0]\n w_l += doc_id + '\\t' + mention + '\\t' + groud_truth + '\\t' + \\\n ground_name + '\\t' + pred_label + '\\t' + pred_entity_name + '\\n'\n\n accuracy = 1.0 * acc_cnt / (total_cnt+1)\n with open(predict_path, 'w', encoding='utf8')as f:\n f.write(w_l)\n\n with open(score_path, 'w', encoding='utf8')as f:\n f.write(all_score)\n\n if dataset == 'clef':\n return post_predict(test_path, score_path, entity_path)\n else:\n return accuracy\n\n\ndef post_predict(test_path, score_path, entity_path, alpha=0.75):\n candidate_dict = load_candidates2(score_path)\n test_data, all_data = load_train_data(test_path)\n entity_dict, _ = load_entity(entity_path)\n\n acc_cnt, w_l = 0, ''\n\n predict_dict = dict()\n for mention, candidates in candidate_dict.items():\n if len(candidates) == 1:\n predict_dict[mention] = (candidates[0][0], candidates[0][1])\n continue\n max_score, max_can = candidates[0][2], candidates[0]\n for e_id, e_name, e_score in candidates:\n if e_score > max_score:\n max_score = e_score\n max_can = (e_id, e_name, e_score)\n\n e_id, e_name, e_score = max_can\n if e_score < alpha:\n e_id, e_name = 'cui-less', 'cui-less'\n predict_dict[mention] = (e_id, e_name)\n\n for doc_id, mention, label in all_data:\n if str.lower(label) == 'cui-less':\n label = 'cui-less'\n pred_label, pred_entity_name = predict_dict[mention]\n if pred_label == label:\n acc_cnt += 1\n else:\n entity_name = 'None'\n if label in entity_dict:\n entity_name = entity_dict[label][0]\n w_l += doc_id + '\\t' + mention + '\\t' + label + '\\t' + \\\n entity_name + '\\t' + pred_label + '\\t' + pred_entity_name + '\\n'\n\n with open('../checkpoints/post_predict_result.txt', 'w')as f:\n f.write(w_l)\n\n total_cnt = len(all_data)\n accuracy = 1.0 * acc_cnt / (total_cnt)\n return accuracy\n\n\nif __name__ == '__main__':\n flag = 1",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#! /usr/bin/env python3
#
# This file is part of Toboggan, https://github.com/TheoryInPractice/Toboggan/,
# and is Copyright (C) North Carolina State University, 2017. It is licensed
# under the three-clause BSD license; see LICENSE.
#
# -*- coding: utf-8 -*-
# python libs
import sys
import itertools
# local imports
from toboggan.dp import solve as solve_dp
# Print iterations progress
def print_progress(iteration, total, prefix='', suffix='', decimals=1,
bar_length=100):
"""
Call in a loop to create terminal progress bar.
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent
complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%',
suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
def is_feasible(weights, flow, max_weight):
"""Test whether set of guessed weights is feasible."""
    # In the following, we replace every occurrence of 'None' in the
    # weight-array by the minimum/maximum possible value (given by the
    # last/the first non-None value next to it).
min_weights = [1] + weights
max_weights = [max_weight] + list(reversed(weights))
for i in range(1, len(min_weights)):
min_weights[i] = min_weights[i] if min_weights[i] else min_weights[i-1]
max_weights[i] = max_weights[i] if max_weights[i] else max_weights[i-1]
min_weights = min_weights[1:]
max_weights = list(reversed(max_weights[1:]))
# If the flow value lies outside of the sum-of-weight estimates,
# the current guessed set of weights is infeasible.
return sum(min_weights) <= flow and sum(max_weights) >= flow
def solve(instance, silent=True, max_weight_lower=1,
max_weight_upper=float('inf'), scoring="sink distance"):
"""Solve the provided instance of path-flow decomposition."""
flow = instance.flow
k = instance.k
# quit right away if the instance has weight bounds that can't be satisfied
if instance.has_bad_bounds():
return set()
# if k equals the size of the largest edge cut, the weights are
# predetermined
if instance.k == max(len(C) for C in instance.edge_cuts):
largest_cut = max(instance.edge_cuts, key=len)
# Important: path weights must be sorted, otherwise our
# subsequent optimizations will remove this constraint.
weights = list(sorted(w for _, w in largest_cut))
return solve_dp(instance, silent=True, guessed_weights=weights)
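    # restrict the guessable weights to those within the upper bound on the
    # largest path weight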
max_weight = instance.max_weight_bounds[1]
feasible_weights = list(filter(lambda w: w <= max_weight,
instance.weights))
if not silent:
print(instance.weights, feasible_weights)
# figure out whether we get the first or last positions for free
largest_free = False
smallest_free = False
# check largest weight first
if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:
largest_free = True
largest = instance.max_weight_bounds[0]
if min(instance.weights) == 1:
smallest_free = True
smallest = 1
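    # positions of path weights that still have to be guessed (any ends fixed
    # above are skipped)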
positions = list(range(int(smallest_free), k-int(largest_free)))
# iterate over the number of unguessed weights
for diff in range(k+1):
if not silent:
print("Diff =", diff)
# iterate over positions of guessed weights. We want them to be
# ordered, but choose the smallest first to be removed
for rev_indices in itertools.combinations(reversed(positions), k-diff):
indices = list(reversed(rev_indices))
p = len(indices)
# when k-1 values are determined, it also determines the kth value
if p == k-1:
continue
# iterate over choices for those guessed weights
for chosen_weights in itertools.combinations(feasible_weights, p):
weights = [None] * k
# assign the chosen weights to the guessed positions
for p, w in zip(indices, chosen_weights):
weights[p] = w
# add in free values
if smallest_free:
weights[0] = smallest
if largest_free:
weights[k-1] = largest
# quit if this didn't work
if not is_feasible(weights, flow, max_weight):
continue
if not silent:
print("Trying weights", weights)
sol = solve_dp(instance, silent=True, guessed_weights=weights)
if len(sol) > 0:
if not silent:
try:
for s in sol:
print(s, sum(s.path_weights), flow)
except AttributeError:
print("Unterdetermined solution")
return sol
|
normal
|
{
"blob_id": "1b4c9841fd10d065983974e93fe5dcbe048c1281",
"index": 4180,
"step-1": "<mask token>\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float\n ('inf'), scoring='sink distance'):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n if instance.has_bad_bounds():\n return set()\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)\n )\n if not silent:\n print(instance.weights, feasible_weights)\n largest_free = False\n smallest_free = False\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n positions = list(range(int(smallest_free), k - int(largest_free)))\n for diff in range(k + 1):\n if not silent:\n print('Diff =', diff)\n for rev_indices in itertools.combinations(reversed(positions), k - diff\n ):\n indices = list(reversed(rev_indices))\n p = len(indices)\n if p == k - 1:\n continue\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k - 1] = largest\n if not is_feasible(weights, flow, max_weight):\n continue\n if not silent:\n print('Trying weights', weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print('Unterdetermined solution')\n return sol\n",
"step-3": "<mask token>\n\n\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1,\n bar_length=100):\n \"\"\"\n Call in a loop to create terminal progress bar.\n\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent\n complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = '{0:.' + str(decimals) + 'f}'\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)\n ),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float\n ('inf'), scoring='sink distance'):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n if instance.has_bad_bounds():\n return set()\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)\n )\n if not silent:\n print(instance.weights, feasible_weights)\n largest_free = False\n smallest_free = False\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n positions = list(range(int(smallest_free), k - int(largest_free)))\n for diff in range(k + 1):\n if not silent:\n print('Diff =', diff)\n for rev_indices in itertools.combinations(reversed(positions), k - diff\n ):\n indices = list(reversed(rev_indices))\n p = len(indices)\n if p == k - 1:\n continue\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k - 1] = largest\n if not is_feasible(weights, flow, max_weight):\n continue\n if not silent:\n print('Trying weights', weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print('Unterdetermined solution')\n return sol\n",
"step-4": "import sys\nimport itertools\nfrom toboggan.dp import solve as solve_dp\n\n\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1,\n bar_length=100):\n \"\"\"\n Call in a loop to create terminal progress bar.\n\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent\n complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = '{0:.' + str(decimals) + 'f}'\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)\n ),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float\n ('inf'), scoring='sink distance'):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n if instance.has_bad_bounds():\n return set()\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)\n )\n if not silent:\n print(instance.weights, feasible_weights)\n largest_free = False\n smallest_free = False\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n positions = list(range(int(smallest_free), k - int(largest_free)))\n for diff in range(k + 1):\n if not silent:\n print('Diff =', diff)\n for rev_indices in itertools.combinations(reversed(positions), k - diff\n ):\n indices = list(reversed(rev_indices))\n p = len(indices)\n if p == k - 1:\n continue\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k - 1] = largest\n if not is_feasible(weights, flow, max_weight):\n continue\n if not silent:\n print('Trying weights', weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print('Unterdetermined solution')\n return sol\n",
"step-5": "#! /usr/bin/env python3\n#\n# This file is part of Toboggan, https://github.com/TheoryInPractice/Toboggan/,\n# and is Copyright (C) North Carolina State University, 2017. It is licensed\n# under the three-clause BSD license; see LICENSE.\n#\n# -*- coding: utf-8 -*-\n# python libs\nimport sys\nimport itertools\n# local imports\nfrom toboggan.dp import solve as solve_dp\n\n\n# Print iterations progress\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1,\n bar_length=100):\n \"\"\"\n Call in a loop to create terminal progress bar.\n\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent\n complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%',\n suffix)),\n\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n # In the following, we replace very occurenve of 'None' in the\n # weight-array by the minimum/maximum possible value (given by the\n # last/the first\n # non-None value next to it).\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[i-1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[i-1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n\n # If the flow value lies outside of the sum-of-weight estimates,\n # the current guessed set of weights is infeasible.\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1,\n max_weight_upper=float('inf'), scoring=\"sink distance\"):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n\n # quit right away if the instance has weight bounds that can't be satisfied\n if instance.has_bad_bounds():\n return set()\n\n # if k equals the size of the largest edge cut, the weights are\n # predetermined\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n # Important: path weights must be sorted, otherwise our\n # subsequent optimizations will remove this constraint.\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight,\n instance.weights))\n\n if not silent:\n print(instance.weights, feasible_weights)\n\n # figure out whether we get the first or last positions for free\n largest_free = False\n smallest_free = False\n # check largest weight first\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n\n positions = list(range(int(smallest_free), k-int(largest_free)))\n\n # iterate 
over the number of unguessed weights\n for diff in range(k+1):\n if not silent:\n print(\"Diff =\", diff)\n # iterate over positions of guessed weights. We want them to be\n # ordered, but choose the smallest first to be removed\n for rev_indices in itertools.combinations(reversed(positions), k-diff):\n indices = list(reversed(rev_indices))\n p = len(indices)\n # when k-1 values are determined, it also determines the kth value\n if p == k-1:\n continue\n # iterate over choices for those guessed weights\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n\n # assign the chosen weights to the guessed positions\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n\n # add in free values\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k-1] = largest\n\n # quit if this didn't work\n if not is_feasible(weights, flow, max_weight):\n continue\n\n if not silent:\n print(\"Trying weights\", weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print(\"Unterdetermined solution\")\n return sol\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
########################################
__author__ = "Abdelrahman Eldesokey"
__license__ = "GNU GPLv3"
__version__ = "0.1"
__maintainer__ = "Abdelrahman Eldesokey"
__email__ = "[email protected]"
########################################
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
import numpy as np
from scipy.stats import poisson
from scipy import signal
from utils.util import retrieve_elements_from_indices
# The proposed Normalized Convolution Layer
class NConv2d(_ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, pos_fn='softplus',
init_method='k', stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
# Call _ConvNd constructor
super(NConv2d, self).__init__(in_channels, out_channels, kernel_size,
stride, padding, dilation, False, _pair(0), groups, bias, padding_mode)
self.eps = 1e-20
self.pos_fn = pos_fn
self.init_method = init_method
# Initialize weights and bias
self.init_parameters()
if self.pos_fn is not None:
EnforcePos.apply(self, 'weight', pos_fn)
def forward(self, data, conf):
# Normalized Convolution
denom = F.conv2d(conf, self.weight, None, self.stride,
self.padding, self.dilation, self.groups)
nomin = F.conv2d(data*conf, self.weight, None, self.stride,
self.padding, self.dilation, self.groups)
nconv = nomin / (denom+self.eps)
# Add bias
b = self.bias
sz = b.size(0)
b = b.view(1,sz,1,1)
b = b.expand_as(nconv)
nconv += b
# Propagate confidence
cout = denom
sz = cout.size()
cout = cout.view(sz[0], sz[1], -1)
k = self.weight
k_sz = k.size()
k = k.view(k_sz[0], -1)
s = torch.sum(k, dim=-1, keepdim=True)
cout = cout / s
cout = cout.view(sz)
return nconv, cout
def init_parameters(self):
# Init weights
if self.init_method == 'x': # Xavier
torch.nn.init.xavier_uniform_(self.weight)
elif self.init_method == 'k': # Kaiming
torch.nn.init.kaiming_uniform_(self.weight)
elif self.init_method == 'p': # Poisson
mu=self.kernel_size[0]/2
dist = poisson(mu)
x = np.arange(0, self.kernel_size[0])
y = np.expand_dims(dist.pmf(x),1)
w = signal.convolve2d(y, y.transpose(), 'full')
w = torch.tensor(w).type_as(self.weight)
w = torch.unsqueeze(w,0)
w = torch.unsqueeze(w,1)
w = w.repeat(self.out_channels, 1, 1, 1)
w = w.repeat(1, self.in_channels, 1, 1)
self.weight.data = w + torch.rand(w.shape)
# Init bias
self.bias = torch.nn.Parameter(torch.zeros(self.out_channels)+0.01)
# My modification is in this class
# Non-negativity enforcement class
class EnforcePos(object):
def __init__(self, pos_fn, name):
self.name = name
self.pos_fn = pos_fn
@staticmethod
def apply(module, name, pos_fn):
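        # Reparameterize the module: stash the raw tensor as '<name>_pre' and
        # expose a strictly positive version under '<name>', refreshed by a
        # forward pre-hook on every call.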
fn = EnforcePos(pos_fn, name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name+'_pre', Parameter(weight.data))
setattr(module, name, fn._pos(getattr(module, name+'_pre')))
module.register_forward_pre_hook(fn)
return fn
    def __call__(self, module, inputs):
        # Forward pre-hook: recompute the positive weight from the raw
        # '<name>_pre' parameter before every forward pass.
        pos_weight = self._pos(getattr(module, self.name+'_pre'))
        setattr(module, self.name, pos_weight)
def _pos(self, p):
pos_fn = self.pos_fn.lower()
if pos_fn == 'softmax':
p_sz = p.size()
p = p.view(p_sz[0],p_sz[1], -1)
p = F.softmax(p, -1)
return p.view(p_sz)
elif pos_fn == 'exp':
return torch.exp(p)
elif pos_fn == 'softplus':
return F.softplus(p, beta=10)
elif pos_fn == 'sigmoid':
            return torch.sigmoid(p)
else:
print('Undefined positive function!')
return
class NormCNN(nn.Module):
def __init__(self, pos_fn=None, num_channels=2):
super().__init__()
self.pos_fn = pos_fn
self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2)
self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn, 'p', padding=2)
self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn, 'p', padding=2)
self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)
self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)
self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)
self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')
def forward(self, x0, c0):
x1, c1 = self.nconv1(x0, c0)
x1, c1 = self.nconv2(x1, c1)
x1, c1 = self.nconv3(x1, c1)
# Downsample 1
ds = 2
c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)
x1_ds = retrieve_elements_from_indices(x1, idx)
c1_ds /= 4
x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)
x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)
# Downsample 2
ds = 2
c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)
x2_dss = retrieve_elements_from_indices(x2_ds, idx)
c2_dss /= 4
x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)
# Downsample 3
ds = 2
c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)
x3_dss = retrieve_elements_from_indices(x3_ds, idx)
c3_dss /= 4
x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)
# Upsample 1
x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')
c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')
x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((c3_ds, c4), 1))
# Upsample 2
x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')
c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')
x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat((c2_ds, c34), 1))
# Upsample 3
x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')
c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')
xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23, c1), 1))
xout, cout = self.nconv7(xout, cout)
return xout, cout
class PretrainedCNN(nn.Module):
def __init__(self, pos_fn=None, num_channels=2):
super().__init__()
self.pos_fn = pos_fn
self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)
self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels, 'p', True)
self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels, 'p', True)
self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)
self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)
self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)
self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)
self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)
self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)
self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)
self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)
self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)
self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)
self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)
def forward(self, x0, c0):
x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)
x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)
x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)
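        # Downsample 1: max-pool the confidence and gather the features at the
        # pooled (argmax) positions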
ds = 2
c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)
x1_ds = torch.zeros(c1_ds.size()).cuda()
for i in range(x1_ds.size(0)):
for j in range(x1_ds.size(1)):
x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])
c1_ds /= 4
x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)
x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)
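        # Downsample 2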
ds = 2
c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)
x2_dss = torch.zeros(c2_dss.size()).cuda()
for i in range(x2_dss.size(0)):
for j in range(x2_dss.size(1)):
x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])
c2_dss /= 4
x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2)
# x3_ds, c3_ds = self.navg_forward(self.navg3, c3_ds, x3_ds, self.bias3)
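        # Downsample 3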
ds = 2
c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)
x3_dss = torch.zeros(c3_dss.size()).cuda()
for i in range(x3_dss.size(0)):
for j in range(x3_dss.size(1)):
x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])
c3_dss /= 4
x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2)
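        # Upsample 1: interpolate back up and fuse with the skip connection
        # from the previous scale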
x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')
c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')
x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds, c4), 1), torch.cat((x3_ds, x4), 1),
self.bias34)
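        # Upsample 2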
x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')
c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')
x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds, c34), 1), torch.cat((x2_ds, x34), 1),
self.bias23)
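        # Upsample 3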
x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')
c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')
xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1), torch.cat((x23, x1), 1), self.bias12)
xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)
return xout, cout
def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):
# Normalized Averaging
ca = navg(c)
xout = torch.div(navg(x * c), ca + eps)
# Add bias
sz = b.size(0)
b = b.view(1, sz, 1, 1)
b = b.expand_as(xout)
xout = xout + b
if restore:
cm = (c == 0).float()
xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)
# Propagate confidence
# cout = torch.ne(ca, 0).float()
cout = ca
sz = cout.size()
cout = cout.view(sz[0], sz[1], -1)
k = navg.weight
k_sz = k.size()
k = k.view(k_sz[0], -1)
s = torch.sum(k, dim=-1, keepdim=True)
cout = cout / s
cout = cout.view(sz)
k = k.view(k_sz)
return xout, cout
def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1, out_channels=1, initalizer='x', pos=False,
groups=1):
navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1,
padding=(kernel_size[0] // 2, kernel_size[1] // 2), bias=False, groups=groups)
weights = navg.weight
        if initalizer == 'x':  # Xavier
            torch.nn.init.xavier_uniform_(weights)
        elif initalizer == 'k':  # Kaiming
            torch.nn.init.kaiming_uniform_(weights)
elif initalizer == 'p':
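            # Poisson-shaped kernel: convolving the 1-D pmf (as a column) with
            # its transpose yields its outer product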
mu = kernel_size[0] / 2
dist = poisson(mu)
x = np.arange(0, kernel_size[0])
y = np.expand_dims(dist.pmf(x), 1)
w = signal.convolve2d(y, y.transpose(), 'full')
w = torch.from_numpy(w).float().cuda()
w = torch.unsqueeze(w, 0)
w = torch.unsqueeze(w, 1)
w = w.repeat(out_channels, 1, 1, 1)
w = w.repeat(1, in_channels, 1, 1)
weights.data = w + torch.rand(w.shape).cuda()
return navg
if __name__ == '__main__':
ncnn = NormCNN(pos_fn='softplus')
    print(ncnn)
|
normal
|
{
"blob_id": "64b4deaad548a38ba646423d33fc6a985483a042",
"index": 3592,
"step-1": "<mask token>\n\n\nclass NConv2d(_ConvNd):\n <mask token>\n <mask token>\n\n def init_parameters(self):\n if self.init_method == 'x':\n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k':\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p':\n mu = self.kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels) + 0.01)\n\n\nclass EnforcePos(object):\n\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n weight = getattr(module, name)\n del module._parameters[name]\n module.register_parameter(name + '_pre', Parameter(weight.data))\n setattr(module, name, fn._pos(getattr(module, name + '_pre')))\n module.register_forward_pre_hook(fn)\n return fn\n\n def __call__(self, module, inputs):\n pos_weight = self._pos(getattr(module, self.name + '_pre'))\n setattr(module, self.name, pos_weight)\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0], p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return\n\n\nclass NormCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')\n\n def forward(self, x0, c0):\n x1, c1 = self.nconv1(x0, c0)\n x1, c1 = self.nconv2(x1, c1)\n x1, c1 = self.nconv3(x1, c1)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = retrieve_elements_from_indices(x1, idx)\n c1_ds /= 4\n x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)\n x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = retrieve_elements_from_indices(x2_ds, idx)\n c2_dss /= 4\n x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = retrieve_elements_from_indices(x3_ds, idx)\n c3_dss /= 4\n x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((\n c3_ds, c4), 1))\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = 
self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat(\n (c2_ds, c34), 1))\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23,\n c1), 1))\n xout, cout = self.nconv7(xout, cout)\n return xout, cout\n\n\nclass PretrainedCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)\n self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)\n self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)\n self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n\n def forward(self, x0, c0):\n x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)\n x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)\n x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = torch.zeros(c1_ds.size()).cuda()\n for i in range(x1_ds.size(0)):\n for j in range(x1_ds.size(1)):\n x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :]\n .view(-1)].view(idx.size()[2:])\n c1_ds /= 4\n x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)\n x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = torch.zeros(c2_dss.size()).cuda()\n for i in range(x2_dss.size(0)):\n for j in range(x2_dss.size(1)):\n x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c2_dss /= 4\n x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2\n )\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = torch.zeros(c3_dss.size()).cuda()\n for i in range(x3_dss.size(0)):\n for j in range(x3_dss.size(1)):\n x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c3_dss /= 4\n x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2\n )\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds,\n c4), 1), torch.cat((x3_ds, x4), 1), self.bias34)\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds,\n c34), 1), torch.cat((x2_ds, x34), 1), self.bias23)\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1),\n 
torch.cat((x23, x1), 1), self.bias12)\n xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)\n return xout, cout\n\n def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):\n ca = navg(c)\n xout = torch.div(navg(x * c), ca + eps)\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(xout)\n xout = xout + b\n if restore:\n cm = (c == 0).float()\n xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)\n cout = ca\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = navg.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n k = k.view(k_sz)\n return xout, cout\n\n def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1,\n out_channels=1, initalizer='x', pos=False, groups=1):\n navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=1, padding=(kernel_size[0] // 2,\n kernel_size[1] // 2), bias=False, groups=groups)\n weights = navg.weight\n if initalizer == 'x':\n torch.nn.init.xavier_uniform(weights)\n elif initalizer == 'k':\n torch.nn.init.kaiming_uniform(weights)\n elif initalizer == 'p':\n mu = kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.from_numpy(w).float().cuda()\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(out_channels, 1, 1, 1)\n w = w.repeat(1, in_channels, 1, 1)\n weights.data = w + torch.rand(w.shape).cuda()\n return navg\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass NConv2d(_ConvNd):\n\n def __init__(self, in_channels, out_channels, kernel_size, pos_fn=\n 'softplus', init_method='k', stride=1, padding=0, dilation=1,\n groups=1, bias=True, padding_mode='zeros'):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(NConv2d, self).__init__(in_channels, out_channels,\n kernel_size, stride, padding, dilation, False, _pair(0), groups,\n bias, padding_mode)\n self.eps = 1e-20\n self.pos_fn = pos_fn\n self.init_method = init_method\n self.init_parameters()\n if self.pos_fn is not None:\n EnforcePos.apply(self, 'weight', pos_fn)\n\n def forward(self, data, conf):\n denom = F.conv2d(conf, self.weight, None, self.stride, self.padding,\n self.dilation, self.groups)\n nomin = F.conv2d(data * conf, self.weight, None, self.stride, self.\n padding, self.dilation, self.groups)\n nconv = nomin / (denom + self.eps)\n b = self.bias\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(nconv)\n nconv += b\n cout = denom\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = self.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n return nconv, cout\n\n def init_parameters(self):\n if self.init_method == 'x':\n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k':\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p':\n mu = self.kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels) + 0.01)\n\n\nclass EnforcePos(object):\n\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n weight = getattr(module, name)\n del module._parameters[name]\n module.register_parameter(name + '_pre', Parameter(weight.data))\n setattr(module, name, fn._pos(getattr(module, name + '_pre')))\n module.register_forward_pre_hook(fn)\n return fn\n\n def __call__(self, module, inputs):\n pos_weight = self._pos(getattr(module, self.name + '_pre'))\n setattr(module, self.name, pos_weight)\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0], p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return\n\n\nclass NormCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv6 = NConv2d(2 * num_channels, 
num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')\n\n def forward(self, x0, c0):\n x1, c1 = self.nconv1(x0, c0)\n x1, c1 = self.nconv2(x1, c1)\n x1, c1 = self.nconv3(x1, c1)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = retrieve_elements_from_indices(x1, idx)\n c1_ds /= 4\n x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)\n x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = retrieve_elements_from_indices(x2_ds, idx)\n c2_dss /= 4\n x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = retrieve_elements_from_indices(x3_ds, idx)\n c3_dss /= 4\n x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((\n c3_ds, c4), 1))\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat(\n (c2_ds, c34), 1))\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23,\n c1), 1))\n xout, cout = self.nconv7(xout, cout)\n return xout, cout\n\n\nclass PretrainedCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)\n self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)\n self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)\n self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n\n def forward(self, x0, c0):\n x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)\n x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)\n x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = torch.zeros(c1_ds.size()).cuda()\n for i in range(x1_ds.size(0)):\n for j in range(x1_ds.size(1)):\n x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :]\n .view(-1)].view(idx.size()[2:])\n c1_ds /= 4\n x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)\n x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = torch.zeros(c2_dss.size()).cuda()\n for i in range(x2_dss.size(0)):\n for j in range(x2_dss.size(1)):\n x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :,\n 
:].view(-1)].view(idx.size()[2:])\n c2_dss /= 4\n x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2\n )\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = torch.zeros(c3_dss.size()).cuda()\n for i in range(x3_dss.size(0)):\n for j in range(x3_dss.size(1)):\n x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c3_dss /= 4\n x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2\n )\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds,\n c4), 1), torch.cat((x3_ds, x4), 1), self.bias34)\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds,\n c34), 1), torch.cat((x2_ds, x34), 1), self.bias23)\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1),\n torch.cat((x23, x1), 1), self.bias12)\n xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)\n return xout, cout\n\n def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):\n ca = navg(c)\n xout = torch.div(navg(x * c), ca + eps)\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(xout)\n xout = xout + b\n if restore:\n cm = (c == 0).float()\n xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)\n cout = ca\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = navg.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n k = k.view(k_sz)\n return xout, cout\n\n def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1,\n out_channels=1, initalizer='x', pos=False, groups=1):\n navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=1, padding=(kernel_size[0] // 2,\n kernel_size[1] // 2), bias=False, groups=groups)\n weights = navg.weight\n if initalizer == 'x':\n torch.nn.init.xavier_uniform(weights)\n elif initalizer == 'k':\n torch.nn.init.kaiming_uniform(weights)\n elif initalizer == 'p':\n mu = kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.from_numpy(w).float().cuda()\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(out_channels, 1, 1, 1)\n w = w.repeat(1, in_channels, 1, 1)\n weights.data = w + torch.rand(w.shape).cuda()\n return navg\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass NConv2d(_ConvNd):\n\n def __init__(self, in_channels, out_channels, kernel_size, pos_fn=\n 'softplus', init_method='k', stride=1, padding=0, dilation=1,\n groups=1, bias=True, padding_mode='zeros'):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(NConv2d, self).__init__(in_channels, out_channels,\n kernel_size, stride, padding, dilation, False, _pair(0), groups,\n bias, padding_mode)\n self.eps = 1e-20\n self.pos_fn = pos_fn\n self.init_method = init_method\n self.init_parameters()\n if self.pos_fn is not None:\n EnforcePos.apply(self, 'weight', pos_fn)\n\n def forward(self, data, conf):\n denom = F.conv2d(conf, self.weight, None, self.stride, self.padding,\n self.dilation, self.groups)\n nomin = F.conv2d(data * conf, self.weight, None, self.stride, self.\n padding, self.dilation, self.groups)\n nconv = nomin / (denom + self.eps)\n b = self.bias\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(nconv)\n nconv += b\n cout = denom\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = self.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n return nconv, cout\n\n def init_parameters(self):\n if self.init_method == 'x':\n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k':\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p':\n mu = self.kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels) + 0.01)\n\n\nclass EnforcePos(object):\n\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n weight = getattr(module, name)\n del module._parameters[name]\n module.register_parameter(name + '_pre', Parameter(weight.data))\n setattr(module, name, fn._pos(getattr(module, name + '_pre')))\n module.register_forward_pre_hook(fn)\n return fn\n\n def __call__(self, module, inputs):\n pos_weight = self._pos(getattr(module, self.name + '_pre'))\n setattr(module, self.name, pos_weight)\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0], p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return\n\n\nclass NormCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv6 = NConv2d(2 * num_channels, 
num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')\n\n def forward(self, x0, c0):\n x1, c1 = self.nconv1(x0, c0)\n x1, c1 = self.nconv2(x1, c1)\n x1, c1 = self.nconv3(x1, c1)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = retrieve_elements_from_indices(x1, idx)\n c1_ds /= 4\n x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)\n x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = retrieve_elements_from_indices(x2_ds, idx)\n c2_dss /= 4\n x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = retrieve_elements_from_indices(x3_ds, idx)\n c3_dss /= 4\n x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((\n c3_ds, c4), 1))\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat(\n (c2_ds, c34), 1))\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23,\n c1), 1))\n xout, cout = self.nconv7(xout, cout)\n return xout, cout\n\n\nclass PretrainedCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)\n self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)\n self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)\n self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n\n def forward(self, x0, c0):\n x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)\n x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)\n x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = torch.zeros(c1_ds.size()).cuda()\n for i in range(x1_ds.size(0)):\n for j in range(x1_ds.size(1)):\n x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :]\n .view(-1)].view(idx.size()[2:])\n c1_ds /= 4\n x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)\n x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = torch.zeros(c2_dss.size()).cuda()\n for i in range(x2_dss.size(0)):\n for j in range(x2_dss.size(1)):\n x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :,\n 
:].view(-1)].view(idx.size()[2:])\n c2_dss /= 4\n x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2\n )\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = torch.zeros(c3_dss.size()).cuda()\n for i in range(x3_dss.size(0)):\n for j in range(x3_dss.size(1)):\n x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c3_dss /= 4\n x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2\n )\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds,\n c4), 1), torch.cat((x3_ds, x4), 1), self.bias34)\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds,\n c34), 1), torch.cat((x2_ds, x34), 1), self.bias23)\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1),\n torch.cat((x23, x1), 1), self.bias12)\n xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)\n return xout, cout\n\n def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):\n ca = navg(c)\n xout = torch.div(navg(x * c), ca + eps)\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(xout)\n xout = xout + b\n if restore:\n cm = (c == 0).float()\n xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)\n cout = ca\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = navg.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n k = k.view(k_sz)\n return xout, cout\n\n def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1,\n out_channels=1, initalizer='x', pos=False, groups=1):\n navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=1, padding=(kernel_size[0] // 2,\n kernel_size[1] // 2), bias=False, groups=groups)\n weights = navg.weight\n if initalizer == 'x':\n torch.nn.init.xavier_uniform(weights)\n elif initalizer == 'k':\n torch.nn.init.kaiming_uniform(weights)\n elif initalizer == 'p':\n mu = kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.from_numpy(w).float().cuda()\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(out_channels, 1, 1, 1)\n w = w.repeat(1, in_channels, 1, 1)\n weights.data = w + torch.rand(w.shape).cuda()\n return navg\n\n\nif __name__ == '__main__':\n ncnn = NormCNN(pos_fn='softplus')\n print(ncnn.__str__())\n",
"step-4": "__author__ = 'Abdelrahman Eldesokey'\n__license__ = 'GNU GPLv3'\n__version__ = '0.1'\n__maintainer__ = 'Abdelrahman Eldesokey'\n__email__ = '[email protected]'\n<mask token>\n\n\nclass NConv2d(_ConvNd):\n\n def __init__(self, in_channels, out_channels, kernel_size, pos_fn=\n 'softplus', init_method='k', stride=1, padding=0, dilation=1,\n groups=1, bias=True, padding_mode='zeros'):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(NConv2d, self).__init__(in_channels, out_channels,\n kernel_size, stride, padding, dilation, False, _pair(0), groups,\n bias, padding_mode)\n self.eps = 1e-20\n self.pos_fn = pos_fn\n self.init_method = init_method\n self.init_parameters()\n if self.pos_fn is not None:\n EnforcePos.apply(self, 'weight', pos_fn)\n\n def forward(self, data, conf):\n denom = F.conv2d(conf, self.weight, None, self.stride, self.padding,\n self.dilation, self.groups)\n nomin = F.conv2d(data * conf, self.weight, None, self.stride, self.\n padding, self.dilation, self.groups)\n nconv = nomin / (denom + self.eps)\n b = self.bias\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(nconv)\n nconv += b\n cout = denom\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = self.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n return nconv, cout\n\n def init_parameters(self):\n if self.init_method == 'x':\n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k':\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p':\n mu = self.kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels) + 0.01)\n\n\nclass EnforcePos(object):\n\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n weight = getattr(module, name)\n del module._parameters[name]\n module.register_parameter(name + '_pre', Parameter(weight.data))\n setattr(module, name, fn._pos(getattr(module, name + '_pre')))\n module.register_forward_pre_hook(fn)\n return fn\n\n def __call__(self, module, inputs):\n pos_weight = self._pos(getattr(module, self.name + '_pre'))\n setattr(module, self.name, pos_weight)\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0], p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return\n\n\nclass NormCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 
3),\n pos_fn, 'p', padding=1)\n self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')\n\n def forward(self, x0, c0):\n x1, c1 = self.nconv1(x0, c0)\n x1, c1 = self.nconv2(x1, c1)\n x1, c1 = self.nconv3(x1, c1)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = retrieve_elements_from_indices(x1, idx)\n c1_ds /= 4\n x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)\n x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = retrieve_elements_from_indices(x2_ds, idx)\n c2_dss /= 4\n x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = retrieve_elements_from_indices(x3_ds, idx)\n c3_dss /= 4\n x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((\n c3_ds, c4), 1))\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat(\n (c2_ds, c34), 1))\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23,\n c1), 1))\n xout, cout = self.nconv7(xout, cout)\n return xout, cout\n\n\nclass PretrainedCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)\n self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)\n self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)\n self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n\n def forward(self, x0, c0):\n x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)\n x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)\n x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = torch.zeros(c1_ds.size()).cuda()\n for i in range(x1_ds.size(0)):\n for j in range(x1_ds.size(1)):\n x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :]\n .view(-1)].view(idx.size()[2:])\n c1_ds /= 4\n x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)\n x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = 
torch.zeros(c2_dss.size()).cuda()\n for i in range(x2_dss.size(0)):\n for j in range(x2_dss.size(1)):\n x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c2_dss /= 4\n x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2\n )\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = torch.zeros(c3_dss.size()).cuda()\n for i in range(x3_dss.size(0)):\n for j in range(x3_dss.size(1)):\n x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c3_dss /= 4\n x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2\n )\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds,\n c4), 1), torch.cat((x3_ds, x4), 1), self.bias34)\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds,\n c34), 1), torch.cat((x2_ds, x34), 1), self.bias23)\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1),\n torch.cat((x23, x1), 1), self.bias12)\n xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)\n return xout, cout\n\n def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):\n ca = navg(c)\n xout = torch.div(navg(x * c), ca + eps)\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(xout)\n xout = xout + b\n if restore:\n cm = (c == 0).float()\n xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)\n cout = ca\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = navg.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n k = k.view(k_sz)\n return xout, cout\n\n def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1,\n out_channels=1, initalizer='x', pos=False, groups=1):\n navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=1, padding=(kernel_size[0] // 2,\n kernel_size[1] // 2), bias=False, groups=groups)\n weights = navg.weight\n if initalizer == 'x':\n torch.nn.init.xavier_uniform(weights)\n elif initalizer == 'k':\n torch.nn.init.kaiming_uniform(weights)\n elif initalizer == 'p':\n mu = kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.from_numpy(w).float().cuda()\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(out_channels, 1, 1, 1)\n w = w.repeat(1, in_channels, 1, 1)\n weights.data = w + torch.rand(w.shape).cuda()\n return navg\n\n\nif __name__ == '__main__':\n ncnn = NormCNN(pos_fn='softplus')\n print(ncnn.__str__())\n",
"step-5": "########################################\n__author__ = \"Abdelrahman Eldesokey\"\n__license__ = \"GNU GPLv3\"\n__version__ = \"0.1\"\n__maintainer__ = \"Abdelrahman Eldesokey\"\n__email__ = \"[email protected]\"\n########################################\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.nn.modules.conv import _ConvNd\nfrom torch.nn.modules.utils import _pair\nfrom torch.nn.parameter import Parameter\nimport numpy as np\nfrom scipy.stats import poisson\nfrom scipy import signal\n\nfrom utils.util import retrieve_elements_from_indices\n\n# The proposed Normalized Convolution Layer\nclass NConv2d(_ConvNd):\n def __init__(self, in_channels, out_channels, kernel_size, pos_fn='softplus',\n init_method='k', stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n # Call _ConvNd constructor\n super(NConv2d, self).__init__(in_channels, out_channels, kernel_size,\n stride, padding, dilation, False, _pair(0), groups, bias, padding_mode)\n \n self.eps = 1e-20\n self.pos_fn = pos_fn\n self.init_method = init_method\n \n # Initialize weights and bias\n self.init_parameters()\n if self.pos_fn is not None:\n EnforcePos.apply(self, 'weight', pos_fn)\n \n def forward(self, data, conf):\n # Normalized Convolution\n denom = F.conv2d(conf, self.weight, None, self.stride,\n self.padding, self.dilation, self.groups) \n nomin = F.conv2d(data*conf, self.weight, None, self.stride,\n self.padding, self.dilation, self.groups) \n nconv = nomin / (denom+self.eps)\n\n # Add bias\n b = self.bias\n sz = b.size(0)\n b = b.view(1,sz,1,1)\n b = b.expand_as(nconv)\n nconv += b\n \n # Propagate confidence\n cout = denom\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n \n k = self.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True) \n\n cout = cout / s\n cout = cout.view(sz)\n \n return nconv, cout\n\n def init_parameters(self):\n # Init weights\n if self.init_method == 'x': # Xavier \n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k': # Kaiming\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p': # Poisson\n mu=self.kernel_size[0]/2 \n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x),1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w,0)\n w = torch.unsqueeze(w,1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n \n # Init bias\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels)+0.01)\n \n# My modification is in this class\n# Non-negativity enforcement class \nclass EnforcePos(object):\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n weight = getattr(module, name)\n del module._parameters[name]\n module.register_parameter(name+'_pre', Parameter(weight.data))\n setattr(module, name, fn._pos(getattr(module, name+'_pre')))\n module.register_forward_pre_hook(fn) \n\n return fn\n\n def __call__(self, module, inputs):\n #if module.training:\n # weight = getattr(module, self.name)\n #del module._parameters[self.name]\n pos_weight = self._pos(getattr(module, self.name+'_pre'))\n setattr(module, self.name, pos_weight)\n 
#else:\n # pass\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0],p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return\n\n\nclass NormCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n\n self.pos_fn = pos_fn\n\n self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn, 'p', padding=2)\n\n self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)\n self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)\n self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)\n\n self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')\n\n def forward(self, x0, c0):\n\n x1, c1 = self.nconv1(x0, c0)\n x1, c1 = self.nconv2(x1, c1)\n x1, c1 = self.nconv3(x1, c1)\n\n # Downsample 1\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = retrieve_elements_from_indices(x1, idx)\n c1_ds /= 4\n\n x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)\n x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)\n\n # Downsample 2\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = retrieve_elements_from_indices(x2_ds, idx)\n c2_dss /= 4\n\n x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)\n\n # Downsample 3\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = retrieve_elements_from_indices(x3_ds, idx)\n c3_dss /= 4\n x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)\n\n # Upsample 1\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((c3_ds, c4), 1))\n\n # Upsample 2\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat((c2_ds, c34), 1))\n\n # Upsample 3\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23, c1), 1))\n\n xout, cout = self.nconv7(xout, cout)\n\n return xout, cout\n\n\nclass PretrainedCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n\n self.pos_fn = pos_fn\n\n self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)\n self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels, 'p', True)\n self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels, 'p', True)\n self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)\n\n self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)\n self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)\n self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)\n\n self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias4 = 
nn.Parameter(torch.zeros(1) + 0.01)\n\n self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n\n def forward(self, x0, c0):\n\n x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)\n\n x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)\n\n x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)\n\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = torch.zeros(c1_ds.size()).cuda()\n for i in range(x1_ds.size(0)):\n for j in range(x1_ds.size(1)):\n x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])\n\n c1_ds /= 4\n\n x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)\n\n x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)\n\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n\n x2_dss = torch.zeros(c2_dss.size()).cuda()\n for i in range(x2_dss.size(0)):\n for j in range(x2_dss.size(1)):\n x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])\n c2_dss /= 4\n\n x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2)\n\n # x3_ds, c3_ds = self.navg_forward(self.navg3, c3_ds, x3_ds, self.bias3)\n\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n\n x3_dss = torch.zeros(c3_dss.size()).cuda()\n for i in range(x3_dss.size(0)):\n for j in range(x3_dss.size(1)):\n x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])\n c3_dss /= 4\n\n x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2)\n\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n\n x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds, c4), 1), torch.cat((x3_ds, x4), 1),\n self.bias34)\n\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n\n x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds, c34), 1), torch.cat((x2_ds, x34), 1),\n self.bias23)\n\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n\n xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1), torch.cat((x23, x1), 1), self.bias12)\n\n xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)\n\n return xout, cout\n\n def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):\n\n # Normalized Averaging\n ca = navg(c)\n xout = torch.div(navg(x * c), ca + eps)\n\n # Add bias\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(xout)\n xout = xout + b\n\n if restore:\n cm = (c == 0).float()\n xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)\n\n # Propagate confidence\n # cout = torch.ne(ca, 0).float()\n cout = ca\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n\n k = navg.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n\n cout = cout / s\n\n cout = cout.view(sz)\n k = k.view(k_sz)\n\n return xout, cout\n\n def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1, out_channels=1, initalizer='x', pos=False,\n groups=1):\n\n navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1,\n padding=(kernel_size[0] // 2, kernel_size[1] // 2), bias=False, groups=groups)\n\n weights = 
navg.weight\n\n if initalizer == 'x': # Xavier\n torch.nn.init.xavier_uniform(weights)\n elif initalizer == 'k':\n torch.nn.init.kaiming_uniform(weights)\n elif initalizer == 'p':\n mu = kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.from_numpy(w).float().cuda()\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(out_channels, 1, 1, 1)\n w = w.repeat(1, in_channels, 1, 1)\n weights.data = w + torch.rand(w.shape).cuda()\n\n return navg\n\n\nif __name__ == '__main__':\n ncnn = NormCNN(pos_fn='softplus')\n print(ncnn.__str__())\n",
"step-ids": [
15,
17,
18,
19,
21
]
}
|
[
15,
17,
18,
19,
21
] |
#!/usr/bin/env python3
import json
import sqlite3
import sys
from scorelib import *
#from .scorelib import *
from collections import defaultdict
def __map2list(mp):
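    """Convert a dict keyed by 1-based positions into a list ordered by position."""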
if len(mp.keys()) == 0:
return []
lst = [None] * max(mp.keys())
for idx in mp.keys():
lst[idx-1] = mp[idx]
return lst
def __translate_keys(translation_schema):
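    """Return a json.dump `default` hook that serialises objects via __dict__, renaming attributes per translation_schema."""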
def f(obj):
schema = translation_schema.get(type(obj))
if schema is None:
return obj.__dict__
res = {}
for key in obj.__dict__:
res[schema.get(key, key)] = obj.__dict__[key]
return res
return f
def __to_bool(val):
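    """Map the 'Y'/'N' partiture flag stored in the database to True/False (None if unknown)."""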
if val == 'Y':
return True
elif val == 'N':
return False
else:
return None
def search(substr):
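    """Dump to stdout a JSON object mapping each composer whose name contains substr to the prints of their compositions."""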
connection = sqlite3.connect('scorelib.dat')
    result = defaultdict(list)
for person_id, person_name in connection.execute(r"SELECT id, name FROM person WHERE name LIKE '%' || ? || '%'", (substr, )):
root_composer = person_name
for (score_id, score_name, score_genre, score_incipit, score_key, score_year) in connection.execute(r"SELECT score.id, score.name, score.genre, score.incipit, score.key, score.year FROM score JOIN score_author a on score.id = a.score WHERE a.composer = ?", (person_id, )):
voicesMap = {}
for voice_name, voice_range, voice_number in connection.execute(r"SELECT name, range, number FROM voice WHERE score = ?", (score_id, )):
voicesMap[voice_number] = Voice(voice_name, voice_range)
composers = []
for c_name, c_born, c_died in connection.execute(r"SELECT person.name, person.born, person.died FROM score_author JOIN person ON score_author.composer = person.id WHERE score_author.score = ?", (score_id,)):
composers.append(Person(c_name, c_born, c_died))
composition = Composition(score_name, score_incipit, score_key, score_genre, score_year, __map2list(voicesMap), composers)
for edition_id, edition_name, edition_year in connection.execute(r"SELECT id, name, year FROM edition WHERE score = ?", (score_id,)):
editors = []
for e_name, e_born, e_died in connection.execute(r"SELECT person.name, person.born, person.died FROM edition_author JOIN person ON edition_author.editor = person.id WHERE edition_author.edition = ?", (edition_id,)):
editors.append(Person(e_name, e_born, e_died))
edition = Edition(composition, editors, edition_name)
for print_id, print_part in connection.execute(r"SELECT id, partiture FROM print WHERE edition = ?", (edition_id, )):
                    score_print = Print(edition, print_id, __to_bool(print_part))  # renamed to avoid shadowing the built-in print
                    result[root_composer].append({"Print Number": score_print.print_id,
                                                  "Composer": composition.authors,
                                                  "Title": composition.name,
                                                  "Genre": composition.genre,
                                                  "Key": composition.key,
                                                  "Composition Year": composition.year,
                                                  "Edition": edition.name,
                                                  "Voices": __map2list(voicesMap),
                                                  "Editor": edition.authors,
                                                  "Partiture": score_print.partiture,
                                                  "Incipit": composition.incipit})
json.dump(result,
sys.stdout,
default=__translate_keys({Print: {"print_id": "Print Number", "partiture": "Partiture", "edition": "Edition"},
Edition: {"authors": "Editors", "name": "Name", "composition": "Composition"},
Composition: {"name": "Name", "incipit": "Incipit", "key": "Key", "genre": "Genre", "year": "Composition Year", "voices": "Voices", "authors": "Composer"},
Voice: {"name": "Name", "range": "Range"},
Person: {"name": "Name", "born": "Born", "died": "Died"}}),
indent=4,
ensure_ascii=False)
return
def main(args):
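    """Join the CLI arguments into one search string; print an empty JSON object when nothing was given."""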
text = ' '.join(args).strip()
if text == '':
json.dump({}, sys.stdout)
return
search(text)
if __name__ == '__main__':
    main(sys.argv[1:])
|
normal
|
{
"blob_id": "9f6e5c219f7b668720b5379dde912ff22ef434d1",
"index": 9072,
"step-1": "<mask token>\n\n\ndef __map2list(mp):\n if len(mp.keys()) == 0:\n return []\n lst = [None] * max(mp.keys())\n for idx in mp.keys():\n lst[idx - 1] = mp[idx]\n return lst\n\n\ndef __translate_keys(translation_schema):\n\n def f(obj):\n schema = translation_schema.get(type(obj))\n if schema is None:\n return obj.__dict__\n res = {}\n for key in obj.__dict__:\n res[schema.get(key, key)] = obj.__dict__[key]\n return res\n return f\n\n\ndef __to_bool(val):\n if val == 'Y':\n return True\n elif val == 'N':\n return False\n else:\n return None\n\n\ndef search(substr):\n connection = sqlite3.connect('scorelib.dat')\n result = defaultdict(lambda : [])\n for person_id, person_name in connection.execute(\n \"SELECT id, name FROM person WHERE name LIKE '%' || ? || '%'\", (\n substr,)):\n root_composer = person_name\n for score_id, score_name, score_genre, score_incipit, score_key, score_year in connection.execute(\n 'SELECT score.id, score.name, score.genre, score.incipit, score.key, score.year FROM score JOIN score_author a on score.id = a.score WHERE a.composer = ?'\n , (person_id,)):\n voicesMap = {}\n for voice_name, voice_range, voice_number in connection.execute(\n 'SELECT name, range, number FROM voice WHERE score = ?', (\n score_id,)):\n voicesMap[voice_number] = Voice(voice_name, voice_range)\n composers = []\n for c_name, c_born, c_died in connection.execute(\n 'SELECT person.name, person.born, person.died FROM score_author JOIN person ON score_author.composer = person.id WHERE score_author.score = ?'\n , (score_id,)):\n composers.append(Person(c_name, c_born, c_died))\n composition = Composition(score_name, score_incipit, score_key,\n score_genre, score_year, __map2list(voicesMap), composers)\n for edition_id, edition_name, edition_year in connection.execute(\n 'SELECT id, name, year FROM edition WHERE score = ?', (\n score_id,)):\n editors = []\n for e_name, e_born, e_died in connection.execute(\n 'SELECT person.name, person.born, person.died FROM edition_author JOIN person ON edition_author.editor = person.id WHERE edition_author.edition = ?'\n , (edition_id,)):\n editors.append(Person(e_name, e_born, e_died))\n edition = Edition(composition, editors, edition_name)\n for print_id, print_part in connection.execute(\n 'SELECT id, partiture FROM print WHERE edition = ?', (\n edition_id,)):\n print = Print(edition, print_id, __to_bool(print_part))\n result[root_composer].append({'Print Number': print.\n print_id, 'Composer': composition.authors, 'Title':\n composition.name, 'Genre': composition.genre, 'Key':\n composition.key, 'Composition Year': composition.\n year, 'Edition': edition.name, 'Voices': __map2list\n (voicesMap), 'Editor': edition.authors, 'Partiture':\n print.partiture, 'Incipit': composition.incipit})\n json.dump(result, sys.stdout, default=__translate_keys({Print: {\n 'print_id': 'Print Number', 'partiture': 'Partiture', 'edition':\n 'Edition'}, Edition: {'authors': 'Editors', 'name': 'Name',\n 'composition': 'Composition'}, Composition: {'name': 'Name',\n 'incipit': 'Incipit', 'key': 'Key', 'genre': 'Genre', 'year':\n 'Composition Year', 'voices': 'Voices', 'authors': 'Composer'},\n Voice: {'name': 'Name', 'range': 'Range'}, Person: {'name': 'Name',\n 'born': 'Born', 'died': 'Died'}}), indent=4, ensure_ascii=False)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef __map2list(mp):\n if len(mp.keys()) == 0:\n return []\n lst = [None] * max(mp.keys())\n for idx in mp.keys():\n lst[idx - 1] = mp[idx]\n return lst\n\n\ndef __translate_keys(translation_schema):\n\n def f(obj):\n schema = translation_schema.get(type(obj))\n if schema is None:\n return obj.__dict__\n res = {}\n for key in obj.__dict__:\n res[schema.get(key, key)] = obj.__dict__[key]\n return res\n return f\n\n\ndef __to_bool(val):\n if val == 'Y':\n return True\n elif val == 'N':\n return False\n else:\n return None\n\n\ndef search(substr):\n connection = sqlite3.connect('scorelib.dat')\n result = defaultdict(lambda : [])\n for person_id, person_name in connection.execute(\n \"SELECT id, name FROM person WHERE name LIKE '%' || ? || '%'\", (\n substr,)):\n root_composer = person_name\n for score_id, score_name, score_genre, score_incipit, score_key, score_year in connection.execute(\n 'SELECT score.id, score.name, score.genre, score.incipit, score.key, score.year FROM score JOIN score_author a on score.id = a.score WHERE a.composer = ?'\n , (person_id,)):\n voicesMap = {}\n for voice_name, voice_range, voice_number in connection.execute(\n 'SELECT name, range, number FROM voice WHERE score = ?', (\n score_id,)):\n voicesMap[voice_number] = Voice(voice_name, voice_range)\n composers = []\n for c_name, c_born, c_died in connection.execute(\n 'SELECT person.name, person.born, person.died FROM score_author JOIN person ON score_author.composer = person.id WHERE score_author.score = ?'\n , (score_id,)):\n composers.append(Person(c_name, c_born, c_died))\n composition = Composition(score_name, score_incipit, score_key,\n score_genre, score_year, __map2list(voicesMap), composers)\n for edition_id, edition_name, edition_year in connection.execute(\n 'SELECT id, name, year FROM edition WHERE score = ?', (\n score_id,)):\n editors = []\n for e_name, e_born, e_died in connection.execute(\n 'SELECT person.name, person.born, person.died FROM edition_author JOIN person ON edition_author.editor = person.id WHERE edition_author.edition = ?'\n , (edition_id,)):\n editors.append(Person(e_name, e_born, e_died))\n edition = Edition(composition, editors, edition_name)\n for print_id, print_part in connection.execute(\n 'SELECT id, partiture FROM print WHERE edition = ?', (\n edition_id,)):\n print = Print(edition, print_id, __to_bool(print_part))\n result[root_composer].append({'Print Number': print.\n print_id, 'Composer': composition.authors, 'Title':\n composition.name, 'Genre': composition.genre, 'Key':\n composition.key, 'Composition Year': composition.\n year, 'Edition': edition.name, 'Voices': __map2list\n (voicesMap), 'Editor': edition.authors, 'Partiture':\n print.partiture, 'Incipit': composition.incipit})\n json.dump(result, sys.stdout, default=__translate_keys({Print: {\n 'print_id': 'Print Number', 'partiture': 'Partiture', 'edition':\n 'Edition'}, Edition: {'authors': 'Editors', 'name': 'Name',\n 'composition': 'Composition'}, Composition: {'name': 'Name',\n 'incipit': 'Incipit', 'key': 'Key', 'genre': 'Genre', 'year':\n 'Composition Year', 'voices': 'Voices', 'authors': 'Composer'},\n Voice: {'name': 'Name', 'range': 'Range'}, Person: {'name': 'Name',\n 'born': 'Born', 'died': 'Died'}}), indent=4, ensure_ascii=False)\n return\n\n\ndef main(args):\n text = ' '.join(args).strip()\n if text == '':\n json.dump({}, sys.stdout)\n return\n search(text)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef __map2list(mp):\n if len(mp.keys()) == 0:\n return []\n lst = [None] * max(mp.keys())\n for idx in mp.keys():\n lst[idx - 1] = mp[idx]\n return lst\n\n\ndef __translate_keys(translation_schema):\n\n def f(obj):\n schema = translation_schema.get(type(obj))\n if schema is None:\n return obj.__dict__\n res = {}\n for key in obj.__dict__:\n res[schema.get(key, key)] = obj.__dict__[key]\n return res\n return f\n\n\ndef __to_bool(val):\n if val == 'Y':\n return True\n elif val == 'N':\n return False\n else:\n return None\n\n\ndef search(substr):\n connection = sqlite3.connect('scorelib.dat')\n result = defaultdict(lambda : [])\n for person_id, person_name in connection.execute(\n \"SELECT id, name FROM person WHERE name LIKE '%' || ? || '%'\", (\n substr,)):\n root_composer = person_name\n for score_id, score_name, score_genre, score_incipit, score_key, score_year in connection.execute(\n 'SELECT score.id, score.name, score.genre, score.incipit, score.key, score.year FROM score JOIN score_author a on score.id = a.score WHERE a.composer = ?'\n , (person_id,)):\n voicesMap = {}\n for voice_name, voice_range, voice_number in connection.execute(\n 'SELECT name, range, number FROM voice WHERE score = ?', (\n score_id,)):\n voicesMap[voice_number] = Voice(voice_name, voice_range)\n composers = []\n for c_name, c_born, c_died in connection.execute(\n 'SELECT person.name, person.born, person.died FROM score_author JOIN person ON score_author.composer = person.id WHERE score_author.score = ?'\n , (score_id,)):\n composers.append(Person(c_name, c_born, c_died))\n composition = Composition(score_name, score_incipit, score_key,\n score_genre, score_year, __map2list(voicesMap), composers)\n for edition_id, edition_name, edition_year in connection.execute(\n 'SELECT id, name, year FROM edition WHERE score = ?', (\n score_id,)):\n editors = []\n for e_name, e_born, e_died in connection.execute(\n 'SELECT person.name, person.born, person.died FROM edition_author JOIN person ON edition_author.editor = person.id WHERE edition_author.edition = ?'\n , (edition_id,)):\n editors.append(Person(e_name, e_born, e_died))\n edition = Edition(composition, editors, edition_name)\n for print_id, print_part in connection.execute(\n 'SELECT id, partiture FROM print WHERE edition = ?', (\n edition_id,)):\n print = Print(edition, print_id, __to_bool(print_part))\n result[root_composer].append({'Print Number': print.\n print_id, 'Composer': composition.authors, 'Title':\n composition.name, 'Genre': composition.genre, 'Key':\n composition.key, 'Composition Year': composition.\n year, 'Edition': edition.name, 'Voices': __map2list\n (voicesMap), 'Editor': edition.authors, 'Partiture':\n print.partiture, 'Incipit': composition.incipit})\n json.dump(result, sys.stdout, default=__translate_keys({Print: {\n 'print_id': 'Print Number', 'partiture': 'Partiture', 'edition':\n 'Edition'}, Edition: {'authors': 'Editors', 'name': 'Name',\n 'composition': 'Composition'}, Composition: {'name': 'Name',\n 'incipit': 'Incipit', 'key': 'Key', 'genre': 'Genre', 'year':\n 'Composition Year', 'voices': 'Voices', 'authors': 'Composer'},\n Voice: {'name': 'Name', 'range': 'Range'}, Person: {'name': 'Name',\n 'born': 'Born', 'died': 'Died'}}), indent=4, ensure_ascii=False)\n return\n\n\ndef main(args):\n text = ' '.join(args).strip()\n if text == '':\n json.dump({}, sys.stdout)\n return\n search(text)\n\n\nmain(sys.argv[1:])\n",
"step-4": "import json\nimport sqlite3\nimport sys\nfrom scorelib import *\nfrom collections import defaultdict\n\n\ndef __map2list(mp):\n if len(mp.keys()) == 0:\n return []\n lst = [None] * max(mp.keys())\n for idx in mp.keys():\n lst[idx - 1] = mp[idx]\n return lst\n\n\ndef __translate_keys(translation_schema):\n\n def f(obj):\n schema = translation_schema.get(type(obj))\n if schema is None:\n return obj.__dict__\n res = {}\n for key in obj.__dict__:\n res[schema.get(key, key)] = obj.__dict__[key]\n return res\n return f\n\n\ndef __to_bool(val):\n if val == 'Y':\n return True\n elif val == 'N':\n return False\n else:\n return None\n\n\ndef search(substr):\n connection = sqlite3.connect('scorelib.dat')\n result = defaultdict(lambda : [])\n for person_id, person_name in connection.execute(\n \"SELECT id, name FROM person WHERE name LIKE '%' || ? || '%'\", (\n substr,)):\n root_composer = person_name\n for score_id, score_name, score_genre, score_incipit, score_key, score_year in connection.execute(\n 'SELECT score.id, score.name, score.genre, score.incipit, score.key, score.year FROM score JOIN score_author a on score.id = a.score WHERE a.composer = ?'\n , (person_id,)):\n voicesMap = {}\n for voice_name, voice_range, voice_number in connection.execute(\n 'SELECT name, range, number FROM voice WHERE score = ?', (\n score_id,)):\n voicesMap[voice_number] = Voice(voice_name, voice_range)\n composers = []\n for c_name, c_born, c_died in connection.execute(\n 'SELECT person.name, person.born, person.died FROM score_author JOIN person ON score_author.composer = person.id WHERE score_author.score = ?'\n , (score_id,)):\n composers.append(Person(c_name, c_born, c_died))\n composition = Composition(score_name, score_incipit, score_key,\n score_genre, score_year, __map2list(voicesMap), composers)\n for edition_id, edition_name, edition_year in connection.execute(\n 'SELECT id, name, year FROM edition WHERE score = ?', (\n score_id,)):\n editors = []\n for e_name, e_born, e_died in connection.execute(\n 'SELECT person.name, person.born, person.died FROM edition_author JOIN person ON edition_author.editor = person.id WHERE edition_author.edition = ?'\n , (edition_id,)):\n editors.append(Person(e_name, e_born, e_died))\n edition = Edition(composition, editors, edition_name)\n for print_id, print_part in connection.execute(\n 'SELECT id, partiture FROM print WHERE edition = ?', (\n edition_id,)):\n print = Print(edition, print_id, __to_bool(print_part))\n result[root_composer].append({'Print Number': print.\n print_id, 'Composer': composition.authors, 'Title':\n composition.name, 'Genre': composition.genre, 'Key':\n composition.key, 'Composition Year': composition.\n year, 'Edition': edition.name, 'Voices': __map2list\n (voicesMap), 'Editor': edition.authors, 'Partiture':\n print.partiture, 'Incipit': composition.incipit})\n json.dump(result, sys.stdout, default=__translate_keys({Print: {\n 'print_id': 'Print Number', 'partiture': 'Partiture', 'edition':\n 'Edition'}, Edition: {'authors': 'Editors', 'name': 'Name',\n 'composition': 'Composition'}, Composition: {'name': 'Name',\n 'incipit': 'Incipit', 'key': 'Key', 'genre': 'Genre', 'year':\n 'Composition Year', 'voices': 'Voices', 'authors': 'Composer'},\n Voice: {'name': 'Name', 'range': 'Range'}, Person: {'name': 'Name',\n 'born': 'Born', 'died': 'Died'}}), indent=4, ensure_ascii=False)\n return\n\n\ndef main(args):\n text = ' '.join(args).strip()\n if text == '':\n json.dump({}, sys.stdout)\n return\n search(text)\n\n\nmain(sys.argv[1:])\n",
"step-5": "#!/usr/bin/env python3\r\nimport json\r\nimport sqlite3\r\nimport sys\r\nfrom scorelib import *\r\n#from .scorelib import *\r\nfrom collections import defaultdict\r\n\r\n\r\ndef __map2list(mp):\r\n if len(mp.keys()) == 0:\r\n return []\r\n lst = [None] * max(mp.keys())\r\n for idx in mp.keys():\r\n lst[idx-1] = mp[idx]\r\n return lst\r\n\r\ndef __translate_keys(translation_schema):\r\n def f(obj):\r\n schema = translation_schema.get(type(obj))\r\n if schema is None:\r\n return obj.__dict__\r\n res = {}\r\n for key in obj.__dict__:\r\n res[schema.get(key, key)] = obj.__dict__[key]\r\n return res\r\n return f\r\n\r\n\r\ndef __to_bool(val):\r\n if val == 'Y':\r\n return True\r\n elif val == 'N':\r\n return False\r\n else:\r\n return None\r\n\r\n\r\ndef search(substr):\r\n connection = sqlite3.connect('scorelib.dat')\r\n result = defaultdict(lambda: [])\r\n for person_id, person_name in connection.execute(r\"SELECT id, name FROM person WHERE name LIKE '%' || ? || '%'\", (substr, )):\r\n root_composer = person_name\r\n for (score_id, score_name, score_genre, score_incipit, score_key, score_year) in connection.execute(r\"SELECT score.id, score.name, score.genre, score.incipit, score.key, score.year FROM score JOIN score_author a on score.id = a.score WHERE a.composer = ?\", (person_id, )):\r\n voicesMap = {}\r\n for voice_name, voice_range, voice_number in connection.execute(r\"SELECT name, range, number FROM voice WHERE score = ?\", (score_id, )):\r\n voicesMap[voice_number] = Voice(voice_name, voice_range)\r\n composers = []\r\n for c_name, c_born, c_died in connection.execute(r\"SELECT person.name, person.born, person.died FROM score_author JOIN person ON score_author.composer = person.id WHERE score_author.score = ?\", (score_id,)):\r\n composers.append(Person(c_name, c_born, c_died))\r\n composition = Composition(score_name, score_incipit, score_key, score_genre, score_year, __map2list(voicesMap), composers)\r\n for edition_id, edition_name, edition_year in connection.execute(r\"SELECT id, name, year FROM edition WHERE score = ?\", (score_id,)):\r\n editors = []\r\n for e_name, e_born, e_died in connection.execute(r\"SELECT person.name, person.born, person.died FROM edition_author JOIN person ON edition_author.editor = person.id WHERE edition_author.edition = ?\", (edition_id,)):\r\n editors.append(Person(e_name, e_born, e_died))\r\n edition = Edition(composition, editors, edition_name)\r\n for print_id, print_part in connection.execute(r\"SELECT id, partiture FROM print WHERE edition = ?\", (edition_id, )):\r\n print = Print(edition, print_id, __to_bool(print_part))\r\n result[root_composer].append({\"Print Number\": print.print_id,\r\n \"Composer\": composition.authors,\r\n \"Title\": composition.name,\r\n \"Genre\": composition.genre,\r\n \"Key\": composition.key,\r\n \"Composition Year\": composition.year,\r\n \"Edition\": edition.name,\r\n \"Voices\": __map2list(voicesMap),\r\n \"Editor\": edition.authors,\r\n \"Partiture\": print.partiture,\r\n \"Incipit\": composition.incipit})\r\n json.dump(result,\r\n sys.stdout,\r\n default=__translate_keys({Print: {\"print_id\": \"Print Number\", \"partiture\": \"Partiture\", \"edition\": \"Edition\"},\r\n Edition: {\"authors\": \"Editors\", \"name\": \"Name\", \"composition\": \"Composition\"},\r\n Composition: {\"name\": \"Name\", \"incipit\": \"Incipit\", \"key\": \"Key\", \"genre\": \"Genre\", \"year\": \"Composition Year\", \"voices\": \"Voices\", \"authors\": \"Composer\"},\r\n Voice: {\"name\": \"Name\", \"range\": 
\"Range\"},\r\n Person: {\"name\": \"Name\", \"born\": \"Born\", \"died\": \"Died\"}}),\r\n indent=4,\r\n ensure_ascii=False)\r\n return\r\n\r\n\r\ndef main(args):\r\n text = ' '.join(args).strip()\r\n if text == '':\r\n json.dump({}, sys.stdout)\r\n return\r\n search(text)\r\n\r\n\r\nmain(sys.argv[1:])",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from eboss_qso.fits.joint import run_joint_mcmc_fit
from eboss_qso.measurements.utils import make_hash
import os.path as osp
import os
from glob import glob
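# Each entry is a (z_weighted, p) pair; the command-line `argnum` selects one.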
ARGS = [(False, 1.0),
(False, 1.6),
(True, 1.6),
(True, 1.0)
]
ITERATIONS = 500
WALKERS = 100
def main(argnum, kmin):
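    """Run the joint N+S MCMC data fit with a shared f_nl for the (z_weighted, p) choice selected by argnum and the given kmin."""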
z_weighted, p = ARGS[argnum]
# the data to load
kws = {}
kws['version'] = 'v1.9f'
kws['krange'] = '%s-0.3' % kmin
kws['params'] = 'basemodel-N-fnl'
kws['zrange'] = '0.8-2.2'
kws['z_weighted'] = z_weighted
kws['p'] = p
kws['ells'] = [0]
hashstr = make_hash(kws)
# output directory
output = osp.join(os.environ['EBOSS_FITS'], 'data')
output = osp.join(output, kws['version'],
kws['krange'], kws['params'], kws['zrange'])
output = osp.join(output, 'QSO-N+S-%s' % hashstr)
if not osp.exists(output):
os.makedirs(output)
# output file name
i = len(glob(osp.join(output, '*npz')))
output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))
print(output)
# run
run_joint_mcmc_fit('data', ITERATIONS, WALKERS,
output, kws, joint_params=['f_nl'])
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("argnum", type=int, choices=[0, 1, 2, 3])
parser.add_argument('kmin', type=str, choices=["0.0001", "0.005"])
ns = parser.parse_args()
main(ns.argnum, ns.kmin)
|
normal
|
{
"blob_id": "a40c87fe4b805495e5bd30155faa861cbe16c368",
"index": 6123,
"step-1": "<mask token>\n\n\ndef main(argnum, kmin):\n z_weighted, p = ARGS[argnum]\n kws = {}\n kws['version'] = 'v1.9f'\n kws['krange'] = '%s-0.3' % kmin\n kws['params'] = 'basemodel-N-fnl'\n kws['zrange'] = '0.8-2.2'\n kws['z_weighted'] = z_weighted\n kws['p'] = p\n kws['ells'] = [0]\n hashstr = make_hash(kws)\n output = osp.join(os.environ['EBOSS_FITS'], 'data')\n output = osp.join(output, kws['version'], kws['krange'], kws['params'],\n kws['zrange'])\n output = osp.join(output, 'QSO-N+S-%s' % hashstr)\n if not osp.exists(output):\n os.makedirs(output)\n i = len(glob(osp.join(output, '*npz')))\n output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))\n print(output)\n run_joint_mcmc_fit('data', ITERATIONS, WALKERS, output, kws,\n joint_params=['f_nl'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(argnum, kmin):\n z_weighted, p = ARGS[argnum]\n kws = {}\n kws['version'] = 'v1.9f'\n kws['krange'] = '%s-0.3' % kmin\n kws['params'] = 'basemodel-N-fnl'\n kws['zrange'] = '0.8-2.2'\n kws['z_weighted'] = z_weighted\n kws['p'] = p\n kws['ells'] = [0]\n hashstr = make_hash(kws)\n output = osp.join(os.environ['EBOSS_FITS'], 'data')\n output = osp.join(output, kws['version'], kws['krange'], kws['params'],\n kws['zrange'])\n output = osp.join(output, 'QSO-N+S-%s' % hashstr)\n if not osp.exists(output):\n os.makedirs(output)\n i = len(glob(osp.join(output, '*npz')))\n output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))\n print(output)\n run_joint_mcmc_fit('data', ITERATIONS, WALKERS, output, kws,\n joint_params=['f_nl'])\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('argnum', type=int, choices=[0, 1, 2, 3])\n parser.add_argument('kmin', type=str, choices=['0.0001', '0.005'])\n ns = parser.parse_args()\n main(ns.argnum, ns.kmin)\n",
"step-3": "<mask token>\nARGS = [(False, 1.0), (False, 1.6), (True, 1.6), (True, 1.0)]\nITERATIONS = 500\nWALKERS = 100\n\n\ndef main(argnum, kmin):\n z_weighted, p = ARGS[argnum]\n kws = {}\n kws['version'] = 'v1.9f'\n kws['krange'] = '%s-0.3' % kmin\n kws['params'] = 'basemodel-N-fnl'\n kws['zrange'] = '0.8-2.2'\n kws['z_weighted'] = z_weighted\n kws['p'] = p\n kws['ells'] = [0]\n hashstr = make_hash(kws)\n output = osp.join(os.environ['EBOSS_FITS'], 'data')\n output = osp.join(output, kws['version'], kws['krange'], kws['params'],\n kws['zrange'])\n output = osp.join(output, 'QSO-N+S-%s' % hashstr)\n if not osp.exists(output):\n os.makedirs(output)\n i = len(glob(osp.join(output, '*npz')))\n output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))\n print(output)\n run_joint_mcmc_fit('data', ITERATIONS, WALKERS, output, kws,\n joint_params=['f_nl'])\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('argnum', type=int, choices=[0, 1, 2, 3])\n parser.add_argument('kmin', type=str, choices=['0.0001', '0.005'])\n ns = parser.parse_args()\n main(ns.argnum, ns.kmin)\n",
"step-4": "from eboss_qso.fits.joint import run_joint_mcmc_fit\nfrom eboss_qso.measurements.utils import make_hash\nimport os.path as osp\nimport os\nfrom glob import glob\nARGS = [(False, 1.0), (False, 1.6), (True, 1.6), (True, 1.0)]\nITERATIONS = 500\nWALKERS = 100\n\n\ndef main(argnum, kmin):\n z_weighted, p = ARGS[argnum]\n kws = {}\n kws['version'] = 'v1.9f'\n kws['krange'] = '%s-0.3' % kmin\n kws['params'] = 'basemodel-N-fnl'\n kws['zrange'] = '0.8-2.2'\n kws['z_weighted'] = z_weighted\n kws['p'] = p\n kws['ells'] = [0]\n hashstr = make_hash(kws)\n output = osp.join(os.environ['EBOSS_FITS'], 'data')\n output = osp.join(output, kws['version'], kws['krange'], kws['params'],\n kws['zrange'])\n output = osp.join(output, 'QSO-N+S-%s' % hashstr)\n if not osp.exists(output):\n os.makedirs(output)\n i = len(glob(osp.join(output, '*npz')))\n output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))\n print(output)\n run_joint_mcmc_fit('data', ITERATIONS, WALKERS, output, kws,\n joint_params=['f_nl'])\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('argnum', type=int, choices=[0, 1, 2, 3])\n parser.add_argument('kmin', type=str, choices=['0.0001', '0.005'])\n ns = parser.parse_args()\n main(ns.argnum, ns.kmin)\n",
"step-5": "from eboss_qso.fits.joint import run_joint_mcmc_fit\nfrom eboss_qso.measurements.utils import make_hash\nimport os.path as osp\nimport os\nfrom glob import glob\n\n\nARGS = [(False, 1.0),\n (False, 1.6),\n (True, 1.6),\n (True, 1.0)\n ]\nITERATIONS = 500\nWALKERS = 100\n\n\ndef main(argnum, kmin):\n\n z_weighted, p = ARGS[argnum]\n\n # the data to load\n kws = {}\n kws['version'] = 'v1.9f'\n kws['krange'] = '%s-0.3' % kmin\n kws['params'] = 'basemodel-N-fnl'\n kws['zrange'] = '0.8-2.2'\n kws['z_weighted'] = z_weighted\n kws['p'] = p\n kws['ells'] = [0]\n\n hashstr = make_hash(kws)\n\n # output directory\n output = osp.join(os.environ['EBOSS_FITS'], 'data')\n output = osp.join(output, kws['version'],\n kws['krange'], kws['params'], kws['zrange'])\n output = osp.join(output, 'QSO-N+S-%s' % hashstr)\n\n if not osp.exists(output):\n os.makedirs(output)\n\n # output file name\n i = len(glob(osp.join(output, '*npz')))\n output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))\n print(output)\n\n # run\n run_joint_mcmc_fit('data', ITERATIONS, WALKERS,\n output, kws, joint_params=['f_nl'])\n\n\nif __name__ == '__main__':\n\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument(\"argnum\", type=int, choices=[0, 1, 2, 3])\n parser.add_argument('kmin', type=str, choices=[\"0.0001\", \"0.005\"])\n\n ns = parser.parse_args()\n main(ns.argnum, ns.kmin)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import base
import telebot
import markups
from starter import start_bot, bot
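# /start flow: ask for a language, register the user in base, then show the welcome menu.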
@bot.message_handler(commands=['start'])
def start(message):
chat = message.chat
# welcome(msg)
msg = bot.send_message(chat.id, "Select a language in the list", reply_markup=markups.language())
bot.register_next_step_handler(msg, llanguage)
# base.create_user(chat.id)
def llanguage(msg):
chat = msg.chat
base.create_user(msg.chat.id, msg.text)
markup = telebot.types.ReplyKeyboardMarkup(True, True)
markup.row("ok")
    confirm_msg = bot.send_message(msg.chat.id, base.get_text(msg.chat.id, "confirm"), reply_markup=markup)
    bot.register_next_step_handler(confirm_msg, welcome)
def welcome(msg):
bot.send_message(msg.chat.id, "Чат-поддержка", reply_markup=markups.addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') % msg.from_user.first_name,
reply_markup=markups.welcome(), parse_mode='html')
@bot.callback_query_handler(func=lambda call: call.data == 'currency')
def select_currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id,'currency'), chat.id, call.message.message_id, reply_markup=markups.currency())
@bot.message_handler(regexp="Выбор валюты")
def select_currency_message(msg):  # renamed: a callback handler named select_currency is defined above
chat = msg.chat
bot.send_message(chat.id, base.get_text(chat.id,'currency'), reply_markup=markups.currency())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'ccur')
def currency(call):
    current_currency = call.data[4:]  # selected currency (the part after the 'ccur' prefix)
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id,'operations'), chat.id,
call.message.message_id, reply_markup=markups.menu())
def langg():
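    # Inline keyboard offering the three interface languages ('lang*' callback data).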
markup = telebot.types.InlineKeyboardMarkup()
bt_eng = telebot.types.InlineKeyboardButton(text="English", callback_data="langeng")
bt_rus = telebot.types.InlineKeyboardButton(text="Русский", callback_data="langrus")
bt_ukr = telebot.types.InlineKeyboardButton(text="Украiнський", callback_data="langukr")
markup.add(bt_eng)
markup.add(bt_rus)
markup.add(bt_ukr)
return markup
@bot.callback_query_handler(func=lambda call: call.data[:4] == "lang")
def lan(call):
chat = call.message.chat
new_lan = call.data[4:]
bot.edit_message_text( "Вы выбрали язык",chat.id,call.message.message_id,reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == 'requests')
def my_requests(call):
text = base.get_text(call.message.chat.id, 'no_req')
bot.edit_message_text(text, call.message.chat.id, call.message.message_id)
bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id,
reply_markup=markups.add_request(call.message.chat.id))
@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')
def back_to_menu(call):  # renamed: another handler named currency is defined above
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id,'operations'), chat.id,
call.message.message_id, reply_markup=markups.menu())
@bot.message_handler(regexp="Назад")
def back(msg):
bot.send_message(msg.chat.id, "Операции покупки или продажи", reply_markup=markups.addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,"operations"), reply_markup=markups.menu())
@bot.message_handler(regexp="Обменные операции")
def exchange(msg):
bot.send_message(msg.chat.id, "Купить/Продать", reply_markup=markups.exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,"exchamge"), reply_markup=markups.exchangeI())
@bot.callback_query_handler(func=lambda call: call.data == 'buy')
def buy(call):
chat = call.message.chat
bot.send_message(chat.id, "Покупка", reply_markup=markups.exchangeR())
bot.send_message(chat.id, base.get_text(chat.id,'buycur'), reply_markup=markups.buyI_sellI())
@bot.callback_query_handler(func=lambda call: call.data == 'monero')
def monero(call):
chat = call.message.chat
bot.send_message(chat.id, "Покупка/Продажа Monero", reply_markup=markups.payments())
@bot.callback_query_handler(func=lambda call: call.data == 'sell')
def sell(call):
chat = call.message.chat
bot.send_message(chat.id, "Продажа", reply_markup=markups.exchangeR())
bot.send_message(chat.id, base.get_text(chat.id,'sellcur'), reply_markup=markups.buyI_sellI())
@bot.message_handler(regexp="Кошелёк")
def wallet(msg):
bot.send_message(msg.chat.id, "Кошелёк", reply_markup=markups.exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,'wallet'), reply_markup=markups.wallet())
@bot.callback_query_handler(func=lambda call: call.data == 'bringin')
def bring_in(call):
msg = call.message
bot.edit_message_text("Выберете валюту на счёт которой придут бабосы", msg.chat.id,
msg.message_id, reply_markup=markups.bringin())
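# 'bbring<currency>' callbacks carry the chosen currency in the suffix after the 6-character prefix.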
@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')
def bbring(call):
msg = call.message
bot.edit_message_text("Внесите " + call.data[6:], msg.chat.id, msg.message_id)
@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')
def withdraw(call):
msg=call.message
bot.edit_message_text("С какой валюты списать бобосы",msg.chat.id,msg.message_id,reply_markup=markups.withdraw())
@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')
def wwithdraw(call):
msg=call.message
bot.edit_message_text("Введите сколько вывести" + call.data[5:],msg.chat.id,msg.message_id)
@bot.callback_query_handler(func=lambda call: call.data == "my requests")
def user_requests(call):
bot.send_message(call.message.chat.id, "Если нужно,то просто раскомменти")
# markup = telebot.types.InlineKeyboardMarkup()
# data = base.get_user_requests(call.message.chat.id)
# val = base.get_user_value(call.message.chat.id)
# if not data:
# btn_add = telebot.types.InlineKeyboardButton("📝 Добавить объявление", callback_data='add request')
# back = telebot.types.InlineKeyboardButton(text="Назад",
# callback_data='exchange')
# markup.row(btn_add, back)
# bot.edit_message_text("У вас нет объявлений", call.message.chat.id, call.message.message_id)
# bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id,
# reply_markup=markup)
#
#
# else:
# for each in data:
# btn = telebot.types.InlineKeyboardButton(
# text=each.rType + ", " + each.paymentMethod + ", " + each.rate + " " + each.currency,
# callback_data=each.currency + "->" + each.rid)
# markup.row(btn)
# btn_add = telebot.types.InlineKeyboardButton("📝 Добавить объявление", callback_data='add request')
# back = telebot.types.InlineKeyboardButton(text="Назад",
# callback_data='exchange')
# markup.row(btn_add, back)
# bot.edit_message_text("Что-то там про объявления",
# call.message.chat.id, call.message.message_id, parse_mode="markdown")
# bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id, reply_markup=markup)
@bot.callback_query_handler(func=lambda call: call.data == 'add request')
def add_request(call):
msg = call.message
bot.edit_message_text("Выберите валюту", msg.chat.id, msg.message_id, reply_markup=markups.request_curr())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')
def req_cur(call):
cur = call.data[4:]
msg = call.message
bot.edit_message_text("Выберите тип объявления", msg.chat.id, msg.message_id, reply_markup=markups.request_type())
@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')
@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')
def req_buy(call):
msg = call.message
ms = bot.send_message(msg.chat.id, "Метод оплаты", reply_markup=markups.pay_method())
bot.register_next_step_handler(ms, rate)
def rate(msg):
bot.send_message(msg.chat.id, "Курс")
@bot.message_handler(regexp="Настройки")
def settings(msg):
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,'settings'), reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == 'settings')
def settings_callback(call):
msg = call.message
bot.edit_message_text(base.get_text(msg.chat.id,'settings'), msg.chat.id, msg.message_id, reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == "chooselanguage")
def lang(call):
chat = call.message.chat
bot.edit_message_text( "Выберите язык",chat.id,call.message.message_id, reply_markup=langg())
@bot.callback_query_handler(func=lambda call: call.data == 'rate')
def rat(call):
msg = call.message
bot.edit_message_text("Выберите источник актульного курса", msg.chat.id, msg.message_id,
reply_markup=markups.rate())
@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')
def burses(call):
number_of_burse = call.data[5:]
msg = call.message
markup = telebot.types.InlineKeyboardMarkup()
    bt_back_to_rates = telebot.types.InlineKeyboardButton(text="Вернуться к выбору биржи", callback_data='rate')
markup.add(bt_back_to_rates)
bot.edit_message_text("Для пары BTC/RUB теперь используются котировки биржи ...название...", msg.chat.id,
msg.message_id, reply_markup=markup)
@bot.callback_query_handler(func=lambda call: call.data == 'address')
def address_cur(call):
msg = call.message
bot.edit_message_text("Выберите валюту", msg.chat.id, msg.message_id, reply_markup=markups.address())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')
def address(call):
msg = call.message
mes = bot.edit_message_text("Введите адрес", msg.chat.id, msg.message_id)
bot.register_next_step_handler(mes, enter_address)
def enter_address(msg):
    new_address = msg.text  # keep the entered address text, not the whole Message object
    bot.send_message(msg.chat.id, "Информация сохранена")
@bot.message_handler(regexp="О сервисе")
def service(msg):
bot.send_message(msg.chat.id,"Нужно придумать")
if __name__ == "__main__":
bot.polling()
# start_bot()
# Generated by Django 2.1.5 on 2019-03-12 18:07
from django.db import migrations
def associate_experiments_to_organisms(apps, schema_editor):
"""Creates missing associations between experiments and organisms.
Based off of:
https://simpleisbetterthancomplex.com/tutorial/2017/09/26/how-to-create-django-data-migrations.html
We can't import the Experiment model directly as it may be a newer
version than this migration expects. We use the historical version.
"""
# I don't think this is truly necessary in this particular
# migration, but it seems to be a best practice for Django
# migrations and a lil extra safety never hurts.
Experiment = apps.get_model("data_refinery_common", "Experiment")
ExperimentOrganismAssociation = apps.get_model(
"data_refinery_common", "ExperimentOrganismAssociation"
)
for experiment in Experiment.objects.all():
organisms = experiment.organisms.all()
samples = experiment.samples.distinct("organism").exclude(
organism_id__in=organisms.values("id")
)
for sample in samples:
ExperimentOrganismAssociation.objects.get_or_create(
experiment=experiment, organism=sample.organism
)
# This is the same as experiment.update_organism_names but we
# can't use that method because of the apps.get_model
# weirdness. It seems to be this issue:
# https://stackoverflow.com/questions/44907306/django-unavailable-field-of-model-while-doing-migration
# The method is simple enough that I'd rather duplicate it
# than disregard the warning about newer versions.
experiment.organism_names = list(
set([organism.name for organism in experiment.organisms.all()])
)
experiment.save()
class Migration(migrations.Migration):
dependencies = [
("data_refinery_common", "0015_dataset_email_ccdl_ok"),
]
operations = [
migrations.RunPython(associate_experiments_to_organisms),
]
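# Operational note (an assumption, not part of the original migration): this
# data migration is applied with the usual Django workflow, e.g.
# `python manage.py migrate data_refinery_common`. Because RunPython is given
# no reverse function, Django treats the migration as irreversible; passing
# migrations.RunPython.noop as the second argument would allow it to be
# unapplied as a no-op if that ever becomes necessary.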
from pig_util import outputSchema
@outputSchema('word:chararray')
def reverse(word):
"""
Return the reverse text of the provided word
"""
return word[::-1]
@outputSchema('length:int')
def num_chars(word):
"""
Return the length of the provided word
"""
return len(word)
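# Usage sketch (an illustration, not part of the original file): UDFs written
# against pig_util are typically registered from a Pig script via CPython
# streaming, e.g.  REGISTER 'udfs.py' USING streaming_python AS myfuncs;
# (the file name and alias here are hypothetical) and then called as
# myfuncs.reverse(word) or myfuncs.num_chars(word) inside FOREACH ... GENERATE.
# Outside of Pig the decorator only annotates the functions with their output
# schema, so they can also be sanity-checked directly:
if __name__ == "__main__":
    assert reverse("pig") == "gip"
    assert num_chars("pig") == 3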
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty, EmptyResponse
import tf
from math import radians, degrees, fabs
class MovementNullifier:
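    """Cancels slow odometry drift while the base is not being commanded.

    The node compares the current odom pose against the pose captured when the
    robot last came to rest; once yaw drifts by about 0.5 deg or x by about
    1 cm it publishes small cmd_vel corrections at cruise_velocity until the
    drift is gone. Any incoming cmd_vel larger than cruise_velocity marks the
    robot as externally commanded and suspends the correction, and the
    stop_nullify / start_nullify services disable and re-enable the behaviour.
    """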
def __init__(self):
rospy.Subscriber("odom", Odometry, self.OdomCallback)
rospy.Subscriber("cmd_vel", Twist, self.TwistCallback)
self.cmd_vel_publisher = rospy.Publisher("cmd_vel", Twist, queue_size=10)
self.first = True
self.start_yaw = 0
        self.threshold = 0.01
self.distance = 0.0
self.prev_distance = 0.0
self.angle = 0.0
self.turn = False
self.move = False
self.cruise_velocity = 0.01
self.velocity = 0
self.lin_velocity = 0
self.cmd_is_commanding = False
self.twist_time = rospy.Time.now()
self.stop_service = rospy.Service("stop_nullify", Empty, self.StopListening)
self.start_service = rospy.Service("start_nullify", Empty, self.StartListening)
self.keep_running = True
def StopListening(self, data):
self.keep_running = False
return EmptyResponse()
def StartListening(self, data):
self.keep_running = True
#self.Zero()
self.turn = False
self.move = False
self.cmd_is_commanding = False
self.first = True
return EmptyResponse()
def Turn(self):
#print "Turning with velocity: %f" % (self.velocity)
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = self.velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Move(self):
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.lin_velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Zero(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = 0
cmd_vel_msg.linear.x = 0
self.cmd_vel_publisher.publish(cmd_vel_msg)
def TwistCallback(self, data):
self.twist_time = rospy.Time.now()
eps = 0.002
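        # Anything noticeably faster than our own correction speed is treated as
        # an external command, so the nullifier never fights a real teleop or
        # navigation command (and ignores its own small publications).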
if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.linear.x) > self.cruise_velocity + eps:
self.cmd_is_commanding = True
else:
self.cmd_is_commanding = False
def OdomCallback(self, data):
if not self.keep_running:
return
twist = data.twist
if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
self.cmd_is_commanding = False
        if not self.cmd_is_commanding:  # nothing is commanding the base, so counteract the drift
pose = data.pose
quaternion = (pose.pose.orientation.x,
pose.pose.orientation.y,
pose.pose.orientation.z,
pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
x_position = pose.pose.position.x
#print "Yaw: %f deg, Position x: %f" % (degrees(euler[2]), pose.pose.position.x)
#print "Turn: %r, Move: %r, First: %r" % (self.turn, self.move, self.first)
if self.turn:
self.Turn()
if self.move:
self.Move()
if self.first:
self.start_yaw = euler[2]
self.start_x = x_position
self.first = False
self.turn = False
self.prev_time = data.header.stamp
self.Zero()
#print "Start yaw: %f" % (self.start_yaw)
#print "Start x: %f" % (self.start_x)
else:
self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
self.distance = fabs(self.start_x - x_position)
#print "Distance %f, prev distance: %f" % (self.distance, self.prev_distance)
if self.angle >= 0.5:
self.turn = True
if self.start_yaw > yaw:
self.velocity = self.cruise_velocity
else:
self.velocity = -self.cruise_velocity
#print "Angle: %f" % self.angle
if self.turn and self.angle < 0.01:
self.turn = False
self.Zero()
#print "Yaw: start %f, new %f" % (self.start_yaw, yaw)
if self.move and self.distance < 0.001:
self.move = False
self.Zero()
#print "Position: start %f, new %f" % (self.start_x, x_position)
if self.move and (self.distance > self.prev_distance):
self.move = False
self.Zero()
if self.distance >= 0.01:
self.move = True
if self.start_x > x_position:
self.lin_velocity = self.cruise_velocity
else:
self.lin_velocity = -self.cruise_velocity
self.prev_distance = self.distance
else:
#print 'Resetting...'
self.first = True
self.angle = 0.0
if __name__ == "__main__":
rospy.init_node("keep_yaw")
movement_nullifier = MovementNullifier()
rospy.spin()
yaw)\n \n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n #print \"Position: start %f, new %f\" % (self.start_x, x_position)\n \n if self.move and (self.distance > self.prev_distance):\n self.move = False\n self.Zero()\n \n if self.distance >= 0.01:\n self.move = True\n \n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n \n self.prev_distance = self.distance\n \n else:\n #print 'Resetting...'\n self.first = True\n self.angle = 0.0\n \n \n \nif __name__ == \"__main__\":\n rospy.init_node(\"keep_yaw\")\n \n movement_nullifier = MovementNullifier()\n \n rospy.spin()",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
import socket
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(20,GPIO.OUT,initial=GPIO.LOW) #green
GPIO.setup(21,GPIO.OUT,initial=GPIO.LOW) #red
GPIO.setwarnings(False)
host = '192.168.87.191'
port = 5560
def setupServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket created.")
try:
s.bind((host, port))
except socket.error as msg:
print(msg)
print("Socket bind comlete.")
return s
def setupConnection():
s.listen(1) # Allows one connection at a time.
conn, address = s.accept()
print("Connected to: " + address[0] + ":" + str(address[1]))
return conn
def RED(t):
#Red LED
GPIO.output(21,1)
    time.sleep(t)
GPIO.output(21,0)
def GREEN(t):
#GREEN LED
    GPIO.output(20,1)
time.sleep(t)
GPIO.output(20,0)
def dataTransfer(conn):
# A big loop that receives data until told not to.
while True:
# Receive the data
data = conn.recv(1024) # receive the data
data = data.decode('utf-8')
# Split the data such that you separate the command
# from the rest of the data.
dataMessage = data.split(' ', 1)
# Command
command = dataMessage[0]
        # parameter (default to 0 when the command carries no argument, e.g. a bare KILL)
        para = dataMessage[1] if len(dataMessage) > 1 else '0'
        y = int(para)
if len(command)>0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print("Our server is shutting down.")
s.close()
break
else:
print('Unknown Command')
#conn.close()
s = setupServer()
def main():
try:
while True:
try:
conn = setupConnection()
dataTransfer(conn)
            except Exception:
break
except KeyboardInterrupt:
print("program terminated")
finally:
GPIO.cleanup()
conn.close()
#Runs Main Function
if __name__=="__main__":
main()
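
A minimal client sketch for driving the server above; the host/port and the "COMMAND <seconds>" message format are taken from the server code, and the concrete values here are placeholders.

import socket
import time

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('192.168.87.191', 5560))  # server address/port defined above
client.sendall(b'GREEN 2')                # blink the green LED for 2 seconds
time.sleep(3)                             # give the server time to finish each command
client.sendall(b'RED 1')                  # blink the red LED
time.sleep(2)
client.sendall(b'KILL 0')                 # ask the server to shut down
client.close()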
|
normal
|
{
"blob_id": "78efe97d838774cb831ef205186db29f392e1953",
"index": 1584,
"step-1": "<mask token>\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\n<mask token>\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('Socket created.')\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print('Socket bind comlete.')\n return s\n\n\ndef setupConnection():\n s.listen(1)\n conn, address = s.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n return conn\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\n<mask token>\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\n<mask token>\n",
"step-3": "<mask token>\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(20, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setwarnings(False)\n<mask token>\n\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('Socket created.')\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print('Socket bind comlete.')\n return s\n\n\ndef setupConnection():\n s.listen(1)\n conn, address = s.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n return conn\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\ndef GREEN(t):\n GPIO.outdefput(20, 1)\n time.sleep(t)\n GPIO.output(20, 0)\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\n<mask token>\n\n\ndef main():\n try:\n while True:\n try:\n conn = setupConnection()\n dataTransfer(conn)\n except:\n break\n except KeyboardInterrupt:\n print('program terminated')\n finally:\n GPIO.cleanup()\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import socket\nimport RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(20, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setwarnings(False)\nhost = '192.168.87.191'\nport = 5560\n\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('Socket created.')\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print('Socket bind comlete.')\n return s\n\n\ndef setupConnection():\n s.listen(1)\n conn, address = s.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n return conn\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\ndef GREEN(t):\n GPIO.outdefput(20, 1)\n time.sleep(t)\n GPIO.output(20, 0)\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\ns = setupServer()\n\n\ndef main():\n try:\n while True:\n try:\n conn = setupConnection()\n dataTransfer(conn)\n except:\n break\n except KeyboardInterrupt:\n print('program terminated')\n finally:\n GPIO.cleanup()\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import socket\nimport RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(20,GPIO.OUT,initial=GPIO.LOW) #green\nGPIO.setup(21,GPIO.OUT,initial=GPIO.LOW) #red\nGPIO.setwarnings(False)\n\nhost = '192.168.87.191'\nport = 5560\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"Socket created.\")\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print(\"Socket bind comlete.\")\n return s\n\ndef setupConnection():\n s.listen(1) # Allows one connection at a time.\n conn, address = s.accept()\n print(\"Connected to: \" + address[0] + \":\" + str(address[1]))\n return conn\n\ndef RED(t):\n #Red LED\n GPIO.output(21,1)\n time.sleep(1)\n GPIO.output(21,0)\n\ndef GREEN(t):\n #GREEN LED\n GPIO.outdefput(20,1)\n time.sleep(t)\n GPIO.output(20,0)\n\ndef dataTransfer(conn):\n # A big loop that receives data until told not to.\n\n while True:\n # Receive the data\n data = conn.recv(1024) # receive the data\n data = data.decode('utf-8')\n\n # Split the data such that you separate the command\n # from the rest of the data.\n dataMessage = data.split(' ', 1)\n # Command\n command = dataMessage[0]\n # parameter\n para=dataMessage[1]\n y=int(para)\n if len(command)>0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print(\"Our server is shutting down.\")\n s.close()\n break\n else:\n print('Unknown Command')\n #conn.close()\ns = setupServer()\n#while True:\n# try:\n# conn = setupConnection()\n# dataTransfer(conn)\n# except:\n# break\ndef main():\n try:\n while True:\n try:\n conn = setupConnection()\n dataTransfer(conn)\n except:\n break\n except KeyboardInterrupt:\n print(\"program terminated\")\n finally:\n GPIO.cleanup()\n conn.close()\n#Runs Main Function\nif __name__==\"__main__\":\n main()\n\n",
"step-ids": [
2,
4,
7,
9,
10
]
}
|
[
2,
4,
7,
9,
10
] |
import math
import numpy as np
class incStat:
def __init__(self, Lambda, isTypeJitter=False): # timestamp is creation time
self.CF1 = 0 # linear sum
self.CF2 = 0 # sum of squares
self.w = 0 # weight
self.isTypeJitter = isTypeJitter
self.Lambda = Lambda # Decay Factor
self.lastTimestamp = np.nan
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
    def insert(self, v, t=0): # v is a scalar, t is v's arrival timestamp
if self.isTypeJitter:
if not math.isnan(self.lastTimestamp):
v = t - self.lastTimestamp
else:
v = 0
self.processDecay(t)
# update with v
self.CF1 = self.CF1 + v
self.CF2 = self.CF2 + math.pow(v, 2)
self.w = self.w + 1
self.cur_mean = np.nan # force recalculation if called
self.cur_var = np.nan
self.cur_std = np.nan
def processDecay(self, timestamp):
factor=1
# check for decay
if not math.isnan(self.lastTimestamp):
timeDiff = timestamp - self.lastTimestamp
factor = math.pow(2, (-self.Lambda * timeDiff))
self.CF1 = self.CF1 * factor
self.CF2 = self.CF2 * factor
self.w = self.w * factor
self.lastTimestamp = timestamp
return factor
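    # Worked example (illustrative): with Lambda = 0.1 and a gap of 10 time units,
    # factor = 2 ** (-0.1 * 10) = 0.5, so CF1, CF2 and the weight w are all halved
    # before the next observation is folded in.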
def weight(self):
return self.w
def mean(self):
if math.isnan(self.cur_mean): # calculate it only once when necessary
self.cur_mean = self.CF1 / self.w
return self.cur_mean
def var(self):
if math.isnan(self.cur_var): # calculate it only once when necessary
self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))
return self.cur_var
def std(self):
if math.isnan(self.cur_std): # calculate it only once when necessary
self.cur_std = math.sqrt(self.var())
return self.cur_std
#calculates and pulls all stats
def allstats(self):
self.cur_mean = self.CF1 / self.w
self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
return self.w, self.cur_mean, self.cur_var
def getHeaders(self):
return "weight", "mean", "variance"
#like incStat, but maintains stats between two streams
class incStat_2D(incStat):
def __init__(self, Lambda): # timestamp is creation time
self.CF1 = 0 # linear sum
self.CF2 = 0 # sum of squares
        self.CF3 = None # sum of residuals (A-uA)
self.w = 0 # weight
self.Lambda = Lambda # Decay Factor
self.lastTimestamp = np.nan
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
self.cur_cov = np.nan
        self.last_residule = 0 # the value of the last residual
#other_incS_decay is the decay factor of the other incstat
def insert2D(self, v, t, other_incS_lastRes, other_incS_decay = 1): # also updates covariance (expensive)
self.processDecay(t)
# update with v
self.CF1 = self.CF1 + v
self.CF2 = self.CF2 + math.pow(v, 2)
self.w = self.w + 1
self.cur_mean = np.nan # force recalculation if called
self.cur_var = np.nan
self.cur_std = np.nan
self.cur_cov = np.nan
self.last_residule = v - self.mean()
self.CF3[0] = self.CF3[0] + self.last_residule * other_incS_lastRes * other_incS_decay
def processDecay(self, timestamp):
# check for decay
factor=1
if not math.isnan(self.lastTimestamp):
timeDiff = timestamp - self.lastTimestamp
factor = math.pow(2, (-self.Lambda * timeDiff))
self.CF1 = self.CF1 * factor
self.CF2 = self.CF2 * factor
            if self.CF3 is None:
self.CF3 = [0]
self.CF3[0] = self.CF3[0] * factor
self.w = self.w * factor
self.lastTimestamp = timestamp
return factor
def radius(self, istat_ref): # the radius of two stats
return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].var(), 2))
def magnitude(self, istat_ref): # the magnitude of two stats
return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].mean(), 2))
    #covariance approximation using a hold-and-wait model
    def cov(self,istat_ref): # assumes the timestamp in 'self.lastTimestamp' is the current time
if math.isnan(self.cur_cov):
self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
return self.cur_cov
    # Pearson correlation coefficient (using a hold-and-wait model)
    def p_cc(self, istat_ref): # assumes the timestamp in 'self.lastTimestamp' is the current time
        ss = self.std() * istat_ref[0].std()
        if ss != 0:
            return self.cov(istat_ref) / ss # cov() expects the mutable [incStat] reference, not the incStat itself
else:
return 0
# calculates and pulls all stats
def allstats2D(self, istat_ref):
self.cur_mean = self.CF1 / self.w
self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
self.cur_std = math.sqrt(self.cur_var)
if istat_ref[0].w != 0:
cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(istat_ref[0].mean(), 2))
radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(istat_ref[0].var(), 2))
ss = self.cur_std * istat_ref[0].std()
pcc = 0
if ss != 0:
pcc = cov / ss
else:
magnitude = self.cur_mean
radius = self.cur_var
cov = 0
pcc = 0
return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc
def getHeaders(self):
return "weight", "mean", "std", "magnitude", "radius", "covariance", "pcc"
# A set of incremental statistics (one per decay factor in L) for a 1 or 2 dimensional time-series
class windowed_incStat:
    # Each lambda in the tuple L determines an incStat's decay window size (factor)
def __init__(self, L, isTypeJitter=False):
self.incStats = list()
self.L = sorted(L,reverse=True) #largest lambda to smallest
for l in self.L:
self.incStats.append(incStat(l,isTypeJitter))
# returns the weight, mean, and variance of each window
def getStats(self):
allstats = np.zeros(len(self.L)*3) #3 stats for each lambda
for i in range(0,len(self.incStats)):
stats = self.incStats[i].allstats()
allstats[i*3:(i*3+3)] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0,len(self.incStats)):
headers = headers + ["L"+str(self.L[i])+"_"+header for header in self.incStats[i].getHeaders()]
return headers
# updates the statistics
# val is the new observation
# timestamp is the arrival time of val.
# lite only updates incrementals needed for weight, mean, variance, magnitude and radius
def updateStats(self, val, timestamp):
for i in range(0,len(self.incStats)):
self.incStats[i].insert(val, timestamp)
# First updates, then gets the stats (weight, mean, and variance only)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
def getMaxW(self,t):
mx = 0
for stat in self.incStats:
stat.processDecay(t)
if stat.w > mx:
mx = stat.w
return mx
# A set of incremental statistics (one per decay factor in L) for a 1 or 2 dimensional time-series
class windowed_incStat_2D:
    # Each lambda parameter in L determines an incStat's decay window size (factor)
def __init__(self, L):
self.incStats = list()
self.L = sorted(L,reverse=True) #largest lambda to smallest
for l in self.L:
self.incStats.append(incStat_2D(l))
        self.other_winStat = None # a mutable reference [] to the windowed_incStat monitoring the other parallel time-series
    # returns the weight, mean, variance, radius, magnitude, covariance, and pcc of each window
    def getStats(self):
        allstats = np.zeros(len(self.L)*7) #7 stats for each lambda
for i in range(0,len(self.incStats)):
stats = self.incStats[i].allstats2D([self.other_winStat[0].incStats[i]])
allstats[i*7:(i*7+7)] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0,len(self.incStats)):
headers = headers + ["L"+str(self.L[i])+"_"+header for header in self.incStats[i].getHeaders()]
return headers
# updates the statistics
# val is the new observation
# timestamp is the arrival time of val.
def updateStats(self, val, timestamp):
for i in range(0,len(self.incStats)):
decay = self.other_winStat[0].incStats[i].processDecay(timestamp)
self.incStats[i].insert2D(val, timestamp, self.other_winStat[0].incStats[i].last_residule, decay)
# First updates, then gets the stats (weight, mean, variance, magnitude, radius, and covariance)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
# Joins two windowed_incStat (e.g. rx and tx channels) together.
# other_winStat should be a [] mutable object
    def join_with_winStat(self, other_winStat): # protect with mutexes!
self.other_winStat = other_winStat
other_winStat[0].other_winStat = [self]
for i in range(0,len(self.incStats)):
self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]
def getMaxW(self,t):
lastIncStat = len(self.incStats)
self.incStats[lastIncStat-1].processDecay(t)
return self.incStats[lastIncStat-1].w
class incStatHT:
    # incStatHT maintains a python dictionary object (Hash Table) filled with a collection of windowed_incStats.
    # The purpose of the incStatHT is to minimize the number of operations in incrementing and retrieving statistics on time-series in an online manner.
    # Note, this library is built in a manner which assumes that the individual time series are NOT sampled at the same time (i.e., fused), thus each stream should be updated individually with each corresponding value.
    # The current implementation can maintain 1-dimensional or 2-dimensional time series, and monitors one decay window per lambda in L over each time-series.
    # If 1-dimensional, set key 2 to the empty string ''.
    # If 2-dimensional, key1 should be the target stream.
    # Each lambda parameter determines an incStat's decay window size (factor): 2^(-lambda*deltaT)
def __init__(self):
self.HT = dict()
def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False): # 1D will only maintain the mean and variance
wis = self.HT.get(key)
if wis is None:
wis = [windowed_incStat(L,isTypeJitter)]
self.HT[key] = wis
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
def getHeaders_1D(self,L):
tmp_incs = windowed_incStat(L)
return tmp_incs.getHeaders()
class incStatHT_2D(incStatHT):
def updateGet_2D(self, key1, key2, val, timestamp, L): # src and dst should be strings
key = key1 + key2
        wis = self.HT.get(key) # get windowed incremental stat object
if wis is None:
wis = self.create_2D_entry(key1, key2, L)
elif hasattr(wis[0],'other_winStat') and wis[0].other_winStat == []:
self.create_1D_entry(key1,key2,L,wis)
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
    def create_1D_entry(self, key1, key2, L, wis): # protect with mutexes!
# create
wis_k2_k1 = [windowed_incStat_2D(L)]
# connect net stats..
wis[0].join_with_winStat(wis_k2_k1)
# store
self.HT[key2 + key1] = wis_k2_k1
return wis_k2_k1
    def create_2D_entry(self, key1, key2, L): # protect with mutexes!
# create
wis_k1_k2 = [windowed_incStat_2D(L)]
wis_k2_k1 = [windowed_incStat_2D(L)]
# connect net stats..
wis_k1_k2[0].join_with_winStat(wis_k2_k1)
# store
self.HT[key1 + key2] = wis_k1_k2
self.HT[key2 + key1] = wis_k2_k1
return wis_k1_k2
def getHeaders_2D(self,L):
tmp_incs = windowed_incStat_2D(L)
return tmp_incs.getHeaders()
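
A minimal usage sketch of the hash-table front ends above; the keys, lambda values, and timestamps are made-up placeholders (a real caller would feed per-stream observations such as packet sizes with their arrival times).

if __name__ == "__main__":
    L = [5, 3, 1]  # decay factors; a larger lambda decays faster, i.e. a shorter window

    # 1-D stream keyed by a single identifier
    ht1 = incStatHT()
    ht1.updateGet_1D('hostA', val=120.0, timestamp=0.00, L=L)
    stats_1d = ht1.updateGet_1D('hostA', val=64.0, timestamp=0.05, L=L)
    print(ht1.getHeaders_1D(L))   # ['L5_weight', 'L5_mean', 'L5_variance', 'L3_weight', ...]
    print(stats_1d)               # len(L) * 3 values: weight, mean, variance per window

    # 2-D stream pair (e.g. traffic from hostA to hostB and back)
    ht2 = incStatHT_2D()
    ht2.updateGet_2D('hostA', 'hostB', val=512.0, timestamp=0.10, L=L)
    stats_2d = ht2.updateGet_2D('hostB', 'hostA', val=256.0, timestamp=0.12, L=L)
    print(ht2.getHeaders_2D(L))   # 7 statistics per lambda, including covariance and pcc
    print(stats_2d)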
|
normal
|
{
"blob_id": "7b2ca3db44c5f71c2975bd8af701dafca3b3d081",
"index": 5492,
"step-1": "<mask token>\n\n\nclass windowed_incStat:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-2": "<mask token>\n\n\nclass incStat_2D(incStat):\n\n def __init__(self, Lambda):\n self.CF1 = 0\n self.CF2 = 0\n self.CF3 = None\n self.w = 0\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0\n\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0\n ] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref):\n return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].\n var(), 2))\n\n def magnitude(self, istat_ref):\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].\n mean(), 2))\n <mask token>\n\n def p_cc(self, istat_ref):\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n <mask token>\n\n def getHeaders(self):\n return ('weight', 'mean', 'std', 'magnitude', 'radius',\n 'covariance', 'pcc')\n\n\nclass windowed_incStat:\n\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat(l, isTypeJitter))\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 3)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i * 3:i * 3 + 3] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self, t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n return mx\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, 
timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-3": "<mask token>\n\n\nclass incStat:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass incStat_2D(incStat):\n\n def __init__(self, Lambda):\n self.CF1 = 0\n self.CF2 = 0\n self.CF3 = None\n self.w = 0\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0\n\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0\n ] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref):\n return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].\n var(), 2))\n\n def magnitude(self, istat_ref):\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].\n mean(), 2))\n\n def cov(self, istat_ref):\n if math.isnan(self.cur_cov):\n self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n return self.cur_cov\n\n def p_cc(self, istat_ref):\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n\n def allstats2D(self, istat_ref):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n self.cur_std = math.sqrt(self.cur_var)\n if istat_ref[0].w != 0:\n cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(\n istat_ref[0].mean(), 2))\n radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(\n istat_ref[0].var(), 2))\n ss = self.cur_std * istat_ref[0].std()\n pcc = 0\n if ss != 0:\n pcc = cov / ss\n else:\n magnitude = self.cur_mean\n radius = self.cur_var\n cov = 0\n pcc = 0\n return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc\n\n def getHeaders(self):\n return ('weight', 'mean', 'std', 'magnitude', 'radius',\n 'covariance', 'pcc')\n\n\nclass windowed_incStat:\n\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat(l, isTypeJitter))\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 3)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i * 3:i * 3 + 3] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self, t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n 
return mx\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-4": "<mask token>\n\n\nclass incStat:\n\n def __init__(self, Lambda, isTypeJitter=False):\n self.CF1 = 0\n self.CF2 = 0\n self.w = 0\n self.isTypeJitter = isTypeJitter\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def insert(self, v, t=0):\n if self.isTypeJitter:\n if not math.isnan(self.lastTimestamp):\n v = t - self.lastTimestamp\n else:\n v = 0\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def weight(self):\n return self.w\n\n def mean(self):\n if math.isnan(self.cur_mean):\n self.cur_mean = self.CF1 / self.w\n return self.cur_mean\n\n def var(self):\n if math.isnan(self.cur_var):\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))\n return self.cur_var\n\n def std(self):\n if math.isnan(self.cur_std):\n self.cur_std = math.sqrt(self.var())\n return self.cur_std\n\n def allstats(self):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n return self.w, self.cur_mean, self.cur_var\n\n def getHeaders(self):\n return 'weight', 'mean', 'variance'\n\n\nclass incStat_2D(incStat):\n\n def __init__(self, Lambda):\n self.CF1 = 0\n self.CF2 = 0\n self.CF3 = None\n self.w = 0\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0\n\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0\n ] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref):\n return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].\n var(), 2))\n\n def magnitude(self, istat_ref):\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].\n mean(), 2))\n\n def cov(self, istat_ref):\n if math.isnan(self.cur_cov):\n self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n return self.cur_cov\n\n def p_cc(self, istat_ref):\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n\n def allstats2D(self, istat_ref):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n self.cur_std = math.sqrt(self.cur_var)\n if istat_ref[0].w != 0:\n cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(\n 
istat_ref[0].mean(), 2))\n radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(\n istat_ref[0].var(), 2))\n ss = self.cur_std * istat_ref[0].std()\n pcc = 0\n if ss != 0:\n pcc = cov / ss\n else:\n magnitude = self.cur_mean\n radius = self.cur_var\n cov = 0\n pcc = 0\n return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc\n\n def getHeaders(self):\n return ('weight', 'mean', 'std', 'magnitude', 'radius',\n 'covariance', 'pcc')\n\n\nclass windowed_incStat:\n\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat(l, isTypeJitter))\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 3)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i * 3:i * 3 + 3] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self, t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n return mx\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = 
wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-5": "import math\nimport numpy as np\n\n\nclass incStat:\n def __init__(self, Lambda, isTypeJitter=False): # timestamp is creation time\n self.CF1 = 0 # linear sum\n self.CF2 = 0 # sum of squares\n self.w = 0 # weight\n self.isTypeJitter = isTypeJitter\n self.Lambda = Lambda # Decay Factor\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def insert(self, v, t=0): # v is a scalar, t is v's arrival the timestamp\n if self.isTypeJitter:\n if not math.isnan(self.lastTimestamp):\n v = t - self.lastTimestamp\n else:\n v = 0\n self.processDecay(t)\n\n # update with v\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan # force recalculation if called\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def processDecay(self, timestamp):\n factor=1\n # check for decay\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, (-self.Lambda * timeDiff))\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def weight(self):\n return self.w\n\n def mean(self):\n if math.isnan(self.cur_mean): # calculate it only once when necessary\n self.cur_mean = self.CF1 / self.w\n return self.cur_mean\n\n def var(self):\n if math.isnan(self.cur_var): # calculate it only once when necessary\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))\n return self.cur_var\n\n def std(self):\n if math.isnan(self.cur_std): # calculate it only once when necessary\n self.cur_std = math.sqrt(self.var())\n return self.cur_std\n\n #calculates and pulls all stats\n def allstats(self):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n return self.w, self.cur_mean, self.cur_var\n\n def getHeaders(self):\n return \"weight\", \"mean\", \"variance\"\n\n#like incStat, but maintains stats between two streams\nclass incStat_2D(incStat):\n def __init__(self, Lambda): # timestamp is creation time\n self.CF1 = 0 # linear sum\n self.CF2 = 0 # sum of squares\n self.CF3 = None # sum of residules (A-uA)\n self.w = 0 # weight\n self.Lambda = Lambda # Decay Factor\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0 # the value of the last residule\n\n #other_incS_decay is the decay factor of the other incstat\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay = 1): # also updates covariance (expensive)\n self.processDecay(t)\n\n # update with v\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan # force recalculation if called\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n # check for decay\n factor=1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, (-self.Lambda * timeDiff))\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref): # the radius of two stats\n return math.sqrt(math.pow(self.var(), 2) + 
math.pow(istat_ref[0].var(), 2))\n\n def magnitude(self, istat_ref): # the magnitude of two stats\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].mean(), 2))\n\n #covaince approximation using a hold-and-wait model\n def cov(self,istat_ref): # assumes that current time is the timestamp in 'self.lastTimestamp' is the current time\n if math.isnan(self.cur_cov):\n self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n return self.cur_cov\n\n # Pearson corl. coef (using a hold-and-wait model)\n def p_cc(self, istat_ref): # assumes that current time is the timestamp in 'self.lastTimestamp' is the current time\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n\n # calculates and pulls all stats\n def allstats2D(self, istat_ref):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n self.cur_std = math.sqrt(self.cur_var)\n\n if istat_ref[0].w != 0:\n cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(istat_ref[0].mean(), 2))\n radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(istat_ref[0].var(), 2))\n ss = self.cur_std * istat_ref[0].std()\n pcc = 0\n if ss != 0:\n pcc = cov / ss\n else:\n magnitude = self.cur_mean\n radius = self.cur_var\n cov = 0\n pcc = 0\n\n return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc\n\n def getHeaders(self):\n return \"weight\", \"mean\", \"std\", \"magnitude\", \"radius\", \"covariance\", \"pcc\"\n\n\n# A set of 3 incremental statistics for a 1 or 2 dimensional time-series\nclass windowed_incStat:\n # Each lambda in the tuple L parameter determines a incStat's decay window size (factor)\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L,reverse=True) #largest lambda to smallest\n for l in self.L:\n self.incStats.append(incStat(l,isTypeJitter))\n\n # returns the weight, mean, and variance of each window\n def getStats(self):\n allstats = np.zeros(len(self.L)*3) #3 stats for each lambda\n for i in range(0,len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i*3:(i*3+3)] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0,len(self.incStats)):\n headers = headers + [\"L\"+str(self.L[i])+\"_\"+header for header in self.incStats[i].getHeaders()]\n return headers\n\n # updates the statistics\n # val is the new observation\n # timestamp is the arrival time of val.\n # lite only updates incrementals needed for weight, mean, variance, magnitude and radius\n def updateStats(self, val, timestamp):\n for i in range(0,len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n # First updates, then gets the stats (weight, mean, and variance only)\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self,t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n return mx\n\n# A set of 3 incremental statistics for a 1 or 2 dimensional time-series\nclass windowed_incStat_2D:\n # Each lambda parameter in L determines a incStat's decay window size (factor)\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L,reverse=True) #largest lambda to smallest\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None # a mutable refernece [] to the windowed_incStat monitoring the other parallel time-series\n\n # returns 
the weight, mean, variance, radius, magnitude, and covariance and pcc of each window\n def getStats(self):\n allstats = np.zeros(len(self.L)*7) #6 stats for each lambda\n for i in range(0,len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].incStats[i]])\n allstats[i*7:(i*7+7)] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0,len(self.incStats)):\n headers = headers + [\"L\"+str(self.L[i])+\"_\"+header for header in self.incStats[i].getHeaders()]\n return headers\n\n # updates the statistics\n # val is the new observation\n # timestamp is the arrival time of val.\n def updateStats(self, val, timestamp):\n for i in range(0,len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0].incStats[i].last_residule, decay)\n\n # First updates, then gets the stats (weight, mean, variance, magnitude, radius, and covariance)\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n # Joins two windowed_incStat (e.g. rx and tx channels) together.\n # other_winStat should be a [] mutable object\n def join_with_winStat(self, other_winStat): # prectect with mutexes!\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0,len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self,t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat-1].processDecay(t)\n return self.incStats[lastIncStat-1].w\n\nclass incStatHT:\n # incStatHT maintains a python dictionary object (Hash Table) filled with a collection of windowed_incStats.\n # The purpose of the incStatHT is to minimize the number of operations in incrementing and retrieving statics on time-series in an online manner.\n # Note, this library is built in a manner which assumes that the individual time sereis are NOT sampled at the same time (i.e., fused), thus each stream should be updated individually with each corresponding value.\n\n # The current implementation can maintain 1-dimensional or 2-dimensional time series, and monitors three windows over each time-series.\n # If 1-dimensional, set key 2 to the empty string ''.\n # If 2-dimensional, key1 should be the target stream\n # Each lambda parameter determines a incStat's decay window size (factor): 2^(-lambda*deltaT)\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False): # 1D will only maintain the mean and variance\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L,isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self,L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\nclass incStatHT_2D(incStatHT):\n def updateGet_2D(self, key1, key2, val, timestamp, L): # src and dst should be strings\n key = key1 + key2\n wis = self.HT.get(key) # get windowed incrimental stat object\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0],'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1,key2,L,wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis): # prectect with mutexes!\n # create\n wis_k2_k1 = [windowed_incStat_2D(L)]\n # connect net stats..\n wis[0].join_with_winStat(wis_k2_k1)\n # store\n self.HT[key2 + key1] = 
wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L): # prectect with mutexes!\n # create\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n # connect net stats..\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n # store\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self,L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-ids": [
18,
32,
35,
44,
46
]
}
|
[
18,
32,
35,
44,
46
] |
import requests
from os.path import join, exists
import os
import fitz
from tqdm import tqdm
from pathlib import Path
import tempfile
def download_pdf(url, folder, name):
r = requests.get(url, allow_redirects=True)
file_path = join(folder, name + ".pdf")
open(file_path, 'wb').write(r.content)
return file_path
def download_pdf_to_temp(url):
new_file, filename = tempfile.mkstemp()
r = requests.get(url, allow_redirects=True)
os.write(new_file, r.content)
return new_file, filename
def save_pdf_image(file_path, dest_path):
Path(dest_path).mkdir(parents=True, exist_ok=True)
doc = fitz.open(file_path)
i = 1
images_name = list()
xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not(xref[0] in [10, 25, 26])])
maximum_digits = len(str(len(xrefs)*3))
for xref in tqdm(xrefs):
pix = fitz.Pixmap(doc, xref)
index = f'{i:0{maximum_digits}}'
img_name = "image--{}.jpg".format(index)
img_path = join(dest_path, img_name)
if not(exists(img_path)):
if pix.n >= 5:
pix = fitz.Pixmap(fitz.csRGB, pix)
pix.writeImage(img_path)
images_name.append(xref)
i += 3
def pdf_2_images(url, dest_path):
new_file, filename = download_pdf_to_temp(url)
save_pdf_image(filename, dest_path)
os.close(new_file)
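
# Usage sketch (added for illustration; the URL and destination folder below are
# placeholders, not part of the original code):
if __name__ == "__main__":
    pdf_2_images("https://example.com/sample.pdf", "pdf_images")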
|
normal
|
{
"blob_id": "c6113088f45951bc4c787760b6ca0138265fb83f",
"index": 9966,
"step-1": "<mask token>\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\n<mask token>\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-2": "<mask token>\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\n<mask token>\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-3": "<mask token>\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\ndef save_pdf_image(file_path, dest_path):\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n doc = fitz.open(file_path)\n i = 1\n images_name = list()\n xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not xref\n [0] in [10, 25, 26]])\n maximum_digits = len(str(len(xrefs) * 3))\n for xref in tqdm(xrefs):\n pix = fitz.Pixmap(doc, xref)\n index = f'{i:0{maximum_digits}}'\n img_name = 'image--{}.jpg'.format(index)\n img_path = join(dest_path, img_name)\n if not exists(img_path):\n if pix.n >= 5:\n pix = fitz.Pixmap(fitz.csRGB, pix)\n pix.writeImage(img_path)\n images_name.append(xref)\n i += 3\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-4": "import requests\nfrom os.path import join, exists\nimport os\nimport fitz\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport tempfile\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\ndef save_pdf_image(file_path, dest_path):\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n doc = fitz.open(file_path)\n i = 1\n images_name = list()\n xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not xref\n [0] in [10, 25, 26]])\n maximum_digits = len(str(len(xrefs) * 3))\n for xref in tqdm(xrefs):\n pix = fitz.Pixmap(doc, xref)\n index = f'{i:0{maximum_digits}}'\n img_name = 'image--{}.jpg'.format(index)\n img_path = join(dest_path, img_name)\n if not exists(img_path):\n if pix.n >= 5:\n pix = fitz.Pixmap(fitz.csRGB, pix)\n pix.writeImage(img_path)\n images_name.append(xref)\n i += 3\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-5": "import requests\nfrom os.path import join, exists\nimport os\nimport fitz\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport tempfile\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + \".pdf\")\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\ndef save_pdf_image(file_path, dest_path):\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n doc = fitz.open(file_path)\n i = 1\n images_name = list()\n xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not(xref[0] in [10, 25, 26])])\n maximum_digits = len(str(len(xrefs)*3))\n for xref in tqdm(xrefs):\n pix = fitz.Pixmap(doc, xref)\n index = f'{i:0{maximum_digits}}'\n img_name = \"image--{}.jpg\".format(index)\n img_path = join(dest_path, img_name)\n if not(exists(img_path)):\n if pix.n >= 5:\n pix = fitz.Pixmap(fitz.csRGB, pix)\n pix.writeImage(img_path)\n images_name.append(xref)\n i += 3\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pytz
import datetime
def apply_timezone_datetime(_local_tz: str, _time: datetime.time):
"""
    Combine today's date (now().date()) with the given time and attach the given timezone.
    :param _local_tz: timezone name understood by pytz, e.g. "Europe/Berlin"
    :param _time: naive datetime.time to localize
    :return: timezone-aware datetime for today at the given time
"""
return pytz.timezone(_local_tz).localize(datetime.datetime.combine(
datetime.datetime.now().date(), _time))
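
# Usage sketch (added for illustration; the timezone name and time are placeholders):
if __name__ == "__main__":
    aware = apply_timezone_datetime("Europe/Berlin", datetime.time(9, 30))
    print(aware.isoformat())  # today's date at 09:30, localized to Europe/Berlin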
|
normal
|
{
"blob_id": "347627df4b08eca6e2137161472b4d31534cf81b",
"index": 1238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef apply_timezone_datetime(_local_tz: str, _time: datetime.time):\n \"\"\"\n set time zone + merge now().date() with time()\n :param _local_tz:\n :param _time:\n :return:\n \"\"\"\n return pytz.timezone(_local_tz).localize(datetime.datetime.combine(\n datetime.datetime.now().date(), _time))\n",
"step-3": "import pytz\nimport datetime\n\n\ndef apply_timezone_datetime(_local_tz: str, _time: datetime.time):\n \"\"\"\n set time zone + merge now().date() with time()\n :param _local_tz:\n :param _time:\n :return:\n \"\"\"\n return pytz.timezone(_local_tz).localize(datetime.datetime.combine(\n datetime.datetime.now().date(), _time))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python3
from collections import OrderedDict
import torch.nn as nn
from fairseq.models import FairseqMultiModel, register_model
from pytorch_translate import common_layers, utils
@register_model("multilingual")
class MultilingualModel(FairseqMultiModel):
"""
To use, you must extend this class and define single_model_cls as a class
variable. Example:
@register_model("multilingual_transformer")
class MultilingualTransformerModel(MultilingualModel):
single_model_cls = TransformerModel
@staticmethod
def add_args(parser):
TransformerModel.add_args(parser)
MultilingualModel.add_args(parser)
"""
def __init__(self, task, encoders, decoders):
super().__init__(encoders, decoders)
self.task = task
self.models = nn.ModuleDict(
{
key: self.__class__.single_model_cls(task, encoders[key], decoders[key])
for key in self.keys
}
)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--share-encoder-embeddings",
action="store_true",
help="share encoder embeddings across languages",
)
parser.add_argument(
"--share-decoder-embeddings",
action="store_true",
help="share decoder embeddings across languages",
)
parser.add_argument(
"--share-encoders",
action="store_true",
help="share encoders across languages",
)
parser.add_argument(
"--share-decoders",
action="store_true",
help="share decoders across languages",
)
@staticmethod
def set_multilingual_arch_args(args):
args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", False)
args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", False)
args.share_encoders = getattr(args, "share_encoders", False)
args.share_decoders = getattr(args, "share_decoders", False)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
if not hasattr(args, "max_source_positions"):
args.max_source_positions = 1024
if not hasattr(args, "max_target_positions"):
args.max_target_positions = 1024
src_langs = [lang_pair.split("-")[0] for lang_pair in task.lang_pairs]
tgt_langs = [lang_pair.split("-")[1] for lang_pair in task.lang_pairs]
if args.share_encoders:
args.share_encoder_embeddings = True
if args.share_decoders:
args.share_decoder_embeddings = True
# encoders/decoders for each language
lang_encoders, lang_decoders = {}, {}
def get_encoder(lang, shared_encoder_embed_tokens=None):
if lang not in lang_encoders:
src_dict = task.dicts[lang]
if shared_encoder_embed_tokens is None:
encoder_embed_tokens = common_layers.Embedding(
num_embeddings=len(src_dict),
embedding_dim=args.encoder_embed_dim,
padding_idx=src_dict.pad(),
freeze_embed=args.encoder_freeze_embed,
normalize_embed=getattr(args, "encoder_normalize_embed", False),
)
utils.load_embedding(
embedding=encoder_embed_tokens,
dictionary=src_dict,
pretrained_embed=args.encoder_pretrained_embed,
)
else:
encoder_embed_tokens = shared_encoder_embed_tokens
lang_encoders[lang] = cls.single_model_cls.build_encoder(
args, src_dict, embed_tokens=encoder_embed_tokens
)
return lang_encoders[lang]
def get_decoder(lang, shared_decoder_embed_tokens=None):
"""
Fetch decoder for the input `lang`, which denotes the target
language of the model
"""
if lang not in lang_decoders:
tgt_dict = task.dicts[lang]
if shared_decoder_embed_tokens is None:
decoder_embed_tokens = common_layers.Embedding(
num_embeddings=len(tgt_dict),
embedding_dim=args.decoder_embed_dim,
padding_idx=tgt_dict.pad(),
freeze_embed=args.decoder_freeze_embed,
)
utils.load_embedding(
embedding=decoder_embed_tokens,
dictionary=tgt_dict,
pretrained_embed=args.decoder_pretrained_embed,
)
else:
decoder_embed_tokens = shared_decoder_embed_tokens
lang_decoders[lang] = cls.single_model_cls.build_decoder(
args, task.dicts[lang], tgt_dict, embed_tokens=decoder_embed_tokens
)
return lang_decoders[lang]
# shared encoders/decoders (if applicable)
shared_encoder, shared_decoder = None, None
if args.share_encoders:
shared_encoder = get_encoder(src_langs[0])
if args.share_decoders:
shared_decoder = get_decoder(tgt_langs[0])
shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None
if args.share_encoder_embeddings:
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=src_langs,
embed_dim=args.encoder_embed_dim,
build_embedding=common_layers.build_embedding,
pretrained_embed_path=None,
)
if args.share_decoder_embeddings:
shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=tgt_langs,
embed_dim=args.decoder_embed_dim,
build_embedding=common_layers.build_embedding,
pretrained_embed_path=None,
)
encoders, decoders = OrderedDict(), OrderedDict()
for lang_pair, src_lang, tgt_lang in zip(task.lang_pairs, src_langs, tgt_langs):
encoders[lang_pair] = (
shared_encoder
if shared_encoder is not None
else get_encoder(
src_lang, shared_encoder_embed_tokens=shared_encoder_embed_tokens
)
)
decoders[lang_pair] = (
shared_decoder
if shared_decoder is not None
else get_decoder(
tgt_lang, shared_decoder_embed_tokens=shared_decoder_embed_tokens
)
)
return cls(task, encoders, decoders)
|
normal
|
{
"blob_id": "0ac471d2cb30a21c1246106ded14cdc4c06d2d40",
"index": 8329,
"step-1": "<mask token>\n\n\n@register_model('multilingual')\nclass MultilingualModel(FairseqMultiModel):\n <mask token>\n\n def __init__(self, task, encoders, decoders):\n super().__init__(encoders, decoders)\n self.task = task\n self.models = nn.ModuleDict({key: self.__class__.single_model_cls(\n task, encoders[key], decoders[key]) for key in self.keys})\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n parser.add_argument('--share-encoder-embeddings', action=\n 'store_true', help='share encoder embeddings across languages')\n parser.add_argument('--share-decoder-embeddings', action=\n 'store_true', help='share decoder embeddings across languages')\n parser.add_argument('--share-encoders', action='store_true', help=\n 'share encoders across languages')\n parser.add_argument('--share-decoders', action='store_true', help=\n 'share decoders across languages')\n <mask token>\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = 1024\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = 1024\n src_langs = [lang_pair.split('-')[0] for lang_pair in task.lang_pairs]\n tgt_langs = [lang_pair.split('-')[1] for lang_pair in task.lang_pairs]\n if args.share_encoders:\n args.share_encoder_embeddings = True\n if args.share_decoders:\n args.share_decoder_embeddings = True\n lang_encoders, lang_decoders = {}, {}\n\n def get_encoder(lang, shared_encoder_embed_tokens=None):\n if lang not in lang_encoders:\n src_dict = task.dicts[lang]\n if shared_encoder_embed_tokens is None:\n encoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(src_dict), embedding_dim=args.\n encoder_embed_dim, padding_idx=src_dict.pad(),\n freeze_embed=args.encoder_freeze_embed,\n normalize_embed=getattr(args,\n 'encoder_normalize_embed', False))\n utils.load_embedding(embedding=encoder_embed_tokens,\n dictionary=src_dict, pretrained_embed=args.\n encoder_pretrained_embed)\n else:\n encoder_embed_tokens = shared_encoder_embed_tokens\n lang_encoders[lang] = cls.single_model_cls.build_encoder(args,\n src_dict, embed_tokens=encoder_embed_tokens)\n return lang_encoders[lang]\n\n def get_decoder(lang, shared_decoder_embed_tokens=None):\n \"\"\"\n Fetch decoder for the input `lang`, which denotes the target\n language of the model\n \"\"\"\n if lang not in lang_decoders:\n tgt_dict = task.dicts[lang]\n if shared_decoder_embed_tokens is None:\n decoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(tgt_dict), embedding_dim=args.\n decoder_embed_dim, padding_idx=tgt_dict.pad(),\n freeze_embed=args.decoder_freeze_embed)\n utils.load_embedding(embedding=decoder_embed_tokens,\n dictionary=tgt_dict, pretrained_embed=args.\n decoder_pretrained_embed)\n else:\n decoder_embed_tokens = shared_decoder_embed_tokens\n lang_decoders[lang] = cls.single_model_cls.build_decoder(args,\n task.dicts[lang], tgt_dict, embed_tokens=\n decoder_embed_tokens)\n return lang_decoders[lang]\n shared_encoder, shared_decoder = None, None\n if args.share_encoders:\n shared_encoder = get_encoder(src_langs[0])\n if args.share_decoders:\n shared_decoder = get_decoder(tgt_langs[0])\n shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None\n if args.share_encoder_embeddings:\n shared_encoder_embed_tokens = (FairseqMultiModel.\n build_shared_embeddings(dicts=task.dicts, langs=src_langs,\n embed_dim=args.encoder_embed_dim, build_embedding=\n 
common_layers.build_embedding, pretrained_embed_path=None))\n if args.share_decoder_embeddings:\n shared_decoder_embed_tokens = (FairseqMultiModel.\n build_shared_embeddings(dicts=task.dicts, langs=tgt_langs,\n embed_dim=args.decoder_embed_dim, build_embedding=\n common_layers.build_embedding, pretrained_embed_path=None))\n encoders, decoders = OrderedDict(), OrderedDict()\n for lang_pair, src_lang, tgt_lang in zip(task.lang_pairs, src_langs,\n tgt_langs):\n encoders[lang_pair\n ] = shared_encoder if shared_encoder is not None else get_encoder(\n src_lang, shared_encoder_embed_tokens=\n shared_encoder_embed_tokens)\n decoders[lang_pair\n ] = shared_decoder if shared_decoder is not None else get_decoder(\n tgt_lang, shared_decoder_embed_tokens=\n shared_decoder_embed_tokens)\n return cls(task, encoders, decoders)\n",
"step-2": "<mask token>\n\n\n@register_model('multilingual')\nclass MultilingualModel(FairseqMultiModel):\n <mask token>\n\n def __init__(self, task, encoders, decoders):\n super().__init__(encoders, decoders)\n self.task = task\n self.models = nn.ModuleDict({key: self.__class__.single_model_cls(\n task, encoders[key], decoders[key]) for key in self.keys})\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n parser.add_argument('--share-encoder-embeddings', action=\n 'store_true', help='share encoder embeddings across languages')\n parser.add_argument('--share-decoder-embeddings', action=\n 'store_true', help='share decoder embeddings across languages')\n parser.add_argument('--share-encoders', action='store_true', help=\n 'share encoders across languages')\n parser.add_argument('--share-decoders', action='store_true', help=\n 'share decoders across languages')\n\n @staticmethod\n def set_multilingual_arch_args(args):\n args.share_encoder_embeddings = getattr(args,\n 'share_encoder_embeddings', False)\n args.share_decoder_embeddings = getattr(args,\n 'share_decoder_embeddings', False)\n args.share_encoders = getattr(args, 'share_encoders', False)\n args.share_decoders = getattr(args, 'share_decoders', False)\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = 1024\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = 1024\n src_langs = [lang_pair.split('-')[0] for lang_pair in task.lang_pairs]\n tgt_langs = [lang_pair.split('-')[1] for lang_pair in task.lang_pairs]\n if args.share_encoders:\n args.share_encoder_embeddings = True\n if args.share_decoders:\n args.share_decoder_embeddings = True\n lang_encoders, lang_decoders = {}, {}\n\n def get_encoder(lang, shared_encoder_embed_tokens=None):\n if lang not in lang_encoders:\n src_dict = task.dicts[lang]\n if shared_encoder_embed_tokens is None:\n encoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(src_dict), embedding_dim=args.\n encoder_embed_dim, padding_idx=src_dict.pad(),\n freeze_embed=args.encoder_freeze_embed,\n normalize_embed=getattr(args,\n 'encoder_normalize_embed', False))\n utils.load_embedding(embedding=encoder_embed_tokens,\n dictionary=src_dict, pretrained_embed=args.\n encoder_pretrained_embed)\n else:\n encoder_embed_tokens = shared_encoder_embed_tokens\n lang_encoders[lang] = cls.single_model_cls.build_encoder(args,\n src_dict, embed_tokens=encoder_embed_tokens)\n return lang_encoders[lang]\n\n def get_decoder(lang, shared_decoder_embed_tokens=None):\n \"\"\"\n Fetch decoder for the input `lang`, which denotes the target\n language of the model\n \"\"\"\n if lang not in lang_decoders:\n tgt_dict = task.dicts[lang]\n if shared_decoder_embed_tokens is None:\n decoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(tgt_dict), embedding_dim=args.\n decoder_embed_dim, padding_idx=tgt_dict.pad(),\n freeze_embed=args.decoder_freeze_embed)\n utils.load_embedding(embedding=decoder_embed_tokens,\n dictionary=tgt_dict, pretrained_embed=args.\n decoder_pretrained_embed)\n else:\n decoder_embed_tokens = shared_decoder_embed_tokens\n lang_decoders[lang] = cls.single_model_cls.build_decoder(args,\n task.dicts[lang], tgt_dict, embed_tokens=\n decoder_embed_tokens)\n return lang_decoders[lang]\n shared_encoder, shared_decoder = None, None\n if args.share_encoders:\n shared_encoder = get_encoder(src_langs[0])\n if 
args.share_decoders:\n shared_decoder = get_decoder(tgt_langs[0])\n shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None\n if args.share_encoder_embeddings:\n shared_encoder_embed_tokens = (FairseqMultiModel.\n build_shared_embeddings(dicts=task.dicts, langs=src_langs,\n embed_dim=args.encoder_embed_dim, build_embedding=\n common_layers.build_embedding, pretrained_embed_path=None))\n if args.share_decoder_embeddings:\n shared_decoder_embed_tokens = (FairseqMultiModel.\n build_shared_embeddings(dicts=task.dicts, langs=tgt_langs,\n embed_dim=args.decoder_embed_dim, build_embedding=\n common_layers.build_embedding, pretrained_embed_path=None))\n encoders, decoders = OrderedDict(), OrderedDict()\n for lang_pair, src_lang, tgt_lang in zip(task.lang_pairs, src_langs,\n tgt_langs):\n encoders[lang_pair\n ] = shared_encoder if shared_encoder is not None else get_encoder(\n src_lang, shared_encoder_embed_tokens=\n shared_encoder_embed_tokens)\n decoders[lang_pair\n ] = shared_decoder if shared_decoder is not None else get_decoder(\n tgt_lang, shared_decoder_embed_tokens=\n shared_decoder_embed_tokens)\n return cls(task, encoders, decoders)\n",
"step-3": "<mask token>\n\n\n@register_model('multilingual')\nclass MultilingualModel(FairseqMultiModel):\n \"\"\"\n To use, you must extend this class and define single_model_cls as a class\n variable. Example:\n\n @register_model(\"multilingual_transformer\")\n class MultilingualTransformerModel(MultilingualModel):\n single_model_cls = TransformerModel\n\n @staticmethod\n def add_args(parser):\n TransformerModel.add_args(parser)\n MultilingualModel.add_args(parser)\n \"\"\"\n\n def __init__(self, task, encoders, decoders):\n super().__init__(encoders, decoders)\n self.task = task\n self.models = nn.ModuleDict({key: self.__class__.single_model_cls(\n task, encoders[key], decoders[key]) for key in self.keys})\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n parser.add_argument('--share-encoder-embeddings', action=\n 'store_true', help='share encoder embeddings across languages')\n parser.add_argument('--share-decoder-embeddings', action=\n 'store_true', help='share decoder embeddings across languages')\n parser.add_argument('--share-encoders', action='store_true', help=\n 'share encoders across languages')\n parser.add_argument('--share-decoders', action='store_true', help=\n 'share decoders across languages')\n\n @staticmethod\n def set_multilingual_arch_args(args):\n args.share_encoder_embeddings = getattr(args,\n 'share_encoder_embeddings', False)\n args.share_decoder_embeddings = getattr(args,\n 'share_decoder_embeddings', False)\n args.share_encoders = getattr(args, 'share_encoders', False)\n args.share_decoders = getattr(args, 'share_decoders', False)\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = 1024\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = 1024\n src_langs = [lang_pair.split('-')[0] for lang_pair in task.lang_pairs]\n tgt_langs = [lang_pair.split('-')[1] for lang_pair in task.lang_pairs]\n if args.share_encoders:\n args.share_encoder_embeddings = True\n if args.share_decoders:\n args.share_decoder_embeddings = True\n lang_encoders, lang_decoders = {}, {}\n\n def get_encoder(lang, shared_encoder_embed_tokens=None):\n if lang not in lang_encoders:\n src_dict = task.dicts[lang]\n if shared_encoder_embed_tokens is None:\n encoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(src_dict), embedding_dim=args.\n encoder_embed_dim, padding_idx=src_dict.pad(),\n freeze_embed=args.encoder_freeze_embed,\n normalize_embed=getattr(args,\n 'encoder_normalize_embed', False))\n utils.load_embedding(embedding=encoder_embed_tokens,\n dictionary=src_dict, pretrained_embed=args.\n encoder_pretrained_embed)\n else:\n encoder_embed_tokens = shared_encoder_embed_tokens\n lang_encoders[lang] = cls.single_model_cls.build_encoder(args,\n src_dict, embed_tokens=encoder_embed_tokens)\n return lang_encoders[lang]\n\n def get_decoder(lang, shared_decoder_embed_tokens=None):\n \"\"\"\n Fetch decoder for the input `lang`, which denotes the target\n language of the model\n \"\"\"\n if lang not in lang_decoders:\n tgt_dict = task.dicts[lang]\n if shared_decoder_embed_tokens is None:\n decoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(tgt_dict), embedding_dim=args.\n decoder_embed_dim, padding_idx=tgt_dict.pad(),\n freeze_embed=args.decoder_freeze_embed)\n utils.load_embedding(embedding=decoder_embed_tokens,\n dictionary=tgt_dict, pretrained_embed=args.\n 
decoder_pretrained_embed)\n else:\n decoder_embed_tokens = shared_decoder_embed_tokens\n lang_decoders[lang] = cls.single_model_cls.build_decoder(args,\n task.dicts[lang], tgt_dict, embed_tokens=\n decoder_embed_tokens)\n return lang_decoders[lang]\n shared_encoder, shared_decoder = None, None\n if args.share_encoders:\n shared_encoder = get_encoder(src_langs[0])\n if args.share_decoders:\n shared_decoder = get_decoder(tgt_langs[0])\n shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None\n if args.share_encoder_embeddings:\n shared_encoder_embed_tokens = (FairseqMultiModel.\n build_shared_embeddings(dicts=task.dicts, langs=src_langs,\n embed_dim=args.encoder_embed_dim, build_embedding=\n common_layers.build_embedding, pretrained_embed_path=None))\n if args.share_decoder_embeddings:\n shared_decoder_embed_tokens = (FairseqMultiModel.\n build_shared_embeddings(dicts=task.dicts, langs=tgt_langs,\n embed_dim=args.decoder_embed_dim, build_embedding=\n common_layers.build_embedding, pretrained_embed_path=None))\n encoders, decoders = OrderedDict(), OrderedDict()\n for lang_pair, src_lang, tgt_lang in zip(task.lang_pairs, src_langs,\n tgt_langs):\n encoders[lang_pair\n ] = shared_encoder if shared_encoder is not None else get_encoder(\n src_lang, shared_encoder_embed_tokens=\n shared_encoder_embed_tokens)\n decoders[lang_pair\n ] = shared_decoder if shared_decoder is not None else get_decoder(\n tgt_lang, shared_decoder_embed_tokens=\n shared_decoder_embed_tokens)\n return cls(task, encoders, decoders)\n",
"step-4": "from collections import OrderedDict\nimport torch.nn as nn\nfrom fairseq.models import FairseqMultiModel, register_model\nfrom pytorch_translate import common_layers, utils\n\n\n@register_model('multilingual')\nclass MultilingualModel(FairseqMultiModel):\n \"\"\"\n To use, you must extend this class and define single_model_cls as a class\n variable. Example:\n\n @register_model(\"multilingual_transformer\")\n class MultilingualTransformerModel(MultilingualModel):\n single_model_cls = TransformerModel\n\n @staticmethod\n def add_args(parser):\n TransformerModel.add_args(parser)\n MultilingualModel.add_args(parser)\n \"\"\"\n\n def __init__(self, task, encoders, decoders):\n super().__init__(encoders, decoders)\n self.task = task\n self.models = nn.ModuleDict({key: self.__class__.single_model_cls(\n task, encoders[key], decoders[key]) for key in self.keys})\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n parser.add_argument('--share-encoder-embeddings', action=\n 'store_true', help='share encoder embeddings across languages')\n parser.add_argument('--share-decoder-embeddings', action=\n 'store_true', help='share decoder embeddings across languages')\n parser.add_argument('--share-encoders', action='store_true', help=\n 'share encoders across languages')\n parser.add_argument('--share-decoders', action='store_true', help=\n 'share decoders across languages')\n\n @staticmethod\n def set_multilingual_arch_args(args):\n args.share_encoder_embeddings = getattr(args,\n 'share_encoder_embeddings', False)\n args.share_decoder_embeddings = getattr(args,\n 'share_decoder_embeddings', False)\n args.share_encoders = getattr(args, 'share_encoders', False)\n args.share_decoders = getattr(args, 'share_decoders', False)\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = 1024\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = 1024\n src_langs = [lang_pair.split('-')[0] for lang_pair in task.lang_pairs]\n tgt_langs = [lang_pair.split('-')[1] for lang_pair in task.lang_pairs]\n if args.share_encoders:\n args.share_encoder_embeddings = True\n if args.share_decoders:\n args.share_decoder_embeddings = True\n lang_encoders, lang_decoders = {}, {}\n\n def get_encoder(lang, shared_encoder_embed_tokens=None):\n if lang not in lang_encoders:\n src_dict = task.dicts[lang]\n if shared_encoder_embed_tokens is None:\n encoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(src_dict), embedding_dim=args.\n encoder_embed_dim, padding_idx=src_dict.pad(),\n freeze_embed=args.encoder_freeze_embed,\n normalize_embed=getattr(args,\n 'encoder_normalize_embed', False))\n utils.load_embedding(embedding=encoder_embed_tokens,\n dictionary=src_dict, pretrained_embed=args.\n encoder_pretrained_embed)\n else:\n encoder_embed_tokens = shared_encoder_embed_tokens\n lang_encoders[lang] = cls.single_model_cls.build_encoder(args,\n src_dict, embed_tokens=encoder_embed_tokens)\n return lang_encoders[lang]\n\n def get_decoder(lang, shared_decoder_embed_tokens=None):\n \"\"\"\n Fetch decoder for the input `lang`, which denotes the target\n language of the model\n \"\"\"\n if lang not in lang_decoders:\n tgt_dict = task.dicts[lang]\n if shared_decoder_embed_tokens is None:\n decoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(tgt_dict), embedding_dim=args.\n decoder_embed_dim, padding_idx=tgt_dict.pad(),\n 
freeze_embed=args.decoder_freeze_embed)\n utils.load_embedding(embedding=decoder_embed_tokens,\n dictionary=tgt_dict, pretrained_embed=args.\n decoder_pretrained_embed)\n else:\n decoder_embed_tokens = shared_decoder_embed_tokens\n lang_decoders[lang] = cls.single_model_cls.build_decoder(args,\n task.dicts[lang], tgt_dict, embed_tokens=\n decoder_embed_tokens)\n return lang_decoders[lang]\n shared_encoder, shared_decoder = None, None\n if args.share_encoders:\n shared_encoder = get_encoder(src_langs[0])\n if args.share_decoders:\n shared_decoder = get_decoder(tgt_langs[0])\n shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None\n if args.share_encoder_embeddings:\n shared_encoder_embed_tokens = (FairseqMultiModel.\n build_shared_embeddings(dicts=task.dicts, langs=src_langs,\n embed_dim=args.encoder_embed_dim, build_embedding=\n common_layers.build_embedding, pretrained_embed_path=None))\n if args.share_decoder_embeddings:\n shared_decoder_embed_tokens = (FairseqMultiModel.\n build_shared_embeddings(dicts=task.dicts, langs=tgt_langs,\n embed_dim=args.decoder_embed_dim, build_embedding=\n common_layers.build_embedding, pretrained_embed_path=None))\n encoders, decoders = OrderedDict(), OrderedDict()\n for lang_pair, src_lang, tgt_lang in zip(task.lang_pairs, src_langs,\n tgt_langs):\n encoders[lang_pair\n ] = shared_encoder if shared_encoder is not None else get_encoder(\n src_lang, shared_encoder_embed_tokens=\n shared_encoder_embed_tokens)\n decoders[lang_pair\n ] = shared_decoder if shared_decoder is not None else get_decoder(\n tgt_lang, shared_decoder_embed_tokens=\n shared_decoder_embed_tokens)\n return cls(task, encoders, decoders)\n",
"step-5": "#!/usr/bin/env python3\n\nfrom collections import OrderedDict\n\nimport torch.nn as nn\nfrom fairseq.models import FairseqMultiModel, register_model\nfrom pytorch_translate import common_layers, utils\n\n\n@register_model(\"multilingual\")\nclass MultilingualModel(FairseqMultiModel):\n \"\"\"\n To use, you must extend this class and define single_model_cls as a class\n variable. Example:\n\n @register_model(\"multilingual_transformer\")\n class MultilingualTransformerModel(MultilingualModel):\n single_model_cls = TransformerModel\n\n @staticmethod\n def add_args(parser):\n TransformerModel.add_args(parser)\n MultilingualModel.add_args(parser)\n \"\"\"\n\n def __init__(self, task, encoders, decoders):\n super().__init__(encoders, decoders)\n self.task = task\n self.models = nn.ModuleDict(\n {\n key: self.__class__.single_model_cls(task, encoders[key], decoders[key])\n for key in self.keys\n }\n )\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n parser.add_argument(\n \"--share-encoder-embeddings\",\n action=\"store_true\",\n help=\"share encoder embeddings across languages\",\n )\n parser.add_argument(\n \"--share-decoder-embeddings\",\n action=\"store_true\",\n help=\"share decoder embeddings across languages\",\n )\n parser.add_argument(\n \"--share-encoders\",\n action=\"store_true\",\n help=\"share encoders across languages\",\n )\n parser.add_argument(\n \"--share-decoders\",\n action=\"store_true\",\n help=\"share decoders across languages\",\n )\n\n @staticmethod\n def set_multilingual_arch_args(args):\n args.share_encoder_embeddings = getattr(args, \"share_encoder_embeddings\", False)\n args.share_decoder_embeddings = getattr(args, \"share_decoder_embeddings\", False)\n args.share_encoders = getattr(args, \"share_encoders\", False)\n args.share_decoders = getattr(args, \"share_decoders\", False)\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 1024\n if not hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 1024\n\n src_langs = [lang_pair.split(\"-\")[0] for lang_pair in task.lang_pairs]\n tgt_langs = [lang_pair.split(\"-\")[1] for lang_pair in task.lang_pairs]\n\n if args.share_encoders:\n args.share_encoder_embeddings = True\n if args.share_decoders:\n args.share_decoder_embeddings = True\n\n # encoders/decoders for each language\n lang_encoders, lang_decoders = {}, {}\n\n def get_encoder(lang, shared_encoder_embed_tokens=None):\n if lang not in lang_encoders:\n src_dict = task.dicts[lang]\n if shared_encoder_embed_tokens is None:\n encoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(src_dict),\n embedding_dim=args.encoder_embed_dim,\n padding_idx=src_dict.pad(),\n freeze_embed=args.encoder_freeze_embed,\n normalize_embed=getattr(args, \"encoder_normalize_embed\", False),\n )\n utils.load_embedding(\n embedding=encoder_embed_tokens,\n dictionary=src_dict,\n pretrained_embed=args.encoder_pretrained_embed,\n )\n else:\n encoder_embed_tokens = shared_encoder_embed_tokens\n lang_encoders[lang] = cls.single_model_cls.build_encoder(\n args, src_dict, embed_tokens=encoder_embed_tokens\n )\n return lang_encoders[lang]\n\n def get_decoder(lang, shared_decoder_embed_tokens=None):\n \"\"\"\n Fetch decoder for the input `lang`, which denotes the target\n language of the model\n \"\"\"\n if lang not in lang_decoders:\n tgt_dict = task.dicts[lang]\n if 
shared_decoder_embed_tokens is None:\n decoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(tgt_dict),\n embedding_dim=args.decoder_embed_dim,\n padding_idx=tgt_dict.pad(),\n freeze_embed=args.decoder_freeze_embed,\n )\n utils.load_embedding(\n embedding=decoder_embed_tokens,\n dictionary=tgt_dict,\n pretrained_embed=args.decoder_pretrained_embed,\n )\n else:\n decoder_embed_tokens = shared_decoder_embed_tokens\n lang_decoders[lang] = cls.single_model_cls.build_decoder(\n args, task.dicts[lang], tgt_dict, embed_tokens=decoder_embed_tokens\n )\n return lang_decoders[lang]\n\n # shared encoders/decoders (if applicable)\n shared_encoder, shared_decoder = None, None\n if args.share_encoders:\n shared_encoder = get_encoder(src_langs[0])\n if args.share_decoders:\n shared_decoder = get_decoder(tgt_langs[0])\n\n shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None\n if args.share_encoder_embeddings:\n shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(\n dicts=task.dicts,\n langs=src_langs,\n embed_dim=args.encoder_embed_dim,\n build_embedding=common_layers.build_embedding,\n pretrained_embed_path=None,\n )\n if args.share_decoder_embeddings:\n shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(\n dicts=task.dicts,\n langs=tgt_langs,\n embed_dim=args.decoder_embed_dim,\n build_embedding=common_layers.build_embedding,\n pretrained_embed_path=None,\n )\n encoders, decoders = OrderedDict(), OrderedDict()\n for lang_pair, src_lang, tgt_lang in zip(task.lang_pairs, src_langs, tgt_langs):\n encoders[lang_pair] = (\n shared_encoder\n if shared_encoder is not None\n else get_encoder(\n src_lang, shared_encoder_embed_tokens=shared_encoder_embed_tokens\n )\n )\n decoders[lang_pair] = (\n shared_decoder\n if shared_decoder is not None\n else get_decoder(\n tgt_lang, shared_decoder_embed_tokens=shared_decoder_embed_tokens\n )\n )\n\n return cls(task, encoders, decoders)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 19:16:16 2019
@author: pc
"""
from socket import *
import threading
import time
import cv2
import struct
import pickle
import zlib
import cartoon_edit
import face_capture_edit
import pencil_edit
class Video_Server(threading.Thread):
def __init__ (self, port, version, face_cap, view_version, face_shape_predictor, break_audio_aip, break_audio):
threading.Thread.__init__(self)
        self.setDaemon(True)  # make each thread exit automatically when the main thread ends, so the program is not left hanging on threads that cannot be destroyed
        self.ADDR = ('', port)  # socket address: listen on all interfaces at the given port
self.face_cap = face_cap
self.view_version = view_version
self.face_shape_predictor = face_shape_predictor
self.break_audio = break_audio
self.break_audio_aip = break_audio_aip
if version == 4:#IPV4 or IPV6
self.sock = socket(AF_INET, SOCK_STREAM)
else:
self.sock = socket(AF_INET6,SOCK_STREAM)
def __del__(self):
self.sock.close()
try:
            cv2.destroyAllWindows()
except:
pass
print("video close")
def run(self):
detector, predictor = face_capture_edit.face_init(self.face_shape_predictor)
print("face_capture_init is ready")
print("VIDEO server starts ...")
        self.sock.bind(self.ADDR)  # bind the socket to the configured port
        self.sock.listen(1)  # listen for a single incoming connection
        conn, addr = self.sock.accept()  # accept the client and get a dedicated socket for it
        print("remote VIDEO client success connected ...")
        data = "".encode("utf-8")  # receive buffer
        payload_size = struct.calcsize("L")  # size of the length header used to delimit each frame
cv2.namedWindow('Remote',cv2.WINDOW_NORMAL)
while True:
            while len(data) < payload_size:  # read until at least one length header is buffered; extra bytes are kept for the next frame
                data += conn.recv(81920)
            packed_size = data[:payload_size]  # slice off the length header of the next complete frame
            data = data[payload_size:]  # keep everything after the header
            msg_size = struct.unpack("L", packed_size)[0]  # unpack the header to get the frame size
while len(data) < msg_size:
data += conn.recv(89120)
zframe_data = data[:msg_size]
data = data[msg_size:]
frame_data = zlib.decompress(zframe_data)
frame = pickle.loads(frame_data)
if self.face_cap == 1:
frame_face = face_capture_edit.face_capture_e(frame.copy(),detector, predictor)
cv2.imshow("Face_capture", frame_face)
            if self.view_version == 0:  # leave the frame unchanged
                frame = frame
            elif self.view_version == 1:  # cartoon effect
                frame = cartoon_edit.cartoon_e(frame)
            elif self.view_version == 2:  # pencil-sketch effect
                frame = pencil_edit.rgb_to_sketch(frame)
            cv2.namedWindow("Remote", 0)
            cv2.resizeWindow("Remote", 640, 480)
cv2.imshow("Remote", frame)
if cv2.waitKey(1) & 0xff == ord('q'):
file_aip = open(self.break_audio_aip,'w')
file_audio = open(self.break_audio,'w')
break
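
# Usage sketch (added for illustration; every argument value below is a placeholder:
# port, IP version, face-capture flag, view filter, dlib predictor path, and the two
# break-signal file paths come from whatever launcher script drives this class):
if __name__ == "__main__":
    server = Video_Server(9999, 4, 1, 1,
                          "shape_predictor_68_face_landmarks.dat",
                          "break_audio_aip.txt", "break_audio.txt")
    server.start()
    server.join()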
|
normal
|
{
"blob_id": "6b138dabf57166ec971052fff7df89ae0346e083",
"index": 1582,
"step-1": "<mask token>\n\n\nclass Video_Server(threading.Thread):\n <mask token>\n <mask token>\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-2": "<mask token>\n\n\nclass Video_Server(threading.Thread):\n <mask token>\n\n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print('video close')\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-3": "<mask token>\n\n\nclass Video_Server(threading.Thread):\n\n def __init__(self, port, version, face_cap, view_version,\n face_shape_predictor, break_audio_aip, break_audio):\n threading.Thread.__init__(self)\n self.setDaemon(True)\n self.ADDR = '', port\n self.face_cap = face_cap\n self.view_version = view_version\n self.face_shape_predictor = face_shape_predictor\n self.break_audio = break_audio\n self.break_audio_aip = break_audio_aip\n if version == 4:\n self.sock = socket(AF_INET, SOCK_STREAM)\n else:\n self.sock = socket(AF_INET6, SOCK_STREAM)\n\n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print('video close')\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-4": "<mask token>\nfrom socket import *\nimport threading\nimport time\nimport cv2\nimport struct\nimport pickle\nimport zlib\nimport cartoon_edit\nimport face_capture_edit\nimport pencil_edit\n\n\nclass Video_Server(threading.Thread):\n\n def __init__(self, port, version, face_cap, view_version,\n face_shape_predictor, break_audio_aip, break_audio):\n threading.Thread.__init__(self)\n self.setDaemon(True)\n self.ADDR = '', port\n self.face_cap = face_cap\n self.view_version = view_version\n self.face_shape_predictor = face_shape_predictor\n self.break_audio = break_audio\n self.break_audio_aip = break_audio_aip\n if version == 4:\n self.sock = socket(AF_INET, SOCK_STREAM)\n else:\n self.sock = socket(AF_INET6, SOCK_STREAM)\n\n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print('video close')\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 1 19:16:16 2019\n\n@author: pc\n\"\"\"\n\nfrom socket import *\nimport threading\nimport time\nimport cv2\nimport struct\nimport pickle\nimport zlib\nimport cartoon_edit\nimport face_capture_edit\nimport pencil_edit\n\nclass Video_Server(threading.Thread):\n def __init__ (self, port, version, face_cap, view_version, face_shape_predictor, break_audio_aip, break_audio):\n threading.Thread.__init__(self)\n self.setDaemon(True)#使每个线程在主线程结束后自动退出,保证程序不会崩溃且无法销毁的情况\n self.ADDR = ('',port)#指定套接字端口号\n self.face_cap = face_cap\n self.view_version = view_version\n self.face_shape_predictor = face_shape_predictor\n self.break_audio = break_audio\n self.break_audio_aip = break_audio_aip\n if version == 4:#IPV4 or IPV6\n self.sock = socket(AF_INET, SOCK_STREAM)\n else:\n self.sock = socket(AF_INET6,SOCK_STREAM)\n \n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print(\"video close\")\n \n def run(self):\n detector, predictor = face_capture_edit.face_init(self.face_shape_predictor) \n print(\"face_capture_init is ready\")\n print(\"VIDEO server starts ...\")\n self.sock.bind(self.ADDR)#关联特定的端口号\n self.sock.listen(1)#监听\n conn, addr = self.sock.accept()#服务器端创建新的套接字,与用户端连接\n print(\"remote VIDEO client success connected ...\")\n data = \"\".encode(\"utf-8\")#接收数据\n payload_size = struct.calcsize(\"L\")#记录当前缓冲区的数据长度,准确提取每一帧\n cv2.namedWindow('Remote',cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:#超过数据流的部分被截取掉,和下一次合并整合,不足时将合并下一帧到该帧\n data +=conn.recv(81920)\n packed_size = data[:payload_size]#从最初剪到指定位置,剪切操作,剪切到一个完整的一帧\n data = data[payload_size:]#从指定位置剪切到末尾\n msg_size = struct.unpack(\"L\",packed_size)[0]#解压前面的头\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),detector, predictor)\n cv2.imshow(\"Face_capture\", frame_face)\n if self.view_version == 0:#不变样式\n frame = frame\n elif self.view_version == 1:#漫画\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:#铅笔画\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow(\"Remote\",0);\n cv2.resizeWindow(\"Remote\", 640, 480);\n cv2.imshow(\"Remote\", frame)\n if cv2.waitKey(1) & 0xff == ord('q'):\n file_aip = open(self.break_audio_aip,'w')\n file_audio = open(self.break_audio,'w')\n break\n ",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from sqlalchemy.orm import sessionmaker
from IMDB.spiders.models import IMDB_DATABASE, db_connect, create_table
class ScrapySpiderPipeline(object):
    # Opens the database connection, creates the relational table, and initializes the session factory.
def __init__(self):
engine = db_connect()
create_table(engine)
self.Session = sessionmaker(bind=engine)
    # Takes the items coming from the spider, maps them onto the model defined in models.py, and saves them to the database.
def process_item(self, item, spider):
session = self.Session()
ım_db = IMDB_DATABASE()
ım_db.MOVIE_CODE = item["MOVIE_CODE"]
ım_db.MOVIE_NAME = item["MOVIE_NAME"]
ım_db.YEAR = item["YEAR"]
ım_db.RANK = item["RANK"]
ım_db.IMDB_RATING = item["IMDB_RATING"]
        # The try/except block rolls back the session and re-raises any error that occurs while saving.
try:
session.add(ım_db)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return item
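
# To enable this pipeline, register it in the Scrapy project's settings.py.
# The module path below is an assumption based on the imports above (the project
# appears to be named IMDB); adjust it to wherever this file actually lives:
# ITEM_PIPELINES = {
#     "IMDB.pipelines.ScrapySpiderPipeline": 300,
# }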
|
normal
|
{
"blob_id": "16074fc1824a99b6fd1c4bf113d5b752308e8803",
"index": 5198,
"step-1": "<mask token>\n\n\nclass ScrapySpiderPipeline(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ScrapySpiderPipeline(object):\n\n def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ScrapySpiderPipeline(object):\n\n def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n\n def process_item(self, item, spider):\n session = self.Session()\n ım_db = IMDB_DATABASE()\n ım_db.MOVIE_CODE = item['MOVIE_CODE']\n ım_db.MOVIE_NAME = item['MOVIE_NAME']\n ım_db.YEAR = item['YEAR']\n ım_db.RANK = item['RANK']\n ım_db.IMDB_RATING = item['IMDB_RATING']\n try:\n session.add(ım_db)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item\n",
"step-4": "from sqlalchemy.orm import sessionmaker\nfrom IMDB.spiders.models import IMDB_DATABASE, db_connect, create_table\n\n\nclass ScrapySpiderPipeline(object):\n\n def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n\n def process_item(self, item, spider):\n session = self.Session()\n ım_db = IMDB_DATABASE()\n ım_db.MOVIE_CODE = item['MOVIE_CODE']\n ım_db.MOVIE_NAME = item['MOVIE_NAME']\n ım_db.YEAR = item['YEAR']\n ım_db.RANK = item['RANK']\n ım_db.IMDB_RATING = item['IMDB_RATING']\n try:\n session.add(ım_db)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item\n",
"step-5": "from sqlalchemy.orm import sessionmaker\nfrom IMDB.spiders.models import IMDB_DATABASE, db_connect, create_table\n\n\nclass ScrapySpiderPipeline(object):\n \n # Bu Fonksiyon Veritabanı bağlantısını ve oturum oluşturucuyu başlatır ve bir İlişkisel Veritabanı tablosu oluşturur.\n def __init__(self):\n \n engine = db_connect()\n create_table(engine)\n \n self.Session = sessionmaker(bind=engine)\n\n # Bu Fonksiyon Spiderdan Gelen Dataları Models.py Dosyasındaki Model Şablonuna Göre İşleme Sokarak Verileri Database İçine Kaydeder\n def process_item(self, item, spider):\n\n session = self.Session()\n \n ım_db = IMDB_DATABASE()\n \n ım_db.MOVIE_CODE = item[\"MOVIE_CODE\"]\n \n ım_db.MOVIE_NAME = item[\"MOVIE_NAME\"]\n\n ım_db.YEAR = item[\"YEAR\"]\n\n ım_db.RANK = item[\"RANK\"]\n\n ım_db.IMDB_RATING = item[\"IMDB_RATING\"]\n\n\n\n # Buradaki Try Except istisna blokları datalar kaydedilirken varsa oluşan hataları ayıklayarak bizlere mesaj olarak döner\n try:\n session.add(ım_db)\n session.commit()\n \n except:\n session.rollback()\n raise\n \n finally:\n session.close()\n\n return item\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# PROBLEM: Code is organized in a package and you want to import a submodule from one of the other
# package submodules without hardcoding the package name into the import statement.
# SOLUTION: Use a package-relative import.
# Absolute path (for comparison; a relative form is sketched after the example below)
from mypackage.A import grok
print(dir(grok))
grok.testA()
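
# Package-relative equivalent (illustrative; it assumes this module itself lives inside
# mypackage and is imported as part of the package rather than run as a top-level script):
# from .A import grok     # if this module sits directly inside mypackage
# from ..A import grok    # if it sits one level deeper, e.g. mypackage/B/consumer.py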
|
normal
|
{
"blob_id": "ad9facb9c8e552845df9171549f886f3e9cba193",
"index": 7544,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(dir(grok))\ngrok.testA()\n",
"step-3": "from mypackage.A import grok\nprint(dir(grok))\ngrok.testA()\n",
"step-4": "# PROBLEM: Code organized in package and want to import a submodule from one o the other pkg\n# submodules without hardcoding the package name into the import statement\n# SOLUTION: Use pkg-relative import\n\n# Absolete path\nfrom mypackage.A import grok\n\nprint(dir(grok))\ngrok.testA()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Runtime: 44 ms, faster than 62.95% of Python3 online submissions for Rotate List.
# Memory Usage: 13.9 MB, less than 6.05% of Python3 online submissions for Rotate List.
# Definition for singly-linked list (normally provided by LeetCode; included here so the
# snippet runs standalone).
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if head is None or head.next is None or k == 0:
return head
tmp, length = head, 1
while tmp.next:
tmp = tmp.next
length += 1
k = k % length
if k == 0: # don't need rotate
return head
fast = slow = head # fast and slow point
for _ in range(k):
fast = fast.next
while fast.next:
fast = fast.next
slow = slow.next
res = slow.next # ready result
slow.next = None
fast.next = head
return res
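
# Quick check sketch (added for illustration; build() and the sample values are not part
# of the original submission, and ListNode is the list class defined at the top):
def build(vals):
    dummy = tail = ListNode(0)
    for v in vals:
        tail.next = ListNode(v)
        tail = tail.next
    return dummy.next

if __name__ == "__main__":
    node = Solution().rotateRight(build([1, 2, 3, 4, 5]), 2)
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # expected: [4, 5, 1, 2, 3]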
|
normal
|
{
"blob_id": "a79c9799ed237a943ae3d249a4d66eb2f8693e83",
"index": 1896,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def rotateRight(self, head: ListNode, k: int) ->ListNode:\n if head is None or head.next is None or k == 0:\n return head\n tmp, length = head, 1\n while tmp.next:\n tmp = tmp.next\n length += 1\n k = k % length\n if k == 0:\n return head\n fast = slow = head\n for _ in range(k):\n fast = fast.next\n while fast.next:\n fast = fast.next\n slow = slow.next\n res = slow.next\n slow.next = None\n fast.next = head\n return res\n",
"step-4": "# Runtime: 44 ms, faster than 62.95% of Python3 online submissions for Rotate List.\r\n# Memory Usage: 13.9 MB, less than 6.05% of Python3 online submissions for Rotate List.\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n def rotateRight(self, head: ListNode, k: int) -> ListNode:\r\n if head is None or head.next is None or k == 0:\r\n return head\r\n tmp, length = head, 1\r\n while tmp.next:\r\n tmp = tmp.next\r\n length += 1\r\n k = k % length\r\n if k == 0: # don't need rotate\r\n return head\r\n fast = slow = head # fast and slow point\r\n for _ in range(k):\r\n fast = fast.next\r\n while fast.next:\r\n fast = fast.next\r\n slow = slow.next\r\n res = slow.next # ready result\r\n slow.next = None\r\n fast.next = head\r\n return res\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |