code (stringlengths 13 to 6.09M) | order_type (stringclasses, 2 values) | original_example (dict) | step_ids (listlengths 1 to 5) |
---|---|---|---|
import os
from enum import Enum
STAFF_CODE = os.getenv('STAFF_CODE', '20190607')
ADMIN_CODE = os.getenv('ADMIN_CODE', 'nerd-bear')
TEAM_NAMES = (
'밍크고래팀',
'혹등고래팀',
'대왕고래팀',
'향유고래팀',
)
TEAM_COUNT = 3
MAX_TEAM_MEMBER_COUNT = 10
class TIME_CHECK(Enum):
BEFORE_START = 0
DURING_TIME = 1
AFTER_END = 2
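# Added illustration (not part of the original module): a minimal usage sketch
# of the TIME_CHECK enum above. The check_time helper and the event start/end
# datetimes are hypothetical assumptions made only for this example.
from datetime import datetime

EVENT_START = datetime(2019, 6, 7, 9, 0)   # assumed event window
EVENT_END = datetime(2019, 6, 7, 18, 0)

def check_time(now):
    if now < EVENT_START:
        return TIME_CHECK.BEFORE_START
    if now <= EVENT_END:
        return TIME_CHECK.DURING_TIME
    return TIME_CHECK.AFTER_END

print(check_time(datetime(2019, 6, 7, 12, 0)))  # TIME_CHECK.DURING_TIME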
|
normal
|
{
"blob_id": "967984444d9e26452226b13f33c5afbc96b5fe2b",
"index": 3176,
"step-1": "<mask token>\n\n\nclass TIME_CHECK(Enum):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TIME_CHECK(Enum):\n BEFORE_START = 0\n DURING_TIME = 1\n AFTER_END = 2\n",
"step-3": "<mask token>\nSTAFF_CODE = os.getenv('STAFF_CODE', '20190607')\nADMIN_CODE = os.getenv('ADMIN_CODE', 'nerd-bear')\nTEAM_NAMES = '밍크고래팀', '혹등고래팀', '대왕고래팀', '향유고래팀'\nTEAM_COUNT = 3\nMAX_TEAM_MEMBER_COUNT = 10\n\n\nclass TIME_CHECK(Enum):\n BEFORE_START = 0\n DURING_TIME = 1\n AFTER_END = 2\n",
"step-4": "import os\nfrom enum import Enum\nSTAFF_CODE = os.getenv('STAFF_CODE', '20190607')\nADMIN_CODE = os.getenv('ADMIN_CODE', 'nerd-bear')\nTEAM_NAMES = '밍크고래팀', '혹등고래팀', '대왕고래팀', '향유고래팀'\nTEAM_COUNT = 3\nMAX_TEAM_MEMBER_COUNT = 10\n\n\nclass TIME_CHECK(Enum):\n BEFORE_START = 0\n DURING_TIME = 1\n AFTER_END = 2\n",
"step-5": "import os\nfrom enum import Enum\n\nSTAFF_CODE = os.getenv('STAFF_CODE', '20190607')\nADMIN_CODE = os.getenv('ADMIN_CODE', 'nerd-bear')\n\nTEAM_NAMES = (\n '밍크고래팀',\n '혹등고래팀',\n '대왕고래팀',\n '향유고래팀',\n)\nTEAM_COUNT = 3\nMAX_TEAM_MEMBER_COUNT = 10\n\n\nclass TIME_CHECK(Enum):\n BEFORE_START = 0\n DURING_TIME = 1\n AFTER_END = 2\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(version)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
API_URL = 'https://meta.decidim.org/api'
decidim_connector = DecidimConnector(API_URL)
version_reader = VersionReader(decidim_connector)
version = version_reader.process_query()
print(version)
participatory_processes_reader = ParticipatoryProcessesReader(decidim_connector
)
participatory_processes = participatory_processes_reader.process_query()
<|reserved_special_token_1|>
from api.decidim_connector import DecidimConnector
from api.participatory_processes_reader import ParticipatoryProcessesReader
from api.version_reader import VersionReader
API_URL = 'https://meta.decidim.org/api'
decidim_connector = DecidimConnector(API_URL)
version_reader = VersionReader(decidim_connector)
version = version_reader.process_query()
print(version)
participatory_processes_reader = ParticipatoryProcessesReader(decidim_connector
)
participatory_processes = participatory_processes_reader.process_query()
<|reserved_special_token_1|>
from api.decidim_connector import DecidimConnector
from api.participatory_processes_reader import ParticipatoryProcessesReader
from api.version_reader import VersionReader
API_URL = "https://meta.decidim.org/api"
decidim_connector = DecidimConnector(API_URL)
version_reader = VersionReader(decidim_connector)
version = version_reader.process_query()
print(version)
participatory_processes_reader = ParticipatoryProcessesReader(decidim_connector)
participatory_processes = participatory_processes_reader.process_query()
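# Added illustration: the api package used above is not shown in this example.
# As a rough sketch, the version lookup against a Decidim instance boils down
# to a single GraphQL POST; the query string and response layout below are
# assumptions about the Decidim GraphQL schema, not the actual VersionReader.
import requests

API_URL = 'https://meta.decidim.org/api'
query = '{ decidim { version } }'  # assumed query shape

response = requests.post(API_URL, json={'query': query}, timeout=10)
response.raise_for_status()
print(response.json()['data']['decidim']['version'])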
|
flexible
|
{
"blob_id": "88a469eba61fb6968db8cc5e1f93f12093b7f128",
"index": 6973,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(version)\n<mask token>\n",
"step-3": "<mask token>\nAPI_URL = 'https://meta.decidim.org/api'\ndecidim_connector = DecidimConnector(API_URL)\nversion_reader = VersionReader(decidim_connector)\nversion = version_reader.process_query()\nprint(version)\nparticipatory_processes_reader = ParticipatoryProcessesReader(decidim_connector\n )\nparticipatory_processes = participatory_processes_reader.process_query()\n",
"step-4": "from api.decidim_connector import DecidimConnector\nfrom api.participatory_processes_reader import ParticipatoryProcessesReader\nfrom api.version_reader import VersionReader\nAPI_URL = 'https://meta.decidim.org/api'\ndecidim_connector = DecidimConnector(API_URL)\nversion_reader = VersionReader(decidim_connector)\nversion = version_reader.process_query()\nprint(version)\nparticipatory_processes_reader = ParticipatoryProcessesReader(decidim_connector\n )\nparticipatory_processes = participatory_processes_reader.process_query()\n",
"step-5": "from api.decidim_connector import DecidimConnector\nfrom api.participatory_processes_reader import ParticipatoryProcessesReader\nfrom api.version_reader import VersionReader\n\nAPI_URL = \"https://meta.decidim.org/api\"\ndecidim_connector = DecidimConnector(API_URL)\nversion_reader = VersionReader(decidim_connector)\nversion = version_reader.process_query()\nprint(version)\n\nparticipatory_processes_reader = ParticipatoryProcessesReader(decidim_connector)\nparticipatory_processes = participatory_processes_reader.process_query()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
""" Tests for challenge116 """
import pytest
from robber import expect
from pemjh.challenge116 import main
@pytest.mark.parametrize('input, expected',
[
pytest.param(5, 12, marks=pytest.mark.example),
pytest.param(50, 20492570929,
marks=pytest.mark.regression)
])
def test_challenge116(input, expected):
""" Regression testing challenge116 """
expect(main(input)).to.eq(expected)
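# Added illustration: pemjh.challenge116 itself is not included here. One
# plausible main() that reproduces both parametrised values (counting tilings
# of an n-unit row with grey unit tiles and oblong tiles of a single length
# m in {2, 3, 4}, excluding the all-grey row) is sketched below; it is an
# assumption, not the actual module under test.
def main(n):
    total = 0
    for m in (2, 3, 4):
        ways = [1] * (n + 1)           # ways[k] = tilings of a length-k row
        for k in range(m, n + 1):
            ways[k] = ways[k - 1] + ways[k - m]
        total += ways[n] - 1           # drop the arrangement with no coloured tile
    return total

assert main(5) == 12
assert main(50) == 20492570929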
|
normal
|
{
"blob_id": "c9279434736d4e94564170fe98163ad3be9470b1",
"index": 4844,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('input, expected', [pytest.param(5, 12, marks=\n pytest.mark.example), pytest.param(50, 20492570929, marks=pytest.mark.\n regression)])\ndef test_challenge116(input, expected):\n \"\"\" Regression testing challenge116 \"\"\"\n expect(main(input)).to.eq(expected)\n",
"step-3": "<mask token>\nimport pytest\nfrom robber import expect\nfrom pemjh.challenge116 import main\n\n\[email protected]('input, expected', [pytest.param(5, 12, marks=\n pytest.mark.example), pytest.param(50, 20492570929, marks=pytest.mark.\n regression)])\ndef test_challenge116(input, expected):\n \"\"\" Regression testing challenge116 \"\"\"\n expect(main(input)).to.eq(expected)\n",
"step-4": "\"\"\" Tests for challenge116 \"\"\"\r\nimport pytest\r\nfrom robber import expect\r\nfrom pemjh.challenge116 import main\r\n\r\n\r\[email protected]('input, expected',\r\n [\r\n pytest.param(5, 12, marks=pytest.mark.example),\r\n pytest.param(50, 20492570929,\r\n marks=pytest.mark.regression)\r\n ])\r\ndef test_challenge116(input, expected):\r\n \"\"\" Regression testing challenge116 \"\"\"\r\n expect(main(input)).to.eq(expected)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class VGGNet(object):
def __init__(self, checkpoint_name='VGGNet'):
self.config = {'image_shape': [256, 256, 3], 'input_shape': [224,
224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps':
680, 'trn_nb_epochs': 200, 'trn_transform': True,
'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':
'data/train-jpg', 'tst_imgs_csv':
'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}
self.checkpoint_name = checkpoint_name
self.imgs = []
self.lbls = []
self.net = None
self.rng = np.random
@property
def cpdir(self):
cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str
(x) for x in self.config['input_shape']]))
if not path.exists(cpdir):
mkdir(cpdir)
return cpdir
def create_net(self):
x = inputs = Input(shape=self.config['input_shape'])
vgg = VGG19(include_top=False, input_tensor=x)
outputs = Flatten()(vgg.output)
outputs = Dropout(0.1)(outputs)
outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(
outputs)
def true_pos(yt, yp):
return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))
def pred_pos(yt, yp):
return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))
def F2(yt, yp):
yt, yp = K.round(yt), K.round(yp)
tp = K.sum(yt * yp)
fp = K.sum(K.clip(yp - yt, 0, 1))
fn = K.sum(K.clip(yt - yp, 0, 1))
p = tp / (tp + fp)
r = tp / (tp + fn)
b = 2.0
return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))
self.net = Model(inputs, outputs)
self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',
metrics=['binary_accuracy', F2, true_pos, pred_pos])
self.net.summary()
plot_model(self.net, to_file='%s/net.png' % self.cpdir)
return
<|reserved_special_token_0|>
def get_mean_img(self, imgs_paths, mean_img_path):
"""Compute the mean image from the given paths and save it to the given path."""
logger = logging.getLogger(funcname())
if not path.exists(mean_img_path):
mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)
for idx, img_path in enumerate(imgs_paths):
mean_img += imread(img_path, mode='RGB').astype(np.float32
) / len(imgs_paths)
if idx % 1000 == 0:
logger.info('%d/%d' % (idx, len(imgs_paths)))
imsave(mean_img_path, mean_img)
return imread(mean_img_path)
def train_batch_gen(self, imgs_csv, imgs_dir, transform):
logger = logging.getLogger(funcname())
df = pd.read_csv(imgs_csv)
imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[
'image_name'].values]
tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]
mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %
self.cpdir)
mean_img = mean_img.astype(np.float32) / 255.0
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32
) / 255.0 - mean_img_mean
while True:
imgs_batch = np.zeros([self.config['batch_size']] + self.config
['input_shape'])
tags_batch = np.zeros([self.config['batch_size']] + self.config
['output_shape'])
random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),
len(imgs_paths)))
for batch_idx in range(self.config['batch_size']):
data_idx = next(random_idxs)
img = imread(imgs_paths[data_idx], mode='RGB')
img = img_preprocess(img)
img = resize(img, self.config['input_shape'],
preserve_range=True, mode='constant')
if transform:
img = random_transforms(img, nb_min=0, nb_max=6)
imgs_batch[batch_idx] = img
tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])
yield imgs_batch, tags_batch
def predict(self, img_batch):
imgs_paths = listdir(self.config['trn_imgs_dir'])
mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir
mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.
float32) / 255.0
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32
) / 255.0 - mean_img_mean
for idx in range(len(img_batch)):
img_batch[idx] = img_preprocess(img_batch[idx])
tags_pred = self.net.predict(img_batch)
tags_pred = tags_pred.round().astype(np.uint8)
return tags_pred
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VGGNet(object):
def __init__(self, checkpoint_name='VGGNet'):
self.config = {'image_shape': [256, 256, 3], 'input_shape': [224,
224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps':
680, 'trn_nb_epochs': 200, 'trn_transform': True,
'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':
'data/train-jpg', 'tst_imgs_csv':
'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}
self.checkpoint_name = checkpoint_name
self.imgs = []
self.lbls = []
self.net = None
self.rng = np.random
@property
def cpdir(self):
cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str
(x) for x in self.config['input_shape']]))
if not path.exists(cpdir):
mkdir(cpdir)
return cpdir
def create_net(self):
x = inputs = Input(shape=self.config['input_shape'])
vgg = VGG19(include_top=False, input_tensor=x)
outputs = Flatten()(vgg.output)
outputs = Dropout(0.1)(outputs)
outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(
outputs)
def true_pos(yt, yp):
return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))
def pred_pos(yt, yp):
return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))
def F2(yt, yp):
yt, yp = K.round(yt), K.round(yp)
tp = K.sum(yt * yp)
fp = K.sum(K.clip(yp - yt, 0, 1))
fn = K.sum(K.clip(yt - yp, 0, 1))
p = tp / (tp + fp)
r = tp / (tp + fn)
b = 2.0
return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))
self.net = Model(inputs, outputs)
self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',
metrics=['binary_accuracy', F2, true_pos, pred_pos])
self.net.summary()
plot_model(self.net, to_file='%s/net.png' % self.cpdir)
return
def train(self):
batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.
config['trn_imgs_dir'], self.config['trn_transform'])
cb = [HistoryPlot('%s/history.png' % self.cpdir), CSVLogger(
'%s/history.csv' % self.cpdir), ModelCheckpoint(
'%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,
save_best_only=True, mode='min', save_weights_only=True),
ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',
verbose=1, save_best_only=True, mode='max', save_weights_only=
True), ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2,
epsilon=0.005, verbose=1, mode='min'), EarlyStopping(monitor=
'F2', min_delta=0.01, patience=10, verbose=1, mode='max')]
self.net.fit_generator(batch_gen, steps_per_epoch=self.config[
'trn_steps'], verbose=1, callbacks=cb, epochs=self.config[
'trn_nb_epochs'], workers=2, pickle_safe=True)
return
def get_mean_img(self, imgs_paths, mean_img_path):
"""Compute the mean image from the given paths and save it to the given path."""
logger = logging.getLogger(funcname())
if not path.exists(mean_img_path):
mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)
for idx, img_path in enumerate(imgs_paths):
mean_img += imread(img_path, mode='RGB').astype(np.float32
) / len(imgs_paths)
if idx % 1000 == 0:
logger.info('%d/%d' % (idx, len(imgs_paths)))
imsave(mean_img_path, mean_img)
return imread(mean_img_path)
def train_batch_gen(self, imgs_csv, imgs_dir, transform):
logger = logging.getLogger(funcname())
df = pd.read_csv(imgs_csv)
imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[
'image_name'].values]
tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]
mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %
self.cpdir)
mean_img = mean_img.astype(np.float32) / 255.0
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32
) / 255.0 - mean_img_mean
while True:
imgs_batch = np.zeros([self.config['batch_size']] + self.config
['input_shape'])
tags_batch = np.zeros([self.config['batch_size']] + self.config
['output_shape'])
random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),
len(imgs_paths)))
for batch_idx in range(self.config['batch_size']):
data_idx = next(random_idxs)
img = imread(imgs_paths[data_idx], mode='RGB')
img = img_preprocess(img)
img = resize(img, self.config['input_shape'],
preserve_range=True, mode='constant')
if transform:
img = random_transforms(img, nb_min=0, nb_max=6)
imgs_batch[batch_idx] = img
tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])
yield imgs_batch, tags_batch
def predict(self, img_batch):
imgs_paths = listdir(self.config['trn_imgs_dir'])
mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir
mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.
float32) / 255.0
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32
) / 255.0 - mean_img_mean
for idx in range(len(img_batch)):
img_batch[idx] = img_preprocess(img_batch[idx])
tags_pred = self.net.predict(img_batch)
tags_pred = tags_pred.round().astype(np.uint8)
return tags_pred
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(317)
<|reserved_special_token_0|>
sys.path.append('.')
<|reserved_special_token_0|>
class VGGNet(object):
def __init__(self, checkpoint_name='VGGNet'):
self.config = {'image_shape': [256, 256, 3], 'input_shape': [224,
224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps':
680, 'trn_nb_epochs': 200, 'trn_transform': True,
'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':
'data/train-jpg', 'tst_imgs_csv':
'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}
self.checkpoint_name = checkpoint_name
self.imgs = []
self.lbls = []
self.net = None
self.rng = np.random
@property
def cpdir(self):
cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str
(x) for x in self.config['input_shape']]))
if not path.exists(cpdir):
mkdir(cpdir)
return cpdir
def create_net(self):
x = inputs = Input(shape=self.config['input_shape'])
vgg = VGG19(include_top=False, input_tensor=x)
outputs = Flatten()(vgg.output)
outputs = Dropout(0.1)(outputs)
outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(
outputs)
def true_pos(yt, yp):
return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))
def pred_pos(yt, yp):
return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))
def F2(yt, yp):
yt, yp = K.round(yt), K.round(yp)
tp = K.sum(yt * yp)
fp = K.sum(K.clip(yp - yt, 0, 1))
fn = K.sum(K.clip(yt - yp, 0, 1))
p = tp / (tp + fp)
r = tp / (tp + fn)
b = 2.0
return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))
self.net = Model(inputs, outputs)
self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',
metrics=['binary_accuracy', F2, true_pos, pred_pos])
self.net.summary()
plot_model(self.net, to_file='%s/net.png' % self.cpdir)
return
def train(self):
batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.
config['trn_imgs_dir'], self.config['trn_transform'])
cb = [HistoryPlot('%s/history.png' % self.cpdir), CSVLogger(
'%s/history.csv' % self.cpdir), ModelCheckpoint(
'%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,
save_best_only=True, mode='min', save_weights_only=True),
ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',
verbose=1, save_best_only=True, mode='max', save_weights_only=
True), ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2,
epsilon=0.005, verbose=1, mode='min'), EarlyStopping(monitor=
'F2', min_delta=0.01, patience=10, verbose=1, mode='max')]
self.net.fit_generator(batch_gen, steps_per_epoch=self.config[
'trn_steps'], verbose=1, callbacks=cb, epochs=self.config[
'trn_nb_epochs'], workers=2, pickle_safe=True)
return
def get_mean_img(self, imgs_paths, mean_img_path):
"""Compute the mean image from the given paths and save it to the given path."""
logger = logging.getLogger(funcname())
if not path.exists(mean_img_path):
mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)
for idx, img_path in enumerate(imgs_paths):
mean_img += imread(img_path, mode='RGB').astype(np.float32
) / len(imgs_paths)
if idx % 1000 == 0:
logger.info('%d/%d' % (idx, len(imgs_paths)))
imsave(mean_img_path, mean_img)
return imread(mean_img_path)
def train_batch_gen(self, imgs_csv, imgs_dir, transform):
logger = logging.getLogger(funcname())
df = pd.read_csv(imgs_csv)
imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[
'image_name'].values]
tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]
mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %
self.cpdir)
mean_img = mean_img.astype(np.float32) / 255.0
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32
) / 255.0 - mean_img_mean
while True:
imgs_batch = np.zeros([self.config['batch_size']] + self.config
['input_shape'])
tags_batch = np.zeros([self.config['batch_size']] + self.config
['output_shape'])
random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),
len(imgs_paths)))
for batch_idx in range(self.config['batch_size']):
data_idx = next(random_idxs)
img = imread(imgs_paths[data_idx], mode='RGB')
img = img_preprocess(img)
img = resize(img, self.config['input_shape'],
preserve_range=True, mode='constant')
if transform:
img = random_transforms(img, nb_min=0, nb_max=6)
imgs_batch[batch_idx] = img
tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])
yield imgs_batch, tags_batch
def predict(self, img_batch):
imgs_paths = listdir(self.config['trn_imgs_dir'])
mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir
mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.
float32) / 255.0
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32
) / 255.0 - mean_img_mean
for idx in range(len(img_batch)):
img_batch[idx] = img_preprocess(img_batch[idx])
tags_pred = self.net.predict(img_batch)
tags_pred = tags_pred.round().astype(np.uint8)
return tags_pred
if __name__ == '__main__':
from planet.model_runner import model_runner
model = VGGNet()
model_runner(model)
<|reserved_special_token_1|>
import numpy as np
np.random.seed(317)
from glob import glob
from itertools import cycle
from keras.applications.vgg19 import VGG19
from keras.optimizers import Adam
from keras.models import Model
from keras.layers import Input, BatchNormalization, Flatten, Dropout, Dense
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping, Callback
from keras.losses import kullback_leibler_divergence
from math import ceil
from os import path, mkdir, listdir
from skimage.transform import resize
from scipy.misc import imread, imsave
from time import time
import argparse
import logging
import keras.backend as K
import pandas as pd
import tifffile as tif
import sys
sys.path.append('.')
from planet.utils.data_utils import tagset_to_ints, random_transforms
from planet.utils.keras_utils import HistoryPlot
from planet.utils.runtime import funcname
class VGGNet(object):
def __init__(self, checkpoint_name='VGGNet'):
self.config = {'image_shape': [256, 256, 3], 'input_shape': [224,
224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps':
680, 'trn_nb_epochs': 200, 'trn_transform': True,
'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':
'data/train-jpg', 'tst_imgs_csv':
'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}
self.checkpoint_name = checkpoint_name
self.imgs = []
self.lbls = []
self.net = None
self.rng = np.random
@property
def cpdir(self):
cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str
(x) for x in self.config['input_shape']]))
if not path.exists(cpdir):
mkdir(cpdir)
return cpdir
def create_net(self):
x = inputs = Input(shape=self.config['input_shape'])
vgg = VGG19(include_top=False, input_tensor=x)
outputs = Flatten()(vgg.output)
outputs = Dropout(0.1)(outputs)
outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(
outputs)
def true_pos(yt, yp):
return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))
def pred_pos(yt, yp):
return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))
def F2(yt, yp):
yt, yp = K.round(yt), K.round(yp)
tp = K.sum(yt * yp)
fp = K.sum(K.clip(yp - yt, 0, 1))
fn = K.sum(K.clip(yt - yp, 0, 1))
p = tp / (tp + fp)
r = tp / (tp + fn)
b = 2.0
return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))
self.net = Model(inputs, outputs)
self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',
metrics=['binary_accuracy', F2, true_pos, pred_pos])
self.net.summary()
plot_model(self.net, to_file='%s/net.png' % self.cpdir)
return
def train(self):
batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.
config['trn_imgs_dir'], self.config['trn_transform'])
cb = [HistoryPlot('%s/history.png' % self.cpdir), CSVLogger(
'%s/history.csv' % self.cpdir), ModelCheckpoint(
'%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,
save_best_only=True, mode='min', save_weights_only=True),
ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',
verbose=1, save_best_only=True, mode='max', save_weights_only=
True), ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2,
epsilon=0.005, verbose=1, mode='min'), EarlyStopping(monitor=
'F2', min_delta=0.01, patience=10, verbose=1, mode='max')]
self.net.fit_generator(batch_gen, steps_per_epoch=self.config[
'trn_steps'], verbose=1, callbacks=cb, epochs=self.config[
'trn_nb_epochs'], workers=2, pickle_safe=True)
return
def get_mean_img(self, imgs_paths, mean_img_path):
"""Compute the mean image from the given paths and save it to the given path."""
logger = logging.getLogger(funcname())
if not path.exists(mean_img_path):
mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)
for idx, img_path in enumerate(imgs_paths):
mean_img += imread(img_path, mode='RGB').astype(np.float32
) / len(imgs_paths)
if idx % 1000 == 0:
logger.info('%d/%d' % (idx, len(imgs_paths)))
imsave(mean_img_path, mean_img)
return imread(mean_img_path)
def train_batch_gen(self, imgs_csv, imgs_dir, transform):
logger = logging.getLogger(funcname())
df = pd.read_csv(imgs_csv)
imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[
'image_name'].values]
tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]
mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %
self.cpdir)
mean_img = mean_img.astype(np.float32) / 255.0
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32
) / 255.0 - mean_img_mean
while True:
imgs_batch = np.zeros([self.config['batch_size']] + self.config
['input_shape'])
tags_batch = np.zeros([self.config['batch_size']] + self.config
['output_shape'])
random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),
len(imgs_paths)))
for batch_idx in range(self.config['batch_size']):
data_idx = next(random_idxs)
img = imread(imgs_paths[data_idx], mode='RGB')
img = img_preprocess(img)
img = resize(img, self.config['input_shape'],
preserve_range=True, mode='constant')
if transform:
img = random_transforms(img, nb_min=0, nb_max=6)
imgs_batch[batch_idx] = img
tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])
yield imgs_batch, tags_batch
def predict(self, img_batch):
imgs_paths = listdir(self.config['trn_imgs_dir'])
mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir
mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.
float32) / 255.0
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32
) / 255.0 - mean_img_mean
for idx in range(len(img_batch)):
img_batch[idx] = img_preprocess(img_batch[idx])
tags_pred = self.net.predict(img_batch)
tags_pred = tags_pred.round().astype(np.uint8)
return tags_pred
if __name__ == '__main__':
from planet.model_runner import model_runner
model = VGGNet()
model_runner(model)
<|reserved_special_token_1|>
# VGGNet
import numpy as np
np.random.seed(317)
from glob import glob
from itertools import cycle
from keras.applications.vgg19 import VGG19
from keras.optimizers import Adam
from keras.models import Model
from keras.layers import Input, BatchNormalization, Flatten, Dropout, Dense
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping, Callback
from keras.losses import kullback_leibler_divergence
from math import ceil
from os import path, mkdir, listdir
from skimage.transform import resize
from scipy.misc import imread, imsave
from time import time
import argparse
import logging
import keras.backend as K
import pandas as pd
import tifffile as tif
import sys
sys.path.append('.')
from planet.utils.data_utils import tagset_to_ints, random_transforms
from planet.utils.keras_utils import HistoryPlot
from planet.utils.runtime import funcname
class VGGNet(object):
def __init__(self, checkpoint_name='VGGNet'):
self.config = {
'image_shape': [256, 256, 3],
'input_shape': [224, 224, 3],
'output_shape': [17, ],
'batch_size': 60,
'trn_steps': 680,
'trn_nb_epochs': 200,
'trn_transform': True,
'trn_imgs_csv': 'data/train_v2.csv',
'trn_imgs_dir': 'data/train-jpg',
'tst_imgs_csv': 'data/sample_submission_v2.csv',
'tst_imgs_dir': 'data/test-jpg'
}
self.checkpoint_name = checkpoint_name
self.imgs = []
self.lbls = []
self.net = None
self.rng = np.random
@property
def cpdir(self):
cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str(x) for x in self.config['input_shape']]))
if not path.exists(cpdir):
mkdir(cpdir)
return cpdir
def create_net(self):
x = inputs = Input(shape=self.config['input_shape'])
vgg = VGG19(include_top=False, input_tensor=x)
outputs = Flatten()(vgg.output)
outputs = Dropout(0.1)(outputs)
outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(outputs)
def true_pos(yt, yp):
return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))
def pred_pos(yt, yp):
return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))
def F2(yt, yp):
yt, yp = K.round(yt), K.round(yp)
tp = K.sum(yt * yp)
fp = K.sum(K.clip(yp - yt, 0, 1))
fn = K.sum(K.clip(yt - yp, 0, 1))
p = tp / (tp + fp)
r = tp / (tp + fn)
b = 2.0
return (1 + b**2) * ((p * r) / (b**2 * p + r + K.epsilon()))
self.net = Model(inputs, outputs)
self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',
metrics=['binary_accuracy', F2, true_pos, pred_pos])
self.net.summary()
plot_model(self.net, to_file='%s/net.png' % self.cpdir)
return
def train(self):
batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.config[
'trn_imgs_dir'], self.config['trn_transform'])
cb = [
HistoryPlot('%s/history.png' % self.cpdir),
CSVLogger('%s/history.csv' % self.cpdir),
ModelCheckpoint('%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,
save_best_only=True, mode='min', save_weights_only=True),
ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',
verbose=1, save_best_only=True, mode='max', save_weights_only=True),
ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2, epsilon=0.005, verbose=1, mode='min'),
EarlyStopping(monitor='F2', min_delta=0.01, patience=10, verbose=1, mode='max')
]
self.net.fit_generator(batch_gen, steps_per_epoch=self.config['trn_steps'], verbose=1, callbacks=cb,
epochs=self.config['trn_nb_epochs'], workers=2, pickle_safe=True)
return
def get_mean_img(self, imgs_paths, mean_img_path):
'''Compute the mean image from the given paths and save it to the given path.'''
logger = logging.getLogger(funcname())
if not path.exists(mean_img_path):
mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)
for idx, img_path in enumerate(imgs_paths):
mean_img += imread(img_path, mode='RGB').astype(np.float32) / len(imgs_paths)
if idx % 1000 == 0:
logger.info('%d/%d' % (idx, len(imgs_paths)))
imsave(mean_img_path, mean_img)
return imread(mean_img_path)
def train_batch_gen(self, imgs_csv, imgs_dir, transform):
logger = logging.getLogger(funcname())
# Read the CSV and extract image names and tags.
df = pd.read_csv(imgs_csv)
imgs_paths = ['%s/%s.jpg' % (imgs_dir, n) for n in df['image_name'].values]
tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]
# Compute the mean image for pre-processing.
mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' % self.cpdir)
mean_img = mean_img.astype(np.float32) / 255.
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32) / 255. - mean_img_mean
while True:
imgs_batch = np.zeros([self.config['batch_size'], ] + self.config['input_shape'])
tags_batch = np.zeros([self.config['batch_size'], ] + self.config['output_shape'])
random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)), len(imgs_paths)))
for batch_idx in range(self.config['batch_size']):
data_idx = next(random_idxs)
img = imread(imgs_paths[data_idx], mode='RGB')
img = img_preprocess(img)
img = resize(img, self.config['input_shape'], preserve_range=True, mode='constant')
if transform:
img = random_transforms(img, nb_min=0, nb_max=6)
imgs_batch[batch_idx] = img
tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])
yield imgs_batch, tags_batch
def predict(self, img_batch):
# Get the mean image
imgs_paths = listdir(self.config['trn_imgs_dir'])
mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir
mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.float32) / 255.
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32) / 255. - mean_img_mean
for idx in range(len(img_batch)):
img_batch[idx] = img_preprocess(img_batch[idx])
tags_pred = self.net.predict(img_batch)
tags_pred = tags_pred.round().astype(np.uint8)
return tags_pred
if __name__ == "__main__":
from planet.model_runner import model_runner
model = VGGNet()
model_runner(model)
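# Added illustration: the F2 metric defined inside create_net above is the
# F-beta score with beta = 2 on rounded predictions. The NumPy re-statement
# below is only an illustrative check, not part of the original VGGNet code.
import numpy as np

def f2_numpy(yt, yp, eps=1e-7):
    yt, yp = np.round(yt), np.round(yp)
    tp = np.sum(yt * yp)
    fp = np.sum(np.clip(yp - yt, 0, 1))
    fn = np.sum(np.clip(yt - yp, 0, 1))
    p = tp / (tp + fp)
    r = tp / (tp + fn)
    b = 2.0
    return (1 + b ** 2) * (p * r) / (b ** 2 * p + r + eps)

yt = np.array([1.0, 1.0, 0.0, 1.0, 0.0])
yp = np.array([1.0, 0.0, 0.0, 1.0, 1.0])
print(f2_numpy(yt, yp))  # precision = recall = 2/3, so F2 is about 0.667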
|
flexible
|
{
"blob_id": "c6a4d566460a06504abf7e2c54be4f2ea36e01fb",
"index": 7735,
"step-1": "<mask token>\n\n\nclass VGGNet(object):\n\n def __init__(self, checkpoint_name='VGGNet'):\n self.config = {'image_shape': [256, 256, 3], 'input_shape': [224, \n 224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps': \n 680, 'trn_nb_epochs': 200, 'trn_transform': True,\n 'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':\n 'data/train-jpg', 'tst_imgs_csv':\n 'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}\n self.checkpoint_name = checkpoint_name\n self.imgs = []\n self.lbls = []\n self.net = None\n self.rng = np.random\n\n @property\n def cpdir(self):\n cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str\n (x) for x in self.config['input_shape']]))\n if not path.exists(cpdir):\n mkdir(cpdir)\n return cpdir\n\n def create_net(self):\n x = inputs = Input(shape=self.config['input_shape'])\n vgg = VGG19(include_top=False, input_tensor=x)\n outputs = Flatten()(vgg.output)\n outputs = Dropout(0.1)(outputs)\n outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(\n outputs)\n\n def true_pos(yt, yp):\n return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))\n\n def pred_pos(yt, yp):\n return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))\n\n def F2(yt, yp):\n yt, yp = K.round(yt), K.round(yp)\n tp = K.sum(yt * yp)\n fp = K.sum(K.clip(yp - yt, 0, 1))\n fn = K.sum(K.clip(yt - yp, 0, 1))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n b = 2.0\n return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))\n self.net = Model(inputs, outputs)\n self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',\n metrics=['binary_accuracy', F2, true_pos, pred_pos])\n self.net.summary()\n plot_model(self.net, to_file='%s/net.png' % self.cpdir)\n return\n <mask token>\n\n def get_mean_img(self, imgs_paths, mean_img_path):\n \"\"\"Compute the mean image from the given paths and save it to the given path.\"\"\"\n logger = logging.getLogger(funcname())\n if not path.exists(mean_img_path):\n mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)\n for idx, img_path in enumerate(imgs_paths):\n mean_img += imread(img_path, mode='RGB').astype(np.float32\n ) / len(imgs_paths)\n if idx % 1000 == 0:\n logger.info('%d/%d' % (idx, len(imgs_paths)))\n imsave(mean_img_path, mean_img)\n return imread(mean_img_path)\n\n def train_batch_gen(self, imgs_csv, imgs_dir, transform):\n logger = logging.getLogger(funcname())\n df = pd.read_csv(imgs_csv)\n imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[\n 'image_name'].values]\n tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]\n mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %\n self.cpdir)\n mean_img = mean_img.astype(np.float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n while True:\n imgs_batch = np.zeros([self.config['batch_size']] + self.config\n ['input_shape'])\n tags_batch = np.zeros([self.config['batch_size']] + self.config\n ['output_shape'])\n random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),\n len(imgs_paths)))\n for batch_idx in range(self.config['batch_size']):\n data_idx = next(random_idxs)\n img = imread(imgs_paths[data_idx], mode='RGB')\n img = img_preprocess(img)\n img = resize(img, self.config['input_shape'],\n preserve_range=True, mode='constant')\n if transform:\n img = random_transforms(img, nb_min=0, nb_max=6)\n imgs_batch[batch_idx] = img\n tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])\n yield imgs_batch, tags_batch\n\n def 
predict(self, img_batch):\n imgs_paths = listdir(self.config['trn_imgs_dir'])\n mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir\n mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.\n float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n for idx in range(len(img_batch)):\n img_batch[idx] = img_preprocess(img_batch[idx])\n tags_pred = self.net.predict(img_batch)\n tags_pred = tags_pred.round().astype(np.uint8)\n return tags_pred\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass VGGNet(object):\n\n def __init__(self, checkpoint_name='VGGNet'):\n self.config = {'image_shape': [256, 256, 3], 'input_shape': [224, \n 224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps': \n 680, 'trn_nb_epochs': 200, 'trn_transform': True,\n 'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':\n 'data/train-jpg', 'tst_imgs_csv':\n 'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}\n self.checkpoint_name = checkpoint_name\n self.imgs = []\n self.lbls = []\n self.net = None\n self.rng = np.random\n\n @property\n def cpdir(self):\n cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str\n (x) for x in self.config['input_shape']]))\n if not path.exists(cpdir):\n mkdir(cpdir)\n return cpdir\n\n def create_net(self):\n x = inputs = Input(shape=self.config['input_shape'])\n vgg = VGG19(include_top=False, input_tensor=x)\n outputs = Flatten()(vgg.output)\n outputs = Dropout(0.1)(outputs)\n outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(\n outputs)\n\n def true_pos(yt, yp):\n return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))\n\n def pred_pos(yt, yp):\n return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))\n\n def F2(yt, yp):\n yt, yp = K.round(yt), K.round(yp)\n tp = K.sum(yt * yp)\n fp = K.sum(K.clip(yp - yt, 0, 1))\n fn = K.sum(K.clip(yt - yp, 0, 1))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n b = 2.0\n return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))\n self.net = Model(inputs, outputs)\n self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',\n metrics=['binary_accuracy', F2, true_pos, pred_pos])\n self.net.summary()\n plot_model(self.net, to_file='%s/net.png' % self.cpdir)\n return\n\n def train(self):\n batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.\n config['trn_imgs_dir'], self.config['trn_transform'])\n cb = [HistoryPlot('%s/history.png' % self.cpdir), CSVLogger(\n '%s/history.csv' % self.cpdir), ModelCheckpoint(\n '%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True),\n ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',\n verbose=1, save_best_only=True, mode='max', save_weights_only=\n True), ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2,\n epsilon=0.005, verbose=1, mode='min'), EarlyStopping(monitor=\n 'F2', min_delta=0.01, patience=10, verbose=1, mode='max')]\n self.net.fit_generator(batch_gen, steps_per_epoch=self.config[\n 'trn_steps'], verbose=1, callbacks=cb, epochs=self.config[\n 'trn_nb_epochs'], workers=2, pickle_safe=True)\n return\n\n def get_mean_img(self, imgs_paths, mean_img_path):\n \"\"\"Compute the mean image from the given paths and save it to the given path.\"\"\"\n logger = logging.getLogger(funcname())\n if not path.exists(mean_img_path):\n mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)\n for idx, img_path in enumerate(imgs_paths):\n mean_img += imread(img_path, mode='RGB').astype(np.float32\n ) / len(imgs_paths)\n if idx % 1000 == 0:\n logger.info('%d/%d' % (idx, len(imgs_paths)))\n imsave(mean_img_path, mean_img)\n return imread(mean_img_path)\n\n def train_batch_gen(self, imgs_csv, imgs_dir, transform):\n logger = logging.getLogger(funcname())\n df = pd.read_csv(imgs_csv)\n imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[\n 'image_name'].values]\n tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]\n mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %\n self.cpdir)\n mean_img = 
mean_img.astype(np.float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n while True:\n imgs_batch = np.zeros([self.config['batch_size']] + self.config\n ['input_shape'])\n tags_batch = np.zeros([self.config['batch_size']] + self.config\n ['output_shape'])\n random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),\n len(imgs_paths)))\n for batch_idx in range(self.config['batch_size']):\n data_idx = next(random_idxs)\n img = imread(imgs_paths[data_idx], mode='RGB')\n img = img_preprocess(img)\n img = resize(img, self.config['input_shape'],\n preserve_range=True, mode='constant')\n if transform:\n img = random_transforms(img, nb_min=0, nb_max=6)\n imgs_batch[batch_idx] = img\n tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])\n yield imgs_batch, tags_batch\n\n def predict(self, img_batch):\n imgs_paths = listdir(self.config['trn_imgs_dir'])\n mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir\n mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.\n float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n for idx in range(len(img_batch)):\n img_batch[idx] = img_preprocess(img_batch[idx])\n tags_pred = self.net.predict(img_batch)\n tags_pred = tags_pred.round().astype(np.uint8)\n return tags_pred\n\n\n<mask token>\n",
"step-3": "<mask token>\nnp.random.seed(317)\n<mask token>\nsys.path.append('.')\n<mask token>\n\n\nclass VGGNet(object):\n\n def __init__(self, checkpoint_name='VGGNet'):\n self.config = {'image_shape': [256, 256, 3], 'input_shape': [224, \n 224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps': \n 680, 'trn_nb_epochs': 200, 'trn_transform': True,\n 'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':\n 'data/train-jpg', 'tst_imgs_csv':\n 'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}\n self.checkpoint_name = checkpoint_name\n self.imgs = []\n self.lbls = []\n self.net = None\n self.rng = np.random\n\n @property\n def cpdir(self):\n cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str\n (x) for x in self.config['input_shape']]))\n if not path.exists(cpdir):\n mkdir(cpdir)\n return cpdir\n\n def create_net(self):\n x = inputs = Input(shape=self.config['input_shape'])\n vgg = VGG19(include_top=False, input_tensor=x)\n outputs = Flatten()(vgg.output)\n outputs = Dropout(0.1)(outputs)\n outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(\n outputs)\n\n def true_pos(yt, yp):\n return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))\n\n def pred_pos(yt, yp):\n return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))\n\n def F2(yt, yp):\n yt, yp = K.round(yt), K.round(yp)\n tp = K.sum(yt * yp)\n fp = K.sum(K.clip(yp - yt, 0, 1))\n fn = K.sum(K.clip(yt - yp, 0, 1))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n b = 2.0\n return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))\n self.net = Model(inputs, outputs)\n self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',\n metrics=['binary_accuracy', F2, true_pos, pred_pos])\n self.net.summary()\n plot_model(self.net, to_file='%s/net.png' % self.cpdir)\n return\n\n def train(self):\n batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.\n config['trn_imgs_dir'], self.config['trn_transform'])\n cb = [HistoryPlot('%s/history.png' % self.cpdir), CSVLogger(\n '%s/history.csv' % self.cpdir), ModelCheckpoint(\n '%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True),\n ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',\n verbose=1, save_best_only=True, mode='max', save_weights_only=\n True), ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2,\n epsilon=0.005, verbose=1, mode='min'), EarlyStopping(monitor=\n 'F2', min_delta=0.01, patience=10, verbose=1, mode='max')]\n self.net.fit_generator(batch_gen, steps_per_epoch=self.config[\n 'trn_steps'], verbose=1, callbacks=cb, epochs=self.config[\n 'trn_nb_epochs'], workers=2, pickle_safe=True)\n return\n\n def get_mean_img(self, imgs_paths, mean_img_path):\n \"\"\"Compute the mean image from the given paths and save it to the given path.\"\"\"\n logger = logging.getLogger(funcname())\n if not path.exists(mean_img_path):\n mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)\n for idx, img_path in enumerate(imgs_paths):\n mean_img += imread(img_path, mode='RGB').astype(np.float32\n ) / len(imgs_paths)\n if idx % 1000 == 0:\n logger.info('%d/%d' % (idx, len(imgs_paths)))\n imsave(mean_img_path, mean_img)\n return imread(mean_img_path)\n\n def train_batch_gen(self, imgs_csv, imgs_dir, transform):\n logger = logging.getLogger(funcname())\n df = pd.read_csv(imgs_csv)\n imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[\n 'image_name'].values]\n tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]\n mean_img = 
self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %\n self.cpdir)\n mean_img = mean_img.astype(np.float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n while True:\n imgs_batch = np.zeros([self.config['batch_size']] + self.config\n ['input_shape'])\n tags_batch = np.zeros([self.config['batch_size']] + self.config\n ['output_shape'])\n random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),\n len(imgs_paths)))\n for batch_idx in range(self.config['batch_size']):\n data_idx = next(random_idxs)\n img = imread(imgs_paths[data_idx], mode='RGB')\n img = img_preprocess(img)\n img = resize(img, self.config['input_shape'],\n preserve_range=True, mode='constant')\n if transform:\n img = random_transforms(img, nb_min=0, nb_max=6)\n imgs_batch[batch_idx] = img\n tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])\n yield imgs_batch, tags_batch\n\n def predict(self, img_batch):\n imgs_paths = listdir(self.config['trn_imgs_dir'])\n mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir\n mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.\n float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n for idx in range(len(img_batch)):\n img_batch[idx] = img_preprocess(img_batch[idx])\n tags_pred = self.net.predict(img_batch)\n tags_pred = tags_pred.round().astype(np.uint8)\n return tags_pred\n\n\nif __name__ == '__main__':\n from planet.model_runner import model_runner\n model = VGGNet()\n model_runner(model)\n",
"step-4": "import numpy as np\nnp.random.seed(317)\nfrom glob import glob\nfrom itertools import cycle\nfrom keras.applications.vgg19 import VGG19\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.layers import Input, BatchNormalization, Flatten, Dropout, Dense\nfrom keras.utils import plot_model\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping, Callback\nfrom keras.losses import kullback_leibler_divergence\nfrom math import ceil\nfrom os import path, mkdir, listdir\nfrom skimage.transform import resize\nfrom scipy.misc import imread, imsave\nfrom time import time\nimport argparse\nimport logging\nimport keras.backend as K\nimport pandas as pd\nimport tifffile as tif\nimport sys\nsys.path.append('.')\nfrom planet.utils.data_utils import tagset_to_ints, random_transforms\nfrom planet.utils.keras_utils import HistoryPlot\nfrom planet.utils.runtime import funcname\n\n\nclass VGGNet(object):\n\n def __init__(self, checkpoint_name='VGGNet'):\n self.config = {'image_shape': [256, 256, 3], 'input_shape': [224, \n 224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps': \n 680, 'trn_nb_epochs': 200, 'trn_transform': True,\n 'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':\n 'data/train-jpg', 'tst_imgs_csv':\n 'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}\n self.checkpoint_name = checkpoint_name\n self.imgs = []\n self.lbls = []\n self.net = None\n self.rng = np.random\n\n @property\n def cpdir(self):\n cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str\n (x) for x in self.config['input_shape']]))\n if not path.exists(cpdir):\n mkdir(cpdir)\n return cpdir\n\n def create_net(self):\n x = inputs = Input(shape=self.config['input_shape'])\n vgg = VGG19(include_top=False, input_tensor=x)\n outputs = Flatten()(vgg.output)\n outputs = Dropout(0.1)(outputs)\n outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(\n outputs)\n\n def true_pos(yt, yp):\n return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))\n\n def pred_pos(yt, yp):\n return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))\n\n def F2(yt, yp):\n yt, yp = K.round(yt), K.round(yp)\n tp = K.sum(yt * yp)\n fp = K.sum(K.clip(yp - yt, 0, 1))\n fn = K.sum(K.clip(yt - yp, 0, 1))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n b = 2.0\n return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))\n self.net = Model(inputs, outputs)\n self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',\n metrics=['binary_accuracy', F2, true_pos, pred_pos])\n self.net.summary()\n plot_model(self.net, to_file='%s/net.png' % self.cpdir)\n return\n\n def train(self):\n batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.\n config['trn_imgs_dir'], self.config['trn_transform'])\n cb = [HistoryPlot('%s/history.png' % self.cpdir), CSVLogger(\n '%s/history.csv' % self.cpdir), ModelCheckpoint(\n '%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True),\n ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',\n verbose=1, save_best_only=True, mode='max', save_weights_only=\n True), ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2,\n epsilon=0.005, verbose=1, mode='min'), EarlyStopping(monitor=\n 'F2', min_delta=0.01, patience=10, verbose=1, mode='max')]\n self.net.fit_generator(batch_gen, steps_per_epoch=self.config[\n 'trn_steps'], verbose=1, callbacks=cb, epochs=self.config[\n 'trn_nb_epochs'], workers=2, pickle_safe=True)\n return\n\n def 
get_mean_img(self, imgs_paths, mean_img_path):\n \"\"\"Compute the mean image from the given paths and save it to the given path.\"\"\"\n logger = logging.getLogger(funcname())\n if not path.exists(mean_img_path):\n mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)\n for idx, img_path in enumerate(imgs_paths):\n mean_img += imread(img_path, mode='RGB').astype(np.float32\n ) / len(imgs_paths)\n if idx % 1000 == 0:\n logger.info('%d/%d' % (idx, len(imgs_paths)))\n imsave(mean_img_path, mean_img)\n return imread(mean_img_path)\n\n def train_batch_gen(self, imgs_csv, imgs_dir, transform):\n logger = logging.getLogger(funcname())\n df = pd.read_csv(imgs_csv)\n imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[\n 'image_name'].values]\n tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]\n mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %\n self.cpdir)\n mean_img = mean_img.astype(np.float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n while True:\n imgs_batch = np.zeros([self.config['batch_size']] + self.config\n ['input_shape'])\n tags_batch = np.zeros([self.config['batch_size']] + self.config\n ['output_shape'])\n random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),\n len(imgs_paths)))\n for batch_idx in range(self.config['batch_size']):\n data_idx = next(random_idxs)\n img = imread(imgs_paths[data_idx], mode='RGB')\n img = img_preprocess(img)\n img = resize(img, self.config['input_shape'],\n preserve_range=True, mode='constant')\n if transform:\n img = random_transforms(img, nb_min=0, nb_max=6)\n imgs_batch[batch_idx] = img\n tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])\n yield imgs_batch, tags_batch\n\n def predict(self, img_batch):\n imgs_paths = listdir(self.config['trn_imgs_dir'])\n mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir\n mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.\n float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n for idx in range(len(img_batch)):\n img_batch[idx] = img_preprocess(img_batch[idx])\n tags_pred = self.net.predict(img_batch)\n tags_pred = tags_pred.round().astype(np.uint8)\n return tags_pred\n\n\nif __name__ == '__main__':\n from planet.model_runner import model_runner\n model = VGGNet()\n model_runner(model)\n",
"step-5": "# VGGNet\nimport numpy as np\nnp.random.seed(317)\n\nfrom glob import glob\nfrom itertools import cycle\nfrom keras.applications.vgg19 import VGG19\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.layers import Input, BatchNormalization, Flatten, Dropout, Dense\nfrom keras.utils import plot_model\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping, Callback\nfrom keras.losses import kullback_leibler_divergence\nfrom math import ceil\nfrom os import path, mkdir, listdir\nfrom skimage.transform import resize\nfrom scipy.misc import imread, imsave\nfrom time import time\nimport argparse\nimport logging\nimport keras.backend as K\nimport pandas as pd\nimport tifffile as tif\n\nimport sys\nsys.path.append('.')\nfrom planet.utils.data_utils import tagset_to_ints, random_transforms\nfrom planet.utils.keras_utils import HistoryPlot\nfrom planet.utils.runtime import funcname\n\n\nclass VGGNet(object):\n\n def __init__(self, checkpoint_name='VGGNet'):\n\n self.config = {\n 'image_shape': [256, 256, 3],\n 'input_shape': [224, 224, 3],\n 'output_shape': [17, ],\n 'batch_size': 60,\n 'trn_steps': 680,\n 'trn_nb_epochs': 200,\n 'trn_transform': True,\n 'trn_imgs_csv': 'data/train_v2.csv',\n 'trn_imgs_dir': 'data/train-jpg',\n 'tst_imgs_csv': 'data/sample_submission_v2.csv',\n 'tst_imgs_dir': 'data/test-jpg'\n }\n\n self.checkpoint_name = checkpoint_name\n self.imgs = []\n self.lbls = []\n self.net = None\n self.rng = np.random\n\n @property\n def cpdir(self):\n cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str(x) for x in self.config['input_shape']]))\n if not path.exists(cpdir):\n mkdir(cpdir)\n return cpdir\n\n def create_net(self):\n\n x = inputs = Input(shape=self.config['input_shape'])\n vgg = VGG19(include_top=False, input_tensor=x)\n\n outputs = Flatten()(vgg.output)\n outputs = Dropout(0.1)(outputs)\n outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(outputs)\n\n def true_pos(yt, yp):\n return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))\n\n def pred_pos(yt, yp):\n return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))\n\n def F2(yt, yp):\n yt, yp = K.round(yt), K.round(yp)\n tp = K.sum(yt * yp)\n fp = K.sum(K.clip(yp - yt, 0, 1))\n fn = K.sum(K.clip(yt - yp, 0, 1))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n b = 2.0\n return (1 + b**2) * ((p * r) / (b**2 * p + r + K.epsilon()))\n\n self.net = Model(inputs, outputs)\n self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',\n metrics=['binary_accuracy', F2, true_pos, pred_pos])\n self.net.summary()\n plot_model(self.net, to_file='%s/net.png' % self.cpdir)\n return\n\n def train(self):\n\n batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.config[\n 'trn_imgs_dir'], self.config['trn_transform'])\n\n cb = [\n HistoryPlot('%s/history.png' % self.cpdir),\n CSVLogger('%s/history.csv' % self.cpdir),\n ModelCheckpoint('%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True),\n ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',\n verbose=1, save_best_only=True, mode='max', save_weights_only=True),\n ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2, epsilon=0.005, verbose=1, mode='min'),\n EarlyStopping(monitor='F2', min_delta=0.01, patience=10, verbose=1, mode='max')\n ]\n\n self.net.fit_generator(batch_gen, steps_per_epoch=self.config['trn_steps'], verbose=1, callbacks=cb,\n epochs=self.config['trn_nb_epochs'], workers=2, 
pickle_safe=True)\n\n return\n\n def get_mean_img(self, imgs_paths, mean_img_path):\n '''Compute the mean image from the given paths and save it to the given path.'''\n logger = logging.getLogger(funcname())\n if not path.exists(mean_img_path):\n mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)\n for idx, img_path in enumerate(imgs_paths):\n mean_img += imread(img_path, mode='RGB').astype(np.float32) / len(imgs_paths)\n if idx % 1000 == 0:\n logger.info('%d/%d' % (idx, len(imgs_paths)))\n imsave(mean_img_path, mean_img)\n return imread(mean_img_path)\n\n def train_batch_gen(self, imgs_csv, imgs_dir, transform):\n\n logger = logging.getLogger(funcname())\n\n # Read the CSV and extract image names and tags.\n df = pd.read_csv(imgs_csv)\n imgs_paths = ['%s/%s.jpg' % (imgs_dir, n) for n in df['image_name'].values]\n tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]\n\n # Compute the mean image for pre-processing.\n mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' % self.cpdir)\n mean_img = mean_img.astype(np.float32) / 255.\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32) / 255. - mean_img_mean\n\n while True:\n\n imgs_batch = np.zeros([self.config['batch_size'], ] + self.config['input_shape'])\n tags_batch = np.zeros([self.config['batch_size'], ] + self.config['output_shape'])\n random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)), len(imgs_paths)))\n\n for batch_idx in range(self.config['batch_size']):\n data_idx = next(random_idxs)\n img = imread(imgs_paths[data_idx], mode='RGB')\n img = img_preprocess(img)\n img = resize(img, self.config['input_shape'], preserve_range=True, mode='constant')\n if transform:\n img = random_transforms(img, nb_min=0, nb_max=6)\n imgs_batch[batch_idx] = img\n tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])\n\n yield imgs_batch, tags_batch\n\n def predict(self, img_batch):\n\n # Get the mean image\n imgs_paths = listdir(self.config['trn_imgs_dir'])\n mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir\n mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.float32) / 255.\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32) / 255. - mean_img_mean\n\n for idx in range(len(img_batch)):\n img_batch[idx] = img_preprocess(img_batch[idx])\n\n tags_pred = self.net.predict(img_batch)\n tags_pred = tags_pred.round().astype(np.uint8)\n return tags_pred\n\nif __name__ == \"__main__\":\n from planet.model_runner import model_runner\n model = VGGNet()\n model_runner(model)\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
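The step-5 string above defines an F2 score (F-beta with beta=2) as a Keras-backend metric. For reference, the same quantity on rounded predictions can be written in plain NumPy; the helper below is a hypothetical restatement, not part of the dataset row, and the small epsilon is only there to avoid division by zero.

import numpy as np

def f_beta(y_true, y_pred, beta=2.0, eps=1e-7):
    # F-beta on rounded predictions, mirroring the Keras-backend F2 metric above.
    yt, yp = np.round(y_true), np.round(y_pred)
    tp = np.sum(yt * yp)
    fp = np.sum(np.clip(yp - yt, 0, 1))
    fn = np.sum(np.clip(yt - yp, 0, 1))
    p = tp / (tp + fp + eps)
    r = tp / (tp + fn + eps)
    return (1 + beta ** 2) * p * r / (beta ** 2 * p + r + eps)

print(f_beta(np.array([1, 1, 0, 1]), np.array([0.9, 0.2, 0.1, 0.8])))  # ~0.714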
<|reserved_special_token_0|>
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
return k
<|reserved_special_token_0|>
def gaussian_integral(alpha, m):
if int(m / 2) * 2 == m:
n = int(m / 2)
value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(
alpha, n + 0.5)
else:
n = int((m - 1) / 2)
value = factorial(n) / 2 / pow(alpha, n + 1)
return value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def factorial(n):
value = 1
for i in range(n, 1, -1):
value *= i
return value
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
return k
<|reserved_special_token_0|>
def gaussian_integral(alpha, m):
if int(m / 2) * 2 == m:
n = int(m / 2)
value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(
alpha, n + 0.5)
else:
n = int((m - 1) / 2)
value = factorial(n) / 2 / pow(alpha, n + 1)
return value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def factorial(n):
value = 1
for i in range(n, 1, -1):
value *= i
return value
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
return k
<|reserved_special_token_0|>
def gaussian_integral(alpha, m):
if int(m / 2) * 2 == m:
n = int(m / 2)
value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(
alpha, n + 0.5)
else:
n = int((m - 1) / 2)
value = factorial(n) / 2 / pow(alpha, n + 1)
return value
def overlap_s_gaussians(expo1, expo2, power_of_r):
norm1 = pow(2 * expo1 / pi, 0.75)
norm2 = pow(2 * expo2 / pi, 0.75)
value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1 + expo2,
power_of_r + 2)
return value
<|reserved_special_token_1|>
import os, sys
import numpy as np
from math import exp, sqrt, pi
def factorial(n):
value = 1
for i in range(n, 1, -1):
value *= i
return value
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
return k
<|reserved_special_token_0|>
def gaussian_integral(alpha, m):
if int(m / 2) * 2 == m:
n = int(m / 2)
value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(
alpha, n + 0.5)
else:
n = int((m - 1) / 2)
value = factorial(n) / 2 / pow(alpha, n + 1)
return value
def overlap_s_gaussians(expo1, expo2, power_of_r):
norm1 = pow(2 * expo1 / pi, 0.75)
norm2 = pow(2 * expo2 / pi, 0.75)
value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1 + expo2,
power_of_r + 2)
return value
<|reserved_special_token_1|>
# coding: utf-8
import os, sys
import numpy as np
from math import exp, sqrt, pi
def factorial(n):
value = 1
for i in range(n,1,-1):
value *= i
return value
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
#print("n:", n, "double factorial:", k)
return k
"""\int_0^\infty r^m e^{-alpha * r^2} dr"""
def gaussian_integral(alpha, m):
if int(m/2)*2 == m: # even number
n = int(m/2)
value = double_factorial(2*n-1) * sqrt(pi) / pow(2, n+1) / pow(alpha, n+0.5)
else:
n = int((m-1)/2)
value = factorial(n) / 2 / pow(alpha, n+1)
return value
def overlap_s_gaussians(expo1, expo2, power_of_r):
norm1 = pow(2*expo1/pi, 0.75)
norm2 = pow(2*expo2/pi, 0.75)
value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1+expo2, power_of_r+2)
return value
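# --- usage sketch (not from the original file) ---
# Two quick checks of the routines above: gaussian_integral(alpha, 0) should
# equal 0.5*sqrt(pi/alpha), and a normalized s-type Gaussian should have a
# self-overlap of exactly 1. The exponent values below are arbitrary.
if __name__ == "__main__":
    alpha = 0.75
    assert abs(gaussian_integral(alpha, 0) - 0.5 * sqrt(pi / alpha)) < 1e-12
    print(overlap_s_gaussians(1.3, 1.3, 0))   # expected: 1.0 (self-overlap)
    print(overlap_s_gaussians(1.3, 0.4, 0))   # < 1.0 for different exponents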
|
flexible
|
{
"blob_id": "005650e2747c61b730960a29891b6ba6c8bd381b",
"index": 1334,
"step-1": "<mask token>\n\n\ndef double_factorial(n):\n k = 1\n for i in range(n, 1, -2):\n k *= i\n return k\n\n\n<mask token>\n\n\ndef gaussian_integral(alpha, m):\n if int(m / 2) * 2 == m:\n n = int(m / 2)\n value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(\n alpha, n + 0.5)\n else:\n n = int((m - 1) / 2)\n value = factorial(n) / 2 / pow(alpha, n + 1)\n return value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef factorial(n):\n value = 1\n for i in range(n, 1, -1):\n value *= i\n return value\n\n\ndef double_factorial(n):\n k = 1\n for i in range(n, 1, -2):\n k *= i\n return k\n\n\n<mask token>\n\n\ndef gaussian_integral(alpha, m):\n if int(m / 2) * 2 == m:\n n = int(m / 2)\n value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(\n alpha, n + 0.5)\n else:\n n = int((m - 1) / 2)\n value = factorial(n) / 2 / pow(alpha, n + 1)\n return value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef factorial(n):\n value = 1\n for i in range(n, 1, -1):\n value *= i\n return value\n\n\ndef double_factorial(n):\n k = 1\n for i in range(n, 1, -2):\n k *= i\n return k\n\n\n<mask token>\n\n\ndef gaussian_integral(alpha, m):\n if int(m / 2) * 2 == m:\n n = int(m / 2)\n value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(\n alpha, n + 0.5)\n else:\n n = int((m - 1) / 2)\n value = factorial(n) / 2 / pow(alpha, n + 1)\n return value\n\n\ndef overlap_s_gaussians(expo1, expo2, power_of_r):\n norm1 = pow(2 * expo1 / pi, 0.75)\n norm2 = pow(2 * expo2 / pi, 0.75)\n value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1 + expo2, \n power_of_r + 2)\n return value\n",
"step-4": "import os, sys\nimport numpy as np\nfrom math import exp, sqrt, pi\n\n\ndef factorial(n):\n value = 1\n for i in range(n, 1, -1):\n value *= i\n return value\n\n\ndef double_factorial(n):\n k = 1\n for i in range(n, 1, -2):\n k *= i\n return k\n\n\n<mask token>\n\n\ndef gaussian_integral(alpha, m):\n if int(m / 2) * 2 == m:\n n = int(m / 2)\n value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(\n alpha, n + 0.5)\n else:\n n = int((m - 1) / 2)\n value = factorial(n) / 2 / pow(alpha, n + 1)\n return value\n\n\ndef overlap_s_gaussians(expo1, expo2, power_of_r):\n norm1 = pow(2 * expo1 / pi, 0.75)\n norm2 = pow(2 * expo2 / pi, 0.75)\n value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1 + expo2, \n power_of_r + 2)\n return value\n",
"step-5": "# coding: utf-8\n\nimport os, sys\nimport numpy as np\nfrom math import exp, sqrt, pi\n\ndef factorial(n):\n value = 1\n for i in range(n,1,-1):\n value *= i\n return value\n \ndef double_factorial(n):\n k = 1\n for i in range(n, 1, -2):\n k *= i\n #print(\"n:\", n, \"double factorial:\", k)\n return k\n\n\"\"\"\\int_0^\\infty r^m e^{-alpha * r^2} dr\"\"\"\ndef gaussian_integral(alpha, m):\n if int(m/2)*2 == m: # even number\n n = int(m/2)\n value = double_factorial(2*n-1) * sqrt(pi) / pow(2, n+1) / pow(alpha, n+0.5)\n else:\n n = int((m-1)/2)\n value = factorial(n) / 2 / pow(alpha, n+1)\n return value\n\ndef overlap_s_gaussians(expo1, expo2, power_of_r):\n norm1 = pow(2*expo1/pi, 0.75)\n norm2 = pow(2*expo2/pi, 0.75)\n value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1+expo2, power_of_r+2)\n return value\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def boyhook(dic):
print('test')
if dic['name']:
return dic['name'], dic['age']
return dic
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def boyhook(dic):
print('test')
if dic['name']:
return dic['name'], dic['age']
return dic
<|reserved_special_token_0|>
print(new_boy)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = '{"ddd": {{}}}'
def boyhook(dic):
print('test')
if dic['name']:
return dic['name'], dic['age']
return dic
new_boy = json.loads(a, object_hook=boyhook)
print(new_boy)
<|reserved_special_token_1|>
import json
a = '{"ddd": {{}}}'
def boyhook(dic):
print('test')
if dic['name']:
return dic['name'], dic['age']
return dic
new_boy = json.loads(a, object_hook=boyhook)
print(new_boy)
<|reserved_special_token_1|>
# coding=utf-8
# @FileName: test_json.py
# @Author: ZhengQiang
# Date: 2020/1/15 5:26 PM
import json
a = "{\"ddd\": {{}}}"
def boyhook(dic):
print('test')
if dic['name']:
return dic['name'], dic['age']
return dic
new_boy = json.loads(a, object_hook=boyhook)
print(new_boy)
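# Note: as written, the loads() call above raises JSONDecodeError, because the
# string a is not valid JSON ("{{}}" is not a JSON value), and boyhook would
# also KeyError on dicts without a "name" key. A minimal working variant of the
# same object_hook idea, using a hypothetical input string, looks like this:
valid = '{"name": "Tom", "age": 12}'

def boy_hook(dic):
    if "name" in dic:
        return dic["name"], dic["age"]
    return dic

print(json.loads(valid, object_hook=boy_hook))  # ('Tom', 12)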
|
flexible
|
{
"blob_id": "2bc5711839ccbe525551b60211d8cd79ddb7775a",
"index": 7019,
"step-1": "<mask token>\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\n<mask token>\nprint(new_boy)\n",
"step-3": "<mask token>\na = '{\"ddd\": {{}}}'\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\nnew_boy = json.loads(a, object_hook=boyhook)\nprint(new_boy)\n",
"step-4": "import json\na = '{\"ddd\": {{}}}'\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\nnew_boy = json.loads(a, object_hook=boyhook)\nprint(new_boy)\n",
"step-5": "# coding=utf-8\n# @FileName: test_json.py\n# @Author: ZhengQiang\n# Date: 2020/1/15 5:26 下午\nimport json\na = \"{\\\"ddd\\\": {{}}}\"\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\nnew_boy = json.loads(a, object_hook=boyhook)\nprint(new_boy)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pandas as pd
file = pd.read_csv("KDDTest+.csv")
with open("test_9feats.csv", "w") as f:
df = pd.DataFrame(file,
columns=[
"dst_host_srv_serror_rate", "dst_host_serror_rate",
"serror_rate", "srv_serror_rate", "count", "flag",
"same_srv_rate", "dst_host_srv_count",
"dst_host_diff_srv_rate", "Malicious"
])
df.to_csv(f, index=False, header=True, line_terminator='\n')
print(df)
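# Round-trip sketch (assumes the block above has just written test_9feats.csv
# in the working directory): the output file should contain exactly the ten
# selected columns, in the order given.
check = pd.read_csv("test_9feats.csv")
print(list(check.columns))
print(check.shape)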
|
normal
|
{
"blob_id": "ce28330db66dcdfad63bdac698ce9d285964d288",
"index": 5124,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('test_9feats.csv', 'w') as f:\n df = pd.DataFrame(file, columns=['dst_host_srv_serror_rate',\n 'dst_host_serror_rate', 'serror_rate', 'srv_serror_rate', 'count',\n 'flag', 'same_srv_rate', 'dst_host_srv_count',\n 'dst_host_diff_srv_rate', 'Malicious'])\n df.to_csv(f, index=False, header=True, line_terminator='\\n')\n print(df)\n",
"step-3": "<mask token>\nfile = pd.read_csv('KDDTest+.csv')\nwith open('test_9feats.csv', 'w') as f:\n df = pd.DataFrame(file, columns=['dst_host_srv_serror_rate',\n 'dst_host_serror_rate', 'serror_rate', 'srv_serror_rate', 'count',\n 'flag', 'same_srv_rate', 'dst_host_srv_count',\n 'dst_host_diff_srv_rate', 'Malicious'])\n df.to_csv(f, index=False, header=True, line_terminator='\\n')\n print(df)\n",
"step-4": "import pandas as pd\nfile = pd.read_csv('KDDTest+.csv')\nwith open('test_9feats.csv', 'w') as f:\n df = pd.DataFrame(file, columns=['dst_host_srv_serror_rate',\n 'dst_host_serror_rate', 'serror_rate', 'srv_serror_rate', 'count',\n 'flag', 'same_srv_rate', 'dst_host_srv_count',\n 'dst_host_diff_srv_rate', 'Malicious'])\n df.to_csv(f, index=False, header=True, line_terminator='\\n')\n print(df)\n",
"step-5": "import pandas as pd\n\nfile = pd.read_csv(\"KDDTest+.csv\")\nwith open(\"test_9feats.csv\", \"w\") as f:\n df = pd.DataFrame(file,\n columns=[\n \"dst_host_srv_serror_rate\", \"dst_host_serror_rate\",\n \"serror_rate\", \"srv_serror_rate\", \"count\", \"flag\",\n \"same_srv_rate\", \"dst_host_srv_count\",\n \"dst_host_diff_srv_rate\", \"Malicious\"\n ])\n df.to_csv(f, index=False, header=True, line_terminator='\\n')\n print(df)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class RankedHand(object):
def __init__(self, remaining_cards):
self._remaining_cards = remaining_cards
self.rank = None
def remaining_cards(self):
return self._remaining_cards
# Returns 1 if self is higher, 0 if equal, -1 if self is lower
def compare_high_cards(self, other):
s_cards = reversed(sorted(self.remaining_cards()))
o_cards = reversed(sorted(other.remaining_cards()))
for card_pair in zip(s_cards, o_cards):
print("Comparing %s and %s" % (str(card_pair[0]), str(card_pair[1])))
if(card_pair[0] > card_pair[1]):
return 1
elif(card_pair[0] < card_pair[1]):
return -1
return 0
def __eq__(self, other):
return self.rank == other.rank
def __lt__(self, other):
return self.rank < other.rank
class HighCard(RankedHand):
def __init__(self, remaining_cards):
super(HighCard, self).__init__(remaining_cards)
self.rank = 0
def __eq__(self, other):
if self.rank != other.rank:
return super(HighCard, self).__eq__(other)
else:
return self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(HighCard, self).__lt__(other)
else:
return self.compare_high_cards(other) == -1
class OnePair(RankedHand):
def __init__(self, pair_cards, remaining_cards):
super(OnePair, self).__init__(remaining_cards)
self.rank = 1
self.pair_cards = pair_cards
def __eq__(self, other):
if self.rank != other.rank:
return super(OnePair, self).__eq__(other)
else:
return self.pair_cards == other.pair_cards and self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(OnePair, self).__lt__(other)
else:
return self.pair_cards < other.pair_cards or (self.pair_cards == other.pair_cards and self.compare_high_cards(other) == -1)
class TwoPair(RankedHand):
def __init__(self, two_pair_ranks, remaining_card):
super(TwoPair, self).__init__(remaining_card)
self.two_pair_ranks = sorted(two_pair_ranks)
self.rank = 2
def high_pair(self):
return self.two_pair_ranks[1]
def low_pair(self):
return self.two_pair_ranks[0]
def __eq__(self, other):
if self.rank != other.rank:
return super(TwoPair, self).__eq__(other)
else:
return self.high_pair() == other.high_pair() and self.low_pair() == other.low_pair() and self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(TwoPair, self).__lt__(other)
if self.high_pair() < other.high_pair():
return True
elif(self.high_pair() == other.high_pair() and self.low_pair() < other.low_pair()):
return True
elif(self.high_pair() == other.high_pair() and self.low_pair() == other.low_pair() and self.compare_high_cards(other) == -1):
return True
else:
return False
class ThreeKind(RankedHand):
def __init__(self, three_kind_rank):
self.rank = 3
self.three_kind_rank = three_kind_rank
def __eq__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__eq__(other)
else:
return False # Can't be equal
def __lt__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__lt__(other)
if self.three_kind_rank < other.three_kind_rank:
return True
elif(self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(other) == -1):
return True
else:
return False
class Straight(RankedHand):
def __init__(self, all_cards):
super(Straight, self).__init__(all_cards)
self.rank = 4
# Account for Ace low
if 14 in all_cards and 2 in all_cards:
tmp = all_cards
tmp.remove(14)
self.straight_rank = max(tmp)
else:
self.straight_rank = max(all_cards)
def __eq__(self, other):
if self.rank != other.rank:
return super(Straight, self).__eq__(other)
else:
return self.straight_rank == other.straight_rank
def __lt__(self, other):
if self.rank != other.rank:
return super(Straight, self).__lt__(other)
else:
return self.straight_rank < other.straight_rank
class Flush(RankedHand):
def __init__(self, all_cards):
super(Flush, self).__init__(all_cards)
self.rank = 5
def __eq__(self, other):
if self.rank != other.rank:
return super(Flush, self).__eq__(other)
else:
return self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(Flush, self).__lt__(other)
else:
return self.compare_high_cards(other) == -1
class FullHouse(RankedHand):
def __init__(self, three_kind_rank):
super(FullHouse, self).__init__([])
self.three_kind_rank = three_kind_rank
self.rank = 6
def __eq__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__eq__(other)
else:
return False # Can't be equal
def __lt__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__lt__(other)
elif(self.three_kind_rank < other.three_kind_rank):
return True
else:
return False
class FourKind(RankedHand):
def __init__(self, four_kind_rank):
self.four_kind_rank = four_kind_rank
self.rank = 7
def __eq__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__eq__(other)
return False # Can't be equal
def __lt__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__lt__(other)
elif(self.four_kind_rank < other.four_kind_rank):
return True
else:
return False
class StraightFlush(Straight):
def __init__(self, all_cards):
super(StraightFlush, self).__init__(all_cards)
self.rank = 8
class RoyalFlush(RankedHand):
def __init__(self):
self.rank = 9
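# Usage sketch (editorial): the comparison methods above only rely on the
# ordering of card values, so bare integers (2-14) work as ranks here.
# compare_high_cards() also prints each pairwise comparison it makes.
ace_high = HighCard([14, 10, 7, 5, 3])
king_high = HighCard([13, 12, 7, 5, 3])
print(ace_high < king_high, ace_high == king_high)   # False False -> ace high wins

pair_of_nines = OnePair(9, [14, 6, 2])
print(king_high < pair_of_nines)                     # True: any pair beats a high card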
|
normal
|
{
"blob_id": "a0d1ef11d00e2ddd65b648a87f493b7adcda5115",
"index": 9412,
"step-1": "<mask token>\n\n\nclass TwoPair(RankedHand):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ThreeKind(RankedHand):\n\n def __init__(self, three_kind_rank):\n self.rank = 3\n self.three_kind_rank = three_kind_rank\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__lt__(other)\n if self.three_kind_rank < other.three_kind_rank:\n return True\n elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(\n other) == -1:\n return True\n else:\n return False\n\n\nclass Straight(RankedHand):\n\n def __init__(self, all_cards):\n super(Straight, self).__init__(all_cards)\n self.rank = 4\n if 14 in all_cards and 2 in all_cards:\n tmp = all_cards\n tmp.remove(14)\n self.straight_rank = max(tmp)\n else:\n self.straight_rank = max(all_cards)\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__eq__(other)\n else:\n return self.straight_rank == other.straight_rank\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__lt__(other)\n else:\n return self.straight_rank < other.straight_rank\n\n\nclass Flush(RankedHand):\n\n def __init__(self, all_cards):\n super(Flush, self).__init__(all_cards)\n self.rank = 5\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass FullHouse(RankedHand):\n\n def __init__(self, three_kind_rank):\n super(FullHouse, self).__init__([])\n self.three_kind_rank = three_kind_rank\n self.rank = 6\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__lt__(other)\n elif self.three_kind_rank < other.three_kind_rank:\n return True\n else:\n return False\n\n\nclass FourKind(RankedHand):\n\n def __init__(self, four_kind_rank):\n self.four_kind_rank = four_kind_rank\n self.rank = 7\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__eq__(other)\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__lt__(other)\n elif self.four_kind_rank < other.four_kind_rank:\n return True\n else:\n return False\n\n\nclass StraightFlush(Straight):\n\n def __init__(self, all_cards):\n super(StraightFlush, self).__init__(all_cards)\n self.rank = 8\n\n\nclass RoyalFlush(RankedHand):\n\n def __init__(self):\n self.rank = 9\n",
"step-2": "<mask token>\n\n\nclass TwoPair(RankedHand):\n\n def __init__(self, two_pair_ranks, remaining_card):\n super(TwoPair, self).__init__(remaining_card)\n self.two_pair_ranks = sorted(two_pair_ranks)\n self.rank = 2\n <mask token>\n <mask token>\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__eq__(other)\n else:\n return self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__lt__(other)\n if self.high_pair() < other.high_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) < other.low_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == -1:\n return True\n else:\n return False\n\n\nclass ThreeKind(RankedHand):\n\n def __init__(self, three_kind_rank):\n self.rank = 3\n self.three_kind_rank = three_kind_rank\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__lt__(other)\n if self.three_kind_rank < other.three_kind_rank:\n return True\n elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(\n other) == -1:\n return True\n else:\n return False\n\n\nclass Straight(RankedHand):\n\n def __init__(self, all_cards):\n super(Straight, self).__init__(all_cards)\n self.rank = 4\n if 14 in all_cards and 2 in all_cards:\n tmp = all_cards\n tmp.remove(14)\n self.straight_rank = max(tmp)\n else:\n self.straight_rank = max(all_cards)\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__eq__(other)\n else:\n return self.straight_rank == other.straight_rank\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__lt__(other)\n else:\n return self.straight_rank < other.straight_rank\n\n\nclass Flush(RankedHand):\n\n def __init__(self, all_cards):\n super(Flush, self).__init__(all_cards)\n self.rank = 5\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass FullHouse(RankedHand):\n\n def __init__(self, three_kind_rank):\n super(FullHouse, self).__init__([])\n self.three_kind_rank = three_kind_rank\n self.rank = 6\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__lt__(other)\n elif self.three_kind_rank < other.three_kind_rank:\n return True\n else:\n return False\n\n\nclass FourKind(RankedHand):\n\n def __init__(self, four_kind_rank):\n self.four_kind_rank = four_kind_rank\n self.rank = 7\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__eq__(other)\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__lt__(other)\n elif self.four_kind_rank < other.four_kind_rank:\n return True\n else:\n return False\n\n\nclass StraightFlush(Straight):\n\n def __init__(self, all_cards):\n super(StraightFlush, 
self).__init__(all_cards)\n self.rank = 8\n\n\nclass RoyalFlush(RankedHand):\n\n def __init__(self):\n self.rank = 9\n",
"step-3": "class RankedHand(object):\n <mask token>\n <mask token>\n\n def compare_high_cards(self, other):\n s_cards = reversed(sorted(self.remaining_cards()))\n o_cards = reversed(sorted(other.remaining_cards()))\n for card_pair in zip(s_cards, o_cards):\n print('Comparing %s and %s' % (str(card_pair[0]), str(card_pair\n [1])))\n if card_pair[0] > card_pair[1]:\n return 1\n elif card_pair[0] < card_pair[1]:\n return -1\n return 0\n\n def __eq__(self, other):\n return self.rank == other.rank\n\n def __lt__(self, other):\n return self.rank < other.rank\n\n\nclass HighCard(RankedHand):\n\n def __init__(self, remaining_cards):\n super(HighCard, self).__init__(remaining_cards)\n self.rank = 0\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass OnePair(RankedHand):\n\n def __init__(self, pair_cards, remaining_cards):\n super(OnePair, self).__init__(remaining_cards)\n self.rank = 1\n self.pair_cards = pair_cards\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__eq__(other)\n else:\n return (self.pair_cards == other.pair_cards and self.\n compare_high_cards(other) == 0)\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__lt__(other)\n else:\n return (self.pair_cards < other.pair_cards or self.pair_cards ==\n other.pair_cards and self.compare_high_cards(other) == -1)\n\n\nclass TwoPair(RankedHand):\n\n def __init__(self, two_pair_ranks, remaining_card):\n super(TwoPair, self).__init__(remaining_card)\n self.two_pair_ranks = sorted(two_pair_ranks)\n self.rank = 2\n\n def high_pair(self):\n return self.two_pair_ranks[1]\n\n def low_pair(self):\n return self.two_pair_ranks[0]\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__eq__(other)\n else:\n return self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__lt__(other)\n if self.high_pair() < other.high_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) < other.low_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == -1:\n return True\n else:\n return False\n\n\nclass ThreeKind(RankedHand):\n\n def __init__(self, three_kind_rank):\n self.rank = 3\n self.three_kind_rank = three_kind_rank\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__lt__(other)\n if self.three_kind_rank < other.three_kind_rank:\n return True\n elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(\n other) == -1:\n return True\n else:\n return False\n\n\nclass Straight(RankedHand):\n\n def __init__(self, all_cards):\n super(Straight, self).__init__(all_cards)\n self.rank = 4\n if 14 in all_cards and 2 in all_cards:\n tmp = all_cards\n tmp.remove(14)\n self.straight_rank = max(tmp)\n else:\n self.straight_rank = max(all_cards)\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Straight, 
self).__eq__(other)\n else:\n return self.straight_rank == other.straight_rank\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__lt__(other)\n else:\n return self.straight_rank < other.straight_rank\n\n\nclass Flush(RankedHand):\n\n def __init__(self, all_cards):\n super(Flush, self).__init__(all_cards)\n self.rank = 5\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass FullHouse(RankedHand):\n\n def __init__(self, three_kind_rank):\n super(FullHouse, self).__init__([])\n self.three_kind_rank = three_kind_rank\n self.rank = 6\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__lt__(other)\n elif self.three_kind_rank < other.three_kind_rank:\n return True\n else:\n return False\n\n\nclass FourKind(RankedHand):\n\n def __init__(self, four_kind_rank):\n self.four_kind_rank = four_kind_rank\n self.rank = 7\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__eq__(other)\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__lt__(other)\n elif self.four_kind_rank < other.four_kind_rank:\n return True\n else:\n return False\n\n\nclass StraightFlush(Straight):\n\n def __init__(self, all_cards):\n super(StraightFlush, self).__init__(all_cards)\n self.rank = 8\n\n\nclass RoyalFlush(RankedHand):\n\n def __init__(self):\n self.rank = 9\n",
"step-4": "class RankedHand(object):\n\n def __init__(self, remaining_cards):\n self._remaining_cards = remaining_cards\n self.rank = None\n <mask token>\n\n def compare_high_cards(self, other):\n s_cards = reversed(sorted(self.remaining_cards()))\n o_cards = reversed(sorted(other.remaining_cards()))\n for card_pair in zip(s_cards, o_cards):\n print('Comparing %s and %s' % (str(card_pair[0]), str(card_pair\n [1])))\n if card_pair[0] > card_pair[1]:\n return 1\n elif card_pair[0] < card_pair[1]:\n return -1\n return 0\n\n def __eq__(self, other):\n return self.rank == other.rank\n\n def __lt__(self, other):\n return self.rank < other.rank\n\n\nclass HighCard(RankedHand):\n\n def __init__(self, remaining_cards):\n super(HighCard, self).__init__(remaining_cards)\n self.rank = 0\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass OnePair(RankedHand):\n\n def __init__(self, pair_cards, remaining_cards):\n super(OnePair, self).__init__(remaining_cards)\n self.rank = 1\n self.pair_cards = pair_cards\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__eq__(other)\n else:\n return (self.pair_cards == other.pair_cards and self.\n compare_high_cards(other) == 0)\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__lt__(other)\n else:\n return (self.pair_cards < other.pair_cards or self.pair_cards ==\n other.pair_cards and self.compare_high_cards(other) == -1)\n\n\nclass TwoPair(RankedHand):\n\n def __init__(self, two_pair_ranks, remaining_card):\n super(TwoPair, self).__init__(remaining_card)\n self.two_pair_ranks = sorted(two_pair_ranks)\n self.rank = 2\n\n def high_pair(self):\n return self.two_pair_ranks[1]\n\n def low_pair(self):\n return self.two_pair_ranks[0]\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__eq__(other)\n else:\n return self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__lt__(other)\n if self.high_pair() < other.high_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) < other.low_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == -1:\n return True\n else:\n return False\n\n\nclass ThreeKind(RankedHand):\n\n def __init__(self, three_kind_rank):\n self.rank = 3\n self.three_kind_rank = three_kind_rank\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__lt__(other)\n if self.three_kind_rank < other.three_kind_rank:\n return True\n elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(\n other) == -1:\n return True\n else:\n return False\n\n\nclass Straight(RankedHand):\n\n def __init__(self, all_cards):\n super(Straight, self).__init__(all_cards)\n self.rank = 4\n if 14 in all_cards and 2 in all_cards:\n tmp = all_cards\n tmp.remove(14)\n self.straight_rank = max(tmp)\n else:\n self.straight_rank = max(all_cards)\n\n 
def __eq__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__eq__(other)\n else:\n return self.straight_rank == other.straight_rank\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__lt__(other)\n else:\n return self.straight_rank < other.straight_rank\n\n\nclass Flush(RankedHand):\n\n def __init__(self, all_cards):\n super(Flush, self).__init__(all_cards)\n self.rank = 5\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass FullHouse(RankedHand):\n\n def __init__(self, three_kind_rank):\n super(FullHouse, self).__init__([])\n self.three_kind_rank = three_kind_rank\n self.rank = 6\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__lt__(other)\n elif self.three_kind_rank < other.three_kind_rank:\n return True\n else:\n return False\n\n\nclass FourKind(RankedHand):\n\n def __init__(self, four_kind_rank):\n self.four_kind_rank = four_kind_rank\n self.rank = 7\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__eq__(other)\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__lt__(other)\n elif self.four_kind_rank < other.four_kind_rank:\n return True\n else:\n return False\n\n\nclass StraightFlush(Straight):\n\n def __init__(self, all_cards):\n super(StraightFlush, self).__init__(all_cards)\n self.rank = 8\n\n\nclass RoyalFlush(RankedHand):\n\n def __init__(self):\n self.rank = 9\n",
"step-5": "class RankedHand(object):\n def __init__(self, remaining_cards):\n self._remaining_cards = remaining_cards\n self.rank = None\n\n def remaining_cards(self):\n return self._remaining_cards\n\n # Returns 1 if self is higher, 0 if equal, -1 if self is lower\n def compare_high_cards(self, other):\n s_cards = reversed(sorted(self.remaining_cards()))\n o_cards = reversed(sorted(other.remaining_cards()))\n for card_pair in zip(s_cards, o_cards):\n print(\"Comparing %s and %s\" % (str(card_pair[0]), str(card_pair[1])))\n if(card_pair[0] > card_pair[1]):\n return 1\n elif(card_pair[0] < card_pair[1]):\n return -1\n return 0\n\n def __eq__(self, other):\n return self.rank == other.rank\n\n def __lt__(self, other):\n return self.rank < other.rank\n\nclass HighCard(RankedHand):\n def __init__(self, remaining_cards):\n super(HighCard, self).__init__(remaining_cards)\n self.rank = 0\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\nclass OnePair(RankedHand):\n def __init__(self, pair_cards, remaining_cards):\n super(OnePair, self).__init__(remaining_cards)\n self.rank = 1\n self.pair_cards = pair_cards\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__eq__(other)\n else:\n return self.pair_cards == other.pair_cards and self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__lt__(other)\n else:\n return self.pair_cards < other.pair_cards or (self.pair_cards == other.pair_cards and self.compare_high_cards(other) == -1)\n\nclass TwoPair(RankedHand):\n def __init__(self, two_pair_ranks, remaining_card):\n super(TwoPair, self).__init__(remaining_card)\n self.two_pair_ranks = sorted(two_pair_ranks)\n self.rank = 2\n\n def high_pair(self):\n return self.two_pair_ranks[1]\n\n def low_pair(self):\n return self.two_pair_ranks[0]\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__eq__(other)\n else:\n return self.high_pair() == other.high_pair() and self.low_pair() == other.low_pair() and self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__lt__(other)\n if self.high_pair() < other.high_pair():\n return True\n elif(self.high_pair() == other.high_pair() and self.low_pair() < other.low_pair()):\n return True\n elif(self.high_pair() == other.high_pair() and self.low_pair() == other.low_pair() and self.compare_high_cards(other) == -1):\n return True\n else:\n return False\n\nclass ThreeKind(RankedHand):\n def __init__(self, three_kind_rank):\n self.rank = 3\n self.three_kind_rank = three_kind_rank\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__eq__(other)\n else:\n return False # Can't be equal\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__lt__(other)\n if self.three_kind_rank < other.three_kind_rank:\n return True\n elif(self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(other) == -1):\n return True\n else:\n return False\n\nclass Straight(RankedHand):\n def __init__(self, all_cards):\n super(Straight, self).__init__(all_cards)\n self.rank = 4\n # Account for Ace low\n if 14 in all_cards and 2 in 
all_cards:\n tmp = all_cards\n tmp.remove(14)\n self.straight_rank = max(tmp)\n else:\n self.straight_rank = max(all_cards)\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__eq__(other)\n else:\n return self.straight_rank == other.straight_rank\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__lt__(other)\n else:\n return self.straight_rank < other.straight_rank\n\nclass Flush(RankedHand):\n def __init__(self, all_cards):\n super(Flush, self).__init__(all_cards)\n self.rank = 5\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\nclass FullHouse(RankedHand):\n def __init__(self, three_kind_rank):\n super(FullHouse, self).__init__([])\n self.three_kind_rank = three_kind_rank\n self.rank = 6\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__eq__(other)\n else:\n return False # Can't be equal\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__lt__(other)\n elif(self.three_kind_rank < other.three_kind_rank):\n return True\n else:\n return False\n\nclass FourKind(RankedHand):\n def __init__(self, four_kind_rank):\n self.four_kind_rank = four_kind_rank\n self.rank = 7\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__eq__(other)\n return False # Can't be equal\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__lt__(other)\n elif(self.four_kind_rank < other.four_kind_rank):\n return True\n else:\n return False\n\nclass StraightFlush(Straight):\n def __init__(self, all_cards):\n super(StraightFlush, self).__init__(all_cards)\n self.rank = 8\n\nclass RoyalFlush(RankedHand):\n def __init__(self):\n self.rank = 9\n\n\n\n\n",
"step-ids": [
25,
28,
42,
43,
45
]
}
|
[
25,
28,
42,
43,
45
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture(scope='module')
def base_app(tmp_shared_volume_path):
"""Flask application fixture."""
config_mapping = {'SERVER_NAME': 'localhost:5000', 'SECRET_KEY':
'SECRET_KEY', 'TESTING': True, 'SHARED_VOLUME_PATH':
tmp_shared_volume_path, 'SQLALCHEMY_DATABASE_URI':
'sqlite:///testdb.db', 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
'ORGANIZATIONS': ['default']}
app_ = create_app(config_mapping)
return app_
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import absolute_import, print_function
import os
import shutil
import pytest
from reana_db.models import Base, User
from sqlalchemy_utils import create_database, database_exists, drop_database
from reana_workflow_controller.factory import create_app
@pytest.fixture(scope='module')
def base_app(tmp_shared_volume_path):
"""Flask application fixture."""
config_mapping = {'SERVER_NAME': 'localhost:5000', 'SECRET_KEY':
'SECRET_KEY', 'TESTING': True, 'SHARED_VOLUME_PATH':
tmp_shared_volume_path, 'SQLALCHEMY_DATABASE_URI':
'sqlite:///testdb.db', 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
'ORGANIZATIONS': ['default']}
app_ = create_app(config_mapping)
return app_
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration for REANA-Workflow-Controller."""
from __future__ import absolute_import, print_function
import os
import shutil
import pytest
from reana_db.models import Base, User
from sqlalchemy_utils import create_database, database_exists, drop_database
from reana_workflow_controller.factory import create_app
@pytest.fixture(scope="module")
def base_app(tmp_shared_volume_path):
"""Flask application fixture."""
config_mapping = {
"SERVER_NAME": "localhost:5000",
"SECRET_KEY": "SECRET_KEY",
"TESTING": True,
"SHARED_VOLUME_PATH": tmp_shared_volume_path,
"SQLALCHEMY_DATABASE_URI": "sqlite:///testdb.db",
"SQLALCHEMY_TRACK_MODIFICATIONS": False,
"ORGANIZATIONS": ["default"],
}
app_ = create_app(config_mapping)
return app_
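# A consuming test might look like the sketch below; in a real layout it would
# live in a test module next to this conftest, and the tmp_shared_volume_path
# fixture is assumed to be provided elsewhere (e.g. by shared REANA pytest
# fixtures).
def test_base_app_config(base_app):
    assert base_app.config["TESTING"] is True
    assert base_app.config["ORGANIZATIONS"] == ["default"]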
|
flexible
|
{
"blob_id": "502e92d3e5d059d73016702ce0b2591a123810d3",
"index": 6892,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](scope='module')\ndef base_app(tmp_shared_volume_path):\n \"\"\"Flask application fixture.\"\"\"\n config_mapping = {'SERVER_NAME': 'localhost:5000', 'SECRET_KEY':\n 'SECRET_KEY', 'TESTING': True, 'SHARED_VOLUME_PATH':\n tmp_shared_volume_path, 'SQLALCHEMY_DATABASE_URI':\n 'sqlite:///testdb.db', 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n 'ORGANIZATIONS': ['default']}\n app_ = create_app(config_mapping)\n return app_\n",
"step-3": "<mask token>\nfrom __future__ import absolute_import, print_function\nimport os\nimport shutil\nimport pytest\nfrom reana_db.models import Base, User\nfrom sqlalchemy_utils import create_database, database_exists, drop_database\nfrom reana_workflow_controller.factory import create_app\n\n\[email protected](scope='module')\ndef base_app(tmp_shared_volume_path):\n \"\"\"Flask application fixture.\"\"\"\n config_mapping = {'SERVER_NAME': 'localhost:5000', 'SECRET_KEY':\n 'SECRET_KEY', 'TESTING': True, 'SHARED_VOLUME_PATH':\n tmp_shared_volume_path, 'SQLALCHEMY_DATABASE_URI':\n 'sqlite:///testdb.db', 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n 'ORGANIZATIONS': ['default']}\n app_ = create_app(config_mapping)\n return app_\n",
"step-4": "# -*- coding: utf-8 -*-\n#\n# This file is part of REANA.\n# Copyright (C) 2017, 2018 CERN.\n#\n# REANA is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Pytest configuration for REANA-Workflow-Controller.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport shutil\n\nimport pytest\nfrom reana_db.models import Base, User\nfrom sqlalchemy_utils import create_database, database_exists, drop_database\n\nfrom reana_workflow_controller.factory import create_app\n\n\[email protected](scope=\"module\")\ndef base_app(tmp_shared_volume_path):\n \"\"\"Flask application fixture.\"\"\"\n config_mapping = {\n \"SERVER_NAME\": \"localhost:5000\",\n \"SECRET_KEY\": \"SECRET_KEY\",\n \"TESTING\": True,\n \"SHARED_VOLUME_PATH\": tmp_shared_volume_path,\n \"SQLALCHEMY_DATABASE_URI\": \"sqlite:///testdb.db\",\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": False,\n \"ORGANIZATIONS\": [\"default\"],\n }\n app_ = create_app(config_mapping)\n return app_\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#14681
# Write a program that reads the coordinates of a point and prints which quadrant it lies in. Assume x and y are each either positive or negative (never zero).
x = int(input())
y = int(input())
if(x>0 and y>0):
print("1")
elif(x>0 and y<0):
print("4")
elif(x<0 and y>0):
print("2")
else:
print("3")
|
normal
|
{
"blob_id": "e9908e32204da8973f06d98430fc660c90b5e303",
"index": 3987,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif x > 0 and y > 0:\n print('1')\nelif x > 0 and y < 0:\n print('4')\nelif x < 0 and y > 0:\n print('2')\nelse:\n print('3')\n",
"step-3": "x = int(input())\ny = int(input())\nif x > 0 and y > 0:\n print('1')\nelif x > 0 and y < 0:\n print('4')\nelif x < 0 and y > 0:\n print('2')\nelse:\n print('3')\n",
"step-4": "#14681\n#점의 좌표를 입력받아 그 점이 어느 사분면에 속하는지 알아내는 프로그램을 작성하시오. 단, x좌표와 y좌표는 모두 양수나 음수라고 가정한다.\n\nx = int(input())\ny = int(input())\n\nif(x>0 and y>0):\n print(\"1\")\nelif(x>0 and y<0):\n print(\"4\")\nelif(x<0 and y>0):\n print(\"2\")\nelse:\n print(\"3\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class SubBatchNorm3d(nn.Module):
<|reserved_special_token_0|>
def __init__(self, num_splits, **args):
"""
Args:
num_splits (int): number of splits.
args (list): other arguments.
"""
super(SubBatchNorm3d, self).__init__()
self.num_splits = num_splits
num_features = args['num_features']
if args.get('affine', True):
self.affine = True
args['affine'] = False
self.weight = torch.nn.Parameter(torch.ones(num_features))
self.bias = torch.nn.Parameter(torch.zeros(num_features))
else:
self.affine = False
self.bn = nn.BatchNorm3d(**args)
args['num_features'] = num_features * num_splits
self.split_bn = nn.BatchNorm3d(**args)
def _get_aggregated_mean_std(self, means, stds, n):
"""
Calculate the aggregated mean and stds.
Args:
means (tensor): mean values.
stds (tensor): standard deviations.
n (int): number of sets of means and stds.
"""
mean = means.view(n, -1).sum(0) / n
std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2
).view(n, -1).sum(0) / n
return mean.detach(), std.detach()
def aggregate_stats(self):
"""
Synchronize running_mean, and running_var. Call this before eval.
"""
if self.split_bn.track_running_stats:
self.bn.running_mean.data, self.bn.running_var.data = (self.
_get_aggregated_mean_std(self.split_bn.running_mean, self.
split_bn.running_var, self.num_splits))
def forward(self, x):
if self.training:
n, c, t, h, w = x.shape
x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
x = self.split_bn(x)
x = x.view(n, c, t, h, w)
else:
x = self.bn(x)
if self.affine:
x = x * self.weight.view((-1, 1, 1, 1))
x = x + self.bias.view((-1, 1, 1, 1))
return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SubBatchNorm3d(nn.Module):
"""
The standard BN layer computes stats across all examples in a GPU. In some
cases it is desirable to compute stats across only a subset of examples
(e.g., in multigrid training https://arxiv.org/abs/1912.00998).
SubBatchNorm3d splits the batch dimension into N splits, and run BN on
each of them separately (so that the stats are computed on each subset of
examples (1/N of batch) independently. During evaluation, it aggregates
the stats from all splits into one BN.
"""
def __init__(self, num_splits, **args):
"""
Args:
num_splits (int): number of splits.
args (list): other arguments.
"""
super(SubBatchNorm3d, self).__init__()
self.num_splits = num_splits
num_features = args['num_features']
if args.get('affine', True):
self.affine = True
args['affine'] = False
self.weight = torch.nn.Parameter(torch.ones(num_features))
self.bias = torch.nn.Parameter(torch.zeros(num_features))
else:
self.affine = False
self.bn = nn.BatchNorm3d(**args)
args['num_features'] = num_features * num_splits
self.split_bn = nn.BatchNorm3d(**args)
def _get_aggregated_mean_std(self, means, stds, n):
"""
Calculate the aggregated mean and stds.
Args:
means (tensor): mean values.
stds (tensor): standard deviations.
n (int): number of sets of means and stds.
"""
mean = means.view(n, -1).sum(0) / n
std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2
).view(n, -1).sum(0) / n
return mean.detach(), std.detach()
def aggregate_stats(self):
"""
Synchronize running_mean, and running_var. Call this before eval.
"""
if self.split_bn.track_running_stats:
self.bn.running_mean.data, self.bn.running_var.data = (self.
_get_aggregated_mean_std(self.split_bn.running_mean, self.
split_bn.running_var, self.num_splits))
def forward(self, x):
if self.training:
n, c, t, h, w = x.shape
x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
x = self.split_bn(x)
x = x.view(n, c, t, h, w)
else:
x = self.bn(x)
if self.affine:
x = x * self.weight.view((-1, 1, 1, 1))
x = x + self.bias.view((-1, 1, 1, 1))
return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_norm(cfg):
"""
Args:
cfg (CfgNode): model building configs, details are in the comments of
the config file.
Returns:
nn.Module: the normalization layer.
"""
if cfg.BN.NORM_TYPE in {'batchnorm', 'sync_batchnorm_apex'}:
return nn.BatchNorm3d
elif cfg.BN.NORM_TYPE == 'sub_batchnorm':
return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
elif cfg.BN.NORM_TYPE == 'sync_batchnorm':
return partial(NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.
NUM_SYNC_DEVICES, global_sync=cfg.BN.GLOBAL_SYNC)
else:
raise NotImplementedError('Norm type {} is not supported'.format(
cfg.BN.NORM_TYPE))
class SubBatchNorm3d(nn.Module):
"""
The standard BN layer computes stats across all examples in a GPU. In some
cases it is desirable to compute stats across only a subset of examples
(e.g., in multigrid training https://arxiv.org/abs/1912.00998).
SubBatchNorm3d splits the batch dimension into N splits, and run BN on
each of them separately (so that the stats are computed on each subset of
examples (1/N of batch) independently. During evaluation, it aggregates
the stats from all splits into one BN.
"""
def __init__(self, num_splits, **args):
"""
Args:
num_splits (int): number of splits.
args (list): other arguments.
"""
super(SubBatchNorm3d, self).__init__()
self.num_splits = num_splits
num_features = args['num_features']
if args.get('affine', True):
self.affine = True
args['affine'] = False
self.weight = torch.nn.Parameter(torch.ones(num_features))
self.bias = torch.nn.Parameter(torch.zeros(num_features))
else:
self.affine = False
self.bn = nn.BatchNorm3d(**args)
args['num_features'] = num_features * num_splits
self.split_bn = nn.BatchNorm3d(**args)
def _get_aggregated_mean_std(self, means, stds, n):
"""
Calculate the aggregated mean and stds.
Args:
means (tensor): mean values.
stds (tensor): standard deviations.
n (int): number of sets of means and stds.
"""
mean = means.view(n, -1).sum(0) / n
std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2
).view(n, -1).sum(0) / n
return mean.detach(), std.detach()
def aggregate_stats(self):
"""
Synchronize running_mean, and running_var. Call this before eval.
"""
if self.split_bn.track_running_stats:
self.bn.running_mean.data, self.bn.running_var.data = (self.
_get_aggregated_mean_std(self.split_bn.running_mean, self.
split_bn.running_var, self.num_splits))
def forward(self, x):
if self.training:
n, c, t, h, w = x.shape
x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
x = self.split_bn(x)
x = x.view(n, c, t, h, w)
else:
x = self.bn(x)
if self.affine:
x = x * self.weight.view((-1, 1, 1, 1))
x = x + self.bias.view((-1, 1, 1, 1))
return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from functools import partial
import torch
import torch.nn as nn
from pytorchvideo.layers.batch_norm import NaiveSyncBatchNorm1d, NaiveSyncBatchNorm3d
def get_norm(cfg):
"""
Args:
cfg (CfgNode): model building configs, details are in the comments of
the config file.
Returns:
nn.Module: the normalization layer.
"""
if cfg.BN.NORM_TYPE in {'batchnorm', 'sync_batchnorm_apex'}:
return nn.BatchNorm3d
elif cfg.BN.NORM_TYPE == 'sub_batchnorm':
return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
elif cfg.BN.NORM_TYPE == 'sync_batchnorm':
return partial(NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.
NUM_SYNC_DEVICES, global_sync=cfg.BN.GLOBAL_SYNC)
else:
raise NotImplementedError('Norm type {} is not supported'.format(
cfg.BN.NORM_TYPE))
class SubBatchNorm3d(nn.Module):
"""
The standard BN layer computes stats across all examples in a GPU. In some
cases it is desirable to compute stats across only a subset of examples
(e.g., in multigrid training https://arxiv.org/abs/1912.00998).
SubBatchNorm3d splits the batch dimension into N splits, and run BN on
each of them separately (so that the stats are computed on each subset of
examples (1/N of batch) independently. During evaluation, it aggregates
the stats from all splits into one BN.
"""
def __init__(self, num_splits, **args):
"""
Args:
num_splits (int): number of splits.
args (list): other arguments.
"""
super(SubBatchNorm3d, self).__init__()
self.num_splits = num_splits
num_features = args['num_features']
if args.get('affine', True):
self.affine = True
args['affine'] = False
self.weight = torch.nn.Parameter(torch.ones(num_features))
self.bias = torch.nn.Parameter(torch.zeros(num_features))
else:
self.affine = False
self.bn = nn.BatchNorm3d(**args)
args['num_features'] = num_features * num_splits
self.split_bn = nn.BatchNorm3d(**args)
def _get_aggregated_mean_std(self, means, stds, n):
"""
Calculate the aggregated mean and stds.
Args:
means (tensor): mean values.
stds (tensor): standard deviations.
n (int): number of sets of means and stds.
"""
mean = means.view(n, -1).sum(0) / n
std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2
).view(n, -1).sum(0) / n
return mean.detach(), std.detach()
def aggregate_stats(self):
"""
Synchronize running_mean, and running_var. Call this before eval.
"""
if self.split_bn.track_running_stats:
self.bn.running_mean.data, self.bn.running_var.data = (self.
_get_aggregated_mean_std(self.split_bn.running_mean, self.
split_bn.running_var, self.num_splits))
def forward(self, x):
if self.training:
n, c, t, h, w = x.shape
x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
x = self.split_bn(x)
x = x.view(n, c, t, h, w)
else:
x = self.bn(x)
if self.affine:
x = x * self.weight.view((-1, 1, 1, 1))
x = x + self.bias.view((-1, 1, 1, 1))
return x
<|reserved_special_token_1|>
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""BatchNorm (BN) utility functions and custom batch-size BN implementations"""
from functools import partial
import torch
import torch.nn as nn
from pytorchvideo.layers.batch_norm import (
NaiveSyncBatchNorm1d,
NaiveSyncBatchNorm3d,
) # noqa
def get_norm(cfg):
"""
Args:
cfg (CfgNode): model building configs, details are in the comments of
the config file.
Returns:
nn.Module: the normalization layer.
"""
if cfg.BN.NORM_TYPE in {"batchnorm", "sync_batchnorm_apex"}:
return nn.BatchNorm3d
elif cfg.BN.NORM_TYPE == "sub_batchnorm":
return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
elif cfg.BN.NORM_TYPE == "sync_batchnorm":
return partial(
NaiveSyncBatchNorm3d,
num_sync_devices=cfg.BN.NUM_SYNC_DEVICES,
global_sync=cfg.BN.GLOBAL_SYNC,
)
else:
raise NotImplementedError(
"Norm type {} is not supported".format(cfg.BN.NORM_TYPE)
)
class SubBatchNorm3d(nn.Module):
"""
The standard BN layer computes stats across all examples in a GPU. In some
cases it is desirable to compute stats across only a subset of examples
(e.g., in multigrid training https://arxiv.org/abs/1912.00998).
SubBatchNorm3d splits the batch dimension into N splits, and run BN on
each of them separately (so that the stats are computed on each subset of
examples (1/N of batch) independently. During evaluation, it aggregates
the stats from all splits into one BN.
"""
def __init__(self, num_splits, **args):
"""
Args:
num_splits (int): number of splits.
args (list): other arguments.
"""
super(SubBatchNorm3d, self).__init__()
self.num_splits = num_splits
num_features = args["num_features"]
# Keep only one set of weight and bias.
if args.get("affine", True):
self.affine = True
args["affine"] = False
self.weight = torch.nn.Parameter(torch.ones(num_features))
self.bias = torch.nn.Parameter(torch.zeros(num_features))
else:
self.affine = False
self.bn = nn.BatchNorm3d(**args)
args["num_features"] = num_features * num_splits
self.split_bn = nn.BatchNorm3d(**args)
def _get_aggregated_mean_std(self, means, stds, n):
"""
Calculate the aggregated mean and stds.
Args:
means (tensor): mean values.
stds (tensor): standard deviations.
n (int): number of sets of means and stds.
"""
mean = means.view(n, -1).sum(0) / n
std = (
stds.view(n, -1).sum(0) / n
+ ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
)
return mean.detach(), std.detach()
def aggregate_stats(self):
"""
Synchronize running_mean, and running_var. Call this before eval.
"""
if self.split_bn.track_running_stats:
(
self.bn.running_mean.data,
self.bn.running_var.data,
) = self._get_aggregated_mean_std(
self.split_bn.running_mean,
self.split_bn.running_var,
self.num_splits,
)
def forward(self, x):
if self.training:
n, c, t, h, w = x.shape
x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
x = self.split_bn(x)
x = x.view(n, c, t, h, w)
else:
x = self.bn(x)
if self.affine:
x = x * self.weight.view((-1, 1, 1, 1))
x = x + self.bias.view((-1, 1, 1, 1))
return x
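

# Illustrative usage sketch (not part of the original module): a minimal
# example of how SubBatchNorm3d might be exercised, assuming a 5D activation
# tensor of shape (N, C, T, H, W) with N divisible by num_splits. All shapes
# and values below are hypothetical.
if __name__ == "__main__":
    sub_bn = SubBatchNorm3d(num_splits=2, num_features=8)
    x = torch.randn(4, 8, 2, 4, 4)  # (N, C, T, H, W)
    sub_bn.train()
    _ = sub_bn(x)  # stats are computed per split of the batch
    # Fold the per-split running stats into the single BN before evaluation.
    sub_bn.aggregate_stats()
    sub_bn.eval()
    y = sub_bn(x)  # uses the aggregated running_mean / running_var
    print(y.shape)  # torch.Size([4, 8, 2, 4, 4])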
|
flexible
|
{
"blob_id": "4e5e1be289b32655736d8c6c02d354a85d4268b7",
"index": 3027,
"step-1": "<mask token>\n\n\nclass SubBatchNorm3d(nn.Module):\n <mask token>\n\n def __init__(self, num_splits, **args):\n \"\"\"\n Args:\n num_splits (int): number of splits.\n args (list): other arguments.\n \"\"\"\n super(SubBatchNorm3d, self).__init__()\n self.num_splits = num_splits\n num_features = args['num_features']\n if args.get('affine', True):\n self.affine = True\n args['affine'] = False\n self.weight = torch.nn.Parameter(torch.ones(num_features))\n self.bias = torch.nn.Parameter(torch.zeros(num_features))\n else:\n self.affine = False\n self.bn = nn.BatchNorm3d(**args)\n args['num_features'] = num_features * num_splits\n self.split_bn = nn.BatchNorm3d(**args)\n\n def _get_aggregated_mean_std(self, means, stds, n):\n \"\"\"\n Calculate the aggregated mean and stds.\n Args:\n means (tensor): mean values.\n stds (tensor): standard deviations.\n n (int): number of sets of means and stds.\n \"\"\"\n mean = means.view(n, -1).sum(0) / n\n std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2\n ).view(n, -1).sum(0) / n\n return mean.detach(), std.detach()\n\n def aggregate_stats(self):\n \"\"\"\n Synchronize running_mean, and running_var. Call this before eval.\n \"\"\"\n if self.split_bn.track_running_stats:\n self.bn.running_mean.data, self.bn.running_var.data = (self.\n _get_aggregated_mean_std(self.split_bn.running_mean, self.\n split_bn.running_var, self.num_splits))\n\n def forward(self, x):\n if self.training:\n n, c, t, h, w = x.shape\n x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)\n x = self.split_bn(x)\n x = x.view(n, c, t, h, w)\n else:\n x = self.bn(x)\n if self.affine:\n x = x * self.weight.view((-1, 1, 1, 1))\n x = x + self.bias.view((-1, 1, 1, 1))\n return x\n",
"step-2": "<mask token>\n\n\nclass SubBatchNorm3d(nn.Module):\n \"\"\"\n The standard BN layer computes stats across all examples in a GPU. In some\n cases it is desirable to compute stats across only a subset of examples\n (e.g., in multigrid training https://arxiv.org/abs/1912.00998).\n SubBatchNorm3d splits the batch dimension into N splits, and run BN on\n each of them separately (so that the stats are computed on each subset of\n examples (1/N of batch) independently. During evaluation, it aggregates\n the stats from all splits into one BN.\n \"\"\"\n\n def __init__(self, num_splits, **args):\n \"\"\"\n Args:\n num_splits (int): number of splits.\n args (list): other arguments.\n \"\"\"\n super(SubBatchNorm3d, self).__init__()\n self.num_splits = num_splits\n num_features = args['num_features']\n if args.get('affine', True):\n self.affine = True\n args['affine'] = False\n self.weight = torch.nn.Parameter(torch.ones(num_features))\n self.bias = torch.nn.Parameter(torch.zeros(num_features))\n else:\n self.affine = False\n self.bn = nn.BatchNorm3d(**args)\n args['num_features'] = num_features * num_splits\n self.split_bn = nn.BatchNorm3d(**args)\n\n def _get_aggregated_mean_std(self, means, stds, n):\n \"\"\"\n Calculate the aggregated mean and stds.\n Args:\n means (tensor): mean values.\n stds (tensor): standard deviations.\n n (int): number of sets of means and stds.\n \"\"\"\n mean = means.view(n, -1).sum(0) / n\n std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2\n ).view(n, -1).sum(0) / n\n return mean.detach(), std.detach()\n\n def aggregate_stats(self):\n \"\"\"\n Synchronize running_mean, and running_var. Call this before eval.\n \"\"\"\n if self.split_bn.track_running_stats:\n self.bn.running_mean.data, self.bn.running_var.data = (self.\n _get_aggregated_mean_std(self.split_bn.running_mean, self.\n split_bn.running_var, self.num_splits))\n\n def forward(self, x):\n if self.training:\n n, c, t, h, w = x.shape\n x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)\n x = self.split_bn(x)\n x = x.view(n, c, t, h, w)\n else:\n x = self.bn(x)\n if self.affine:\n x = x * self.weight.view((-1, 1, 1, 1))\n x = x + self.bias.view((-1, 1, 1, 1))\n return x\n",
"step-3": "<mask token>\n\n\ndef get_norm(cfg):\n \"\"\"\n Args:\n cfg (CfgNode): model building configs, details are in the comments of\n the config file.\n Returns:\n nn.Module: the normalization layer.\n \"\"\"\n if cfg.BN.NORM_TYPE in {'batchnorm', 'sync_batchnorm_apex'}:\n return nn.BatchNorm3d\n elif cfg.BN.NORM_TYPE == 'sub_batchnorm':\n return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)\n elif cfg.BN.NORM_TYPE == 'sync_batchnorm':\n return partial(NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.\n NUM_SYNC_DEVICES, global_sync=cfg.BN.GLOBAL_SYNC)\n else:\n raise NotImplementedError('Norm type {} is not supported'.format(\n cfg.BN.NORM_TYPE))\n\n\nclass SubBatchNorm3d(nn.Module):\n \"\"\"\n The standard BN layer computes stats across all examples in a GPU. In some\n cases it is desirable to compute stats across only a subset of examples\n (e.g., in multigrid training https://arxiv.org/abs/1912.00998).\n SubBatchNorm3d splits the batch dimension into N splits, and run BN on\n each of them separately (so that the stats are computed on each subset of\n examples (1/N of batch) independently. During evaluation, it aggregates\n the stats from all splits into one BN.\n \"\"\"\n\n def __init__(self, num_splits, **args):\n \"\"\"\n Args:\n num_splits (int): number of splits.\n args (list): other arguments.\n \"\"\"\n super(SubBatchNorm3d, self).__init__()\n self.num_splits = num_splits\n num_features = args['num_features']\n if args.get('affine', True):\n self.affine = True\n args['affine'] = False\n self.weight = torch.nn.Parameter(torch.ones(num_features))\n self.bias = torch.nn.Parameter(torch.zeros(num_features))\n else:\n self.affine = False\n self.bn = nn.BatchNorm3d(**args)\n args['num_features'] = num_features * num_splits\n self.split_bn = nn.BatchNorm3d(**args)\n\n def _get_aggregated_mean_std(self, means, stds, n):\n \"\"\"\n Calculate the aggregated mean and stds.\n Args:\n means (tensor): mean values.\n stds (tensor): standard deviations.\n n (int): number of sets of means and stds.\n \"\"\"\n mean = means.view(n, -1).sum(0) / n\n std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2\n ).view(n, -1).sum(0) / n\n return mean.detach(), std.detach()\n\n def aggregate_stats(self):\n \"\"\"\n Synchronize running_mean, and running_var. Call this before eval.\n \"\"\"\n if self.split_bn.track_running_stats:\n self.bn.running_mean.data, self.bn.running_var.data = (self.\n _get_aggregated_mean_std(self.split_bn.running_mean, self.\n split_bn.running_var, self.num_splits))\n\n def forward(self, x):\n if self.training:\n n, c, t, h, w = x.shape\n x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)\n x = self.split_bn(x)\n x = x.view(n, c, t, h, w)\n else:\n x = self.bn(x)\n if self.affine:\n x = x * self.weight.view((-1, 1, 1, 1))\n x = x + self.bias.view((-1, 1, 1, 1))\n return x\n",
"step-4": "<mask token>\nfrom functools import partial\nimport torch\nimport torch.nn as nn\nfrom pytorchvideo.layers.batch_norm import NaiveSyncBatchNorm1d, NaiveSyncBatchNorm3d\n\n\ndef get_norm(cfg):\n \"\"\"\n Args:\n cfg (CfgNode): model building configs, details are in the comments of\n the config file.\n Returns:\n nn.Module: the normalization layer.\n \"\"\"\n if cfg.BN.NORM_TYPE in {'batchnorm', 'sync_batchnorm_apex'}:\n return nn.BatchNorm3d\n elif cfg.BN.NORM_TYPE == 'sub_batchnorm':\n return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)\n elif cfg.BN.NORM_TYPE == 'sync_batchnorm':\n return partial(NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.\n NUM_SYNC_DEVICES, global_sync=cfg.BN.GLOBAL_SYNC)\n else:\n raise NotImplementedError('Norm type {} is not supported'.format(\n cfg.BN.NORM_TYPE))\n\n\nclass SubBatchNorm3d(nn.Module):\n \"\"\"\n The standard BN layer computes stats across all examples in a GPU. In some\n cases it is desirable to compute stats across only a subset of examples\n (e.g., in multigrid training https://arxiv.org/abs/1912.00998).\n SubBatchNorm3d splits the batch dimension into N splits, and run BN on\n each of them separately (so that the stats are computed on each subset of\n examples (1/N of batch) independently. During evaluation, it aggregates\n the stats from all splits into one BN.\n \"\"\"\n\n def __init__(self, num_splits, **args):\n \"\"\"\n Args:\n num_splits (int): number of splits.\n args (list): other arguments.\n \"\"\"\n super(SubBatchNorm3d, self).__init__()\n self.num_splits = num_splits\n num_features = args['num_features']\n if args.get('affine', True):\n self.affine = True\n args['affine'] = False\n self.weight = torch.nn.Parameter(torch.ones(num_features))\n self.bias = torch.nn.Parameter(torch.zeros(num_features))\n else:\n self.affine = False\n self.bn = nn.BatchNorm3d(**args)\n args['num_features'] = num_features * num_splits\n self.split_bn = nn.BatchNorm3d(**args)\n\n def _get_aggregated_mean_std(self, means, stds, n):\n \"\"\"\n Calculate the aggregated mean and stds.\n Args:\n means (tensor): mean values.\n stds (tensor): standard deviations.\n n (int): number of sets of means and stds.\n \"\"\"\n mean = means.view(n, -1).sum(0) / n\n std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2\n ).view(n, -1).sum(0) / n\n return mean.detach(), std.detach()\n\n def aggregate_stats(self):\n \"\"\"\n Synchronize running_mean, and running_var. Call this before eval.\n \"\"\"\n if self.split_bn.track_running_stats:\n self.bn.running_mean.data, self.bn.running_var.data = (self.\n _get_aggregated_mean_std(self.split_bn.running_mean, self.\n split_bn.running_var, self.num_splits))\n\n def forward(self, x):\n if self.training:\n n, c, t, h, w = x.shape\n x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)\n x = self.split_bn(x)\n x = x.view(n, c, t, h, w)\n else:\n x = self.bn(x)\n if self.affine:\n x = x * self.weight.view((-1, 1, 1, 1))\n x = x + self.bias.view((-1, 1, 1, 1))\n return x\n",
"step-5": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\n\"\"\"BatchNorm (BN) utility functions and custom batch-size BN implementations\"\"\"\n\nfrom functools import partial\nimport torch\nimport torch.nn as nn\n\nfrom pytorchvideo.layers.batch_norm import (\n NaiveSyncBatchNorm1d,\n NaiveSyncBatchNorm3d,\n) # noqa\n\n\ndef get_norm(cfg):\n \"\"\"\n Args:\n cfg (CfgNode): model building configs, details are in the comments of\n the config file.\n Returns:\n nn.Module: the normalization layer.\n \"\"\"\n if cfg.BN.NORM_TYPE in {\"batchnorm\", \"sync_batchnorm_apex\"}:\n return nn.BatchNorm3d\n elif cfg.BN.NORM_TYPE == \"sub_batchnorm\":\n return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)\n elif cfg.BN.NORM_TYPE == \"sync_batchnorm\":\n return partial(\n NaiveSyncBatchNorm3d,\n num_sync_devices=cfg.BN.NUM_SYNC_DEVICES,\n global_sync=cfg.BN.GLOBAL_SYNC,\n )\n else:\n raise NotImplementedError(\n \"Norm type {} is not supported\".format(cfg.BN.NORM_TYPE)\n )\n\n\nclass SubBatchNorm3d(nn.Module):\n \"\"\"\n The standard BN layer computes stats across all examples in a GPU. In some\n cases it is desirable to compute stats across only a subset of examples\n (e.g., in multigrid training https://arxiv.org/abs/1912.00998).\n SubBatchNorm3d splits the batch dimension into N splits, and run BN on\n each of them separately (so that the stats are computed on each subset of\n examples (1/N of batch) independently. During evaluation, it aggregates\n the stats from all splits into one BN.\n \"\"\"\n\n def __init__(self, num_splits, **args):\n \"\"\"\n Args:\n num_splits (int): number of splits.\n args (list): other arguments.\n \"\"\"\n super(SubBatchNorm3d, self).__init__()\n self.num_splits = num_splits\n num_features = args[\"num_features\"]\n # Keep only one set of weight and bias.\n if args.get(\"affine\", True):\n self.affine = True\n args[\"affine\"] = False\n self.weight = torch.nn.Parameter(torch.ones(num_features))\n self.bias = torch.nn.Parameter(torch.zeros(num_features))\n else:\n self.affine = False\n self.bn = nn.BatchNorm3d(**args)\n args[\"num_features\"] = num_features * num_splits\n self.split_bn = nn.BatchNorm3d(**args)\n\n def _get_aggregated_mean_std(self, means, stds, n):\n \"\"\"\n Calculate the aggregated mean and stds.\n Args:\n means (tensor): mean values.\n stds (tensor): standard deviations.\n n (int): number of sets of means and stds.\n \"\"\"\n mean = means.view(n, -1).sum(0) / n\n std = (\n stds.view(n, -1).sum(0) / n\n + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n\n )\n return mean.detach(), std.detach()\n\n def aggregate_stats(self):\n \"\"\"\n Synchronize running_mean, and running_var. Call this before eval.\n \"\"\"\n if self.split_bn.track_running_stats:\n (\n self.bn.running_mean.data,\n self.bn.running_var.data,\n ) = self._get_aggregated_mean_std(\n self.split_bn.running_mean,\n self.split_bn.running_var,\n self.num_splits,\n )\n\n def forward(self, x):\n if self.training:\n n, c, t, h, w = x.shape\n x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)\n x = self.split_bn(x)\n x = x.view(n, c, t, h, w)\n else:\n x = self.bn(x)\n if self.affine:\n x = x * self.weight.view((-1, 1, 1, 1))\n x = x + self.bias.view((-1, 1, 1, 1))\n return x\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def two_sum(nums, target):
dct = {}
for i, num1 in enumerate(nums):
num2 = target - num1
if num2 in dct:
return [dct[num2], i]
dct[num1] = i
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def two_sum(nums, target):
dct = {}
for i, num1 in enumerate(nums):
num2 = target - num1
if num2 in dct:
return [dct[num2], i]
dct[num1] = i
print(two_sum([14, 2, 31, 4], 6))
<|reserved_special_token_1|>
"""
时间最优
思路:
将和为目标值的那 两个 整数定义为 num1 和 num2
创建一个新字典,内容存在数组中的数字及索引
将数组nums转换为字典,
遍历字典, num1为字典中的元素(其实与数组总的元素一样),
num2 为 target减去num1, 判定num2是否在字典中,如果存在,返回字典中num2的值(也就是在数组nums中的下标)和 i(也就是num1在数组中的下标)
如果不存在,设置字典num1的值为i
"""
def two_sum(nums, target):
dct = {}
for i, num1 in enumerate(nums):
num2 = target - num1
if num2 in dct:
return [dct[num2], i]
dct[num1] = i
print(two_sum([14, 2, 31, 4], 6))
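
# Illustrative trace of the call above (hypothetical walk-through):
#   target = 6, nums = [14, 2, 31, 4]
#   i=0, num1=14 -> num2=-8  not in dct -> dct = {14: 0}
#   i=1, num1=2  -> num2=4   not in dct -> dct = {14: 0, 2: 1}
#   i=2, num1=31 -> num2=-25 not in dct -> dct = {14: 0, 2: 1, 31: 2}
#   i=3, num1=4  -> num2=2   found at index 1 -> returns [1, 3]
# A single pass over nums, i.e. O(n) time and O(n) extra space.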
|
flexible
|
{
"blob_id": "dac8dbb0eba78d4f8dfbe3284325735324a87dc2",
"index": 8674,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef two_sum(nums, target):\n dct = {}\n for i, num1 in enumerate(nums):\n num2 = target - num1\n if num2 in dct:\n return [dct[num2], i]\n dct[num1] = i\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef two_sum(nums, target):\n dct = {}\n for i, num1 in enumerate(nums):\n num2 = target - num1\n if num2 in dct:\n return [dct[num2], i]\n dct[num1] = i\n\n\nprint(two_sum([14, 2, 31, 4], 6))\n",
"step-4": "\"\"\"\r\n时间最优\r\n\r\n思路:\r\n将和为目标值的那 两个 整数定义为 num1 和 num2\r\n创建一个新字典,内容存在数组中的数字及索引\r\n将数组nums转换为字典,\r\n遍历字典, num1为字典中的元素(其实与数组总的元素一样),\r\nnum2 为 target减去num1, 判定num2是否在字典中,如果存在,返回字典中num2的值(也就是在数组nums中的下标)和 i(也就是num1在数组中的下标)\r\n如果不存在,设置字典num1的值为i\r\n\"\"\"\r\n\r\ndef two_sum(nums, target):\r\n dct = {}\r\n for i, num1 in enumerate(nums):\r\n num2 = target - num1\r\n if num2 in dct:\r\n return [dct[num2], i]\r\n dct[num1] = i\r\n\r\n\r\nprint(two_sum([14, 2, 31, 4], 6))\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Table(Base):
<|reserved_special_token_0|>
def __init__(self, dataset_id, table_id, **kwargs):
super().__init__(**kwargs)
self.table_id = table_id.replace('-', '_')
self.dataset_id = dataset_id.replace('-', '_')
self.dataset_folder = Path(self.metadata_path / self.dataset_id)
self.table_folder = self.dataset_folder / table_id
self.table_full_name = dict(prod=
f"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}"
, staging=
f"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}"
)
self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))
self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)
@property
def table_config(self):
"""
Load table_config.yaml
"""
return self._load_yaml(self.table_folder / 'table_config.yaml')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _load_schema(self, mode='staging'):
"""Load schema from table_config.yaml
Args:
            mode (str): Which dataset's schema to load [prod|staging].
"""
self._check_mode(mode)
json_path = self.table_folder / f'schema-{mode}.json'
columns = self.table_config['columns']
if mode == 'staging':
new_columns = []
for c in columns:
is_in_staging = True if c.get('is_in_staging') is None else c[
'is_in_staging']
if is_in_staging and not c.get('is_partition'):
c['type'] = 'STRING'
new_columns.append(c)
del columns
columns = new_columns
elif mode == 'prod':
schema = self._get_table_obj(mode).schema
column_names = [c['name'] for c in columns]
schema_names = [s.name for s in schema]
not_in_columns = [name for name in schema_names if name not in
column_names]
not_in_schema = [name for name in column_names if name not in
schema_names]
if not_in_columns:
raise BaseDosDadosException(
'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'
.format(error_columns=not_in_columns, project_id=self.
table_config['project_id_prod'], dataset_id=self.
table_config['dataset_id'], table_id=self.table_config[
'table_id']))
if not_in_schema:
raise BaseDosDadosException(
'Column {error_columns} was not found in publish.sql. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'
.format(error_columns=not_in_schema, project_id=self.
table_config['project_id_prod'], dataset_id=self.
table_config['dataset_id'], table_id=self.table_config[
'table_id']))
for c in columns:
for s in schema:
if c['name'] == s.name:
c['type'] = s.field_type
c['mode'] = s.mode
break
json.dump(columns, json_path.open('w', encoding='utf-8'))
return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))
def _make_publish_sql(self):
"""Create publish.sql with columns and bigquery_type"""
publish_txt = """
/*
Query para publicar a tabela.
Esse é o lugar para:
- modificar nomes, ordem e tipos de colunas
- dar join com outras tabelas
- criar colunas extras (e.g. logs, proporções, etc.)
Qualquer coluna definida aqui deve também existir em `table_config.yaml`.
# Além disso, sinta-se à vontade para alterar alguns nomes obscuros
# para algo um pouco mais explícito.
TIPOS:
- Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.
- Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`
- Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
*/
"""
publish_txt = inspect.cleandoc(publish_txt)
publish_txt = textwrap.dedent(publish_txt)
project_id_prod = self.client['bigquery_prod'].project
publish_txt += f"""
CREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS
SELECT
"""
if self._is_partitioned():
columns = sorted(self.table_config['columns'], key=lambda k: (k
['is_partition'] is not None, k['is_partition']), reverse=True)
else:
columns = self.table_config['columns']
for col in columns:
name = col['name']
bigquery_type = 'STRING' if col['bigquery_type'] is None else col[
'bigquery_type'].upper()
publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\n'
publish_txt = publish_txt[:-2] + '\n'
project_id_staging = self.client['bigquery_staging'].project
publish_txt += (
f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'
)
(self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(
publish_txt)
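    # Illustrative output of the generated publish.sql (hypothetical columns
    # `id` INT64 and `nome` STRING; project/dataset/table ids are assumed).
    # The commented header from the template above precedes these lines:
    #   CREATE VIEW <prod_project>.<dataset_id>.<table_id> AS
    #   SELECT
    #   SAFE_CAST(id AS INT64) id,
    #   SAFE_CAST(nome AS STRING) nome
    #   FROM <staging_project>.<dataset_id>_staging.<table_id> AS t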
<|reserved_special_token_0|>
@staticmethod
def _sheet_to_df(columns_config_url_or_path):
"""
Convert sheet to dataframe
"""
url = columns_config_url_or_path.replace('edit#gid=',
'export?format=csv&gid=')
try:
return pd.read_csv(StringIO(requests.get(url, timeout=10).
content.decode('utf-8')))
except Exception as e:
raise BaseDosDadosException(
'Check if your google sheet Share are: Anyone on the internet with this link can view'
) from e
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update(self, mode='all'):
"""Updates BigQuery schema and description.
Args:
mode (str): Optional.
                Which table to update [prod|staging|all]
not_found_ok (bool): Optional.
What to do if table is not found
"""
self._check_mode(mode)
mode = ['prod', 'staging'] if mode == 'all' else [mode]
for m in mode:
try:
table = self._get_table_obj(m)
except google.api_core.exceptions.NotFound:
continue
table.description = self._render_template(Path(
'table/table_description.txt'), self.table_config)
with open(self.metadata_path / self.dataset_id / self.table_id /
'table_description.txt', 'w', encoding='utf-8') as f:
f.write(table.description)
table.schema = self._load_schema(m)
fields = ['description', 'schema'] if m == 'prod' else [
'description']
self.client[f'bigquery_{m}'].update_table(table, fields=fields)
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='updated')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def append(self, filepath, partitions=None, if_exists='replace',
chunk_size=None, **upload_args):
"""Appends new data to existing BigQuery table.
As long as the data has the same schema. It appends the data in the
filepath to the existing table.
Args:
filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with
partitions (str, pathlib.PosixPath, dict): Optional.
Hive structured partition as a string or dict
* str : `<key>=<value>/<key2>=<value2>`
* dict: `dict(key=value, key2=value2)`
            if_exists (str): Optional.
What to do if data with same name exists in storage
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
chunk_size (int): Optional
The size of a chunk of data whenever iterating (in bytes).
This must be a multiple of 256 KB per the API specification.
If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.
"""
if not self.table_exists('staging'):
raise BaseDosDadosException(
'You cannot append to a table that does not exist')
Storage(self.dataset_id, self.table_id, **self.main_vars).upload(
filepath, mode='staging', partitions=partitions, if_exists=
if_exists, chunk_size=chunk_size, **upload_args)
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='appended')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Table(Base):
<|reserved_special_token_0|>
def __init__(self, dataset_id, table_id, **kwargs):
super().__init__(**kwargs)
self.table_id = table_id.replace('-', '_')
self.dataset_id = dataset_id.replace('-', '_')
self.dataset_folder = Path(self.metadata_path / self.dataset_id)
self.table_folder = self.dataset_folder / table_id
self.table_full_name = dict(prod=
f"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}"
, staging=
f"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}"
)
self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))
self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)
@property
def table_config(self):
"""
Load table_config.yaml
"""
return self._load_yaml(self.table_folder / 'table_config.yaml')
def _get_table_obj(self, mode):
"""
Get table object from BigQuery
"""
return self.client[f'bigquery_{mode}'].get_table(self.
table_full_name[mode])
def _is_partitioned(self):
"""
Check if table is partitioned
"""
partitions = self.table_config['partitions']
if partitions is None or len(partitions) == 0:
return False
if isinstance(partitions, list):
return all(item is not None for item in partitions)
raise ValueError('Partitions must be a list or None')
def _load_schema(self, mode='staging'):
"""Load schema from table_config.yaml
Args:
            mode (str): Which dataset's schema to load [prod|staging].
"""
self._check_mode(mode)
json_path = self.table_folder / f'schema-{mode}.json'
columns = self.table_config['columns']
if mode == 'staging':
new_columns = []
for c in columns:
is_in_staging = True if c.get('is_in_staging') is None else c[
'is_in_staging']
if is_in_staging and not c.get('is_partition'):
c['type'] = 'STRING'
new_columns.append(c)
del columns
columns = new_columns
elif mode == 'prod':
schema = self._get_table_obj(mode).schema
column_names = [c['name'] for c in columns]
schema_names = [s.name for s in schema]
not_in_columns = [name for name in schema_names if name not in
column_names]
not_in_schema = [name for name in column_names if name not in
schema_names]
if not_in_columns:
raise BaseDosDadosException(
'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'
.format(error_columns=not_in_columns, project_id=self.
table_config['project_id_prod'], dataset_id=self.
table_config['dataset_id'], table_id=self.table_config[
'table_id']))
if not_in_schema:
raise BaseDosDadosException(
'Column {error_columns} was not found in publish.sql. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'
.format(error_columns=not_in_schema, project_id=self.
table_config['project_id_prod'], dataset_id=self.
table_config['dataset_id'], table_id=self.table_config[
'table_id']))
for c in columns:
for s in schema:
if c['name'] == s.name:
c['type'] = s.field_type
c['mode'] = s.mode
break
json.dump(columns, json_path.open('w', encoding='utf-8'))
return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))
def _make_publish_sql(self):
"""Create publish.sql with columns and bigquery_type"""
publish_txt = """
/*
Query para publicar a tabela.
Esse é o lugar para:
- modificar nomes, ordem e tipos de colunas
- dar join com outras tabelas
- criar colunas extras (e.g. logs, proporções, etc.)
Qualquer coluna definida aqui deve também existir em `table_config.yaml`.
# Além disso, sinta-se à vontade para alterar alguns nomes obscuros
# para algo um pouco mais explícito.
TIPOS:
- Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.
- Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`
- Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
*/
"""
publish_txt = inspect.cleandoc(publish_txt)
publish_txt = textwrap.dedent(publish_txt)
project_id_prod = self.client['bigquery_prod'].project
publish_txt += f"""
CREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS
SELECT
"""
if self._is_partitioned():
columns = sorted(self.table_config['columns'], key=lambda k: (k
['is_partition'] is not None, k['is_partition']), reverse=True)
else:
columns = self.table_config['columns']
for col in columns:
name = col['name']
bigquery_type = 'STRING' if col['bigquery_type'] is None else col[
'bigquery_type'].upper()
publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\n'
publish_txt = publish_txt[:-2] + '\n'
project_id_staging = self.client['bigquery_staging'].project
publish_txt += (
f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'
)
(self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(
publish_txt)
<|reserved_special_token_0|>
@staticmethod
def _sheet_to_df(columns_config_url_or_path):
"""
Convert sheet to dataframe
"""
url = columns_config_url_or_path.replace('edit#gid=',
'export?format=csv&gid=')
try:
return pd.read_csv(StringIO(requests.get(url, timeout=10).
content.decode('utf-8')))
except Exception as e:
raise BaseDosDadosException(
'Check if your google sheet Share are: Anyone on the internet with this link can view'
) from e
def table_exists(self, mode):
"""Check if table exists in BigQuery.
Args:
mode (str): Which dataset to check [prod|staging].
"""
try:
ref = self._get_table_obj(mode=mode)
except google.api_core.exceptions.NotFound:
ref = None
return bool(ref)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update(self, mode='all'):
"""Updates BigQuery schema and description.
Args:
mode (str): Optional.
                Which table to update [prod|staging|all]
not_found_ok (bool): Optional.
What to do if table is not found
"""
self._check_mode(mode)
mode = ['prod', 'staging'] if mode == 'all' else [mode]
for m in mode:
try:
table = self._get_table_obj(m)
except google.api_core.exceptions.NotFound:
continue
table.description = self._render_template(Path(
'table/table_description.txt'), self.table_config)
with open(self.metadata_path / self.dataset_id / self.table_id /
'table_description.txt', 'w', encoding='utf-8') as f:
f.write(table.description)
table.schema = self._load_schema(m)
fields = ['description', 'schema'] if m == 'prod' else [
'description']
self.client[f'bigquery_{m}'].update_table(table, fields=fields)
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='updated')
def publish(self, if_exists='raise'):
"""Creates BigQuery table at production dataset.
Table should be located at `<dataset_id>.<table_id>`.
It creates a view that uses the query from
`<metadata_path>/<dataset_id>/<table_id>/publish.sql`.
Make sure that all columns from the query also exists at
`<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including
the partitions.
Args:
if_exists (str): Optional.
What to do if table exists.
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
Todo:
* Check if all required fields are filled
"""
if if_exists == 'replace':
self.delete(mode='prod')
self.client['bigquery_prod'].query((self.table_folder /
'publish.sql').open('r', encoding='utf-8').read()).result()
self.update()
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='published')
<|reserved_special_token_0|>
def append(self, filepath, partitions=None, if_exists='replace',
chunk_size=None, **upload_args):
"""Appends new data to existing BigQuery table.
As long as the data has the same schema. It appends the data in the
filepath to the existing table.
Args:
filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with
partitions (str, pathlib.PosixPath, dict): Optional.
Hive structured partition as a string or dict
* str : `<key>=<value>/<key2>=<value2>`
* dict: `dict(key=value, key2=value2)`
            if_exists (str): Optional.
What to do if data with same name exists in storage
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
chunk_size (int): Optional
The size of a chunk of data whenever iterating (in bytes).
This must be a multiple of 256 KB per the API specification.
If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.
"""
if not self.table_exists('staging'):
raise BaseDosDadosException(
'You cannot append to a table that does not exist')
Storage(self.dataset_id, self.table_id, **self.main_vars).upload(
filepath, mode='staging', partitions=partitions, if_exists=
if_exists, chunk_size=chunk_size, **upload_args)
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='appended')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Table(Base):
<|reserved_special_token_0|>
def __init__(self, dataset_id, table_id, **kwargs):
super().__init__(**kwargs)
self.table_id = table_id.replace('-', '_')
self.dataset_id = dataset_id.replace('-', '_')
self.dataset_folder = Path(self.metadata_path / self.dataset_id)
self.table_folder = self.dataset_folder / table_id
self.table_full_name = dict(prod=
f"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}"
, staging=
f"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}"
)
self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))
self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)
@property
def table_config(self):
"""
Load table_config.yaml
"""
return self._load_yaml(self.table_folder / 'table_config.yaml')
def _get_table_obj(self, mode):
"""
Get table object from BigQuery
"""
return self.client[f'bigquery_{mode}'].get_table(self.
table_full_name[mode])
def _is_partitioned(self):
"""
Check if table is partitioned
"""
partitions = self.table_config['partitions']
if partitions is None or len(partitions) == 0:
return False
if isinstance(partitions, list):
return all(item is not None for item in partitions)
raise ValueError('Partitions must be a list or None')
def _load_schema(self, mode='staging'):
"""Load schema from table_config.yaml
Args:
            mode (str): Which dataset's schema to load [prod|staging].
"""
self._check_mode(mode)
json_path = self.table_folder / f'schema-{mode}.json'
columns = self.table_config['columns']
if mode == 'staging':
new_columns = []
for c in columns:
is_in_staging = True if c.get('is_in_staging') is None else c[
'is_in_staging']
if is_in_staging and not c.get('is_partition'):
c['type'] = 'STRING'
new_columns.append(c)
del columns
columns = new_columns
elif mode == 'prod':
schema = self._get_table_obj(mode).schema
column_names = [c['name'] for c in columns]
schema_names = [s.name for s in schema]
not_in_columns = [name for name in schema_names if name not in
column_names]
not_in_schema = [name for name in column_names if name not in
schema_names]
if not_in_columns:
raise BaseDosDadosException(
'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'
.format(error_columns=not_in_columns, project_id=self.
table_config['project_id_prod'], dataset_id=self.
table_config['dataset_id'], table_id=self.table_config[
'table_id']))
if not_in_schema:
raise BaseDosDadosException(
'Column {error_columns} was not found in publish.sql. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'
.format(error_columns=not_in_schema, project_id=self.
table_config['project_id_prod'], dataset_id=self.
table_config['dataset_id'], table_id=self.table_config[
'table_id']))
for c in columns:
for s in schema:
if c['name'] == s.name:
c['type'] = s.field_type
c['mode'] = s.mode
break
json.dump(columns, json_path.open('w', encoding='utf-8'))
return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))
def _make_publish_sql(self):
"""Create publish.sql with columns and bigquery_type"""
publish_txt = """
/*
Query para publicar a tabela.
Esse é o lugar para:
- modificar nomes, ordem e tipos de colunas
- dar join com outras tabelas
- criar colunas extras (e.g. logs, proporções, etc.)
Qualquer coluna definida aqui deve também existir em `table_config.yaml`.
# Além disso, sinta-se à vontade para alterar alguns nomes obscuros
# para algo um pouco mais explícito.
TIPOS:
- Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.
- Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`
- Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
*/
"""
publish_txt = inspect.cleandoc(publish_txt)
publish_txt = textwrap.dedent(publish_txt)
project_id_prod = self.client['bigquery_prod'].project
publish_txt += f"""
CREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS
SELECT
"""
if self._is_partitioned():
columns = sorted(self.table_config['columns'], key=lambda k: (k
['is_partition'] is not None, k['is_partition']), reverse=True)
else:
columns = self.table_config['columns']
for col in columns:
name = col['name']
bigquery_type = 'STRING' if col['bigquery_type'] is None else col[
'bigquery_type'].upper()
publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\n'
publish_txt = publish_txt[:-2] + '\n'
project_id_staging = self.client['bigquery_staging'].project
publish_txt += (
f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'
)
(self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(
publish_txt)
def _make_template(self, columns, partition_columns,
if_table_config_exists, force_columns):
self.metadata.create(if_exists=if_table_config_exists, columns=
partition_columns + columns, partition_columns=
partition_columns, force_columns=force_columns, table_only=False)
self._make_publish_sql()
@staticmethod
def _sheet_to_df(columns_config_url_or_path):
"""
Convert sheet to dataframe
"""
url = columns_config_url_or_path.replace('edit#gid=',
'export?format=csv&gid=')
try:
return pd.read_csv(StringIO(requests.get(url, timeout=10).
content.decode('utf-8')))
except Exception as e:
raise BaseDosDadosException(
'Check if your google sheet Share are: Anyone on the internet with this link can view'
) from e
def table_exists(self, mode):
"""Check if table exists in BigQuery.
Args:
mode (str): Which dataset to check [prod|staging].
"""
try:
ref = self._get_table_obj(mode=mode)
except google.api_core.exceptions.NotFound:
ref = None
return bool(ref)
def update_columns(self, columns_config_url_or_path=None):
"""
        Fills columns in table_config.yaml automatically using a public google sheets URL or a local file. Also regenerates
        publish.sql and autofills the types using bigquery_type.
The sheet must contain the columns:
- name: column name
- description: column description
- bigquery_type: column bigquery type
            - measurement_unit: column measurement unit
- covered_by_dictionary: column related dictionary
- directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>
- temporal_coverage: column temporal coverage
- has_sensitive_data: the column has sensitive data
- observations: column observations
Args:
            columns_config_url_or_path (str): Path to the local architecture file or a public google sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
"""
ruamel = ryaml.YAML()
ruamel.preserve_quotes = True
ruamel.indent(mapping=4, sequence=6, offset=4)
table_config_yaml = ruamel.load((self.table_folder /
'table_config.yaml').open(encoding='utf-8'))
if ('https://docs.google.com/spreadsheets/d/' in
columns_config_url_or_path):
if ('edit#gid=' not in columns_config_url_or_path or
'https://docs.google.com/spreadsheets/d/' not in
columns_config_url_or_path or not
columns_config_url_or_path.split('=')[1].isdigit()):
raise BaseDosDadosException(
'The Google sheet url not in correct format.The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>'
)
df = self._sheet_to_df(columns_config_url_or_path)
else:
file_type = columns_config_url_or_path.split('.')[-1]
if file_type == 'csv':
df = pd.read_csv(columns_config_url_or_path, encoding='utf-8')
elif file_type in ['xls', 'xlsx', 'xlsm', 'xlsb', 'odf', 'ods',
'odt']:
df = pd.read_excel(columns_config_url_or_path)
else:
raise BaseDosDadosException(
'File not suported. Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported.'
)
df = df.fillna('NULL')
required_columns = ['name', 'bigquery_type', 'description',
'temporal_coverage', 'covered_by_dictionary',
'directory_column', 'measurement_unit', 'has_sensitive_data',
'observations']
not_found_columns = required_columns.copy()
for sheet_column in df.columns.tolist():
for required_column in required_columns:
if sheet_column == required_column:
not_found_columns.remove(required_column)
if not_found_columns:
raise BaseDosDadosException(
f"The following required columns are not found: {', '.join(not_found_columns)}."
)
columns_parameters = zip(*[df[required_column].tolist() for
required_column in required_columns])
for name, bigquery_type, description, temporal_coverage, covered_by_dictionary, directory_column, measurement_unit, has_sensitive_data, observations in columns_parameters:
for col in table_config_yaml['columns']:
if col['name'] == name:
col['bigquery_type'] = col['bigquery_type'
] if bigquery_type == 'NULL' else bigquery_type.lower()
col['description'] = col['description'
] if description == 'NULL' else description
col['temporal_coverage'] = col['temporal_coverage'
] if temporal_coverage == 'NULL' else [
temporal_coverage]
col['covered_by_dictionary'] = ('no' if
covered_by_dictionary == 'NULL' else
covered_by_dictionary)
dataset = directory_column.split('.')[0]
col['directory_column']['dataset_id'] = col[
'directory_column']['dataset_id'
] if dataset == 'NULL' else dataset
table = directory_column.split('.')[-1].split(':')[0]
col['directory_column']['table_id'] = col[
'directory_column']['table_id'
] if table == 'NULL' else table
column = directory_column.split('.')[-1].split(':')[-1]
col['directory_column']['column_name'] = col[
'directory_column']['column_name'
] if column == 'NULL' else column
col['measurement_unit'] = col['measurement_unit'
] if measurement_unit == 'NULL' else measurement_unit
col['has_sensitive_data'] = ('no' if has_sensitive_data ==
'NULL' else has_sensitive_data)
col['observations'] = col['observations'
] if observations == 'NULL' else observations
with open(self.table_folder / 'table_config.yaml', 'w', encoding=
'utf-8') as f:
ruamel.dump(table_config_yaml, f)
self._make_publish_sql()
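    # Illustrative architecture-sheet content (hypothetical values) matching
    # the required columns listed in the docstring above; one header row plus
    # one data row, whether it comes from a CSV file or a Google sheet:
    #   name,bigquery_type,description,temporal_coverage,covered_by_dictionary,directory_column,measurement_unit,has_sensitive_data,observations
    #   id_municipio,string,Municipality identifier,2000(1)2020,no,br_bd_diretorios_brasil.municipio:id_municipio,NULL,no,NULL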
def init(self, data_sample_path=None, if_folder_exists='raise',
if_table_config_exists='raise', source_format='csv', force_columns=
False, columns_config_url_or_path=None):
"""Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.
The folder should contain:
* `table_config.yaml`
* `publish.sql`
You can also point to a sample of the data to auto complete columns names.
Args:
data_sample_path (str, pathlib.PosixPath): Optional.
                Data sample path to auto-complete column names.
It supports Comma Delimited CSV, Apache Avro and
Apache Parquet.
if_folder_exists (str): Optional.
What to do if table folder exists
* 'raise' : Raises FileExistsError
* 'replace' : Replace folder
* 'pass' : Do nothing
if_table_config_exists (str): Optional
What to do if table_config.yaml and publish.sql exists
* 'raise' : Raises FileExistsError
* 'replace' : Replace files with blank template
* 'pass' : Do nothing
source_format (str): Optional
Data source format. Only 'csv', 'avro' and 'parquet'
are supported. Defaults to 'csv'.
force_columns (bool): Optional.
                If set to `True`, overwrite CKAN's columns with the ones provided.
                If set to `False`, keep CKAN's columns instead of the ones provided.
            columns_config_url_or_path (str): Path to the local architecture file or a public google sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
Raises:
FileExistsError: If folder exists and replace is False.
NotImplementedError: If data sample is not in supported type or format.
"""
if not self.dataset_folder.exists():
raise FileExistsError(
f'Dataset folder {self.dataset_folder} folder does not exists. Create a dataset before adding tables.'
)
try:
self.table_folder.mkdir(exist_ok=if_folder_exists == 'replace')
except FileExistsError as e:
if if_folder_exists == 'raise':
raise FileExistsError(
f'Table folder already exists for {self.table_id}. '
) from e
if if_folder_exists == 'pass':
return self
if not data_sample_path and if_table_config_exists != 'pass':
raise BaseDosDadosException(
'You must provide a path to correctly create config files')
partition_columns = []
if isinstance(data_sample_path, (str, Path)):
data_sample_path = Path(data_sample_path)
if data_sample_path.is_dir():
data_sample_path = [f for f in data_sample_path.glob('**/*'
) if f.is_file() and f.suffix == f'.{source_format}'][0]
partition_columns = [k.split('=')[0] for k in
data_sample_path.as_posix().split('/') if '=' in k]
columns = Datatype(self, source_format).header(data_sample_path)
else:
columns = ['column_name']
if if_table_config_exists == 'pass':
if Path(self.table_folder / 'table_config.yaml').is_file(
) and Path(self.table_folder / 'publish.sql').is_file():
pass
elif not data_sample_path:
raise BaseDosDadosException(
'You must provide a path to correctly create config files')
else:
self._make_template(columns, partition_columns,
if_table_config_exists, force_columns=force_columns)
elif if_table_config_exists == 'raise':
if Path(self.table_folder / 'table_config.yaml').is_file(
) and Path(self.table_folder / 'publish.sql').is_file():
raise FileExistsError(
f'table_config.yaml and publish.sql already exists at {self.table_folder}'
)
self._make_template(columns, partition_columns,
if_table_config_exists, force_columns=force_columns)
else:
self._make_template(columns, partition_columns,
if_table_config_exists, force_columns=force_columns)
if columns_config_url_or_path is not None:
self.update_columns(columns_config_url_or_path)
return self
<|reserved_special_token_0|>
def update(self, mode='all'):
"""Updates BigQuery schema and description.
Args:
mode (str): Optional.
                Which table to update [prod|staging|all]
not_found_ok (bool): Optional.
What to do if table is not found
"""
self._check_mode(mode)
mode = ['prod', 'staging'] if mode == 'all' else [mode]
for m in mode:
try:
table = self._get_table_obj(m)
except google.api_core.exceptions.NotFound:
continue
table.description = self._render_template(Path(
'table/table_description.txt'), self.table_config)
with open(self.metadata_path / self.dataset_id / self.table_id /
'table_description.txt', 'w', encoding='utf-8') as f:
f.write(table.description)
table.schema = self._load_schema(m)
fields = ['description', 'schema'] if m == 'prod' else [
'description']
self.client[f'bigquery_{m}'].update_table(table, fields=fields)
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='updated')
def publish(self, if_exists='raise'):
"""Creates BigQuery table at production dataset.
Table should be located at `<dataset_id>.<table_id>`.
It creates a view that uses the query from
`<metadata_path>/<dataset_id>/<table_id>/publish.sql`.
Make sure that all columns from the query also exists at
`<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including
the partitions.
Args:
if_exists (str): Optional.
What to do if table exists.
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
Todo:
* Check if all required fields are filled
"""
if if_exists == 'replace':
self.delete(mode='prod')
self.client['bigquery_prod'].query((self.table_folder /
'publish.sql').open('r', encoding='utf-8').read()).result()
self.update()
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='published')
<|reserved_special_token_0|>
def append(self, filepath, partitions=None, if_exists='replace',
chunk_size=None, **upload_args):
"""Appends new data to existing BigQuery table.
As long as the data has the same schema. It appends the data in the
filepath to the existing table.
Args:
filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with
partitions (str, pathlib.PosixPath, dict): Optional.
Hive structured partition as a string or dict
* str : `<key>=<value>/<key2>=<value2>`
* dict: `dict(key=value, key2=value2)`
            if_exists (str): Optional.
What to do if data with same name exists in storage
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
chunk_size (int): Optional
The size of a chunk of data whenever iterating (in bytes).
This must be a multiple of 256 KB per the API specification.
If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.
"""
if not self.table_exists('staging'):
raise BaseDosDadosException(
'You cannot append to a table that does not exist')
Storage(self.dataset_id, self.table_id, **self.main_vars).upload(
filepath, mode='staging', partitions=partitions, if_exists=
if_exists, chunk_size=chunk_size, **upload_args)
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='appended')
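
    # --- Illustrative usage sketch (not part of the original module) ---
    # A rough sequence showing how this class might be driven, assuming a
    # configured basedosdados environment (credentials, metadata folder) and
    # that the staging table already exists. Ids, URLs and paths below are
    # hypothetical.
    #
    #   tb = Table(dataset_id="br_example", table_id="municipio")
    #   tb.init(
    #       data_sample_path="data/municipio.csv",      # infer column names
    #       if_folder_exists="replace",
    #       if_table_config_exists="replace",
    #   )
    #   tb.update_columns("https://docs.google.com/spreadsheets/d/<key>/edit#gid=0")
    #   tb.publish(if_exists="replace")                  # create the prod view
    #   tb.append("data/municipio_2022.csv")             # add data to staging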
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Table(Base):
<|reserved_special_token_0|>
def __init__(self, dataset_id, table_id, **kwargs):
super().__init__(**kwargs)
self.table_id = table_id.replace('-', '_')
self.dataset_id = dataset_id.replace('-', '_')
self.dataset_folder = Path(self.metadata_path / self.dataset_id)
self.table_folder = self.dataset_folder / table_id
self.table_full_name = dict(prod=
f"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}"
, staging=
f"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}"
)
self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))
self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)
@property
def table_config(self):
"""
Load table_config.yaml
"""
return self._load_yaml(self.table_folder / 'table_config.yaml')
def _get_table_obj(self, mode):
"""
Get table object from BigQuery
"""
return self.client[f'bigquery_{mode}'].get_table(self.
table_full_name[mode])
def _is_partitioned(self):
"""
Check if table is partitioned
"""
partitions = self.table_config['partitions']
if partitions is None or len(partitions) == 0:
return False
if isinstance(partitions, list):
return all(item is not None for item in partitions)
raise ValueError('Partitions must be a list or None')
def _load_schema(self, mode='staging'):
"""Load schema from table_config.yaml
Args:
            mode (str): Which dataset's schema to load [prod|staging].
"""
self._check_mode(mode)
json_path = self.table_folder / f'schema-{mode}.json'
columns = self.table_config['columns']
if mode == 'staging':
new_columns = []
for c in columns:
is_in_staging = True if c.get('is_in_staging') is None else c[
'is_in_staging']
if is_in_staging and not c.get('is_partition'):
c['type'] = 'STRING'
new_columns.append(c)
del columns
columns = new_columns
elif mode == 'prod':
schema = self._get_table_obj(mode).schema
column_names = [c['name'] for c in columns]
schema_names = [s.name for s in schema]
not_in_columns = [name for name in schema_names if name not in
column_names]
not_in_schema = [name for name in column_names if name not in
schema_names]
if not_in_columns:
raise BaseDosDadosException(
'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'
.format(error_columns=not_in_columns, project_id=self.
table_config['project_id_prod'], dataset_id=self.
table_config['dataset_id'], table_id=self.table_config[
'table_id']))
if not_in_schema:
raise BaseDosDadosException(
'Column {error_columns} was not found in publish.sql. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'
.format(error_columns=not_in_schema, project_id=self.
table_config['project_id_prod'], dataset_id=self.
table_config['dataset_id'], table_id=self.table_config[
'table_id']))
for c in columns:
for s in schema:
if c['name'] == s.name:
c['type'] = s.field_type
c['mode'] = s.mode
break
json.dump(columns, json_path.open('w', encoding='utf-8'))
return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))
def _make_publish_sql(self):
"""Create publish.sql with columns and bigquery_type"""
publish_txt = """
/*
Query para publicar a tabela.
Esse é o lugar para:
- modificar nomes, ordem e tipos de colunas
- dar join com outras tabelas
- criar colunas extras (e.g. logs, proporções, etc.)
Qualquer coluna definida aqui deve também existir em `table_config.yaml`.
# Além disso, sinta-se à vontade para alterar alguns nomes obscuros
# para algo um pouco mais explícito.
TIPOS:
- Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.
- Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`
- Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
*/
"""
publish_txt = inspect.cleandoc(publish_txt)
publish_txt = textwrap.dedent(publish_txt)
project_id_prod = self.client['bigquery_prod'].project
publish_txt += f"""
CREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS
SELECT
"""
if self._is_partitioned():
columns = sorted(self.table_config['columns'], key=lambda k: (k
['is_partition'] is not None, k['is_partition']), reverse=True)
else:
columns = self.table_config['columns']
for col in columns:
name = col['name']
bigquery_type = 'STRING' if col['bigquery_type'] is None else col[
'bigquery_type'].upper()
publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\n'
publish_txt = publish_txt[:-2] + '\n'
project_id_staging = self.client['bigquery_staging'].project
publish_txt += (
f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'
)
(self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(
publish_txt)
def _make_template(self, columns, partition_columns,
if_table_config_exists, force_columns):
self.metadata.create(if_exists=if_table_config_exists, columns=
partition_columns + columns, partition_columns=
partition_columns, force_columns=force_columns, table_only=False)
self._make_publish_sql()
@staticmethod
def _sheet_to_df(columns_config_url_or_path):
"""
Convert sheet to dataframe
"""
url = columns_config_url_or_path.replace('edit#gid=',
'export?format=csv&gid=')
try:
return pd.read_csv(StringIO(requests.get(url, timeout=10).
content.decode('utf-8')))
except Exception as e:
raise BaseDosDadosException(
'Check if your google sheet Share are: Anyone on the internet with this link can view'
) from e
def table_exists(self, mode):
"""Check if table exists in BigQuery.
Args:
mode (str): Which dataset to check [prod|staging].
"""
try:
ref = self._get_table_obj(mode=mode)
except google.api_core.exceptions.NotFound:
ref = None
return bool(ref)
def update_columns(self, columns_config_url_or_path=None):
"""
        Fills columns in table_config.yaml automatically using a public google sheets URL or a local file. Also regenerates
        publish.sql and autofills the types using bigquery_type.
The sheet must contain the columns:
- name: column name
- description: column description
- bigquery_type: column bigquery type
            - measurement_unit: column measurement unit
- covered_by_dictionary: column related dictionary
- directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>
- temporal_coverage: column temporal coverage
- has_sensitive_data: the column has sensitive data
- observations: column observations
Args:
            columns_config_url_or_path (str): Path to the local architecture file or a public google sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
"""
ruamel = ryaml.YAML()
ruamel.preserve_quotes = True
ruamel.indent(mapping=4, sequence=6, offset=4)
table_config_yaml = ruamel.load((self.table_folder /
'table_config.yaml').open(encoding='utf-8'))
if ('https://docs.google.com/spreadsheets/d/' in
columns_config_url_or_path):
if ('edit#gid=' not in columns_config_url_or_path or
'https://docs.google.com/spreadsheets/d/' not in
columns_config_url_or_path or not
columns_config_url_or_path.split('=')[1].isdigit()):
raise BaseDosDadosException(
'The Google sheet url not in correct format.The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>'
)
df = self._sheet_to_df(columns_config_url_or_path)
else:
file_type = columns_config_url_or_path.split('.')[-1]
if file_type == 'csv':
df = pd.read_csv(columns_config_url_or_path, encoding='utf-8')
elif file_type in ['xls', 'xlsx', 'xlsm', 'xlsb', 'odf', 'ods',
'odt']:
df = pd.read_excel(columns_config_url_or_path)
else:
raise BaseDosDadosException(
'File not suported. Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported.'
)
df = df.fillna('NULL')
required_columns = ['name', 'bigquery_type', 'description',
'temporal_coverage', 'covered_by_dictionary',
'directory_column', 'measurement_unit', 'has_sensitive_data',
'observations']
not_found_columns = required_columns.copy()
for sheet_column in df.columns.tolist():
for required_column in required_columns:
if sheet_column == required_column:
not_found_columns.remove(required_column)
if not_found_columns:
raise BaseDosDadosException(
f"The following required columns are not found: {', '.join(not_found_columns)}."
)
columns_parameters = zip(*[df[required_column].tolist() for
required_column in required_columns])
for name, bigquery_type, description, temporal_coverage, covered_by_dictionary, directory_column, measurement_unit, has_sensitive_data, observations in columns_parameters:
for col in table_config_yaml['columns']:
if col['name'] == name:
col['bigquery_type'] = col['bigquery_type'
] if bigquery_type == 'NULL' else bigquery_type.lower()
col['description'] = col['description'
] if description == 'NULL' else description
col['temporal_coverage'] = col['temporal_coverage'
] if temporal_coverage == 'NULL' else [
temporal_coverage]
col['covered_by_dictionary'] = ('no' if
covered_by_dictionary == 'NULL' else
covered_by_dictionary)
dataset = directory_column.split('.')[0]
col['directory_column']['dataset_id'] = col[
'directory_column']['dataset_id'
] if dataset == 'NULL' else dataset
table = directory_column.split('.')[-1].split(':')[0]
col['directory_column']['table_id'] = col[
'directory_column']['table_id'
] if table == 'NULL' else table
column = directory_column.split('.')[-1].split(':')[-1]
col['directory_column']['column_name'] = col[
'directory_column']['column_name'
] if column == 'NULL' else column
col['measurement_unit'] = col['measurement_unit'
] if measurement_unit == 'NULL' else measurement_unit
col['has_sensitive_data'] = ('no' if has_sensitive_data ==
'NULL' else has_sensitive_data)
col['observations'] = col['observations'
] if observations == 'NULL' else observations
with open(self.table_folder / 'table_config.yaml', 'w', encoding=
'utf-8') as f:
ruamel.dump(table_config_yaml, f)
self._make_publish_sql()
def init(self, data_sample_path=None, if_folder_exists='raise',
if_table_config_exists='raise', source_format='csv', force_columns=
False, columns_config_url_or_path=None):
"""Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.
The folder should contain:
* `table_config.yaml`
* `publish.sql`
You can also point to a sample of the data to auto complete columns names.
Args:
data_sample_path (str, pathlib.PosixPath): Optional.
Data sample path to auto complete columns names
It supports Comma Delimited CSV, Apache Avro and
Apache Parquet.
if_folder_exists (str): Optional.
What to do if table folder exists
* 'raise' : Raises FileExistsError
* 'replace' : Replace folder
* 'pass' : Do nothing
if_table_config_exists (str): Optional
What to do if table_config.yaml and publish.sql exists
* 'raise' : Raises FileExistsError
* 'replace' : Replace files with blank template
* 'pass' : Do nothing
source_format (str): Optional
Data source format. Only 'csv', 'avro' and 'parquet'
are supported. Defaults to 'csv'.
force_columns (bool): Optional.
                If set to `True`, overwrite CKAN's columns with the ones provided.
                If set to `False`, keep CKAN's columns instead of the ones provided.
            columns_config_url_or_path (str): Path to the local architecture file or a public Google Sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
Raises:
FileExistsError: If folder exists and replace is False.
NotImplementedError: If data sample is not in supported type or format.
"""
if not self.dataset_folder.exists():
raise FileExistsError(
                f'Dataset folder {self.dataset_folder} does not exist. Create a dataset before adding tables.'
)
try:
self.table_folder.mkdir(exist_ok=if_folder_exists == 'replace')
except FileExistsError as e:
if if_folder_exists == 'raise':
raise FileExistsError(
f'Table folder already exists for {self.table_id}. '
) from e
if if_folder_exists == 'pass':
return self
if not data_sample_path and if_table_config_exists != 'pass':
raise BaseDosDadosException(
'You must provide a path to correctly create config files')
partition_columns = []
if isinstance(data_sample_path, (str, Path)):
data_sample_path = Path(data_sample_path)
if data_sample_path.is_dir():
data_sample_path = [f for f in data_sample_path.glob('**/*'
) if f.is_file() and f.suffix == f'.{source_format}'][0]
partition_columns = [k.split('=')[0] for k in
data_sample_path.as_posix().split('/') if '=' in k]
columns = Datatype(self, source_format).header(data_sample_path)
else:
columns = ['column_name']
if if_table_config_exists == 'pass':
if Path(self.table_folder / 'table_config.yaml').is_file(
) and Path(self.table_folder / 'publish.sql').is_file():
pass
elif not data_sample_path:
raise BaseDosDadosException(
'You must provide a path to correctly create config files')
else:
self._make_template(columns, partition_columns,
if_table_config_exists, force_columns=force_columns)
elif if_table_config_exists == 'raise':
if Path(self.table_folder / 'table_config.yaml').is_file(
) and Path(self.table_folder / 'publish.sql').is_file():
raise FileExistsError(
f'table_config.yaml and publish.sql already exists at {self.table_folder}'
)
self._make_template(columns, partition_columns,
if_table_config_exists, force_columns=force_columns)
else:
self._make_template(columns, partition_columns,
if_table_config_exists, force_columns=force_columns)
if columns_config_url_or_path is not None:
self.update_columns(columns_config_url_or_path)
return self
<|reserved_special_token_0|>
def update(self, mode='all'):
"""Updates BigQuery schema and description.
Args:
mode (str): Optional.
                Which table to update [prod|staging|all]
not_found_ok (bool): Optional.
What to do if table is not found
"""
self._check_mode(mode)
mode = ['prod', 'staging'] if mode == 'all' else [mode]
for m in mode:
try:
table = self._get_table_obj(m)
except google.api_core.exceptions.NotFound:
continue
table.description = self._render_template(Path(
'table/table_description.txt'), self.table_config)
with open(self.metadata_path / self.dataset_id / self.table_id /
'table_description.txt', 'w', encoding='utf-8') as f:
f.write(table.description)
table.schema = self._load_schema(m)
fields = ['description', 'schema'] if m == 'prod' else [
'description']
self.client[f'bigquery_{m}'].update_table(table, fields=fields)
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='updated')
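    # Usage sketch: refreshing descriptions and schema after editing the local
    # metadata files (the identifiers below are hypothetical placeholders, not
    # values taken from this module):
    #
    #     tb = Table(dataset_id='br_example_org', table_id='municipio')
    #     tb.update(mode='all')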
def publish(self, if_exists='raise'):
"""Creates BigQuery table at production dataset.
Table should be located at `<dataset_id>.<table_id>`.
It creates a view that uses the query from
`<metadata_path>/<dataset_id>/<table_id>/publish.sql`.
Make sure that all columns from the query also exists at
`<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including
the partitions.
Args:
if_exists (str): Optional.
What to do if table exists.
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
Todo:
* Check if all required fields are filled
"""
if if_exists == 'replace':
self.delete(mode='prod')
self.client['bigquery_prod'].query((self.table_folder /
'publish.sql').open('r', encoding='utf-8').read()).result()
self.update()
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='published')
def delete(self, mode):
"""Deletes table in BigQuery.
Args:
            mode (str): Which table to delete [prod|staging]
"""
self._check_mode(mode)
if mode == 'all':
for m, n in self.table_full_name[mode].items():
self.client[f'bigquery_{m}'].delete_table(n, not_found_ok=True)
logger.info(' {object} {object_id}_{mode} was {action}!',
object_id=self.table_id, mode=mode, object='Table', action=
'deleted')
else:
self.client[f'bigquery_{mode}'].delete_table(self.
table_full_name[mode], not_found_ok=True)
logger.info(' {object} {object_id}_{mode} was {action}!',
object_id=self.table_id, mode=mode, object='Table', action=
'deleted')
def append(self, filepath, partitions=None, if_exists='replace',
chunk_size=None, **upload_args):
"""Appends new data to existing BigQuery table.
        As long as the data has the same schema, it appends the data in the
        filepath to the existing table.
Args:
filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with
partitions (str, pathlib.PosixPath, dict): Optional.
Hive structured partition as a string or dict
* str : `<key>=<value>/<key2>=<value2>`
* dict: `dict(key=value, key2=value2)`
            if_exists (str): Optional.
What to do if data with same name exists in storage
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
chunk_size (int): Optional
The size of a chunk of data whenever iterating (in bytes).
This must be a multiple of 256 KB per the API specification.
If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.
"""
if not self.table_exists('staging'):
raise BaseDosDadosException(
'You cannot append to a table that does not exist')
Storage(self.dataset_id, self.table_id, **self.main_vars).upload(
filepath, mode='staging', partitions=partitions, if_exists=
if_exists, chunk_size=chunk_size, **upload_args)
logger.success(' {object} {object_id} was {action}!', object_id=
self.table_id, object='Table', action='appended')
<|reserved_special_token_1|>
"""
Class for manage tables in Storage and Big Query
"""
# pylint: disable=invalid-name, too-many-locals, too-many-branches, too-many-arguments,line-too-long,R0801,consider-using-f-string
from pathlib import Path
import json
from copy import deepcopy
import textwrap
import inspect
from io import StringIO
from loguru import logger
from google.cloud import bigquery
import ruamel.yaml as ryaml
import requests
import pandas as pd
import google.api_core.exceptions
from basedosdados.upload.base import Base
from basedosdados.upload.storage import Storage
from basedosdados.upload.dataset import Dataset
from basedosdados.upload.datatypes import Datatype
from basedosdados.upload.metadata import Metadata
from basedosdados.exceptions import BaseDosDadosException
class Table(Base):
"""
Manage tables in Google Cloud Storage and BigQuery.
"""
def __init__(self, dataset_id, table_id, **kwargs):
super().__init__(**kwargs)
self.table_id = table_id.replace("-", "_")
self.dataset_id = dataset_id.replace("-", "_")
self.dataset_folder = Path(self.metadata_path / self.dataset_id)
self.table_folder = self.dataset_folder / table_id
self.table_full_name = dict(
prod=f"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}",
staging=f"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}",
)
self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))
self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)
@property
def table_config(self):
"""
Load table_config.yaml
"""
return self._load_yaml(self.table_folder / "table_config.yaml")
def _get_table_obj(self, mode):
"""
Get table object from BigQuery
"""
return self.client[f"bigquery_{mode}"].get_table(self.table_full_name[mode])
def _is_partitioned(self):
"""
Check if table is partitioned
"""
        ## check if the table is partitioned; the split is needed because of a change in the type of partitions in pydantic
partitions = self.table_config["partitions"]
if partitions is None or len(partitions) == 0:
return False
if isinstance(partitions, list):
# check if any None inside list.
# False if it is the case Ex: [None, 'partition']
# True otherwise Ex: ['partition1', 'partition2']
return all(item is not None for item in partitions)
raise ValueError("Partitions must be a list or None")
def _load_schema(self, mode="staging"):
"""Load schema from table_config.yaml
Args:
mode (bool): Which dataset to create [prod|staging].
"""
self._check_mode(mode)
json_path = self.table_folder / f"schema-{mode}.json"
columns = self.table_config["columns"]
if mode == "staging":
new_columns = []
for c in columns:
# case is_in_staging are None then must be True
is_in_staging = (
True if c.get("is_in_staging") is None else c["is_in_staging"]
)
# append columns declared in table_config.yaml to schema only if is_in_staging: True
if is_in_staging and not c.get("is_partition"):
c["type"] = "STRING"
new_columns.append(c)
del columns
columns = new_columns
elif mode == "prod":
schema = self._get_table_obj(mode).schema
# get field names for fields at schema and at table_config.yaml
column_names = [c["name"] for c in columns]
schema_names = [s.name for s in schema]
# check if there are mismatched fields
not_in_columns = [name for name in schema_names if name not in column_names]
not_in_schema = [name for name in column_names if name not in schema_names]
# raise if field is not in table_config
if not_in_columns:
raise BaseDosDadosException(
"Column {error_columns} was not found in table_config.yaml. Are you sure that "
"all your column names between table_config.yaml, publish.sql and "
"{project_id}.{dataset_id}.{table_id} are the same?".format(
error_columns=not_in_columns,
project_id=self.table_config["project_id_prod"],
dataset_id=self.table_config["dataset_id"],
table_id=self.table_config["table_id"],
)
)
# raise if field is not in schema
if not_in_schema:
raise BaseDosDadosException(
"Column {error_columns} was not found in publish.sql. Are you sure that "
"all your column names between table_config.yaml, publish.sql and "
"{project_id}.{dataset_id}.{table_id} are the same?".format(
error_columns=not_in_schema,
project_id=self.table_config["project_id_prod"],
dataset_id=self.table_config["dataset_id"],
table_id=self.table_config["table_id"],
)
)
# if field is in schema, get field_type and field_mode
for c in columns:
for s in schema:
if c["name"] == s.name:
c["type"] = s.field_type
c["mode"] = s.mode
break
## force utf-8, write schema_{mode}.json
json.dump(columns, (json_path).open("w", encoding="utf-8"))
# load new created schema
return self.client[f"bigquery_{mode}"].schema_from_json(str(json_path))
def _make_publish_sql(self):
"""Create publish.sql with columns and bigquery_type"""
### publish.sql header and instructions
publish_txt = """
/*
Query para publicar a tabela.
Esse é o lugar para:
- modificar nomes, ordem e tipos de colunas
- dar join com outras tabelas
- criar colunas extras (e.g. logs, proporções, etc.)
Qualquer coluna definida aqui deve também existir em `table_config.yaml`.
# Além disso, sinta-se à vontade para alterar alguns nomes obscuros
# para algo um pouco mais explícito.
TIPOS:
- Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.
- Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`
- Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
*/
"""
# remove triple quotes extra space
publish_txt = inspect.cleandoc(publish_txt)
publish_txt = textwrap.dedent(publish_txt)
# add create table statement
project_id_prod = self.client["bigquery_prod"].project
publish_txt += f"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n"
# sort columns by is_partition, partitions_columns come first
if self._is_partitioned():
columns = sorted(
self.table_config["columns"],
key=lambda k: (k["is_partition"] is not None, k["is_partition"]),
reverse=True,
)
else:
columns = self.table_config["columns"]
# add columns in publish.sql
for col in columns:
name = col["name"]
bigquery_type = (
"STRING"
if col["bigquery_type"] is None
else col["bigquery_type"].upper()
)
publish_txt += f"SAFE_CAST({name} AS {bigquery_type}) {name},\n"
## remove last comma
publish_txt = publish_txt[:-2] + "\n"
# add from statement
project_id_staging = self.client["bigquery_staging"].project
publish_txt += (
f"FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t"
)
# save publish.sql in table_folder
(self.table_folder / "publish.sql").open("w", encoding="utf-8").write(
publish_txt
)
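    # Minimal sketch of the publish.sql this method generates (header comment
    # block omitted), assuming hypothetical projects `my-prod-project` /
    # `my-staging-project`, dataset `br_example`, table `municipio` and two
    # columns — none of these names come from this module:
    #
    #     CREATE VIEW my-prod-project.br_example.municipio AS
    #     SELECT
    #     SAFE_CAST(ano AS INT64) ano,
    #     SAFE_CAST(municipio AS STRING) municipio
    #     FROM my-staging-project.br_example_staging.municipio AS t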
def _make_template(self, columns, partition_columns, if_table_config_exists, force_columns):
# create table_config.yaml with metadata
self.metadata.create(
if_exists=if_table_config_exists,
columns=partition_columns + columns,
partition_columns=partition_columns,
force_columns=force_columns,
table_only=False,
)
self._make_publish_sql()
@staticmethod
def _sheet_to_df(columns_config_url_or_path):
"""
Convert sheet to dataframe
"""
url = columns_config_url_or_path.replace("edit#gid=", "export?format=csv&gid=")
try:
return pd.read_csv(StringIO(requests.get(url, timeout=10).content.decode("utf-8")))
except Exception as e:
raise BaseDosDadosException(
"Check if your google sheet Share are: Anyone on the internet with this link can view"
) from e
def table_exists(self, mode):
"""Check if table exists in BigQuery.
Args:
mode (str): Which dataset to check [prod|staging].
"""
try:
ref = self._get_table_obj(mode=mode)
except google.api_core.exceptions.NotFound:
ref = None
return bool(ref)
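    # Usage sketch (hypothetical identifiers and path, not part of this module):
    #
    #     tb = Table(dataset_id="br_example_org", table_id="municipio")
    #     if not tb.table_exists(mode="staging"):
    #         tb.create(path="data/municipio.csv", if_table_exists="replace")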
def update_columns(self, columns_config_url_or_path=None):
"""
        Fills columns in table_config.yaml automatically using a public Google Sheets URL or a local file. It also regenerates
        publish.sql and autofills column types using bigquery_type.
The sheet must contain the columns:
- name: column name
- description: column description
- bigquery_type: column bigquery type
            - measurement_unit: column measurement unit
- covered_by_dictionary: column related dictionary
- directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>
- temporal_coverage: column temporal coverage
- has_sensitive_data: the column has sensitive data
- observations: column observations
Args:
            columns_config_url_or_path (str): Path to the local architecture file or a public Google Sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
"""
ruamel = ryaml.YAML()
ruamel.preserve_quotes = True
ruamel.indent(mapping=4, sequence=6, offset=4)
table_config_yaml = ruamel.load(
(self.table_folder / "table_config.yaml").open(encoding="utf-8")
)
if "https://docs.google.com/spreadsheets/d/" in columns_config_url_or_path:
if (
"edit#gid=" not in columns_config_url_or_path
or "https://docs.google.com/spreadsheets/d/"
not in columns_config_url_or_path
or not columns_config_url_or_path.split("=")[1].isdigit()
):
raise BaseDosDadosException(
"The Google sheet url not in correct format."
"The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>"
)
df = self._sheet_to_df(columns_config_url_or_path)
else:
file_type = columns_config_url_or_path.split(".")[-1]
if file_type == "csv":
df = pd.read_csv(columns_config_url_or_path, encoding="utf-8")
elif file_type in ["xls", "xlsx", "xlsm", "xlsb", "odf", "ods", "odt"]:
df = pd.read_excel(columns_config_url_or_path)
else:
raise BaseDosDadosException(
"File not suported. Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported."
)
df = df.fillna("NULL")
required_columns = [
"name",
"bigquery_type",
"description",
"temporal_coverage",
"covered_by_dictionary",
"directory_column",
"measurement_unit",
"has_sensitive_data",
"observations",
]
not_found_columns = required_columns.copy()
for sheet_column in df.columns.tolist():
for required_column in required_columns:
if sheet_column == required_column:
not_found_columns.remove(required_column)
if not_found_columns:
raise BaseDosDadosException(
f"The following required columns are not found: {', '.join(not_found_columns)}."
)
columns_parameters = zip(
*[df[required_column].tolist() for required_column in required_columns]
)
for (
name,
bigquery_type,
description,
temporal_coverage,
covered_by_dictionary,
directory_column,
measurement_unit,
has_sensitive_data,
observations,
) in columns_parameters:
for col in table_config_yaml["columns"]:
if col["name"] == name:
col["bigquery_type"] = (
col["bigquery_type"]
if bigquery_type == "NULL"
else bigquery_type.lower()
)
col["description"] = (
col["description"] if description == "NULL" else description
)
col["temporal_coverage"] = (
col["temporal_coverage"]
if temporal_coverage == "NULL"
else [temporal_coverage]
)
col["covered_by_dictionary"] = (
"no"
if covered_by_dictionary == "NULL"
else covered_by_dictionary
)
dataset = directory_column.split(".")[0]
col["directory_column"]["dataset_id"] = (
col["directory_column"]["dataset_id"]
if dataset == "NULL"
else dataset
)
table = directory_column.split(".")[-1].split(":")[0]
col["directory_column"]["table_id"] = (
col["directory_column"]["table_id"]
if table == "NULL"
else table
)
column = directory_column.split(".")[-1].split(":")[-1]
col["directory_column"]["column_name"] = (
col["directory_column"]["column_name"]
if column == "NULL"
else column
)
col["measurement_unit"] = (
col["measurement_unit"]
if measurement_unit == "NULL"
else measurement_unit
)
col["has_sensitive_data"] = (
"no" if has_sensitive_data == "NULL" else has_sensitive_data
)
col["observations"] = (
col["observations"] if observations == "NULL" else observations
)
with open(self.table_folder / "table_config.yaml", "w", encoding="utf-8") as f:
ruamel.dump(table_config_yaml, f)
# regenerate publish.sql
self._make_publish_sql()
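    # Usage sketch: filling column metadata from a shared architecture sheet.
    # The URL below is a made-up placeholder in the expected format, not a real
    # spreadsheet:
    #
    #     tb = Table(dataset_id="br_example_org", table_id="municipio")
    #     tb.update_columns(
    #         "https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>"
    #     )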
def init(
self,
data_sample_path=None,
if_folder_exists="raise",
if_table_config_exists="raise",
source_format="csv",
force_columns = False,
columns_config_url_or_path=None,
): # sourcery skip: low-code-quality
"""Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.
The folder should contain:
* `table_config.yaml`
* `publish.sql`
You can also point to a sample of the data to auto complete columns names.
Args:
data_sample_path (str, pathlib.PosixPath): Optional.
Data sample path to auto complete columns names
It supports Comma Delimited CSV, Apache Avro and
Apache Parquet.
if_folder_exists (str): Optional.
What to do if table folder exists
* 'raise' : Raises FileExistsError
* 'replace' : Replace folder
* 'pass' : Do nothing
if_table_config_exists (str): Optional
What to do if table_config.yaml and publish.sql exists
* 'raise' : Raises FileExistsError
* 'replace' : Replace files with blank template
* 'pass' : Do nothing
source_format (str): Optional
Data source format. Only 'csv', 'avro' and 'parquet'
are supported. Defaults to 'csv'.
force_columns (bool): Optional.
                If set to `True`, overwrite CKAN's columns with the ones provided.
                If set to `False`, keep CKAN's columns instead of the ones provided.
            columns_config_url_or_path (str): Path to the local architecture file or a public Google Sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
Raises:
FileExistsError: If folder exists and replace is False.
NotImplementedError: If data sample is not in supported type or format.
"""
if not self.dataset_folder.exists():
raise FileExistsError(
f"Dataset folder {self.dataset_folder} folder does not exists. "
"Create a dataset before adding tables."
)
try:
self.table_folder.mkdir(exist_ok=(if_folder_exists == "replace"))
except FileExistsError as e:
if if_folder_exists == "raise":
raise FileExistsError(
f"Table folder already exists for {self.table_id}. "
) from e
if if_folder_exists == "pass":
return self
if not data_sample_path and if_table_config_exists != "pass":
raise BaseDosDadosException(
"You must provide a path to correctly create config files"
)
partition_columns = []
if isinstance(
data_sample_path,
(
str,
Path,
),
):
# Check if partitioned and get data sample and partition columns
data_sample_path = Path(data_sample_path)
if data_sample_path.is_dir():
data_sample_path = [
f
for f in data_sample_path.glob("**/*")
if f.is_file() and f.suffix == f".{source_format}"
][0]
partition_columns = [
k.split("=")[0]
for k in data_sample_path.as_posix().split("/")
if "=" in k
]
columns = Datatype(self, source_format).header(data_sample_path)
else:
columns = ["column_name"]
if if_table_config_exists == "pass":
# Check if config files exists before passing
if (
Path(self.table_folder / "table_config.yaml").is_file()
and Path(self.table_folder / "publish.sql").is_file()
):
pass
# Raise if no sample to determine columns
elif not data_sample_path:
raise BaseDosDadosException(
"You must provide a path to correctly create config files"
)
else:
self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)
elif if_table_config_exists == "raise":
# Check if config files already exist
if (
Path(self.table_folder / "table_config.yaml").is_file()
and Path(self.table_folder / "publish.sql").is_file()
):
raise FileExistsError(
f"table_config.yaml and publish.sql already exists at {self.table_folder}"
)
# if config files don't exist, create them
self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)
else:
# Raise: without a path to data sample, should not replace config files with empty template
self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)
if columns_config_url_or_path is not None:
self.update_columns(columns_config_url_or_path)
return self
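    # Usage sketch: initializing the metadata folder from a local CSV sample.
    # Paths and identifiers are hypothetical placeholders:
    #
    #     tb = Table(dataset_id="br_example_org", table_id="municipio")
    #     tb.init(
    #         data_sample_path="data/municipio.csv",
    #         if_folder_exists="replace",
    #         if_table_config_exists="replace",
    #     )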
def create(
self,
path=None,
force_dataset=True,
if_table_exists="raise",
if_storage_data_exists="raise",
if_table_config_exists="raise",
source_format="csv",
force_columns=False,
columns_config_url_or_path=None,
dataset_is_public=True,
location=None,
chunk_size=None,
):
"""Creates BigQuery table at staging dataset.
If you add a path, it automatically saves the data in the storage,
creates a datasets folder and BigQuery location, besides creating the
table and its configuration files.
The new table should be located at `<dataset_id>_staging.<table_id>` in BigQuery.
It looks for data saved in Storage at `<bucket_name>/staging/<dataset_id>/<table_id>/*`
and builds the table.
It currently supports the types:
- Comma Delimited CSV
- Apache Avro
- Apache Parquet
Data can also be partitioned following the hive partitioning scheme
`<key1>=<value1>/<key2>=<value2>` - for instance,
        `year=2012/country=BR`. The partition is automatically detected
by searching for `partitions` on the `table_config.yaml`.
Args:
path (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with
job_config_params (dict): Optional.
Job configuration params from bigquery
if_table_exists (str): Optional
What to do if table exists
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
force_dataset (bool): Creates `<dataset_id>` folder and BigQuery Dataset if it doesn't exists.
if_table_config_exists (str): Optional.
What to do if config files already exist
* 'raise': Raises FileExistError
* 'replace': Replace with blank template
                * 'pass' : Do nothing
if_storage_data_exists (str): Optional.
What to do if data already exists on your bucket:
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
source_format (str): Optional
Data source format. Only 'csv', 'avro' and 'parquet'
are supported. Defaults to 'csv'.
force_columns (bool): Optional.
                If set to `True`, overwrite CKAN's columns with the ones provided.
                If set to `False`, keep CKAN's columns instead of the ones provided.
            columns_config_url_or_path (str): Path to the local architecture file or a public Google Sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
dataset_is_public (bool): Control if prod dataset is public or not. By default staging datasets like `dataset_id_staging` are not public.
location (str): Optional. Location of dataset data.
List of possible region names locations: https://cloud.google.com/bigquery/docs/locations
chunk_size (int): Optional
The size of a chunk of data whenever iterating (in bytes).
This must be a multiple of 256 KB per the API specification.
If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.
"""
if path is None:
# Look if table data already exists at Storage
data = self.client["storage_staging"].list_blobs(
self.bucket_name, prefix=f"staging/{self.dataset_id}/{self.table_id}"
)
# Raise: Cannot create table without external data
if not data:
raise BaseDosDadosException(
"You must provide a path for uploading data"
)
# Add data to storage
if isinstance(
path,
(
str,
Path,
),
):
Storage(self.dataset_id, self.table_id, **self.main_vars).upload(
path,
mode="staging",
if_exists=if_storage_data_exists,
chunk_size=chunk_size,
)
# Create Dataset if it doesn't exist
if force_dataset:
dataset_obj = Dataset(self.dataset_id, **self.main_vars)
try:
dataset_obj.init()
except FileExistsError:
pass
dataset_obj.create(
if_exists="pass", location=location, dataset_is_public=dataset_is_public
)
self.init(
data_sample_path=path,
if_folder_exists="replace",
if_table_config_exists=if_table_config_exists,
columns_config_url_or_path=columns_config_url_or_path,
source_format=source_format,
force_columns=force_columns
)
table = bigquery.Table(self.table_full_name["staging"])
table.external_data_configuration = Datatype(
self, source_format, "staging", partitioned=self._is_partitioned()
).external_config
        # Look up whether the table already exists
table_ref = None
try:
table_ref = self.client["bigquery_staging"].get_table(
self.table_full_name["staging"]
)
except google.api_core.exceptions.NotFound:
pass
if isinstance(table_ref, google.cloud.bigquery.table.Table):
if if_table_exists == "pass":
return None
if if_table_exists == "raise":
raise FileExistsError(
"Table already exists, choose replace if you want to overwrite it"
)
if if_table_exists == "replace":
self.delete(mode="staging")
self.client["bigquery_staging"].create_table(table)
logger.success(
"{object} {object_id} was {action}!",
object_id=self.table_id,
object="Table",
action="created",
)
return None
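    # End-to-end usage sketch for building the staging table (hypothetical
    # identifiers and path; bucket/project configuration comes from the Base
    # credentials):
    #
    #     tb = Table(dataset_id="br_example_org", table_id="municipio")
    #     tb.create(
    #         path="data/municipio.csv",
    #         if_table_exists="replace",
    #         if_storage_data_exists="replace",
    #         if_table_config_exists="pass",
    #     )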
def update(self, mode="all"):
"""Updates BigQuery schema and description.
Args:
mode (str): Optional.
                Which table to update [prod|staging|all]
not_found_ok (bool): Optional.
What to do if table is not found
"""
self._check_mode(mode)
mode = ["prod", "staging"] if mode == "all" else [mode]
for m in mode:
try:
table = self._get_table_obj(m)
except google.api_core.exceptions.NotFound:
continue
# if m == "staging":
table.description = self._render_template(
Path("table/table_description.txt"), self.table_config
)
# save table description
with open(
self.metadata_path
/ self.dataset_id
/ self.table_id
/ "table_description.txt",
"w",
encoding="utf-8",
) as f:
f.write(table.description)
# when mode is staging the table schema already exists
table.schema = self._load_schema(m)
fields = ["description", "schema"] if m == "prod" else ["description"]
self.client[f"bigquery_{m}"].update_table(table, fields=fields)
logger.success(
" {object} {object_id} was {action}!",
object_id=self.table_id,
object="Table",
action="updated",
)
def publish(self, if_exists="raise"):
"""Creates BigQuery table at production dataset.
Table should be located at `<dataset_id>.<table_id>`.
It creates a view that uses the query from
`<metadata_path>/<dataset_id>/<table_id>/publish.sql`.
Make sure that all columns from the query also exists at
`<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including
the partitions.
Args:
if_exists (str): Optional.
What to do if table exists.
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
Todo:
* Check if all required fields are filled
"""
if if_exists == "replace":
self.delete(mode="prod")
self.client["bigquery_prod"].query(
(self.table_folder / "publish.sql").open("r", encoding="utf-8").read()
).result()
self.update()
logger.success(
" {object} {object_id} was {action}!",
object_id=self.table_id,
object="Table",
action="published",
)
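    # Usage sketch: promoting the staging table to the production dataset once
    # table_config.yaml and publish.sql have been reviewed (identifiers are
    # hypothetical examples):
    #
    #     tb = Table(dataset_id="br_example_org", table_id="municipio")
    #     tb.publish(if_exists="replace")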
def delete(self, mode):
"""Deletes table in BigQuery.
Args:
            mode (str): Which table to delete [prod|staging]
"""
self._check_mode(mode)
if mode == "all":
for m, n in self.table_full_name[mode].items():
self.client[f"bigquery_{m}"].delete_table(n, not_found_ok=True)
logger.info(
" {object} {object_id}_{mode} was {action}!",
object_id=self.table_id,
mode=mode,
object="Table",
action="deleted",
)
else:
self.client[f"bigquery_{mode}"].delete_table(
self.table_full_name[mode], not_found_ok=True
)
logger.info(
" {object} {object_id}_{mode} was {action}!",
object_id=self.table_id,
mode=mode,
object="Table",
action="deleted",
)
def append(
self,
filepath,
partitions=None,
if_exists="replace",
chunk_size=None,
**upload_args,
):
"""Appends new data to existing BigQuery table.
        As long as the data has the same schema, it appends the data in the
        filepath to the existing table.
Args:
filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with
partitions (str, pathlib.PosixPath, dict): Optional.
Hive structured partition as a string or dict
* str : `<key>=<value>/<key2>=<value2>`
* dict: `dict(key=value, key2=value2)`
            if_exists (str): Optional.
What to do if data with same name exists in storage
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
chunk_size (int): Optional
The size of a chunk of data whenever iterating (in bytes).
This must be a multiple of 256 KB per the API specification.
If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.
"""
if not self.table_exists("staging"):
raise BaseDosDadosException(
"You cannot append to a table that does not exist"
)
Storage(self.dataset_id, self.table_id, **self.main_vars).upload(
filepath,
mode="staging",
partitions=partitions,
if_exists=if_exists,
chunk_size=chunk_size,
**upload_args,
)
logger.success(
" {object} {object_id} was {action}!",
object_id=self.table_id,
object="Table",
action="appended",
)
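    # Usage sketch: appending a new hive-partitioned file to an existing staging
    # table (file path and partition values are hypothetical):
    #
    #     tb = Table(dataset_id="br_example_org", table_id="municipio")
    #     tb.append(
    #         filepath="data/municipio_2013.csv",
    #         partitions="year=2013/country=BR",
    #     )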
|
flexible
|
{
"blob_id": "da218e6d9ee311eefb8e9ae4dac5053793eb5514",
"index": 9369,
"step-1": "<mask token>\n\n\nclass Table(Base):\n <mask token>\n\n def __init__(self, dataset_id, table_id, **kwargs):\n super().__init__(**kwargs)\n self.table_id = table_id.replace('-', '_')\n self.dataset_id = dataset_id.replace('-', '_')\n self.dataset_folder = Path(self.metadata_path / self.dataset_id)\n self.table_folder = self.dataset_folder / table_id\n self.table_full_name = dict(prod=\n f\"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}\"\n , staging=\n f\"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}\"\n )\n self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))\n self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)\n\n @property\n def table_config(self):\n \"\"\"\n Load table_config.yaml\n \"\"\"\n return self._load_yaml(self.table_folder / 'table_config.yaml')\n <mask token>\n <mask token>\n\n def _load_schema(self, mode='staging'):\n \"\"\"Load schema from table_config.yaml\n\n Args:\n mode (bool): Which dataset to create [prod|staging].\n \"\"\"\n self._check_mode(mode)\n json_path = self.table_folder / f'schema-{mode}.json'\n columns = self.table_config['columns']\n if mode == 'staging':\n new_columns = []\n for c in columns:\n is_in_staging = True if c.get('is_in_staging') is None else c[\n 'is_in_staging']\n if is_in_staging and not c.get('is_partition'):\n c['type'] = 'STRING'\n new_columns.append(c)\n del columns\n columns = new_columns\n elif mode == 'prod':\n schema = self._get_table_obj(mode).schema\n column_names = [c['name'] for c in columns]\n schema_names = [s.name for s in schema]\n not_in_columns = [name for name in schema_names if name not in\n column_names]\n not_in_schema = [name for name in column_names if name not in\n schema_names]\n if not_in_columns:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_columns, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n if not_in_schema:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in publish.sql. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_schema, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n for c in columns:\n for s in schema:\n if c['name'] == s.name:\n c['type'] = s.field_type\n c['mode'] = s.mode\n break\n json.dump(columns, json_path.open('w', encoding='utf-8'))\n return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))\n\n def _make_publish_sql(self):\n \"\"\"Create publish.sql with columns and bigquery_type\"\"\"\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse é o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. 
logs, proporções, etc.)\n\n Qualquer coluna definida aqui deve também existir em `table_config.yaml`.\n\n # Além disso, sinta-se à vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explícito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n project_id_prod = self.client['bigquery_prod'].project\n publish_txt += f\"\"\"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n\"\"\"\n if self._is_partitioned():\n columns = sorted(self.table_config['columns'], key=lambda k: (k\n ['is_partition'] is not None, k['is_partition']), reverse=True)\n else:\n columns = self.table_config['columns']\n for col in columns:\n name = col['name']\n bigquery_type = 'STRING' if col['bigquery_type'] is None else col[\n 'bigquery_type'].upper()\n publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\\n'\n publish_txt = publish_txt[:-2] + '\\n'\n project_id_staging = self.client['bigquery_staging'].project\n publish_txt += (\n f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'\n )\n (self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(\n publish_txt)\n <mask token>\n\n @staticmethod\n def _sheet_to_df(columns_config_url_or_path):\n \"\"\"\n Convert sheet to dataframe\n \"\"\"\n url = columns_config_url_or_path.replace('edit#gid=',\n 'export?format=csv&gid=')\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).\n content.decode('utf-8')))\n except Exception as e:\n raise BaseDosDadosException(\n 'Check if your google sheet Share are: Anyone on the internet with this link can view'\n ) from e\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def update(self, mode='all'):\n \"\"\"Updates BigQuery schema and description.\n Args:\n mode (str): Optional.\n Table of which table to update [prod|staging|all]\n not_found_ok (bool): Optional.\n What to do if table is not found\n \"\"\"\n self._check_mode(mode)\n mode = ['prod', 'staging'] if mode == 'all' else [mode]\n for m in mode:\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n table.description = self._render_template(Path(\n 'table/table_description.txt'), self.table_config)\n with open(self.metadata_path / self.dataset_id / self.table_id /\n 'table_description.txt', 'w', encoding='utf-8') as f:\n f.write(table.description)\n table.schema = self._load_schema(m)\n fields = ['description', 'schema'] if m == 'prod' else [\n 'description']\n self.client[f'bigquery_{m}'].update_table(table, fields=fields)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='updated')\n <mask token>\n <mask token>\n\n def append(self, filepath, partitions=None, if_exists='replace',\n chunk_size=None, **upload_args):\n \"\"\"Appends new data to existing BigQuery table.\n\n As long as the data has the same schema. 
It appends the data in the\n filepath to the existing table.\n\n Args:\n filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n partitions (str, pathlib.PosixPath, dict): Optional.\n Hive structured partition as a string or dict\n\n * str : `<key>=<value>/<key2>=<value2>`\n * dict: `dict(key=value, key2=value2)`\n if_exists (str): 0ptional.\n What to do if data with same name exists in storage\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.\n \"\"\"\n if not self.table_exists('staging'):\n raise BaseDosDadosException(\n 'You cannot append to a table that does not exist')\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n filepath, mode='staging', partitions=partitions, if_exists=\n if_exists, chunk_size=chunk_size, **upload_args)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='appended')\n",
"step-2": "<mask token>\n\n\nclass Table(Base):\n <mask token>\n\n def __init__(self, dataset_id, table_id, **kwargs):\n super().__init__(**kwargs)\n self.table_id = table_id.replace('-', '_')\n self.dataset_id = dataset_id.replace('-', '_')\n self.dataset_folder = Path(self.metadata_path / self.dataset_id)\n self.table_folder = self.dataset_folder / table_id\n self.table_full_name = dict(prod=\n f\"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}\"\n , staging=\n f\"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}\"\n )\n self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))\n self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)\n\n @property\n def table_config(self):\n \"\"\"\n Load table_config.yaml\n \"\"\"\n return self._load_yaml(self.table_folder / 'table_config.yaml')\n\n def _get_table_obj(self, mode):\n \"\"\"\n Get table object from BigQuery\n \"\"\"\n return self.client[f'bigquery_{mode}'].get_table(self.\n table_full_name[mode])\n\n def _is_partitioned(self):\n \"\"\"\n Check if table is partitioned\n \"\"\"\n partitions = self.table_config['partitions']\n if partitions is None or len(partitions) == 0:\n return False\n if isinstance(partitions, list):\n return all(item is not None for item in partitions)\n raise ValueError('Partitions must be a list or None')\n\n def _load_schema(self, mode='staging'):\n \"\"\"Load schema from table_config.yaml\n\n Args:\n mode (bool): Which dataset to create [prod|staging].\n \"\"\"\n self._check_mode(mode)\n json_path = self.table_folder / f'schema-{mode}.json'\n columns = self.table_config['columns']\n if mode == 'staging':\n new_columns = []\n for c in columns:\n is_in_staging = True if c.get('is_in_staging') is None else c[\n 'is_in_staging']\n if is_in_staging and not c.get('is_partition'):\n c['type'] = 'STRING'\n new_columns.append(c)\n del columns\n columns = new_columns\n elif mode == 'prod':\n schema = self._get_table_obj(mode).schema\n column_names = [c['name'] for c in columns]\n schema_names = [s.name for s in schema]\n not_in_columns = [name for name in schema_names if name not in\n column_names]\n not_in_schema = [name for name in column_names if name not in\n schema_names]\n if not_in_columns:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_columns, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n if not_in_schema:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in publish.sql. 
Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_schema, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n for c in columns:\n for s in schema:\n if c['name'] == s.name:\n c['type'] = s.field_type\n c['mode'] = s.mode\n break\n json.dump(columns, json_path.open('w', encoding='utf-8'))\n return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))\n\n def _make_publish_sql(self):\n \"\"\"Create publish.sql with columns and bigquery_type\"\"\"\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse é o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. logs, proporções, etc.)\n\n Qualquer coluna definida aqui deve também existir em `table_config.yaml`.\n\n # Além disso, sinta-se à vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explícito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n project_id_prod = self.client['bigquery_prod'].project\n publish_txt += f\"\"\"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n\"\"\"\n if self._is_partitioned():\n columns = sorted(self.table_config['columns'], key=lambda k: (k\n ['is_partition'] is not None, k['is_partition']), reverse=True)\n else:\n columns = self.table_config['columns']\n for col in columns:\n name = col['name']\n bigquery_type = 'STRING' if col['bigquery_type'] is None else col[\n 'bigquery_type'].upper()\n publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\\n'\n publish_txt = publish_txt[:-2] + '\\n'\n project_id_staging = self.client['bigquery_staging'].project\n publish_txt += (\n f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'\n )\n (self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(\n publish_txt)\n <mask token>\n\n @staticmethod\n def _sheet_to_df(columns_config_url_or_path):\n \"\"\"\n Convert sheet to dataframe\n \"\"\"\n url = columns_config_url_or_path.replace('edit#gid=',\n 'export?format=csv&gid=')\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).\n content.decode('utf-8')))\n except Exception as e:\n raise BaseDosDadosException(\n 'Check if your google sheet Share are: Anyone on the internet with this link can view'\n ) from e\n\n def table_exists(self, mode):\n \"\"\"Check if table exists in BigQuery.\n\n Args:\n mode (str): Which dataset to check [prod|staging].\n \"\"\"\n try:\n ref = self._get_table_obj(mode=mode)\n except google.api_core.exceptions.NotFound:\n ref = None\n return bool(ref)\n <mask token>\n <mask token>\n <mask token>\n\n def update(self, mode='all'):\n \"\"\"Updates BigQuery schema and description.\n Args:\n mode (str): Optional.\n Table of which table to update [prod|staging|all]\n not_found_ok (bool): Optional.\n What to do if table is not found\n \"\"\"\n self._check_mode(mode)\n mode = ['prod', 'staging'] if mode == 'all' else [mode]\n for m in mode:\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n table.description = 
self._render_template(Path(\n 'table/table_description.txt'), self.table_config)\n with open(self.metadata_path / self.dataset_id / self.table_id /\n 'table_description.txt', 'w', encoding='utf-8') as f:\n f.write(table.description)\n table.schema = self._load_schema(m)\n fields = ['description', 'schema'] if m == 'prod' else [\n 'description']\n self.client[f'bigquery_{m}'].update_table(table, fields=fields)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='updated')\n\n def publish(self, if_exists='raise'):\n \"\"\"Creates BigQuery table at production dataset.\n\n Table should be located at `<dataset_id>.<table_id>`.\n\n It creates a view that uses the query from\n `<metadata_path>/<dataset_id>/<table_id>/publish.sql`.\n\n Make sure that all columns from the query also exists at\n `<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including\n the partitions.\n\n Args:\n if_exists (str): Optional.\n What to do if table exists.\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n\n Todo:\n\n * Check if all required fields are filled\n \"\"\"\n if if_exists == 'replace':\n self.delete(mode='prod')\n self.client['bigquery_prod'].query((self.table_folder /\n 'publish.sql').open('r', encoding='utf-8').read()).result()\n self.update()\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='published')\n <mask token>\n\n def append(self, filepath, partitions=None, if_exists='replace',\n chunk_size=None, **upload_args):\n \"\"\"Appends new data to existing BigQuery table.\n\n As long as the data has the same schema. It appends the data in the\n filepath to the existing table.\n\n Args:\n filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n partitions (str, pathlib.PosixPath, dict): Optional.\n Hive structured partition as a string or dict\n\n * str : `<key>=<value>/<key2>=<value2>`\n * dict: `dict(key=value, key2=value2)`\n if_exists (str): 0ptional.\n What to do if data with same name exists in storage\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.\n \"\"\"\n if not self.table_exists('staging'):\n raise BaseDosDadosException(\n 'You cannot append to a table that does not exist')\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n filepath, mode='staging', partitions=partitions, if_exists=\n if_exists, chunk_size=chunk_size, **upload_args)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='appended')\n",
"step-3": "<mask token>\n\n\nclass Table(Base):\n <mask token>\n\n def __init__(self, dataset_id, table_id, **kwargs):\n super().__init__(**kwargs)\n self.table_id = table_id.replace('-', '_')\n self.dataset_id = dataset_id.replace('-', '_')\n self.dataset_folder = Path(self.metadata_path / self.dataset_id)\n self.table_folder = self.dataset_folder / table_id\n self.table_full_name = dict(prod=\n f\"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}\"\n , staging=\n f\"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}\"\n )\n self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))\n self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)\n\n @property\n def table_config(self):\n \"\"\"\n Load table_config.yaml\n \"\"\"\n return self._load_yaml(self.table_folder / 'table_config.yaml')\n\n def _get_table_obj(self, mode):\n \"\"\"\n Get table object from BigQuery\n \"\"\"\n return self.client[f'bigquery_{mode}'].get_table(self.\n table_full_name[mode])\n\n def _is_partitioned(self):\n \"\"\"\n Check if table is partitioned\n \"\"\"\n partitions = self.table_config['partitions']\n if partitions is None or len(partitions) == 0:\n return False\n if isinstance(partitions, list):\n return all(item is not None for item in partitions)\n raise ValueError('Partitions must be a list or None')\n\n def _load_schema(self, mode='staging'):\n \"\"\"Load schema from table_config.yaml\n\n Args:\n mode (bool): Which dataset to create [prod|staging].\n \"\"\"\n self._check_mode(mode)\n json_path = self.table_folder / f'schema-{mode}.json'\n columns = self.table_config['columns']\n if mode == 'staging':\n new_columns = []\n for c in columns:\n is_in_staging = True if c.get('is_in_staging') is None else c[\n 'is_in_staging']\n if is_in_staging and not c.get('is_partition'):\n c['type'] = 'STRING'\n new_columns.append(c)\n del columns\n columns = new_columns\n elif mode == 'prod':\n schema = self._get_table_obj(mode).schema\n column_names = [c['name'] for c in columns]\n schema_names = [s.name for s in schema]\n not_in_columns = [name for name in schema_names if name not in\n column_names]\n not_in_schema = [name for name in column_names if name not in\n schema_names]\n if not_in_columns:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_columns, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n if not_in_schema:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in publish.sql. 
Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_schema, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n for c in columns:\n for s in schema:\n if c['name'] == s.name:\n c['type'] = s.field_type\n c['mode'] = s.mode\n break\n json.dump(columns, json_path.open('w', encoding='utf-8'))\n return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))\n\n def _make_publish_sql(self):\n \"\"\"Create publish.sql with columns and bigquery_type\"\"\"\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse é o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. logs, proporções, etc.)\n\n Qualquer coluna definida aqui deve também existir em `table_config.yaml`.\n\n # Além disso, sinta-se à vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explícito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n project_id_prod = self.client['bigquery_prod'].project\n publish_txt += f\"\"\"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n\"\"\"\n if self._is_partitioned():\n columns = sorted(self.table_config['columns'], key=lambda k: (k\n ['is_partition'] is not None, k['is_partition']), reverse=True)\n else:\n columns = self.table_config['columns']\n for col in columns:\n name = col['name']\n bigquery_type = 'STRING' if col['bigquery_type'] is None else col[\n 'bigquery_type'].upper()\n publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\\n'\n publish_txt = publish_txt[:-2] + '\\n'\n project_id_staging = self.client['bigquery_staging'].project\n publish_txt += (\n f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'\n )\n (self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(\n publish_txt)\n\n def _make_template(self, columns, partition_columns,\n if_table_config_exists, force_columns):\n self.metadata.create(if_exists=if_table_config_exists, columns=\n partition_columns + columns, partition_columns=\n partition_columns, force_columns=force_columns, table_only=False)\n self._make_publish_sql()\n\n @staticmethod\n def _sheet_to_df(columns_config_url_or_path):\n \"\"\"\n Convert sheet to dataframe\n \"\"\"\n url = columns_config_url_or_path.replace('edit#gid=',\n 'export?format=csv&gid=')\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).\n content.decode('utf-8')))\n except Exception as e:\n raise BaseDosDadosException(\n 'Check if your google sheet Share are: Anyone on the internet with this link can view'\n ) from e\n\n def table_exists(self, mode):\n \"\"\"Check if table exists in BigQuery.\n\n Args:\n mode (str): Which dataset to check [prod|staging].\n \"\"\"\n try:\n ref = self._get_table_obj(mode=mode)\n except google.api_core.exceptions.NotFound:\n ref = None\n return bool(ref)\n\n def update_columns(self, columns_config_url_or_path=None):\n \"\"\"\n Fills columns in table_config.yaml automatically using a public google sheets URL or a local file. 
Also regenerate\n publish.sql and autofill type using bigquery_type.\n\n The sheet must contain the columns:\n - name: column name\n - description: column description\n - bigquery_type: column bigquery type\n - measurement_unit: column mesurement unit\n - covered_by_dictionary: column related dictionary\n - directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>\n - temporal_coverage: column temporal coverage\n - has_sensitive_data: the column has sensitive data\n - observations: column observations\n Args:\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n \"\"\"\n ruamel = ryaml.YAML()\n ruamel.preserve_quotes = True\n ruamel.indent(mapping=4, sequence=6, offset=4)\n table_config_yaml = ruamel.load((self.table_folder /\n 'table_config.yaml').open(encoding='utf-8'))\n if ('https://docs.google.com/spreadsheets/d/' in\n columns_config_url_or_path):\n if ('edit#gid=' not in columns_config_url_or_path or \n 'https://docs.google.com/spreadsheets/d/' not in\n columns_config_url_or_path or not\n columns_config_url_or_path.split('=')[1].isdigit()):\n raise BaseDosDadosException(\n 'The Google sheet url not in correct format.The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>'\n )\n df = self._sheet_to_df(columns_config_url_or_path)\n else:\n file_type = columns_config_url_or_path.split('.')[-1]\n if file_type == 'csv':\n df = pd.read_csv(columns_config_url_or_path, encoding='utf-8')\n elif file_type in ['xls', 'xlsx', 'xlsm', 'xlsb', 'odf', 'ods',\n 'odt']:\n df = pd.read_excel(columns_config_url_or_path)\n else:\n raise BaseDosDadosException(\n 'File not suported. 
Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported.'\n )\n df = df.fillna('NULL')\n required_columns = ['name', 'bigquery_type', 'description',\n 'temporal_coverage', 'covered_by_dictionary',\n 'directory_column', 'measurement_unit', 'has_sensitive_data',\n 'observations']\n not_found_columns = required_columns.copy()\n for sheet_column in df.columns.tolist():\n for required_column in required_columns:\n if sheet_column == required_column:\n not_found_columns.remove(required_column)\n if not_found_columns:\n raise BaseDosDadosException(\n f\"The following required columns are not found: {', '.join(not_found_columns)}.\"\n )\n columns_parameters = zip(*[df[required_column].tolist() for\n required_column in required_columns])\n for name, bigquery_type, description, temporal_coverage, covered_by_dictionary, directory_column, measurement_unit, has_sensitive_data, observations in columns_parameters:\n for col in table_config_yaml['columns']:\n if col['name'] == name:\n col['bigquery_type'] = col['bigquery_type'\n ] if bigquery_type == 'NULL' else bigquery_type.lower()\n col['description'] = col['description'\n ] if description == 'NULL' else description\n col['temporal_coverage'] = col['temporal_coverage'\n ] if temporal_coverage == 'NULL' else [\n temporal_coverage]\n col['covered_by_dictionary'] = ('no' if \n covered_by_dictionary == 'NULL' else\n covered_by_dictionary)\n dataset = directory_column.split('.')[0]\n col['directory_column']['dataset_id'] = col[\n 'directory_column']['dataset_id'\n ] if dataset == 'NULL' else dataset\n table = directory_column.split('.')[-1].split(':')[0]\n col['directory_column']['table_id'] = col[\n 'directory_column']['table_id'\n ] if table == 'NULL' else table\n column = directory_column.split('.')[-1].split(':')[-1]\n col['directory_column']['column_name'] = col[\n 'directory_column']['column_name'\n ] if column == 'NULL' else column\n col['measurement_unit'] = col['measurement_unit'\n ] if measurement_unit == 'NULL' else measurement_unit\n col['has_sensitive_data'] = ('no' if has_sensitive_data ==\n 'NULL' else has_sensitive_data)\n col['observations'] = col['observations'\n ] if observations == 'NULL' else observations\n with open(self.table_folder / 'table_config.yaml', 'w', encoding=\n 'utf-8') as f:\n ruamel.dump(table_config_yaml, f)\n self._make_publish_sql()\n\n def init(self, data_sample_path=None, if_folder_exists='raise',\n if_table_config_exists='raise', source_format='csv', force_columns=\n False, columns_config_url_or_path=None):\n \"\"\"Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.\n\n The folder should contain:\n\n * `table_config.yaml`\n * `publish.sql`\n\n You can also point to a sample of the data to auto complete columns names.\n\n Args:\n data_sample_path (str, pathlib.PosixPath): Optional.\n Data sample path to auto complete columns names\n It supports Comma Delimited CSV, Apache Avro and\n Apache Parquet.\n if_folder_exists (str): Optional.\n What to do if table folder exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace folder\n * 'pass' : Do nothing\n if_table_config_exists (str): Optional\n What to do if table_config.yaml and publish.sql exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace files with blank template\n * 'pass' : Do nothing\n source_format (str): Optional\n Data source format. Only 'csv', 'avro' and 'parquet'\n are supported. 
Defaults to 'csv'.\n force_columns (bool): Optional.\n If set to `True`, overwrite CKAN's columns with the ones provi\n ded.\n If set to `False`, keep CKAN's columns instead of the ones pro\n vided.\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n Raises:\n FileExistsError: If folder exists and replace is False.\n NotImplementedError: If data sample is not in supported type or format.\n \"\"\"\n if not self.dataset_folder.exists():\n raise FileExistsError(\n f'Dataset folder {self.dataset_folder} folder does not exists. Create a dataset before adding tables.'\n )\n try:\n self.table_folder.mkdir(exist_ok=if_folder_exists == 'replace')\n except FileExistsError as e:\n if if_folder_exists == 'raise':\n raise FileExistsError(\n f'Table folder already exists for {self.table_id}. '\n ) from e\n if if_folder_exists == 'pass':\n return self\n if not data_sample_path and if_table_config_exists != 'pass':\n raise BaseDosDadosException(\n 'You must provide a path to correctly create config files')\n partition_columns = []\n if isinstance(data_sample_path, (str, Path)):\n data_sample_path = Path(data_sample_path)\n if data_sample_path.is_dir():\n data_sample_path = [f for f in data_sample_path.glob('**/*'\n ) if f.is_file() and f.suffix == f'.{source_format}'][0]\n partition_columns = [k.split('=')[0] for k in\n data_sample_path.as_posix().split('/') if '=' in k]\n columns = Datatype(self, source_format).header(data_sample_path)\n else:\n columns = ['column_name']\n if if_table_config_exists == 'pass':\n if Path(self.table_folder / 'table_config.yaml').is_file(\n ) and Path(self.table_folder / 'publish.sql').is_file():\n pass\n elif not data_sample_path:\n raise BaseDosDadosException(\n 'You must provide a path to correctly create config files')\n else:\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n elif if_table_config_exists == 'raise':\n if Path(self.table_folder / 'table_config.yaml').is_file(\n ) and Path(self.table_folder / 'publish.sql').is_file():\n raise FileExistsError(\n f'table_config.yaml and publish.sql already exists at {self.table_folder}'\n )\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n else:\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n if columns_config_url_or_path is not None:\n self.update_columns(columns_config_url_or_path)\n return self\n <mask token>\n\n def update(self, mode='all'):\n \"\"\"Updates BigQuery schema and description.\n Args:\n mode (str): Optional.\n Table of which table to update [prod|staging|all]\n not_found_ok (bool): Optional.\n What to do if table is not found\n \"\"\"\n self._check_mode(mode)\n mode = ['prod', 'staging'] if mode == 'all' else [mode]\n for m in mode:\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n table.description = self._render_template(Path(\n 'table/table_description.txt'), self.table_config)\n with open(self.metadata_path / self.dataset_id / self.table_id /\n 'table_description.txt', 'w', encoding='utf-8') as f:\n f.write(table.description)\n table.schema = self._load_schema(m)\n fields = ['description', 'schema'] if m == 'prod' else [\n 'description']\n 
self.client[f'bigquery_{m}'].update_table(table, fields=fields)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='updated')\n\n def publish(self, if_exists='raise'):\n \"\"\"Creates BigQuery table at production dataset.\n\n Table should be located at `<dataset_id>.<table_id>`.\n\n It creates a view that uses the query from\n `<metadata_path>/<dataset_id>/<table_id>/publish.sql`.\n\n Make sure that all columns from the query also exists at\n `<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including\n the partitions.\n\n Args:\n if_exists (str): Optional.\n What to do if table exists.\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n\n Todo:\n\n * Check if all required fields are filled\n \"\"\"\n if if_exists == 'replace':\n self.delete(mode='prod')\n self.client['bigquery_prod'].query((self.table_folder /\n 'publish.sql').open('r', encoding='utf-8').read()).result()\n self.update()\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='published')\n <mask token>\n\n def append(self, filepath, partitions=None, if_exists='replace',\n chunk_size=None, **upload_args):\n \"\"\"Appends new data to existing BigQuery table.\n\n As long as the data has the same schema. It appends the data in the\n filepath to the existing table.\n\n Args:\n filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n partitions (str, pathlib.PosixPath, dict): Optional.\n Hive structured partition as a string or dict\n\n * str : `<key>=<value>/<key2>=<value2>`\n * dict: `dict(key=value, key2=value2)`\n if_exists (str): 0ptional.\n What to do if data with same name exists in storage\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.\n \"\"\"\n if not self.table_exists('staging'):\n raise BaseDosDadosException(\n 'You cannot append to a table that does not exist')\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n filepath, mode='staging', partitions=partitions, if_exists=\n if_exists, chunk_size=chunk_size, **upload_args)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='appended')\n",
"step-4": "<mask token>\n\n\nclass Table(Base):\n <mask token>\n\n def __init__(self, dataset_id, table_id, **kwargs):\n super().__init__(**kwargs)\n self.table_id = table_id.replace('-', '_')\n self.dataset_id = dataset_id.replace('-', '_')\n self.dataset_folder = Path(self.metadata_path / self.dataset_id)\n self.table_folder = self.dataset_folder / table_id\n self.table_full_name = dict(prod=\n f\"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}\"\n , staging=\n f\"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}\"\n )\n self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))\n self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)\n\n @property\n def table_config(self):\n \"\"\"\n Load table_config.yaml\n \"\"\"\n return self._load_yaml(self.table_folder / 'table_config.yaml')\n\n def _get_table_obj(self, mode):\n \"\"\"\n Get table object from BigQuery\n \"\"\"\n return self.client[f'bigquery_{mode}'].get_table(self.\n table_full_name[mode])\n\n def _is_partitioned(self):\n \"\"\"\n Check if table is partitioned\n \"\"\"\n partitions = self.table_config['partitions']\n if partitions is None or len(partitions) == 0:\n return False\n if isinstance(partitions, list):\n return all(item is not None for item in partitions)\n raise ValueError('Partitions must be a list or None')\n\n def _load_schema(self, mode='staging'):\n \"\"\"Load schema from table_config.yaml\n\n Args:\n mode (bool): Which dataset to create [prod|staging].\n \"\"\"\n self._check_mode(mode)\n json_path = self.table_folder / f'schema-{mode}.json'\n columns = self.table_config['columns']\n if mode == 'staging':\n new_columns = []\n for c in columns:\n is_in_staging = True if c.get('is_in_staging') is None else c[\n 'is_in_staging']\n if is_in_staging and not c.get('is_partition'):\n c['type'] = 'STRING'\n new_columns.append(c)\n del columns\n columns = new_columns\n elif mode == 'prod':\n schema = self._get_table_obj(mode).schema\n column_names = [c['name'] for c in columns]\n schema_names = [s.name for s in schema]\n not_in_columns = [name for name in schema_names if name not in\n column_names]\n not_in_schema = [name for name in column_names if name not in\n schema_names]\n if not_in_columns:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_columns, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n if not_in_schema:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in publish.sql. 
Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_schema, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n for c in columns:\n for s in schema:\n if c['name'] == s.name:\n c['type'] = s.field_type\n c['mode'] = s.mode\n break\n json.dump(columns, json_path.open('w', encoding='utf-8'))\n return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))\n\n def _make_publish_sql(self):\n \"\"\"Create publish.sql with columns and bigquery_type\"\"\"\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse é o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. logs, proporções, etc.)\n\n Qualquer coluna definida aqui deve também existir em `table_config.yaml`.\n\n # Além disso, sinta-se à vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explícito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n project_id_prod = self.client['bigquery_prod'].project\n publish_txt += f\"\"\"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n\"\"\"\n if self._is_partitioned():\n columns = sorted(self.table_config['columns'], key=lambda k: (k\n ['is_partition'] is not None, k['is_partition']), reverse=True)\n else:\n columns = self.table_config['columns']\n for col in columns:\n name = col['name']\n bigquery_type = 'STRING' if col['bigquery_type'] is None else col[\n 'bigquery_type'].upper()\n publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\\n'\n publish_txt = publish_txt[:-2] + '\\n'\n project_id_staging = self.client['bigquery_staging'].project\n publish_txt += (\n f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'\n )\n (self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(\n publish_txt)\n\n def _make_template(self, columns, partition_columns,\n if_table_config_exists, force_columns):\n self.metadata.create(if_exists=if_table_config_exists, columns=\n partition_columns + columns, partition_columns=\n partition_columns, force_columns=force_columns, table_only=False)\n self._make_publish_sql()\n\n @staticmethod\n def _sheet_to_df(columns_config_url_or_path):\n \"\"\"\n Convert sheet to dataframe\n \"\"\"\n url = columns_config_url_or_path.replace('edit#gid=',\n 'export?format=csv&gid=')\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).\n content.decode('utf-8')))\n except Exception as e:\n raise BaseDosDadosException(\n 'Check if your google sheet Share are: Anyone on the internet with this link can view'\n ) from e\n\n def table_exists(self, mode):\n \"\"\"Check if table exists in BigQuery.\n\n Args:\n mode (str): Which dataset to check [prod|staging].\n \"\"\"\n try:\n ref = self._get_table_obj(mode=mode)\n except google.api_core.exceptions.NotFound:\n ref = None\n return bool(ref)\n\n def update_columns(self, columns_config_url_or_path=None):\n \"\"\"\n Fills columns in table_config.yaml automatically using a public google sheets URL or a local file. 
Also regenerate\n publish.sql and autofill type using bigquery_type.\n\n The sheet must contain the columns:\n - name: column name\n - description: column description\n - bigquery_type: column bigquery type\n - measurement_unit: column mesurement unit\n - covered_by_dictionary: column related dictionary\n - directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>\n - temporal_coverage: column temporal coverage\n - has_sensitive_data: the column has sensitive data\n - observations: column observations\n Args:\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n \"\"\"\n ruamel = ryaml.YAML()\n ruamel.preserve_quotes = True\n ruamel.indent(mapping=4, sequence=6, offset=4)\n table_config_yaml = ruamel.load((self.table_folder /\n 'table_config.yaml').open(encoding='utf-8'))\n if ('https://docs.google.com/spreadsheets/d/' in\n columns_config_url_or_path):\n if ('edit#gid=' not in columns_config_url_or_path or \n 'https://docs.google.com/spreadsheets/d/' not in\n columns_config_url_or_path or not\n columns_config_url_or_path.split('=')[1].isdigit()):\n raise BaseDosDadosException(\n 'The Google sheet url not in correct format.The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>'\n )\n df = self._sheet_to_df(columns_config_url_or_path)\n else:\n file_type = columns_config_url_or_path.split('.')[-1]\n if file_type == 'csv':\n df = pd.read_csv(columns_config_url_or_path, encoding='utf-8')\n elif file_type in ['xls', 'xlsx', 'xlsm', 'xlsb', 'odf', 'ods',\n 'odt']:\n df = pd.read_excel(columns_config_url_or_path)\n else:\n raise BaseDosDadosException(\n 'File not suported. 
Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported.'\n )\n df = df.fillna('NULL')\n required_columns = ['name', 'bigquery_type', 'description',\n 'temporal_coverage', 'covered_by_dictionary',\n 'directory_column', 'measurement_unit', 'has_sensitive_data',\n 'observations']\n not_found_columns = required_columns.copy()\n for sheet_column in df.columns.tolist():\n for required_column in required_columns:\n if sheet_column == required_column:\n not_found_columns.remove(required_column)\n if not_found_columns:\n raise BaseDosDadosException(\n f\"The following required columns are not found: {', '.join(not_found_columns)}.\"\n )\n columns_parameters = zip(*[df[required_column].tolist() for\n required_column in required_columns])\n for name, bigquery_type, description, temporal_coverage, covered_by_dictionary, directory_column, measurement_unit, has_sensitive_data, observations in columns_parameters:\n for col in table_config_yaml['columns']:\n if col['name'] == name:\n col['bigquery_type'] = col['bigquery_type'\n ] if bigquery_type == 'NULL' else bigquery_type.lower()\n col['description'] = col['description'\n ] if description == 'NULL' else description\n col['temporal_coverage'] = col['temporal_coverage'\n ] if temporal_coverage == 'NULL' else [\n temporal_coverage]\n col['covered_by_dictionary'] = ('no' if \n covered_by_dictionary == 'NULL' else\n covered_by_dictionary)\n dataset = directory_column.split('.')[0]\n col['directory_column']['dataset_id'] = col[\n 'directory_column']['dataset_id'\n ] if dataset == 'NULL' else dataset\n table = directory_column.split('.')[-1].split(':')[0]\n col['directory_column']['table_id'] = col[\n 'directory_column']['table_id'\n ] if table == 'NULL' else table\n column = directory_column.split('.')[-1].split(':')[-1]\n col['directory_column']['column_name'] = col[\n 'directory_column']['column_name'\n ] if column == 'NULL' else column\n col['measurement_unit'] = col['measurement_unit'\n ] if measurement_unit == 'NULL' else measurement_unit\n col['has_sensitive_data'] = ('no' if has_sensitive_data ==\n 'NULL' else has_sensitive_data)\n col['observations'] = col['observations'\n ] if observations == 'NULL' else observations\n with open(self.table_folder / 'table_config.yaml', 'w', encoding=\n 'utf-8') as f:\n ruamel.dump(table_config_yaml, f)\n self._make_publish_sql()\n\n def init(self, data_sample_path=None, if_folder_exists='raise',\n if_table_config_exists='raise', source_format='csv', force_columns=\n False, columns_config_url_or_path=None):\n \"\"\"Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.\n\n The folder should contain:\n\n * `table_config.yaml`\n * `publish.sql`\n\n You can also point to a sample of the data to auto complete columns names.\n\n Args:\n data_sample_path (str, pathlib.PosixPath): Optional.\n Data sample path to auto complete columns names\n It supports Comma Delimited CSV, Apache Avro and\n Apache Parquet.\n if_folder_exists (str): Optional.\n What to do if table folder exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace folder\n * 'pass' : Do nothing\n if_table_config_exists (str): Optional\n What to do if table_config.yaml and publish.sql exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace files with blank template\n * 'pass' : Do nothing\n source_format (str): Optional\n Data source format. Only 'csv', 'avro' and 'parquet'\n are supported. 
Defaults to 'csv'.\n force_columns (bool): Optional.\n If set to `True`, overwrite CKAN's columns with the ones provi\n ded.\n If set to `False`, keep CKAN's columns instead of the ones pro\n vided.\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n Raises:\n FileExistsError: If folder exists and replace is False.\n NotImplementedError: If data sample is not in supported type or format.\n \"\"\"\n if not self.dataset_folder.exists():\n raise FileExistsError(\n f'Dataset folder {self.dataset_folder} folder does not exists. Create a dataset before adding tables.'\n )\n try:\n self.table_folder.mkdir(exist_ok=if_folder_exists == 'replace')\n except FileExistsError as e:\n if if_folder_exists == 'raise':\n raise FileExistsError(\n f'Table folder already exists for {self.table_id}. '\n ) from e\n if if_folder_exists == 'pass':\n return self\n if not data_sample_path and if_table_config_exists != 'pass':\n raise BaseDosDadosException(\n 'You must provide a path to correctly create config files')\n partition_columns = []\n if isinstance(data_sample_path, (str, Path)):\n data_sample_path = Path(data_sample_path)\n if data_sample_path.is_dir():\n data_sample_path = [f for f in data_sample_path.glob('**/*'\n ) if f.is_file() and f.suffix == f'.{source_format}'][0]\n partition_columns = [k.split('=')[0] for k in\n data_sample_path.as_posix().split('/') if '=' in k]\n columns = Datatype(self, source_format).header(data_sample_path)\n else:\n columns = ['column_name']\n if if_table_config_exists == 'pass':\n if Path(self.table_folder / 'table_config.yaml').is_file(\n ) and Path(self.table_folder / 'publish.sql').is_file():\n pass\n elif not data_sample_path:\n raise BaseDosDadosException(\n 'You must provide a path to correctly create config files')\n else:\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n elif if_table_config_exists == 'raise':\n if Path(self.table_folder / 'table_config.yaml').is_file(\n ) and Path(self.table_folder / 'publish.sql').is_file():\n raise FileExistsError(\n f'table_config.yaml and publish.sql already exists at {self.table_folder}'\n )\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n else:\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n if columns_config_url_or_path is not None:\n self.update_columns(columns_config_url_or_path)\n return self\n <mask token>\n\n def update(self, mode='all'):\n \"\"\"Updates BigQuery schema and description.\n Args:\n mode (str): Optional.\n Table of which table to update [prod|staging|all]\n not_found_ok (bool): Optional.\n What to do if table is not found\n \"\"\"\n self._check_mode(mode)\n mode = ['prod', 'staging'] if mode == 'all' else [mode]\n for m in mode:\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n table.description = self._render_template(Path(\n 'table/table_description.txt'), self.table_config)\n with open(self.metadata_path / self.dataset_id / self.table_id /\n 'table_description.txt', 'w', encoding='utf-8') as f:\n f.write(table.description)\n table.schema = self._load_schema(m)\n fields = ['description', 'schema'] if m == 'prod' else [\n 'description']\n 
self.client[f'bigquery_{m}'].update_table(table, fields=fields)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='updated')\n\n def publish(self, if_exists='raise'):\n \"\"\"Creates BigQuery table at production dataset.\n\n Table should be located at `<dataset_id>.<table_id>`.\n\n It creates a view that uses the query from\n `<metadata_path>/<dataset_id>/<table_id>/publish.sql`.\n\n Make sure that all columns from the query also exists at\n `<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including\n the partitions.\n\n Args:\n if_exists (str): Optional.\n What to do if table exists.\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n\n Todo:\n\n * Check if all required fields are filled\n \"\"\"\n if if_exists == 'replace':\n self.delete(mode='prod')\n self.client['bigquery_prod'].query((self.table_folder /\n 'publish.sql').open('r', encoding='utf-8').read()).result()\n self.update()\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='published')\n\n def delete(self, mode):\n \"\"\"Deletes table in BigQuery.\n\n Args:\n mode (str): Table of which table to delete [prod|staging]\n \"\"\"\n self._check_mode(mode)\n if mode == 'all':\n for m, n in self.table_full_name[mode].items():\n self.client[f'bigquery_{m}'].delete_table(n, not_found_ok=True)\n logger.info(' {object} {object_id}_{mode} was {action}!',\n object_id=self.table_id, mode=mode, object='Table', action=\n 'deleted')\n else:\n self.client[f'bigquery_{mode}'].delete_table(self.\n table_full_name[mode], not_found_ok=True)\n logger.info(' {object} {object_id}_{mode} was {action}!',\n object_id=self.table_id, mode=mode, object='Table', action=\n 'deleted')\n\n def append(self, filepath, partitions=None, if_exists='replace',\n chunk_size=None, **upload_args):\n \"\"\"Appends new data to existing BigQuery table.\n\n As long as the data has the same schema. It appends the data in the\n filepath to the existing table.\n\n Args:\n filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n partitions (str, pathlib.PosixPath, dict): Optional.\n Hive structured partition as a string or dict\n\n * str : `<key>=<value>/<key2>=<value2>`\n * dict: `dict(key=value, key2=value2)`\n if_exists (str): 0ptional.\n What to do if data with same name exists in storage\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.\n \"\"\"\n if not self.table_exists('staging'):\n raise BaseDosDadosException(\n 'You cannot append to a table that does not exist')\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n filepath, mode='staging', partitions=partitions, if_exists=\n if_exists, chunk_size=chunk_size, **upload_args)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='appended')\n",
"step-5": "\"\"\"\nClass for manage tables in Storage and Big Query\n\"\"\"\n# pylint: disable=invalid-name, too-many-locals, too-many-branches, too-many-arguments,line-too-long,R0801,consider-using-f-string\nfrom pathlib import Path\nimport json\nfrom copy import deepcopy\nimport textwrap\nimport inspect\nfrom io import StringIO\n\nfrom loguru import logger\nfrom google.cloud import bigquery\nimport ruamel.yaml as ryaml\nimport requests\nimport pandas as pd\nimport google.api_core.exceptions\n\nfrom basedosdados.upload.base import Base\nfrom basedosdados.upload.storage import Storage\nfrom basedosdados.upload.dataset import Dataset\nfrom basedosdados.upload.datatypes import Datatype\nfrom basedosdados.upload.metadata import Metadata\nfrom basedosdados.exceptions import BaseDosDadosException\n\n\nclass Table(Base):\n \"\"\"\n Manage tables in Google Cloud Storage and BigQuery.\n \"\"\"\n\n def __init__(self, dataset_id, table_id, **kwargs):\n super().__init__(**kwargs)\n\n self.table_id = table_id.replace(\"-\", \"_\")\n self.dataset_id = dataset_id.replace(\"-\", \"_\")\n self.dataset_folder = Path(self.metadata_path / self.dataset_id)\n self.table_folder = self.dataset_folder / table_id\n self.table_full_name = dict(\n prod=f\"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}\",\n staging=f\"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}\",\n )\n self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))\n self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)\n\n @property\n def table_config(self):\n \"\"\"\n Load table_config.yaml\n \"\"\"\n return self._load_yaml(self.table_folder / \"table_config.yaml\")\n\n def _get_table_obj(self, mode):\n \"\"\"\n Get table object from BigQuery\n \"\"\"\n return self.client[f\"bigquery_{mode}\"].get_table(self.table_full_name[mode])\n\n def _is_partitioned(self):\n \"\"\"\n Check if table is partitioned\n \"\"\"\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")\n\n def _load_schema(self, mode=\"staging\"):\n \"\"\"Load schema from table_config.yaml\n\n Args:\n mode (bool): Which dataset to create [prod|staging].\n \"\"\"\n\n self._check_mode(mode)\n\n json_path = self.table_folder / f\"schema-{mode}.json\"\n columns = self.table_config[\"columns\"]\n\n if mode == \"staging\":\n new_columns = []\n for c in columns:\n # case is_in_staging are None then must be True\n is_in_staging = (\n True if c.get(\"is_in_staging\") is None else c[\"is_in_staging\"]\n )\n # append columns declared in table_config.yaml to schema only if is_in_staging: True\n if is_in_staging and not c.get(\"is_partition\"):\n c[\"type\"] = \"STRING\"\n new_columns.append(c)\n\n del columns\n columns = new_columns\n\n elif mode == \"prod\":\n schema = self._get_table_obj(mode).schema\n\n # get field names for fields at schema and at table_config.yaml\n column_names = [c[\"name\"] for c in columns]\n schema_names = [s.name for s in schema]\n\n # check if there are mismatched fields\n not_in_columns = [name for name in schema_names if name not 
in column_names]\n not_in_schema = [name for name in column_names if name not in schema_names]\n\n # raise if field is not in table_config\n if not_in_columns:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in table_config.yaml. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_columns,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # raise if field is not in schema\n if not_in_schema:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in publish.sql. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_schema,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # if field is in schema, get field_type and field_mode\n for c in columns:\n for s in schema:\n if c[\"name\"] == s.name:\n c[\"type\"] = s.field_type\n c[\"mode\"] = s.mode\n break\n ## force utf-8, write schema_{mode}.json\n json.dump(columns, (json_path).open(\"w\", encoding=\"utf-8\"))\n\n # load new created schema\n return self.client[f\"bigquery_{mode}\"].schema_from_json(str(json_path))\n\n def _make_publish_sql(self):\n \"\"\"Create publish.sql with columns and bigquery_type\"\"\"\n\n ### publish.sql header and instructions\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse é o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. 
logs, proporções, etc.)\n\n Qualquer coluna definida aqui deve também existir em `table_config.yaml`.\n\n # Além disso, sinta-se à vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explícito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n\n # remove triple quotes extra space\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n\n # add create table statement\n project_id_prod = self.client[\"bigquery_prod\"].project\n publish_txt += f\"\\n\\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\\nSELECT \\n\"\n\n # sort columns by is_partition, partitions_columns come first\n\n if self._is_partitioned():\n columns = sorted(\n self.table_config[\"columns\"],\n key=lambda k: (k[\"is_partition\"] is not None, k[\"is_partition\"]),\n reverse=True,\n )\n else:\n columns = self.table_config[\"columns\"]\n\n # add columns in publish.sql\n for col in columns:\n name = col[\"name\"]\n bigquery_type = (\n \"STRING\"\n if col[\"bigquery_type\"] is None\n else col[\"bigquery_type\"].upper()\n )\n\n publish_txt += f\"SAFE_CAST({name} AS {bigquery_type}) {name},\\n\"\n ## remove last comma\n publish_txt = publish_txt[:-2] + \"\\n\"\n\n # add from statement\n project_id_staging = self.client[\"bigquery_staging\"].project\n publish_txt += (\n f\"FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t\"\n )\n\n # save publish.sql in table_folder\n (self.table_folder / \"publish.sql\").open(\"w\", encoding=\"utf-8\").write(\n publish_txt\n )\n\n def _make_template(self, columns, partition_columns, if_table_config_exists, force_columns):\n # create table_config.yaml with metadata\n self.metadata.create(\n if_exists=if_table_config_exists,\n columns=partition_columns + columns,\n partition_columns=partition_columns,\n force_columns=force_columns,\n table_only=False,\n )\n\n self._make_publish_sql()\n\n @staticmethod\n def _sheet_to_df(columns_config_url_or_path):\n \"\"\"\n Convert sheet to dataframe\n \"\"\"\n url = columns_config_url_or_path.replace(\"edit#gid=\", \"export?format=csv&gid=\")\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).content.decode(\"utf-8\")))\n except Exception as e:\n raise BaseDosDadosException(\n \"Check if your google sheet Share are: Anyone on the internet with this link can view\"\n ) from e\n\n def table_exists(self, mode):\n \"\"\"Check if table exists in BigQuery.\n\n Args:\n mode (str): Which dataset to check [prod|staging].\n \"\"\"\n\n try:\n ref = self._get_table_obj(mode=mode)\n except google.api_core.exceptions.NotFound:\n ref = None\n\n return bool(ref)\n\n def update_columns(self, columns_config_url_or_path=None):\n \"\"\"\n Fills columns in table_config.yaml automatically using a public google sheets URL or a local file. 
Also regenerate\n publish.sql and autofill type using bigquery_type.\n\n The sheet must contain the columns:\n - name: column name\n - description: column description\n - bigquery_type: column bigquery type\n - measurement_unit: column mesurement unit\n - covered_by_dictionary: column related dictionary\n - directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>\n - temporal_coverage: column temporal coverage\n - has_sensitive_data: the column has sensitive data\n - observations: column observations\n Args:\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n \"\"\"\n ruamel = ryaml.YAML()\n ruamel.preserve_quotes = True\n ruamel.indent(mapping=4, sequence=6, offset=4)\n table_config_yaml = ruamel.load(\n (self.table_folder / \"table_config.yaml\").open(encoding=\"utf-8\")\n )\n\n if \"https://docs.google.com/spreadsheets/d/\" in columns_config_url_or_path:\n if (\n \"edit#gid=\" not in columns_config_url_or_path\n or \"https://docs.google.com/spreadsheets/d/\"\n not in columns_config_url_or_path\n or not columns_config_url_or_path.split(\"=\")[1].isdigit()\n ):\n raise BaseDosDadosException(\n \"The Google sheet url not in correct format.\"\n \"The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>\"\n )\n df = self._sheet_to_df(columns_config_url_or_path)\n else:\n file_type = columns_config_url_or_path.split(\".\")[-1]\n if file_type == \"csv\":\n df = pd.read_csv(columns_config_url_or_path, encoding=\"utf-8\")\n elif file_type in [\"xls\", \"xlsx\", \"xlsm\", \"xlsb\", \"odf\", \"ods\", \"odt\"]:\n df = pd.read_excel(columns_config_url_or_path)\n else:\n raise BaseDosDadosException(\n \"File not suported. 
Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported.\"\n )\n\n df = df.fillna(\"NULL\")\n\n required_columns = [\n \"name\",\n \"bigquery_type\",\n \"description\",\n \"temporal_coverage\",\n \"covered_by_dictionary\",\n \"directory_column\",\n \"measurement_unit\",\n \"has_sensitive_data\",\n \"observations\",\n ]\n\n not_found_columns = required_columns.copy()\n for sheet_column in df.columns.tolist():\n for required_column in required_columns:\n if sheet_column == required_column:\n not_found_columns.remove(required_column)\n if not_found_columns:\n raise BaseDosDadosException(\n f\"The following required columns are not found: {', '.join(not_found_columns)}.\"\n )\n\n columns_parameters = zip(\n *[df[required_column].tolist() for required_column in required_columns]\n )\n for (\n name,\n bigquery_type,\n description,\n temporal_coverage,\n covered_by_dictionary,\n directory_column,\n measurement_unit,\n has_sensitive_data,\n observations,\n ) in columns_parameters:\n for col in table_config_yaml[\"columns\"]:\n if col[\"name\"] == name:\n col[\"bigquery_type\"] = (\n col[\"bigquery_type\"]\n if bigquery_type == \"NULL\"\n else bigquery_type.lower()\n )\n\n col[\"description\"] = (\n col[\"description\"] if description == \"NULL\" else description\n )\n\n col[\"temporal_coverage\"] = (\n col[\"temporal_coverage\"]\n if temporal_coverage == \"NULL\"\n else [temporal_coverage]\n )\n\n col[\"covered_by_dictionary\"] = (\n \"no\"\n if covered_by_dictionary == \"NULL\"\n else covered_by_dictionary\n )\n\n dataset = directory_column.split(\".\")[0]\n col[\"directory_column\"][\"dataset_id\"] = (\n col[\"directory_column\"][\"dataset_id\"]\n if dataset == \"NULL\"\n else dataset\n )\n\n table = directory_column.split(\".\")[-1].split(\":\")[0]\n col[\"directory_column\"][\"table_id\"] = (\n col[\"directory_column\"][\"table_id\"]\n if table == \"NULL\"\n else table\n )\n\n column = directory_column.split(\".\")[-1].split(\":\")[-1]\n col[\"directory_column\"][\"column_name\"] = (\n col[\"directory_column\"][\"column_name\"]\n if column == \"NULL\"\n else column\n )\n col[\"measurement_unit\"] = (\n col[\"measurement_unit\"]\n if measurement_unit == \"NULL\"\n else measurement_unit\n )\n\n col[\"has_sensitive_data\"] = (\n \"no\" if has_sensitive_data == \"NULL\" else has_sensitive_data\n )\n\n col[\"observations\"] = (\n col[\"observations\"] if observations == \"NULL\" else observations\n )\n\n with open(self.table_folder / \"table_config.yaml\", \"w\", encoding=\"utf-8\") as f:\n ruamel.dump(table_config_yaml, f)\n\n # regenerate publish.sql\n self._make_publish_sql()\n\n def init(\n self,\n data_sample_path=None,\n if_folder_exists=\"raise\",\n if_table_config_exists=\"raise\",\n source_format=\"csv\",\n force_columns = False,\n columns_config_url_or_path=None,\n ): # sourcery skip: low-code-quality\n \"\"\"Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.\n\n The folder should contain:\n\n * `table_config.yaml`\n * `publish.sql`\n\n You can also point to a sample of the data to auto complete columns names.\n\n Args:\n data_sample_path (str, pathlib.PosixPath): Optional.\n Data sample path to auto complete columns names\n It supports Comma Delimited CSV, Apache Avro and\n Apache Parquet.\n if_folder_exists (str): Optional.\n What to do if table folder exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace folder\n * 'pass' : Do nothing\n if_table_config_exists (str): Optional\n What to do if table_config.yaml and publish.sql 
exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace files with blank template\n * 'pass' : Do nothing\n source_format (str): Optional\n Data source format. Only 'csv', 'avro' and 'parquet'\n are supported. Defaults to 'csv'.\n force_columns (bool): Optional.\n If set to `True`, overwrite CKAN's columns with the ones provi\n ded.\n If set to `False`, keep CKAN's columns instead of the ones pro\n vided.\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n Raises:\n FileExistsError: If folder exists and replace is False.\n NotImplementedError: If data sample is not in supported type or format.\n \"\"\"\n if not self.dataset_folder.exists():\n\n raise FileExistsError(\n f\"Dataset folder {self.dataset_folder} folder does not exists. \"\n \"Create a dataset before adding tables.\"\n )\n\n try:\n self.table_folder.mkdir(exist_ok=(if_folder_exists == \"replace\"))\n except FileExistsError as e:\n if if_folder_exists == \"raise\":\n raise FileExistsError(\n f\"Table folder already exists for {self.table_id}. \"\n ) from e\n if if_folder_exists == \"pass\":\n return self\n\n if not data_sample_path and if_table_config_exists != \"pass\":\n raise BaseDosDadosException(\n \"You must provide a path to correctly create config files\"\n )\n\n partition_columns = []\n if isinstance(\n data_sample_path,\n (\n str,\n Path,\n ),\n ):\n # Check if partitioned and get data sample and partition columns\n data_sample_path = Path(data_sample_path)\n\n if data_sample_path.is_dir():\n\n data_sample_path = [\n f\n for f in data_sample_path.glob(\"**/*\")\n if f.is_file() and f.suffix == f\".{source_format}\"\n ][0]\n\n partition_columns = [\n k.split(\"=\")[0]\n for k in data_sample_path.as_posix().split(\"/\")\n if \"=\" in k\n ]\n\n columns = Datatype(self, source_format).header(data_sample_path)\n\n else:\n\n columns = [\"column_name\"]\n\n if if_table_config_exists == \"pass\":\n # Check if config files exists before passing\n if (\n Path(self.table_folder / \"table_config.yaml\").is_file()\n and Path(self.table_folder / \"publish.sql\").is_file()\n ):\n pass\n # Raise if no sample to determine columns\n elif not data_sample_path:\n raise BaseDosDadosException(\n \"You must provide a path to correctly create config files\"\n )\n else:\n self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)\n\n elif if_table_config_exists == \"raise\":\n\n # Check if config files already exist\n if (\n Path(self.table_folder / \"table_config.yaml\").is_file()\n and Path(self.table_folder / \"publish.sql\").is_file()\n ):\n\n raise FileExistsError(\n f\"table_config.yaml and publish.sql already exists at {self.table_folder}\"\n )\n # if config files don't exist, create them\n self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)\n\n else:\n # Raise: without a path to data sample, should not replace config files with empty template\n self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)\n\n if columns_config_url_or_path is not None:\n self.update_columns(columns_config_url_or_path)\n\n return self\n\n def create(\n self,\n path=None,\n force_dataset=True,\n if_table_exists=\"raise\",\n if_storage_data_exists=\"raise\",\n 
if_table_config_exists=\"raise\",\n source_format=\"csv\",\n force_columns=False,\n columns_config_url_or_path=None,\n dataset_is_public=True,\n location=None,\n chunk_size=None,\n ):\n \"\"\"Creates BigQuery table at staging dataset.\n\n If you add a path, it automatically saves the data in the storage,\n creates a datasets folder and BigQuery location, besides creating the\n table and its configuration files.\n\n The new table should be located at `<dataset_id>_staging.<table_id>` in BigQuery.\n\n It looks for data saved in Storage at `<bucket_name>/staging/<dataset_id>/<table_id>/*`\n and builds the table.\n\n It currently supports the types:\n\n - Comma Delimited CSV\n - Apache Avro\n - Apache Parquet\n\n Data can also be partitioned following the hive partitioning scheme\n `<key1>=<value1>/<key2>=<value2>` - for instance,\n `year=2012/country=BR`. The partition is automatcally detected\n by searching for `partitions` on the `table_config.yaml`.\n\n Args:\n path (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n job_config_params (dict): Optional.\n Job configuration params from bigquery\n if_table_exists (str): Optional\n What to do if table exists\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n force_dataset (bool): Creates `<dataset_id>` folder and BigQuery Dataset if it doesn't exists.\n if_table_config_exists (str): Optional.\n What to do if config files already exist\n\n * 'raise': Raises FileExistError\n * 'replace': Replace with blank template\n * 'pass'; Do nothing\n if_storage_data_exists (str): Optional.\n What to do if data already exists on your bucket:\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n source_format (str): Optional\n Data source format. Only 'csv', 'avro' and 'parquet'\n are supported. Defaults to 'csv'.\n force_columns (bool): Optional.\n If set to `True`, overwrite CKAN's columns with the ones provi\n ded.\n If set to `False`, keep CKAN's columns instead of the ones pro\n vided.\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n dataset_is_public (bool): Control if prod dataset is public or not. By default staging datasets like `dataset_id_staging` are not public.\n\n location (str): Optional. Location of dataset data.\n List of possible region names locations: https://cloud.google.com/bigquery/docs/locations\n\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. 
If that is not specified, a default value of 40 MB is used.\n \"\"\"\n\n if path is None:\n\n # Look if table data already exists at Storage\n data = self.client[\"storage_staging\"].list_blobs(\n self.bucket_name, prefix=f\"staging/{self.dataset_id}/{self.table_id}\"\n )\n\n # Raise: Cannot create table without external data\n if not data:\n raise BaseDosDadosException(\n \"You must provide a path for uploading data\"\n )\n\n # Add data to storage\n if isinstance(\n path,\n (\n str,\n Path,\n ),\n ):\n\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n path,\n mode=\"staging\",\n if_exists=if_storage_data_exists,\n chunk_size=chunk_size,\n )\n\n # Create Dataset if it doesn't exist\n if force_dataset:\n\n dataset_obj = Dataset(self.dataset_id, **self.main_vars)\n\n try:\n dataset_obj.init()\n except FileExistsError:\n pass\n\n dataset_obj.create(\n if_exists=\"pass\", location=location, dataset_is_public=dataset_is_public\n )\n\n self.init(\n data_sample_path=path,\n if_folder_exists=\"replace\",\n if_table_config_exists=if_table_config_exists,\n columns_config_url_or_path=columns_config_url_or_path,\n source_format=source_format,\n force_columns=force_columns\n )\n\n table = bigquery.Table(self.table_full_name[\"staging\"])\n table.external_data_configuration = Datatype(\n self, source_format, \"staging\", partitioned=self._is_partitioned()\n ).external_config\n\n # Lookup if table alreay exists\n table_ref = None\n try:\n table_ref = self.client[\"bigquery_staging\"].get_table(\n self.table_full_name[\"staging\"]\n )\n\n except google.api_core.exceptions.NotFound:\n pass\n\n if isinstance(table_ref, google.cloud.bigquery.table.Table):\n\n if if_table_exists == \"pass\":\n\n return None\n\n if if_table_exists == \"raise\":\n\n raise FileExistsError(\n \"Table already exists, choose replace if you want to overwrite it\"\n )\n\n if if_table_exists == \"replace\":\n\n self.delete(mode=\"staging\")\n\n self.client[\"bigquery_staging\"].create_table(table)\n\n logger.success(\n \"{object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"created\",\n )\n return None\n\n def update(self, mode=\"all\"):\n \"\"\"Updates BigQuery schema and description.\n Args:\n mode (str): Optional.\n Table of which table to update [prod|staging|all]\n not_found_ok (bool): Optional.\n What to do if table is not found\n \"\"\"\n\n self._check_mode(mode)\n\n mode = [\"prod\", \"staging\"] if mode == \"all\" else [mode]\n for m in mode:\n\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n\n # if m == \"staging\":\n\n table.description = self._render_template(\n Path(\"table/table_description.txt\"), self.table_config\n )\n\n # save table description\n with open(\n self.metadata_path\n / self.dataset_id\n / self.table_id\n / \"table_description.txt\",\n \"w\",\n encoding=\"utf-8\",\n ) as f:\n f.write(table.description)\n\n # when mode is staging the table schema already exists\n table.schema = self._load_schema(m)\n fields = [\"description\", \"schema\"] if m == \"prod\" else [\"description\"]\n self.client[f\"bigquery_{m}\"].update_table(table, fields=fields)\n\n logger.success(\n \" {object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"updated\",\n )\n\n def publish(self, if_exists=\"raise\"):\n \"\"\"Creates BigQuery table at production dataset.\n\n Table should be located at `<dataset_id>.<table_id>`.\n\n It creates a view that uses the query from\n 
`<metadata_path>/<dataset_id>/<table_id>/publish.sql`.\n\n Make sure that all columns from the query also exists at\n `<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including\n the partitions.\n\n Args:\n if_exists (str): Optional.\n What to do if table exists.\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n\n Todo:\n\n * Check if all required fields are filled\n \"\"\"\n\n if if_exists == \"replace\":\n self.delete(mode=\"prod\")\n\n self.client[\"bigquery_prod\"].query(\n (self.table_folder / \"publish.sql\").open(\"r\", encoding=\"utf-8\").read()\n ).result()\n\n self.update()\n logger.success(\n \" {object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"published\",\n )\n\n def delete(self, mode):\n \"\"\"Deletes table in BigQuery.\n\n Args:\n mode (str): Table of which table to delete [prod|staging]\n \"\"\"\n\n self._check_mode(mode)\n\n if mode == \"all\":\n for m, n in self.table_full_name[mode].items():\n self.client[f\"bigquery_{m}\"].delete_table(n, not_found_ok=True)\n logger.info(\n \" {object} {object_id}_{mode} was {action}!\",\n object_id=self.table_id,\n mode=mode,\n object=\"Table\",\n action=\"deleted\",\n )\n else:\n self.client[f\"bigquery_{mode}\"].delete_table(\n self.table_full_name[mode], not_found_ok=True\n )\n\n logger.info(\n \" {object} {object_id}_{mode} was {action}!\",\n object_id=self.table_id,\n mode=mode,\n object=\"Table\",\n action=\"deleted\",\n )\n\n def append(\n self,\n filepath,\n partitions=None,\n if_exists=\"replace\",\n chunk_size=None,\n **upload_args,\n ):\n \"\"\"Appends new data to existing BigQuery table.\n\n As long as the data has the same schema. It appends the data in the\n filepath to the existing table.\n\n Args:\n filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n partitions (str, pathlib.PosixPath, dict): Optional.\n Hive structured partition as a string or dict\n\n * str : `<key>=<value>/<key2>=<value2>`\n * dict: `dict(key=value, key2=value2)`\n if_exists (str): 0ptional.\n What to do if data with same name exists in storage\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.\n \"\"\"\n if not self.table_exists(\"staging\"):\n raise BaseDosDadosException(\n \"You cannot append to a table that does not exist\"\n )\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n filepath,\n mode=\"staging\",\n partitions=partitions,\n if_exists=if_exists,\n chunk_size=chunk_size,\n **upload_args,\n )\n logger.success(\n \" {object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"appended\",\n )\n",
"step-ids": [
8,
12,
15,
16,
20
]
}
|
[
8,
12,
15,
16,
20
] |
import datetime
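# Exercise 1: read programming.txt, replace every 'python' with 'C#',
# and write the modified lines to programming1.txt.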
with open(r'D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\programming.txt') as f_obj:
lines = f_obj.readlines()
m_lines = []
for line in lines:
m_line = line.replace('python', 'C#')
m_lines.append(m_line)
with open(r'D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\programming1.txt', 'w') as f_obj:
for line in m_lines:
f_obj.write(line)
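# Exercise 2: a simple guest book that timestamps each visitor until 'q' is entered.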
with open(r'D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\guestbook.txt', 'w') as f_obj:
while True:
username = input('Please input your name. ')
if username == 'q':
break
else:
t = str(datetime.datetime.now())
f_obj.write(username + ' has visited at ' + t + '\n')
|
normal
|
{
"blob_id": "03da813650d56e7ab92885b698d4af3a51176903",
"index": 3878,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming.txt'\n ) as f_obj:\n lines = f_obj.readlines()\n<mask token>\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming1.txt'\n , 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\guestbook.txt'\n , 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-3": "<mask token>\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming.txt'\n ) as f_obj:\n lines = f_obj.readlines()\nm_lines = []\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming1.txt'\n , 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\guestbook.txt'\n , 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-4": "import datetime\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming.txt'\n ) as f_obj:\n lines = f_obj.readlines()\nm_lines = []\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming1.txt'\n , 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\guestbook.txt'\n , 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-5": "import datetime\n\n\nwith open('D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming.txt') as f_obj:\n lines = f_obj.readlines()\n\nm_lines = []\n\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\n\nwith open('D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming1.txt', 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\n\nwith open('D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\guestbook.txt', 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""empty message
Revision ID: 42cf7f6532dd
Revises: e6d4ac8564fb
Create Date: 2019-04-01 16:13:37.207305
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '42cf7f6532dd'
down_revision = 'e6d4ac8564fb'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
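    # server_default is needed so that existing rows can satisfy the new
    # NOT NULL column at the moment it is added.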
op.add_column('stakeholder', sa.Column('archived', sa.Boolean(), nullable=False, default=False, server_default="false"))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('stakeholder', 'archived')
# ### end Alembic commands ###
|
normal
|
{
"blob_id": "42d9f40dd50056b1c258508a6cb3f9875680276a",
"index": 3393,
"step-1": "<mask token>\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(),\n nullable=False, default=False, server_default='false'))\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n",
"step-3": "<mask token>\nrevision = '42cf7f6532dd'\ndown_revision = 'e6d4ac8564fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(),\n nullable=False, default=False, server_default='false'))\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n",
"step-4": "<mask token>\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = '42cf7f6532dd'\ndown_revision = 'e6d4ac8564fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(),\n nullable=False, default=False, server_default='false'))\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n",
"step-5": "\"\"\"empty message\n\nRevision ID: 42cf7f6532dd\nRevises: e6d4ac8564fb\nCreate Date: 2019-04-01 16:13:37.207305\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '42cf7f6532dd'\ndown_revision = 'e6d4ac8564fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(), nullable=False, default=False, server_default=\"false\"))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('stakeholder', 'archived')\n # ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def forward_selected(data, response):
"""Linear model designed by forward selection.
Parameters:
-----------
data : pandas DataFrame with all possible predictors and response
response: string, name of response column in data
Returns:
--------
model: an "optimal" fitted statsmodels linear model
with an intercept
selected by forward selection
evaluated by adjusted R-squared
"""
remaining = set(data.columns)
remaining.remove(response)
selected = []
current_score, best_new_score = 0.0, 0.0
while remaining and current_score == best_new_score:
scores_with_candidates = []
for candidate in remaining:
formula = '{} ~ {} + 1'.format(response, ' + '.join(selected +
[candidate]))
score = smf.ols(formula, data).fit().rsquared_adj
scores_with_candidates.append((score, candidate))
scores_with_candidates.sort()
best_new_score, best_candidate = scores_with_candidates.pop()
if current_score < best_new_score:
remaining.remove(best_candidate)
selected.append(best_candidate)
current_score = best_new_score
formula = '{} ~ {} + 1'.format(response, ' + '.join(selected))
model = smf.ols(formula, data).fit()
print(selected)
return model
<|reserved_special_token_0|>
print(model)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def forward_selected(data, response):
"""Linear model designed by forward selection.
Parameters:
-----------
data : pandas DataFrame with all possible predictors and response
response: string, name of response column in data
Returns:
--------
model: an "optimal" fitted statsmodels linear model
with an intercept
selected by forward selection
evaluated by adjusted R-squared
"""
remaining = set(data.columns)
remaining.remove(response)
selected = []
current_score, best_new_score = 0.0, 0.0
while remaining and current_score == best_new_score:
scores_with_candidates = []
for candidate in remaining:
formula = '{} ~ {} + 1'.format(response, ' + '.join(selected +
[candidate]))
score = smf.ols(formula, data).fit().rsquared_adj
scores_with_candidates.append((score, candidate))
scores_with_candidates.sort()
best_new_score, best_candidate = scores_with_candidates.pop()
if current_score < best_new_score:
remaining.remove(best_candidate)
selected.append(best_candidate)
current_score = best_new_score
formula = '{} ~ {} + 1'.format(response, ' + '.join(selected))
model = smf.ols(formula, data).fit()
print(selected)
return model
dfBIG = pd.read_csv('C:\\Users\\family\\Desktop\\Big12and10.csv')
dfSEC = pd.read_csv('C:\\Users\\family\\Desktop\\SEC.csv')
dfPAC = pd.read_csv('C:\\Users\\family\\Desktop\\AtlanticCoast.csv')
df_Predict = pd.read_csv('C:\\Users\\family\\Desktop\\PredictV2.csv')
SecX = dfSEC[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',
'YDS_TA', 'BroadJump', 'TARGETS', 'ROOKIE_YDS_GAME']]
BigX = dfBIG[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',
'YDS_TA', 'BroadJump', 'TARGETS', 'ROOKIE_YDS_GAME']]
PacX = dfPAC[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',
'YDS_TA', 'BroadJump', 'TARGETS']]
PacY = dfPAC['AVG_YDS_SEASON']
SecY = dfSEC['AVG_YDS_SEASON']
BigY = dfBIG['AVG_YDS_SEASON']
PacZ = dfPAC['YDS_GAME']
BigZ = dfBIG['YDS_GAME']
SecZ = dfSEC['YDS_GAME']
PacJ = dfPAC['MAX_YDS_SEASON']
SecJ = dfSEC['MAX_YDS_SEASON']
BigJ = dfBIG['MAX_YDS_SEASON']
PacK = dfPAC['ROOKIE_YDS_GAME']
SecK = dfSEC['ROOKIE_YDS_GAME']
BigK = dfBIG['ROOKIE_YDS_GAME']
regPAC = sm.OLS(PacK, PacX)
resultsPAC = regPAC.fit()
SecX = SecX.to_numpy()
SecY = SecY.to_numpy()
model = backwardElimination(SecX, SecY, 0.05)
print(model)
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xlrd
from enum import Enum
from sklearn import linear_model
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import statsmodels.formula.api as smf
import statsmodels.api as sm
import statsmodels.formula.api as smf
def forward_selected(data, response):
"""Linear model designed by forward selection.
Parameters:
-----------
data : pandas DataFrame with all possible predictors and response
response: string, name of response column in data
Returns:
--------
model: an "optimal" fitted statsmodels linear model
with an intercept
selected by forward selection
evaluated by adjusted R-squared
"""
remaining = set(data.columns)
remaining.remove(response)
selected = []
current_score, best_new_score = 0.0, 0.0
while remaining and current_score == best_new_score:
scores_with_candidates = []
for candidate in remaining:
formula = '{} ~ {} + 1'.format(response, ' + '.join(selected +
[candidate]))
score = smf.ols(formula, data).fit().rsquared_adj
scores_with_candidates.append((score, candidate))
scores_with_candidates.sort()
best_new_score, best_candidate = scores_with_candidates.pop()
if current_score < best_new_score:
remaining.remove(best_candidate)
selected.append(best_candidate)
current_score = best_new_score
formula = '{} ~ {} + 1'.format(response, ' + '.join(selected))
model = smf.ols(formula, data).fit()
print(selected)
return model
dfBIG = pd.read_csv('C:\\Users\\family\\Desktop\\Big12and10.csv')
dfSEC = pd.read_csv('C:\\Users\\family\\Desktop\\SEC.csv')
dfPAC = pd.read_csv('C:\\Users\\family\\Desktop\\AtlanticCoast.csv')
df_Predict = pd.read_csv('C:\\Users\\family\\Desktop\\PredictV2.csv')
SecX = dfSEC[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',
'YDS_TA', 'BroadJump', 'TARGETS', 'ROOKIE_YDS_GAME']]
BigX = dfBIG[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',
'YDS_TA', 'BroadJump', 'TARGETS', 'ROOKIE_YDS_GAME']]
PacX = dfPAC[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',
'YDS_TA', 'BroadJump', 'TARGETS']]
PacY = dfPAC['AVG_YDS_SEASON']
SecY = dfSEC['AVG_YDS_SEASON']
BigY = dfBIG['AVG_YDS_SEASON']
PacZ = dfPAC['YDS_GAME']
BigZ = dfBIG['YDS_GAME']
SecZ = dfSEC['YDS_GAME']
PacJ = dfPAC['MAX_YDS_SEASON']
SecJ = dfSEC['MAX_YDS_SEASON']
BigJ = dfBIG['MAX_YDS_SEASON']
PacK = dfPAC['ROOKIE_YDS_GAME']
SecK = dfSEC['ROOKIE_YDS_GAME']
BigK = dfBIG['ROOKIE_YDS_GAME']
regPAC = sm.OLS(PacK, PacX)
resultsPAC = regPAC.fit()
SecX = SecX.to_numpy()
SecY = SecY.to_numpy()
model = backwardElimination(SecX, SecY, 0.05)
print(model)
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xlrd
from enum import Enum
from sklearn import linear_model
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import statsmodels.formula.api as smf
import statsmodels.api as sm
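# Relate college receiving stats and combine results (one CSV per conference)
# to rookie-season pro production, using statsmodels OLS and stepwise selection.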
def forward_selected(data, response):
"""Linear model designed by forward selection.
Parameters:
-----------
data : pandas DataFrame with all possible predictors and response
response: string, name of response column in data
Returns:
--------
model: an "optimal" fitted statsmodels linear model
with an intercept
selected by forward selection
evaluated by adjusted R-squared
"""
remaining = set(data.columns)
remaining.remove(response)
selected = []
current_score, best_new_score = 0.0, 0.0
while remaining and current_score == best_new_score:
scores_with_candidates = []
for candidate in remaining:
formula = "{} ~ {} + 1".format(response,
' + '.join(selected + [candidate]))
score = smf.ols(formula, data).fit().rsquared_adj
scores_with_candidates.append((score, candidate))
scores_with_candidates.sort()
best_new_score, best_candidate = scores_with_candidates.pop()
if current_score < best_new_score:
remaining.remove(best_candidate)
selected.append(best_candidate)
current_score = best_new_score
formula = "{} ~ {} + 1".format(response,
' + '.join(selected))
model = smf.ols(formula, data).fit()
print(selected)
return model
def backwardElimination(x, y, sl):
    # Backward elimination: repeatedly drop the predictor with the largest
    # p-value until every remaining p-value is below the significance level sl.
    # Restored from the commented-out draft because it is called below; the
    # draft's `x = (x, j, 1)` is replaced with np.delete, which removes column j.
    numVars = len(x[0])
    for i in range(0, numVars):
        regressor_OLS = sm.OLS(y, x).fit()
        maxVar = max(regressor_OLS.pvalues).astype(float)
        if maxVar > sl:
            for j in range(0, numVars - i):
                if regressor_OLS.pvalues[j].astype(float) == maxVar:
                    x = np.delete(x, j, 1)
    regressor_OLS.summary()
    return x
dfBIG=pd.read_csv("C:\\Users\\family\\Desktop\\Big12and10.csv")
dfSEC=pd.read_csv("C:\\Users\\family\\Desktop\\SEC.csv")#- For SEC data
dfPAC=pd.read_csv("C:\\Users\\family\\Desktop\\AtlanticCoast.csv")#- For Atlantic Coast and Pac12
df_Predict=pd.read_csv("C:\\Users\\family\\Desktop\\PredictV2.csv")
#plt.scatter(dfBIG['DP'],dfBIG['YDS/GAME'])
SecX=dfSEC[['DP','CATCH_P','YAC','YAC_COMP','FORTYYD','REC','TD','YDS_TA','BroadJump','TARGETS','ROOKIE_YDS_GAME']]# Works for SEC
BigX=dfBIG[['DP','CATCH_P','YAC','YAC_COMP','FORTYYD','REC','TD','YDS_TA','BroadJump','TARGETS','ROOKIE_YDS_GAME']] #Works for AtlanticCoast/Pac12 and Big 10/12
#PacX=dfPAC[['DP','CATCH_P','YAC','YAC_COMP','FORTYYD','REC','TD','YDS_TA','BroadJump','TARGETS','ROOKIE_YDS_GAME']] #Works for AtlanticCoast/Pac12 and Big 10/12
PacX=dfPAC[['DP','CATCH_P','YAC','YAC_COMP','FORTYYD','REC','TD','YDS_TA','BroadJump','TARGETS']] #Works for AtlanticCoast/Pac12 and Big 10/12
#PacX=dfPAC[['DP','CATCH_%','40YD','REC','TD','YDS/TA','TARGETS']] #Works for AtlanticCoast/Pac12 and Big 10/12
#PredictSecX=df_Predict[['DP','CATCH_%','YAC','YAC/COMP','40YD','REC','TARGETS','TD','YDS/TA','Broad Jump']]
PacY=dfPAC['AVG_YDS_SEASON']
SecY=dfSEC['AVG_YDS_SEASON']
BigY=dfBIG['AVG_YDS_SEASON']
PacZ=dfPAC['YDS_GAME']
BigZ=dfBIG['YDS_GAME']
SecZ=dfSEC['YDS_GAME']
PacJ=dfPAC['MAX_YDS_SEASON']
SecJ=dfSEC['MAX_YDS_SEASON']
BigJ=dfBIG['MAX_YDS_SEASON']
PacK=dfPAC['ROOKIE_YDS_GAME']
SecK=dfSEC['ROOKIE_YDS_GAME']
BigK=dfBIG['ROOKIE_YDS_GAME']
# PacK=dfPAC['ROOKIE_YDS']
# SecK=dfSEC['ROOKIE_YDS']
# BigK=dfBIG['ROOKIE_YDS']
# model=forward_selected(SecX,'ROOKIE_YDS')
# print(model)
# regrPac = linear_model.LinearRegression()
# regrSec=linear_model.LinearRegression()
# regrBig=linear_model.LinearRegression()
# regPAC=regrPac.fit(PacX, PacK)
# regSEC=regrSec.fit(SecX, SecK)
# SecX=sm.add_constant(SecX)
# regSEC=sm.OLS(SecK,SecX)
# regBIG=sm.OLS(BigK,BigX)
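# OLS fit of rookie yards per game on the Pac-12 feature set; no constant is
# added here, so the fitted model has no intercept term.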
regPAC=sm.OLS(PacK,PacX)
# resultsSEC=regSEC.fit()
resultsPAC=regPAC.fit()
SecX=SecX.to_numpy()
SecY=SecY.to_numpy()
model=backwardElimination(SecX,SecY,0.05)
print(model)
# resultsBIG=regBIG.fit()
#model=forward_selected(PacX,'ROOKIE_YDS_GAME')
# for i in df_Predict.index:
# print(df_Predict['Conference'][i])
# if df_Predict['Conference'][i]=='Southeastern':
# print(df_Predict['Player'][i])
# pred=regrSec.predict([[df_Predict['DP'][i],df_Predict['CATCH_P'][i],df_Predict['YAC'][i],df_Predict['YAC_COMP'][i],df_Predict['40YD'][i],df_Predict['REC'][i],df_Predict['TD'][i],df_Predict['YDS/TA'][i],df_Predict['Broad Jump'][i]]])
# if pred<0:
# pred=0
# print('Predicted AVG_YDS/SEASON: \n', pred)
# if df_Predict['Conference'][i]=='Big':
# print(df_Predict['Player'][i])
# print('Predicted AVG_YDS/SEASON: \n', regrBig.predict([[df_Predict['DP'][i],df_Predict['CATCH_P'][i],df_Predict['YAC'][i],df_Predict['YAC_COMP'][i],df_Predict['40YD'][i],df_Predict['REC'][i],df_Predict['TD'][i],df_Predict['YDS/TA'][i],df_Predict['Broad Jump'][i]]]))
# if df_Predict['Conference'][i]=='Pac-12':
# print(df_Predict['Player'][i])
# pred=regrPac.predict([[df_Predict['DP'][i],df_Predict['CATCH_P'][i],df_Predict['YAC'][i],df_Predict['YAC_COMP'][i],df_Predict['40YD'][i],df_Predict['REC'][i],df_Predict['TD'][i],df_Predict['YDS/TA'][i],df_Predict['Broad Jump'][i]]])
# if pred<0:
# pred=0
# print('Predicted AVG_YDS/SEASON: \n', pred)
# print (resultsSEC.rsquared_adj)
# print(resultsSEC.summary())
#print (resultsPAC.rsquared_adj)
# print (resultsBIG.rsquared_adj)
# print(model.summary())
#print(model.rsquared_adj)
# print('AVG_YDS/GAME\n')
#print('Intercept: \n', regrSec.intercept_)
#print('Coefficients: \n', regrSec.coef_)
#print("R^2: \n",regSEC.score(pcaSecX,SecK))
#print("R^2: \n",regSEC.score(SecX,SecK))
# regPAC=regrPac.fit(PacX, PacZ)
# regBIG=regrBig.fit(BigX,BigZ)
# regSEC=regrSec.fit(SecX,SecY)
# print('YDS/GAME\n')
# print('Intercept: \n', regrPac.intercept_)
# print('Coefficients: \n', regrPac.coef_)
# print("R^2: \n",regPAC.score(PacX,PacZ) )
# regPAC=regrPac.fit(PacX,PacJ)
|
flexible
|
{
"blob_id": "a903f9c5cae1c2eb2f40dc8ba29f0625a3d34224",
"index": 9690,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef forward_selected(data, response):\n \"\"\"Linear model designed by forward selection.\n\n Parameters:\n -----------\n data : pandas DataFrame with all possible predictors and response\n\n response: string, name of response column in data\n\n Returns:\n --------\n model: an \"optimal\" fitted statsmodels linear model\n with an intercept\n selected by forward selection\n evaluated by adjusted R-squared\n \"\"\"\n remaining = set(data.columns)\n remaining.remove(response)\n selected = []\n current_score, best_new_score = 0.0, 0.0\n while remaining and current_score == best_new_score:\n scores_with_candidates = []\n for candidate in remaining:\n formula = '{} ~ {} + 1'.format(response, ' + '.join(selected +\n [candidate]))\n score = smf.ols(formula, data).fit().rsquared_adj\n scores_with_candidates.append((score, candidate))\n scores_with_candidates.sort()\n best_new_score, best_candidate = scores_with_candidates.pop()\n if current_score < best_new_score:\n remaining.remove(best_candidate)\n selected.append(best_candidate)\n current_score = best_new_score\n formula = '{} ~ {} + 1'.format(response, ' + '.join(selected))\n model = smf.ols(formula, data).fit()\n print(selected)\n return model\n\n\n<mask token>\nprint(model)\n",
"step-3": "<mask token>\n\n\ndef forward_selected(data, response):\n \"\"\"Linear model designed by forward selection.\n\n Parameters:\n -----------\n data : pandas DataFrame with all possible predictors and response\n\n response: string, name of response column in data\n\n Returns:\n --------\n model: an \"optimal\" fitted statsmodels linear model\n with an intercept\n selected by forward selection\n evaluated by adjusted R-squared\n \"\"\"\n remaining = set(data.columns)\n remaining.remove(response)\n selected = []\n current_score, best_new_score = 0.0, 0.0\n while remaining and current_score == best_new_score:\n scores_with_candidates = []\n for candidate in remaining:\n formula = '{} ~ {} + 1'.format(response, ' + '.join(selected +\n [candidate]))\n score = smf.ols(formula, data).fit().rsquared_adj\n scores_with_candidates.append((score, candidate))\n scores_with_candidates.sort()\n best_new_score, best_candidate = scores_with_candidates.pop()\n if current_score < best_new_score:\n remaining.remove(best_candidate)\n selected.append(best_candidate)\n current_score = best_new_score\n formula = '{} ~ {} + 1'.format(response, ' + '.join(selected))\n model = smf.ols(formula, data).fit()\n print(selected)\n return model\n\n\ndfBIG = pd.read_csv('C:\\\\Users\\\\family\\\\Desktop\\\\Big12and10.csv')\ndfSEC = pd.read_csv('C:\\\\Users\\\\family\\\\Desktop\\\\SEC.csv')\ndfPAC = pd.read_csv('C:\\\\Users\\\\family\\\\Desktop\\\\AtlanticCoast.csv')\ndf_Predict = pd.read_csv('C:\\\\Users\\\\family\\\\Desktop\\\\PredictV2.csv')\nSecX = dfSEC[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',\n 'YDS_TA', 'BroadJump', 'TARGETS', 'ROOKIE_YDS_GAME']]\nBigX = dfBIG[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',\n 'YDS_TA', 'BroadJump', 'TARGETS', 'ROOKIE_YDS_GAME']]\nPacX = dfPAC[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',\n 'YDS_TA', 'BroadJump', 'TARGETS']]\nPacY = dfPAC['AVG_YDS_SEASON']\nSecY = dfSEC['AVG_YDS_SEASON']\nBigY = dfBIG['AVG_YDS_SEASON']\nPacZ = dfPAC['YDS_GAME']\nBigZ = dfBIG['YDS_GAME']\nSecZ = dfSEC['YDS_GAME']\nPacJ = dfPAC['MAX_YDS_SEASON']\nSecJ = dfSEC['MAX_YDS_SEASON']\nBigJ = dfBIG['MAX_YDS_SEASON']\nPacK = dfPAC['ROOKIE_YDS_GAME']\nSecK = dfSEC['ROOKIE_YDS_GAME']\nBigK = dfBIG['ROOKIE_YDS_GAME']\nregPAC = sm.OLS(PacK, PacX)\nresultsPAC = regPAC.fit()\nSecX = SecX.to_numpy()\nSecY = SecY.to_numpy()\nmodel = backwardElimination(SecX, SecY, 0.05)\nprint(model)\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xlrd\nfrom enum import Enum\nfrom sklearn import linear_model\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\n\ndef forward_selected(data, response):\n \"\"\"Linear model designed by forward selection.\n\n Parameters:\n -----------\n data : pandas DataFrame with all possible predictors and response\n\n response: string, name of response column in data\n\n Returns:\n --------\n model: an \"optimal\" fitted statsmodels linear model\n with an intercept\n selected by forward selection\n evaluated by adjusted R-squared\n \"\"\"\n remaining = set(data.columns)\n remaining.remove(response)\n selected = []\n current_score, best_new_score = 0.0, 0.0\n while remaining and current_score == best_new_score:\n scores_with_candidates = []\n for candidate in remaining:\n formula = '{} ~ {} + 1'.format(response, ' + '.join(selected +\n [candidate]))\n score = smf.ols(formula, data).fit().rsquared_adj\n scores_with_candidates.append((score, candidate))\n scores_with_candidates.sort()\n best_new_score, best_candidate = scores_with_candidates.pop()\n if current_score < best_new_score:\n remaining.remove(best_candidate)\n selected.append(best_candidate)\n current_score = best_new_score\n formula = '{} ~ {} + 1'.format(response, ' + '.join(selected))\n model = smf.ols(formula, data).fit()\n print(selected)\n return model\n\n\ndfBIG = pd.read_csv('C:\\\\Users\\\\family\\\\Desktop\\\\Big12and10.csv')\ndfSEC = pd.read_csv('C:\\\\Users\\\\family\\\\Desktop\\\\SEC.csv')\ndfPAC = pd.read_csv('C:\\\\Users\\\\family\\\\Desktop\\\\AtlanticCoast.csv')\ndf_Predict = pd.read_csv('C:\\\\Users\\\\family\\\\Desktop\\\\PredictV2.csv')\nSecX = dfSEC[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',\n 'YDS_TA', 'BroadJump', 'TARGETS', 'ROOKIE_YDS_GAME']]\nBigX = dfBIG[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',\n 'YDS_TA', 'BroadJump', 'TARGETS', 'ROOKIE_YDS_GAME']]\nPacX = dfPAC[['DP', 'CATCH_P', 'YAC', 'YAC_COMP', 'FORTYYD', 'REC', 'TD',\n 'YDS_TA', 'BroadJump', 'TARGETS']]\nPacY = dfPAC['AVG_YDS_SEASON']\nSecY = dfSEC['AVG_YDS_SEASON']\nBigY = dfBIG['AVG_YDS_SEASON']\nPacZ = dfPAC['YDS_GAME']\nBigZ = dfBIG['YDS_GAME']\nSecZ = dfSEC['YDS_GAME']\nPacJ = dfPAC['MAX_YDS_SEASON']\nSecJ = dfSEC['MAX_YDS_SEASON']\nBigJ = dfBIG['MAX_YDS_SEASON']\nPacK = dfPAC['ROOKIE_YDS_GAME']\nSecK = dfSEC['ROOKIE_YDS_GAME']\nBigK = dfBIG['ROOKIE_YDS_GAME']\nregPAC = sm.OLS(PacK, PacX)\nresultsPAC = regPAC.fit()\nSecX = SecX.to_numpy()\nSecY = SecY.to_numpy()\nmodel = backwardElimination(SecX, SecY, 0.05)\nprint(model)\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xlrd\nfrom enum import Enum\nfrom sklearn import linear_model\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\ndef forward_selected(data, response):\n \"\"\"Linear model designed by forward selection.\n\n Parameters:\n -----------\n data : pandas DataFrame with all possible predictors and response\n\n response: string, name of response column in data\n\n Returns:\n --------\n model: an \"optimal\" fitted statsmodels linear model\n with an intercept\n selected by forward selection\n evaluated by adjusted R-squared\n \"\"\"\n remaining = set(data.columns)\n remaining.remove(response)\n selected = []\n current_score, best_new_score = 0.0, 0.0\n while remaining and current_score == best_new_score:\n scores_with_candidates = []\n for candidate in remaining:\n formula = \"{} ~ {} + 1\".format(response,\n ' + '.join(selected + [candidate]))\n score = smf.ols(formula, data).fit().rsquared_adj\n scores_with_candidates.append((score, candidate))\n scores_with_candidates.sort()\n best_new_score, best_candidate = scores_with_candidates.pop()\n if current_score < best_new_score:\n remaining.remove(best_candidate)\n selected.append(best_candidate)\n current_score = best_new_score\n formula = \"{} ~ {} + 1\".format(response,\n ' + '.join(selected))\n model = smf.ols(formula, data).fit()\n\n print(selected)\n return model\n# def backwardElimination(x, y, sl):\n# numVars = len(x[0])\n# for i in range(0, numVars):\n# regressor_OLS = sm.OLS(y, x).fit()\n# maxVar = max(regressor_OLS.pvalues).astype(float)\n# if maxVar > sl:\n# for j in range(0, numVars - i):\n# if (regressor_OLS.pvalues[j].astype(float) == maxVar):\n# x = (x, j, 1)\n# regressor_OLS.summary()\n# return x\n \n\n\n\n\ndfBIG=pd.read_csv(\"C:\\\\Users\\\\family\\\\Desktop\\\\Big12and10.csv\")\ndfSEC=pd.read_csv(\"C:\\\\Users\\\\family\\\\Desktop\\\\SEC.csv\")#- For SEC data\ndfPAC=pd.read_csv(\"C:\\\\Users\\\\family\\\\Desktop\\\\AtlanticCoast.csv\")#- For Atlantic Coast and Pac12\n\ndf_Predict=pd.read_csv(\"C:\\\\Users\\\\family\\\\Desktop\\\\PredictV2.csv\")\n#plt.scatter(dfBIG['DP'],dfBIG['YDS/GAME'])\n \nSecX=dfSEC[['DP','CATCH_P','YAC','YAC_COMP','FORTYYD','REC','TD','YDS_TA','BroadJump','TARGETS','ROOKIE_YDS_GAME']]# Works for SEC \nBigX=dfBIG[['DP','CATCH_P','YAC','YAC_COMP','FORTYYD','REC','TD','YDS_TA','BroadJump','TARGETS','ROOKIE_YDS_GAME']] #Works for AtlanticCoast/Pac12 and Big 10/12\n#PacX=dfPAC[['DP','CATCH_P','YAC','YAC_COMP','FORTYYD','REC','TD','YDS_TA','BroadJump','TARGETS','ROOKIE_YDS_GAME']] #Works for AtlanticCoast/Pac12 and Big 10/12\nPacX=dfPAC[['DP','CATCH_P','YAC','YAC_COMP','FORTYYD','REC','TD','YDS_TA','BroadJump','TARGETS']] #Works for AtlanticCoast/Pac12 and Big 10/12\n\n#PacX=dfPAC[['DP','CATCH_%','40YD','REC','TD','YDS/TA','TARGETS']] #Works for AtlanticCoast/Pac12 and Big 10/12\n\n#PredictSecX=df_Predict[['DP','CATCH_%','YAC','YAC/COMP','40YD','REC','TARGETS','TD','YDS/TA','Broad Jump']]\nPacY=dfPAC['AVG_YDS_SEASON']\nSecY=dfSEC['AVG_YDS_SEASON']\nBigY=dfBIG['AVG_YDS_SEASON']\nPacZ=dfPAC['YDS_GAME']\nBigZ=dfBIG['YDS_GAME']\nSecZ=dfSEC['YDS_GAME']\nPacJ=dfPAC['MAX_YDS_SEASON']\nSecJ=dfSEC['MAX_YDS_SEASON']\nBigJ=dfBIG['MAX_YDS_SEASON']\nPacK=dfPAC['ROOKIE_YDS_GAME']\nSecK=dfSEC['ROOKIE_YDS_GAME']\nBigK=dfBIG['ROOKIE_YDS_GAME']\n# PacK=dfPAC['ROOKIE_YDS']\n# 
SecK=dfSEC['ROOKIE_YDS']\n# BigK=dfBIG['ROOKIE_YDS']\n# model=forward_selected(SecX,'ROOKIE_YDS')\n# print(model)\n# regrPac = linear_model.LinearRegression()\n# regrSec=linear_model.LinearRegression()\n# regrBig=linear_model.LinearRegression()\n# regPAC=regrPac.fit(PacX, PacK)\n# regSEC=regrSec.fit(SecX, SecK)\n# SecX=sm.add_constant(SecX)\n# regSEC=sm.OLS(SecK,SecX)\n# regBIG=sm.OLS(BigK,BigX)\nregPAC=sm.OLS(PacK,PacX)\n# resultsSEC=regSEC.fit()\nresultsPAC=regPAC.fit()\nSecX=SecX.to_numpy()\nSecY=SecY.to_numpy()\nmodel=backwardElimination(SecX,SecY,0.05)\nprint(model)\n# resultsBIG=regBIG.fit()\n#model=forward_selected(PacX,'ROOKIE_YDS_GAME')\n\n# for i in df_Predict.index:\n# print(df_Predict['Conference'][i])\n# if df_Predict['Conference'][i]=='Southeastern':\n# print(df_Predict['Player'][i])\n# pred=regrSec.predict([[df_Predict['DP'][i],df_Predict['CATCH_P'][i],df_Predict['YAC'][i],df_Predict['YAC_COMP'][i],df_Predict['40YD'][i],df_Predict['REC'][i],df_Predict['TD'][i],df_Predict['YDS/TA'][i],df_Predict['Broad Jump'][i]]])\n# if pred<0:\n# pred=0\n# print('Predicted AVG_YDS/SEASON: \\n', pred)\n# if df_Predict['Conference'][i]=='Big':\n# print(df_Predict['Player'][i])\n# print('Predicted AVG_YDS/SEASON: \\n', regrBig.predict([[df_Predict['DP'][i],df_Predict['CATCH_P'][i],df_Predict['YAC'][i],df_Predict['YAC_COMP'][i],df_Predict['40YD'][i],df_Predict['REC'][i],df_Predict['TD'][i],df_Predict['YDS/TA'][i],df_Predict['Broad Jump'][i]]]))\n# if df_Predict['Conference'][i]=='Pac-12':\n# print(df_Predict['Player'][i])\n# pred=regrPac.predict([[df_Predict['DP'][i],df_Predict['CATCH_P'][i],df_Predict['YAC'][i],df_Predict['YAC_COMP'][i],df_Predict['40YD'][i],df_Predict['REC'][i],df_Predict['TD'][i],df_Predict['YDS/TA'][i],df_Predict['Broad Jump'][i]]])\n# if pred<0:\n# pred=0\n# print('Predicted AVG_YDS/SEASON: \\n', pred)\n\n# print (resultsSEC.rsquared_adj)\n# print(resultsSEC.summary())\n#print (resultsPAC.rsquared_adj)\n# print (resultsBIG.rsquared_adj)\n# print(model.summary())\n#print(model.rsquared_adj)\n# print('AVG_YDS/GAME\\n')\n#print('Intercept: \\n', regrSec.intercept_)\n#print('Coefficients: \\n', regrSec.coef_)\n#print(\"R^2: \\n\",regSEC.score(pcaSecX,SecK))\n#print(\"R^2: \\n\",regSEC.score(SecX,SecK))\n# regPAC=regrPac.fit(PacX, PacZ)\n# regBIG=regrBig.fit(BigX,BigZ)\n# regSEC=regrSec.fit(SecX,SecY)\n# print('YDS/GAME\\n')\n# print('Intercept: \\n', regrPac.intercept_)\n# print('Coefficients: \\n', regrPac.coef_)\n# print(\"R^2: \\n\",regPAC.score(PacX,PacZ) )\n# regPAC=regrPac.fit(PacX,PacJ)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def single_gpu_inference(sample, gpu):
raw_path = (
'/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi_warped/sample%s_inference.n5'
% sample)
model_path = (
'/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Weights'
)
out_file = (
'/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Predictions/prediction_sample%s.n5'
% sample)
assert os.path.exists(out_file)
offset_file = './offsets_sample%s/list_gpu_%i.json' % (sample, gpu)
with open(offset_file, 'r') as f:
offset_list = json.load(f)
input_shape = 40, 405, 405
output_shape = 32, 320, 320
prediction = InfernoPredict(model_path, crop=output_shape, gpu=0)
postprocess = None
t_predict = time.time()
run_inference_n5(prediction, preprocess, postprocess, raw_path,
out_file, offset_list, input_key='data', target_keys='full_affs',
input_shape=input_shape, output_shape=output_shape, channel_order=[
list(range(19))])
t_predict = time.time() - t_predict
with open(os.path.join(out_file, 't-inf_gpu%i.txt' % gpu), 'w') as f:
f.write('Inference with gpu %i in %f s' % (gpu, t_predict))
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def single_gpu_inference(sample, gpu):
raw_path = (
'/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi_warped/sample%s_inference.n5'
% sample)
model_path = (
'/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Weights'
)
out_file = (
'/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Predictions/prediction_sample%s.n5'
% sample)
assert os.path.exists(out_file)
offset_file = './offsets_sample%s/list_gpu_%i.json' % (sample, gpu)
with open(offset_file, 'r') as f:
offset_list = json.load(f)
input_shape = 40, 405, 405
output_shape = 32, 320, 320
prediction = InfernoPredict(model_path, crop=output_shape, gpu=0)
postprocess = None
t_predict = time.time()
run_inference_n5(prediction, preprocess, postprocess, raw_path,
out_file, offset_list, input_key='data', target_keys='full_affs',
input_shape=input_shape, output_shape=output_shape, channel_order=[
list(range(19))])
t_predict = time.time() - t_predict
with open(os.path.join(out_file, 't-inf_gpu%i.txt' % gpu), 'w') as f:
f.write('Inference with gpu %i in %f s' % (gpu, t_predict))
return True
if __name__ == '__main__':
sample = sys.argv[1]
gpu = int(sys.argv[2])
single_gpu_inference(sample, gpu)
<|reserved_special_token_1|>
import vigra
import os
import sys
import time
import json
from simpleference.inference.inference import run_inference_n5
from simpleference.backends.pytorch import InfernoPredict
from simpleference.backends.pytorch.preprocess import preprocess
def single_gpu_inference(sample, gpu):
raw_path = (
'/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi_warped/sample%s_inference.n5'
% sample)
model_path = (
'/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Weights'
)
out_file = (
'/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Predictions/prediction_sample%s.n5'
% sample)
assert os.path.exists(out_file)
offset_file = './offsets_sample%s/list_gpu_%i.json' % (sample, gpu)
with open(offset_file, 'r') as f:
offset_list = json.load(f)
input_shape = 40, 405, 405
output_shape = 32, 320, 320
prediction = InfernoPredict(model_path, crop=output_shape, gpu=0)
postprocess = None
t_predict = time.time()
run_inference_n5(prediction, preprocess, postprocess, raw_path,
out_file, offset_list, input_key='data', target_keys='full_affs',
input_shape=input_shape, output_shape=output_shape, channel_order=[
list(range(19))])
t_predict = time.time() - t_predict
with open(os.path.join(out_file, 't-inf_gpu%i.txt' % gpu), 'w') as f:
f.write('Inference with gpu %i in %f s' % (gpu, t_predict))
return True
if __name__ == '__main__':
sample = sys.argv[1]
gpu = int(sys.argv[2])
single_gpu_inference(sample, gpu)
<|reserved_special_token_1|>
import vigra
import os
import sys
import time
import json
from simpleference.inference.inference import run_inference_n5
# from simpleference.backends.pytorch import PyTorchPredict
from simpleference.backends.pytorch import InfernoPredict
from simpleference.backends.pytorch.preprocess import preprocess
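# Block-wise affinity prediction for one CREMI sample on a single GPU, using
# the block offsets assigned to that GPU in offsets_sample<X>/list_gpu_<i>.json.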
def single_gpu_inference(sample, gpu):
raw_path = '/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi_warped/sample%s_inference.n5' % sample
model_path = '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Weights'
out_file = '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Predictions/prediction_sample%s.n5' % sample
assert os.path.exists(out_file)
offset_file = './offsets_sample%s/list_gpu_%i.json' % (sample, gpu)
with open(offset_file, 'r') as f:
offset_list = json.load(f)
input_shape = (40, 405, 405)
output_shape = (32, 320, 320)
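    # The output block is smaller than the input block: each prediction is
    # cropped to the central region, and the surrounding voxels only provide context.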
prediction = InfernoPredict(model_path, crop=output_shape, gpu=0)
postprocess = None
t_predict = time.time()
run_inference_n5(prediction,
preprocess,
postprocess,
raw_path,
out_file,
offset_list,
input_key='data',
target_keys='full_affs',
input_shape=input_shape,
output_shape=output_shape,
channel_order=[list(range(19))])
t_predict = time.time() - t_predict
with open(os.path.join(out_file, 't-inf_gpu%i.txt' % gpu), 'w') as f:
f.write("Inference with gpu %i in %f s" % (gpu, t_predict))
return True
if __name__ == '__main__':
sample = sys.argv[1]
gpu = int(sys.argv[2])
single_gpu_inference(sample, gpu)
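# Example invocation (script name is illustrative):
#   python single_gpu_inference.py A 3   -> sample 'A', offset list for GPU 3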
|
flexible
|
{
"blob_id": "5ca990bdcbe9378747e438015beb46760b1e987b",
"index": 7212,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef single_gpu_inference(sample, gpu):\n raw_path = (\n '/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi_warped/sample%s_inference.n5'\n % sample)\n model_path = (\n '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Weights'\n )\n out_file = (\n '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Predictions/prediction_sample%s.n5'\n % sample)\n assert os.path.exists(out_file)\n offset_file = './offsets_sample%s/list_gpu_%i.json' % (sample, gpu)\n with open(offset_file, 'r') as f:\n offset_list = json.load(f)\n input_shape = 40, 405, 405\n output_shape = 32, 320, 320\n prediction = InfernoPredict(model_path, crop=output_shape, gpu=0)\n postprocess = None\n t_predict = time.time()\n run_inference_n5(prediction, preprocess, postprocess, raw_path,\n out_file, offset_list, input_key='data', target_keys='full_affs',\n input_shape=input_shape, output_shape=output_shape, channel_order=[\n list(range(19))])\n t_predict = time.time() - t_predict\n with open(os.path.join(out_file, 't-inf_gpu%i.txt' % gpu), 'w') as f:\n f.write('Inference with gpu %i in %f s' % (gpu, t_predict))\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef single_gpu_inference(sample, gpu):\n raw_path = (\n '/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi_warped/sample%s_inference.n5'\n % sample)\n model_path = (\n '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Weights'\n )\n out_file = (\n '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Predictions/prediction_sample%s.n5'\n % sample)\n assert os.path.exists(out_file)\n offset_file = './offsets_sample%s/list_gpu_%i.json' % (sample, gpu)\n with open(offset_file, 'r') as f:\n offset_list = json.load(f)\n input_shape = 40, 405, 405\n output_shape = 32, 320, 320\n prediction = InfernoPredict(model_path, crop=output_shape, gpu=0)\n postprocess = None\n t_predict = time.time()\n run_inference_n5(prediction, preprocess, postprocess, raw_path,\n out_file, offset_list, input_key='data', target_keys='full_affs',\n input_shape=input_shape, output_shape=output_shape, channel_order=[\n list(range(19))])\n t_predict = time.time() - t_predict\n with open(os.path.join(out_file, 't-inf_gpu%i.txt' % gpu), 'w') as f:\n f.write('Inference with gpu %i in %f s' % (gpu, t_predict))\n return True\n\n\nif __name__ == '__main__':\n sample = sys.argv[1]\n gpu = int(sys.argv[2])\n single_gpu_inference(sample, gpu)\n",
"step-4": "import vigra\nimport os\nimport sys\nimport time\nimport json\nfrom simpleference.inference.inference import run_inference_n5\nfrom simpleference.backends.pytorch import InfernoPredict\nfrom simpleference.backends.pytorch.preprocess import preprocess\n\n\ndef single_gpu_inference(sample, gpu):\n raw_path = (\n '/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi_warped/sample%s_inference.n5'\n % sample)\n model_path = (\n '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Weights'\n )\n out_file = (\n '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Predictions/prediction_sample%s.n5'\n % sample)\n assert os.path.exists(out_file)\n offset_file = './offsets_sample%s/list_gpu_%i.json' % (sample, gpu)\n with open(offset_file, 'r') as f:\n offset_list = json.load(f)\n input_shape = 40, 405, 405\n output_shape = 32, 320, 320\n prediction = InfernoPredict(model_path, crop=output_shape, gpu=0)\n postprocess = None\n t_predict = time.time()\n run_inference_n5(prediction, preprocess, postprocess, raw_path,\n out_file, offset_list, input_key='data', target_keys='full_affs',\n input_shape=input_shape, output_shape=output_shape, channel_order=[\n list(range(19))])\n t_predict = time.time() - t_predict\n with open(os.path.join(out_file, 't-inf_gpu%i.txt' % gpu), 'w') as f:\n f.write('Inference with gpu %i in %f s' % (gpu, t_predict))\n return True\n\n\nif __name__ == '__main__':\n sample = sys.argv[1]\n gpu = int(sys.argv[2])\n single_gpu_inference(sample, gpu)\n",
"step-5": "import vigra\n\nimport os\nimport sys\nimport time\nimport json\n\nfrom simpleference.inference.inference import run_inference_n5\n# from simpleference.backends.pytorch import PyTorchPredict\nfrom simpleference.backends.pytorch import InfernoPredict\nfrom simpleference.backends.pytorch.preprocess import preprocess\n\n\ndef single_gpu_inference(sample, gpu):\n raw_path = '/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi_warped/sample%s_inference.n5' % sample\n model_path = '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Weights'\n out_file = '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Predictions/prediction_sample%s.n5' % sample\n assert os.path.exists(out_file)\n\n offset_file = './offsets_sample%s/list_gpu_%i.json' % (sample, gpu)\n with open(offset_file, 'r') as f:\n offset_list = json.load(f)\n\n input_shape = (40, 405, 405)\n output_shape = (32, 320, 320)\n prediction = InfernoPredict(model_path, crop=output_shape, gpu=0)\n postprocess = None\n\n t_predict = time.time()\n run_inference_n5(prediction,\n preprocess,\n postprocess,\n raw_path,\n out_file,\n offset_list,\n input_key='data',\n target_keys='full_affs',\n input_shape=input_shape,\n output_shape=output_shape,\n channel_order=[list(range(19))])\n t_predict = time.time() - t_predict\n\n with open(os.path.join(out_file, 't-inf_gpu%i.txt' % gpu), 'w') as f:\n f.write(\"Inference with gpu %i in %f s\" % (gpu, t_predict))\n return True\n\n\nif __name__ == '__main__':\n sample = sys.argv[1]\n gpu = int(sys.argv[2])\n single_gpu_inference(sample, gpu)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import unittest
import achemkit.properties_wnx
class TestDummy(unittest.TestCase):
pass
|
normal
|
{
"blob_id": "5f0e6f6dc645996b486f1292fe05229a7fae9b17",
"index": 2342,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestDummy(unittest.TestCase):\n pass\n",
"step-3": "import unittest\nimport achemkit.properties_wnx\n\n\nclass TestDummy(unittest.TestCase):\n pass\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def rest_api(mode=None):
""""""
values = config.read()
wt_url = Text(value=values['api']['url'], placeholder='Add URL',
description='API URL:', disabled=False)
wt_user = Text(value=values['api']['user'], placeholder='Username',
description='API User:', disabled=False)
wt_pass = Password(value=values['api']['pass'], placeholder='******',
description='API Password:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
progress = Output()
def outlog(*text):
with progress:
print(*text)
@wb_save.on_click
def wb_save_on_click(b):
config.update(['api', 'url'], str(wt_url.value))
config.update(['api', 'user'], str(wt_user.value))
if wt_pass.value != '':
config.update(['api', 'pass'], str(wt_pass.value))
outlog('API information is updated')
wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])
return wbox
<|reserved_special_token_0|>
def direct_settings():
values = config.read()
ds_def = values['set']['ds_conf']
ds_dye = values['set']['ds_year']
if ds_def not in [d for d in values['ds_conf']]:
ds_def = [d for d in values['ds_conf']][0]
dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,
description='Default:', disabled=False, layout=Layout(width='200px'))
dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][
'years']], value=int(ds_dye), description='Dataset year:', disabled
=False, layout=Layout(width='180px'))
btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')
@btn_refresh.on_click
def btn_refresh_on_click(b):
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
def on_dsc_change(change):
config.update(['set', 'ds_conf'], dsc.value)
values = config.read()
ds_c = values['set']['ds_conf']
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.observe(on_dsc_change, 'value')
def on_dsy_change(change):
config.update(['set', 'ds_year'], str(dsy.value))
dsy.observe(on_dsy_change, 'value')
bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=
'Configure this dataset')
bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=
'Add new dataset configuration')
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete dataset configuration')
bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete only the selected year.')
dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
progress = Output()
def outlog(*text):
with progress:
print(*text)
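    # dsc_config builds the form for creating or editing one dataset
    # configuration: source tables, key columns and the default map view.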
def dsc_config(dsc_value):
values = config.read()
ds_db = Dropdown(options=['1'], value='1', description='Database:',
disabled=False, layout=Layout(width='140px'))
try:
with open(f"{config.get_value(['paths', 'temp'])}tb_prefix", 'r'
) as f:
code_value = f.read()
except Exception:
code_value = dsc_value
ds_code = Combobox(value=code_value, placeholder='abc', options=[m for
m in data_options.eu_ms()] + [''], description='AOI code:',
ensure_option=False, disabled=False, layout=Layout(width=
'200px'), tooltip=
'Lowercase AOI code name for the dataset (5chr max).')
ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,
step=1, description='Dataset year:', disabled=False, layout=
Layout(width='180px'))
ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],
description='Description:', disabled=False)
info_map_text = ['Set default map view options. ',
'You can get automatically the dataset ', 'center coordinates.']
lat, lon = values['ds_conf'][dsc_value]['center'].split(',')
map_cent_lat = FloatText(value=float(lat), description='Lat:',
disabled=False, layout=Layout(width='160px'))
map_cent_lon = FloatText(value=float(lon), description='Lon:',
disabled=False, layout=Layout(width='160px'))
map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'
], min=0, max=20, step=1, description='Zoom:', disabled=False,
layout=Layout(width='140px'))
bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',
tooltip='Get center point from database.')
ds_box = HBox([ds_code, ds_year, ds_desc])
map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,
bt_get_center, map_zoom])
info_config = Label(
"""Change 'AOI code' value to create a new configuration set or
leave the same 'AOI code' value to configure the selected one."""
)
db = int(values['ds_conf'][dsc_value]['db'])
def get_tb_list():
tbls = database.tables(db, None, False)
if tbls is None:
return []
else:
return tbls
tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['dias_catalog'], get_tb_list(), False), description=
'DIAS catalog:', disabled=False)
tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['parcels'], get_tb_list(), False), description=
'Parcels:', disabled=False)
def get_pr_columns():
try:
colms = database.table_columns(tb_pr.value, 1, None)
if colms is None:
return []
else:
return colms
except Exception:
return []
tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['parcels_id'], get_pr_columns(), False), description
='Parcels ID:', disabled=False, layout=Layout(width='180px'))
tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_names'], get_pr_columns(), False), description
='Crop names:', disabled=False, layout=Layout(width='180px'))
tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_codes'], get_pr_columns(), False), description
='Crop codes:', disabled=False, layout=Layout(width='180px'))
def on_tb_pr_change(change):
tc_id.options = get_pr_columns()
tc_cn.options = get_pr_columns()
tc_cc.options = get_pr_columns()
tb_pr.observe(on_tb_pr_change, 'value')
parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['s2'], get_tb_list(), False), description=
'S2 signatures:', disabled=False)
tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['bs'], get_tb_list(), False), description=
'Backscattering:', disabled=False)
tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['c6'], get_tb_list(), False), description=
'6 day coherence:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
@bt_get_center.on_click
def bt_get_center_on_click(b):
import json
center_json = json.loads(database.getTableCentroid(tb_pr.value)
['center'][0])
map_cent_lat.value = round(center_json['coordinates'][1], 2)
map_cent_lon.value = round(center_json['coordinates'][0], 2)
map_zoom.value = 10
@wb_save.on_click
def wb_save_on_click(b):
progress.clear_output()
dscode = ds_code.value
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'dias_catalog'], str(tb_dc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'parcels'], str(tb_pr.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'parcels_id'], str(tc_id.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_names'], str(tc_cn.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_codes'], str(tc_cc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 's2'], str(tb_s2.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'bs'], str(tb_bs.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'c6'], str(tb_6c.value))
config.update(['ds_conf', dscode, 'db'], str(ds_db.value))
config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))
config.update(['ds_conf', dscode, 'center'],
f'{map_cent_lat.value},{map_cent_lon.value}')
config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))
config.update(['set', 'ds_conf'], str(dscode))
config.update(['set', 'ds_year'], str(ds_year.value))
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
outlog('The configurations are saved.')
return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,
tb_6c, Label(''.join(info_map_text)), map_box, wb_save])
dsc_new_box = HBox([])
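    # The buttons below toggle the configuration form and remove either a whole
    # dataset configuration or a single year from the stored settings.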
@bt_set.on_click
def bt_set_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_new.on_click
def bt_new_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_rec.on_click
def bt_rec_on_click(b):
progress.clear_output()
if len(dsc.options) > 1:
config.delete(['ds_conf', dsc.value])
outlog(f"Dataset configuration '{dsc.value}' is deleted.")
values = config.read()
dsc.options = [d for d in values['ds_conf']]
else:
outlog('Can not remove last configuration.')
@bt_rey.on_click
def bt_rey_on_click(b):
progress.clear_output()
if len(dsy.options) > 1:
config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
values = config.read()
dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)
]['years']]
else:
outlog('Can not remove last configuration.')
wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,
progress])
return wbox
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def widget_box():
source = int(config.get_value(['set', 'data_source']))
sources = RadioButtons(options=[('JRC RESTful API.', 0), (
'Direct access to database and object storage.', 1)], value=source,
layout={'width': 'max-content'})
sources_box = Box([Label(value='Data sources:'), sources])
info_api = Label('RESTful API Settings.')
info_direct = Label('Direct access settings')
view_options = VBox([info_direct])
if source == 0:
view_options.children = [info_api, rest_api()]
elif source == 1:
view_options.children = [info_direct, direct()]
def on_source_change(change):
view_options.children = []
if sources.value == 0:
view_options.children = [info_api, rest_api()]
elif sources.value == 1:
view_options.children = [info_direct, direct()]
config.update(['set', 'data_source'], str(sources.value))
sources.observe(on_source_change, 'value')
wbox_sources = VBox([sources_box, view_options], layout=Layout(border=
'1px solid black'))
info_general = Label(value='General settings:')
wbox = VBox([wbox_sources, info_general, settings.widget_box()])
return wbox
def rest_api(mode=None):
""""""
values = config.read()
wt_url = Text(value=values['api']['url'], placeholder='Add URL',
description='API URL:', disabled=False)
wt_user = Text(value=values['api']['user'], placeholder='Username',
description='API User:', disabled=False)
wt_pass = Password(value=values['api']['pass'], placeholder='******',
description='API Password:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
progress = Output()
def outlog(*text):
with progress:
print(*text)
@wb_save.on_click
def wb_save_on_click(b):
config.update(['api', 'url'], str(wt_url.value))
config.update(['api', 'user'], str(wt_user.value))
if wt_pass.value != '':
config.update(['api', 'pass'], str(wt_pass.value))
outlog('API information is updated')
wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])
return wbox
<|reserved_special_token_0|>
def direct_settings():
values = config.read()
ds_def = values['set']['ds_conf']
ds_dye = values['set']['ds_year']
if ds_def not in [d for d in values['ds_conf']]:
ds_def = [d for d in values['ds_conf']][0]
dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,
description='Default:', disabled=False, layout=Layout(width='200px'))
dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][
'years']], value=int(ds_dye), description='Dataset year:', disabled
=False, layout=Layout(width='180px'))
btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')
@btn_refresh.on_click
def btn_refresh_on_click(b):
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
def on_dsc_change(change):
config.update(['set', 'ds_conf'], dsc.value)
values = config.read()
ds_c = values['set']['ds_conf']
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.observe(on_dsc_change, 'value')
def on_dsy_change(change):
config.update(['set', 'ds_year'], str(dsy.value))
dsy.observe(on_dsy_change, 'value')
bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=
'Configure this dataset')
bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=
'Add new dataset configuration')
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete dataset configuration')
bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete only the selected year.')
dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
progress = Output()
def outlog(*text):
with progress:
print(*text)
def dsc_config(dsc_value):
values = config.read()
ds_db = Dropdown(options=['1'], value='1', description='Database:',
disabled=False, layout=Layout(width='140px'))
try:
with open(f"{config.get_value(['paths', 'temp'])}tb_prefix", 'r'
) as f:
code_value = f.read()
except Exception:
code_value = dsc_value
ds_code = Combobox(value=code_value, placeholder='abc', options=[m for
m in data_options.eu_ms()] + [''], description='AOI code:',
ensure_option=False, disabled=False, layout=Layout(width=
'200px'), tooltip=
'Lowercase AOI code name for the dataset (5chr max).')
ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,
step=1, description='Dataset year:', disabled=False, layout=
Layout(width='180px'))
ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],
description='Description:', disabled=False)
info_map_text = ['Set default map view options. ',
'You can get automatically the dataset ', 'center coordinates.']
lat, lon = values['ds_conf'][dsc_value]['center'].split(',')
map_cent_lat = FloatText(value=float(lat), description='Lat:',
disabled=False, layout=Layout(width='160px'))
map_cent_lon = FloatText(value=float(lon), description='Lon:',
disabled=False, layout=Layout(width='160px'))
map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'
], min=0, max=20, step=1, description='Zoom:', disabled=False,
layout=Layout(width='140px'))
bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',
tooltip='Get center point from database.')
ds_box = HBox([ds_code, ds_year, ds_desc])
map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,
bt_get_center, map_zoom])
info_config = Label(
"""Change 'AOI code' value to create a new configuration set or
leave the same 'AOI code' value to configure the selected one."""
)
db = int(values['ds_conf'][dsc_value]['db'])
def get_tb_list():
tbls = database.tables(db, None, False)
if tbls is None:
return []
else:
return tbls
tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['dias_catalog'], get_tb_list(), False), description=
'DIAS catalog:', disabled=False)
tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['parcels'], get_tb_list(), False), description=
'Parcels:', disabled=False)
def get_pr_columns():
try:
colms = database.table_columns(tb_pr.value, 1, None)
if colms is None:
return []
else:
return colms
except Exception:
return []
tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['parcels_id'], get_pr_columns(), False), description
='Parcels ID:', disabled=False, layout=Layout(width='180px'))
tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_names'], get_pr_columns(), False), description
='Crop names:', disabled=False, layout=Layout(width='180px'))
tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_codes'], get_pr_columns(), False), description
='Crop codes:', disabled=False, layout=Layout(width='180px'))
def on_tb_pr_change(change):
tc_id.options = get_pr_columns()
tc_cn.options = get_pr_columns()
tc_cc.options = get_pr_columns()
tb_pr.observe(on_tb_pr_change, 'value')
parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['s2'], get_tb_list(), False), description=
'S2 signatures:', disabled=False)
tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['bs'], get_tb_list(), False), description=
'Backscattering:', disabled=False)
tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['c6'], get_tb_list(), False), description=
'6 day coherence:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
@bt_get_center.on_click
def bt_get_center_on_click(b):
import json
center_json = json.loads(database.getTableCentroid(tb_pr.value)
['center'][0])
map_cent_lat.value = round(center_json['coordinates'][1], 2)
map_cent_lon.value = round(center_json['coordinates'][0], 2)
map_zoom.value = 10
@wb_save.on_click
def wb_save_on_click(b):
progress.clear_output()
dscode = ds_code.value
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'dias_catalog'], str(tb_dc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'parcels'], str(tb_pr.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'parcels_id'], str(tc_id.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_names'], str(tc_cn.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_codes'], str(tc_cc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 's2'], str(tb_s2.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'bs'], str(tb_bs.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'c6'], str(tb_6c.value))
config.update(['ds_conf', dscode, 'db'], str(ds_db.value))
config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))
config.update(['ds_conf', dscode, 'center'],
f'{map_cent_lat.value},{map_cent_lon.value}')
config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))
config.update(['set', 'ds_conf'], str(dscode))
config.update(['set', 'ds_year'], str(ds_year.value))
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
outlog('The configurations are saved.')
return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,
tb_6c, Label(''.join(info_map_text)), map_box, wb_save])
dsc_new_box = HBox([])
@bt_set.on_click
def bt_set_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_new.on_click
def bt_new_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_rec.on_click
def bt_rec_on_click(b):
progress.clear_output()
if len(dsc.options) > 1:
config.delete(['ds_conf', dsc.value])
outlog(f"Dataset configuration '{dsc.value}' is deleted.")
values = config.read()
dsc.options = [d for d in values['ds_conf']]
else:
outlog('Can not remove last configuration.')
@bt_rey.on_click
def bt_rey_on_click(b):
progress.clear_output()
if len(dsy.options) > 1:
config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
values = config.read()
dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)
]['years']]
else:
outlog('Can not remove last configuration.')
wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,
progress])
return wbox
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def widget_box():
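    """Top-level data settings widget: choose the data source (RESTful API or direct access) and show the matching options plus general settings."""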
source = int(config.get_value(['set', 'data_source']))
sources = RadioButtons(options=[('JRC RESTful API.', 0), (
'Direct access to database and object storage.', 1)], value=source,
layout={'width': 'max-content'})
sources_box = Box([Label(value='Data sources:'), sources])
info_api = Label('RESTful API Settings.')
info_direct = Label('Direct access settings')
view_options = VBox([info_direct])
if source == 0:
view_options.children = [info_api, rest_api()]
elif source == 1:
view_options.children = [info_direct, direct()]
def on_source_change(change):
view_options.children = []
if sources.value == 0:
view_options.children = [info_api, rest_api()]
elif sources.value == 1:
view_options.children = [info_direct, direct()]
config.update(['set', 'data_source'], str(sources.value))
sources.observe(on_source_change, 'value')
wbox_sources = VBox([sources_box, view_options], layout=Layout(border=
'1px solid black'))
info_general = Label(value='General settings:')
wbox = VBox([wbox_sources, info_general, settings.widget_box()])
return wbox
def rest_api(mode=None):
""""""
values = config.read()
wt_url = Text(value=values['api']['url'], placeholder='Add URL',
description='API URL:', disabled=False)
wt_user = Text(value=values['api']['user'], placeholder='Username',
description='API User:', disabled=False)
wt_pass = Password(value=values['api']['pass'], placeholder='******',
description='API Password:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
progress = Output()
def outlog(*text):
with progress:
print(*text)
@wb_save.on_click
def wb_save_on_click(b):
config.update(['api', 'url'], str(wt_url.value))
config.update(['api', 'user'], str(wt_user.value))
if wt_pass.value != '':
config.update(['api', 'pass'], str(wt_pass.value))
outlog('API information is updated')
wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])
return wbox
def direct():
tab_box = Tab(children=[settings.direct_conn(), direct_settings()])
tab_box.set_title(0, 'Connection')
tab_box.set_title(1, 'db Configuration')
return tab_box
def direct_settings():
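    """Widgets to manage dataset configurations (AOI code, year, tables, columns, map defaults) for direct database access."""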
values = config.read()
ds_def = values['set']['ds_conf']
ds_dye = values['set']['ds_year']
if ds_def not in [d for d in values['ds_conf']]:
ds_def = [d for d in values['ds_conf']][0]
dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,
description='Default:', disabled=False, layout=Layout(width='200px'))
dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][
'years']], value=int(ds_dye), description='Dataset year:', disabled
=False, layout=Layout(width='180px'))
btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')
@btn_refresh.on_click
def btn_refresh_on_click(b):
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
def on_dsc_change(change):
config.update(['set', 'ds_conf'], dsc.value)
values = config.read()
ds_c = values['set']['ds_conf']
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.observe(on_dsc_change, 'value')
def on_dsy_change(change):
config.update(['set', 'ds_year'], str(dsy.value))
dsy.observe(on_dsy_change, 'value')
bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=
'Configure this dataset')
bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=
'Add new dataset configuration')
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete dataset configuration')
bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete only the selected year.')
dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
progress = Output()
def outlog(*text):
with progress:
print(*text)
def dsc_config(dsc_value):
values = config.read()
ds_db = Dropdown(options=['1'], value='1', description='Database:',
disabled=False, layout=Layout(width='140px'))
try:
with open(f"{config.get_value(['paths', 'temp'])}tb_prefix", 'r'
) as f:
code_value = f.read()
except Exception:
code_value = dsc_value
ds_code = Combobox(value=code_value, placeholder='abc', options=[m for
m in data_options.eu_ms()] + [''], description='AOI code:',
ensure_option=False, disabled=False, layout=Layout(width=
'200px'), tooltip=
'Lowercase AOI code name for the dataset (5chr max).')
ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,
step=1, description='Dataset year:', disabled=False, layout=
Layout(width='180px'))
ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],
description='Description:', disabled=False)
info_map_text = ['Set default map view options. ',
'You can get automatically the dataset ', 'center coordinates.']
lat, lon = values['ds_conf'][dsc_value]['center'].split(',')
map_cent_lat = FloatText(value=float(lat), description='Lat:',
disabled=False, layout=Layout(width='160px'))
map_cent_lon = FloatText(value=float(lon), description='Lon:',
disabled=False, layout=Layout(width='160px'))
map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'
], min=0, max=20, step=1, description='Zoom:', disabled=False,
layout=Layout(width='140px'))
bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',
tooltip='Get center point from database.')
ds_box = HBox([ds_code, ds_year, ds_desc])
map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,
bt_get_center, map_zoom])
info_config = Label(
"""Change 'AOI code' value to create a new configuration set or
leave the same 'AOI code' value to configure the selected one."""
)
db = int(values['ds_conf'][dsc_value]['db'])
def get_tb_list():
tbls = database.tables(db, None, False)
if tbls is None:
return []
else:
return tbls
tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['dias_catalog'], get_tb_list(), False), description=
'DIAS catalog:', disabled=False)
tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['parcels'], get_tb_list(), False), description=
'Parcels:', disabled=False)
def get_pr_columns():
try:
colms = database.table_columns(tb_pr.value, 1, None)
if colms is None:
return []
else:
return colms
except Exception:
return []
tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['parcels_id'], get_pr_columns(), False), description
='Parcels ID:', disabled=False, layout=Layout(width='180px'))
tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_names'], get_pr_columns(), False), description
='Crop names:', disabled=False, layout=Layout(width='180px'))
tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_codes'], get_pr_columns(), False), description
='Crop codes:', disabled=False, layout=Layout(width='180px'))
def on_tb_pr_change(change):
tc_id.options = get_pr_columns()
tc_cn.options = get_pr_columns()
tc_cc.options = get_pr_columns()
tb_pr.observe(on_tb_pr_change, 'value')
parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['s2'], get_tb_list(), False), description=
'S2 signatures:', disabled=False)
tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['bs'], get_tb_list(), False), description=
'Backscattering:', disabled=False)
tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['c6'], get_tb_list(), False), description=
'6 day coherence:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
@bt_get_center.on_click
def bt_get_center_on_click(b):
import json
center_json = json.loads(database.getTableCentroid(tb_pr.value)
['center'][0])
map_cent_lat.value = round(center_json['coordinates'][1], 2)
map_cent_lon.value = round(center_json['coordinates'][0], 2)
map_zoom.value = 10
@wb_save.on_click
def wb_save_on_click(b):
progress.clear_output()
dscode = ds_code.value
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'dias_catalog'], str(tb_dc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'parcels'], str(tb_pr.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'parcels_id'], str(tc_id.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_names'], str(tc_cn.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_codes'], str(tc_cc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 's2'], str(tb_s2.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'bs'], str(tb_bs.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'c6'], str(tb_6c.value))
config.update(['ds_conf', dscode, 'db'], str(ds_db.value))
config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))
config.update(['ds_conf', dscode, 'center'],
f'{map_cent_lat.value},{map_cent_lon.value}')
config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))
config.update(['set', 'ds_conf'], str(dscode))
config.update(['set', 'ds_year'], str(ds_year.value))
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
outlog('The configurations are saved.')
return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,
tb_6c, Label(''.join(info_map_text)), map_box, wb_save])
dsc_new_box = HBox([])
@bt_set.on_click
def bt_set_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_new.on_click
def bt_new_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_rec.on_click
def bt_rec_on_click(b):
progress.clear_output()
if len(dsc.options) > 1:
config.delete(['ds_conf', dsc.value])
outlog(f"Dataset configuration '{dsc.value}' is deleted.")
values = config.read()
dsc.options = [d for d in values['ds_conf']]
else:
outlog('Can not remove last configuration.')
@bt_rey.on_click
def bt_rey_on_click(b):
progress.clear_output()
if len(dsy.options) > 1:
config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
values = config.read()
dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)
]['years']]
else:
outlog('Can not remove last configuration.')
wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,
progress])
return wbox
<|reserved_special_token_1|>
from ipywidgets import Text, VBox, HBox, Label, Password, RadioButtons, Button, Layout, Box, Tab, Output, Dropdown, FloatText, BoundedIntText, Combobox
from cbm.utils import config, data_options
from cbm.ipycbm.utils import settings
from cbm.sources import database
def widget_box():
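    """Top-level data settings widget: choose the data source (RESTful API or direct access) and show the matching options plus general settings."""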
source = int(config.get_value(['set', 'data_source']))
sources = RadioButtons(options=[('JRC RESTful API.', 0), (
'Direct access to database and object storage.', 1)], value=source,
layout={'width': 'max-content'})
sources_box = Box([Label(value='Data sources:'), sources])
info_api = Label('RESTful API Settings.')
info_direct = Label('Direct access settings')
view_options = VBox([info_direct])
if source == 0:
view_options.children = [info_api, rest_api()]
elif source == 1:
view_options.children = [info_direct, direct()]
def on_source_change(change):
view_options.children = []
if sources.value == 0:
view_options.children = [info_api, rest_api()]
elif sources.value == 1:
view_options.children = [info_direct, direct()]
config.update(['set', 'data_source'], str(sources.value))
sources.observe(on_source_change, 'value')
wbox_sources = VBox([sources_box, view_options], layout=Layout(border=
'1px solid black'))
info_general = Label(value='General settings:')
wbox = VBox([wbox_sources, info_general, settings.widget_box()])
return wbox
def rest_api(mode=None):
""""""
values = config.read()
wt_url = Text(value=values['api']['url'], placeholder='Add URL',
description='API URL:', disabled=False)
wt_user = Text(value=values['api']['user'], placeholder='Username',
description='API User:', disabled=False)
wt_pass = Password(value=values['api']['pass'], placeholder='******',
description='API Password:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
progress = Output()
def outlog(*text):
with progress:
print(*text)
@wb_save.on_click
def wb_save_on_click(b):
config.update(['api', 'url'], str(wt_url.value))
config.update(['api', 'user'], str(wt_user.value))
if wt_pass.value != '':
config.update(['api', 'pass'], str(wt_pass.value))
outlog('API information is updated')
wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])
return wbox
def direct():
tab_box = Tab(children=[settings.direct_conn(), direct_settings()])
tab_box.set_title(0, 'Connection')
tab_box.set_title(1, 'db Configuration')
return tab_box
def direct_settings():
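    """Widgets to manage dataset configurations (AOI code, year, tables, columns, map defaults) for direct database access."""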
values = config.read()
ds_def = values['set']['ds_conf']
ds_dye = values['set']['ds_year']
if ds_def not in [d for d in values['ds_conf']]:
ds_def = [d for d in values['ds_conf']][0]
dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,
description='Default:', disabled=False, layout=Layout(width='200px'))
dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][
'years']], value=int(ds_dye), description='Dataset year:', disabled
=False, layout=Layout(width='180px'))
btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')
@btn_refresh.on_click
def btn_refresh_on_click(b):
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
def on_dsc_change(change):
config.update(['set', 'ds_conf'], dsc.value)
values = config.read()
ds_c = values['set']['ds_conf']
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.observe(on_dsc_change, 'value')
def on_dsy_change(change):
config.update(['set', 'ds_year'], str(dsy.value))
dsy.observe(on_dsy_change, 'value')
bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=
'Configure this dataset')
bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=
'Add new dataset configuration')
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete dataset configuration')
bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete only the selected year.')
dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
progress = Output()
def outlog(*text):
with progress:
print(*text)
def dsc_config(dsc_value):
values = config.read()
ds_db = Dropdown(options=['1'], value='1', description='Database:',
disabled=False, layout=Layout(width='140px'))
try:
with open(f"{config.get_value(['paths', 'temp'])}tb_prefix", 'r'
) as f:
code_value = f.read()
except Exception:
code_value = dsc_value
ds_code = Combobox(value=code_value, placeholder='abc', options=[m for
m in data_options.eu_ms()] + [''], description='AOI code:',
ensure_option=False, disabled=False, layout=Layout(width=
'200px'), tooltip=
'Lowercase AOI code name for the dataset (5chr max).')
ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,
step=1, description='Dataset year:', disabled=False, layout=
Layout(width='180px'))
ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],
description='Description:', disabled=False)
info_map_text = ['Set default map view options. ',
'You can get automatically the dataset ', 'center coordinates.']
lat, lon = values['ds_conf'][dsc_value]['center'].split(',')
map_cent_lat = FloatText(value=float(lat), description='Lat:',
disabled=False, layout=Layout(width='160px'))
map_cent_lon = FloatText(value=float(lon), description='Lon:',
disabled=False, layout=Layout(width='160px'))
map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'
], min=0, max=20, step=1, description='Zoom:', disabled=False,
layout=Layout(width='140px'))
bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',
tooltip='Get center point from database.')
ds_box = HBox([ds_code, ds_year, ds_desc])
map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,
bt_get_center, map_zoom])
info_config = Label(
"""Change 'AOI code' value to create a new configuration set or
leave the same 'AOI code' value to configure the selected one."""
)
db = int(values['ds_conf'][dsc_value]['db'])
def get_tb_list():
tbls = database.tables(db, None, False)
if tbls is None:
return []
else:
return tbls
tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['dias_catalog'], get_tb_list(), False), description=
'DIAS catalog:', disabled=False)
tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['parcels'], get_tb_list(), False), description=
'Parcels:', disabled=False)
def get_pr_columns():
try:
colms = database.table_columns(tb_pr.value, 1, None)
if colms is None:
return []
else:
return colms
except Exception:
return []
tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['parcels_id'], get_pr_columns(), False), description
='Parcels ID:', disabled=False, layout=Layout(width='180px'))
tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_names'], get_pr_columns(), False), description
='Crop names:', disabled=False, layout=Layout(width='180px'))
tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_codes'], get_pr_columns(), False), description
='Crop codes:', disabled=False, layout=Layout(width='180px'))
def on_tb_pr_change(change):
tc_id.options = get_pr_columns()
tc_cn.options = get_pr_columns()
tc_cc.options = get_pr_columns()
tb_pr.observe(on_tb_pr_change, 'value')
parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['s2'], get_tb_list(), False), description=
'S2 signatures:', disabled=False)
tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['bs'], get_tb_list(), False), description=
'Backscattering:', disabled=False)
tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['c6'], get_tb_list(), False), description=
'6 day coherence:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
@bt_get_center.on_click
def bt_get_center_on_click(b):
import json
center_json = json.loads(database.getTableCentroid(tb_pr.value)
['center'][0])
map_cent_lat.value = round(center_json['coordinates'][1], 2)
map_cent_lon.value = round(center_json['coordinates'][0], 2)
map_zoom.value = 10
@wb_save.on_click
def wb_save_on_click(b):
progress.clear_output()
dscode = ds_code.value
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'dias_catalog'], str(tb_dc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'parcels'], str(tb_pr.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'parcels_id'], str(tc_id.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_names'], str(tc_cn.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_codes'], str(tc_cc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 's2'], str(tb_s2.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'bs'], str(tb_bs.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'c6'], str(tb_6c.value))
config.update(['ds_conf', dscode, 'db'], str(ds_db.value))
config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))
config.update(['ds_conf', dscode, 'center'],
f'{map_cent_lat.value},{map_cent_lon.value}')
config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))
config.update(['set', 'ds_conf'], str(dscode))
config.update(['set', 'ds_year'], str(ds_year.value))
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
outlog('The configurations are saved.')
return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,
tb_6c, Label(''.join(info_map_text)), map_box, wb_save])
dsc_new_box = HBox([])
@bt_set.on_click
def bt_set_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_new.on_click
def bt_new_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_rec.on_click
def bt_rec_on_click(b):
progress.clear_output()
if len(dsc.options) > 1:
config.delete(['ds_conf', dsc.value])
outlog(f"Dataset configuration '{dsc.value}' is deleted.")
values = config.read()
dsc.options = [d for d in values['ds_conf']]
else:
outlog('Can not remove last configuration.')
@bt_rey.on_click
def bt_rey_on_click(b):
progress.clear_output()
if len(dsy.options) > 1:
config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
values = config.read()
dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)
]['years']]
else:
outlog('Can not remove last configuration.')
wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,
progress])
return wbox
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import (Text, VBox, HBox, Label, Password, RadioButtons,
Button, Layout, Box, Tab, Output, Dropdown,
FloatText, BoundedIntText, Combobox)
from cbm.utils import config, data_options
from cbm.ipycbm.utils import settings
from cbm.sources import database
def widget_box():
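    """Top-level data settings widget: choose the data source (RESTful API or direct access) and show the matching options plus general settings."""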
source = int(config.get_value(['set', 'data_source']))
sources = RadioButtons(
options=[
("JRC RESTful API.", 0),
("Direct access to database and object storage.", 1)
],
value=source,
layout={'width': 'max-content'}
)
sources_box = Box([
Label(value="Data sources:"),
sources]
)
info_api = Label("RESTful API Settings.")
info_direct = Label("Direct access settings")
view_options = VBox([info_direct])
if source == 0:
view_options.children = [info_api, rest_api()]
elif source == 1:
view_options.children = [info_direct, direct()]
def on_source_change(change):
view_options.children = []
if sources.value == 0:
view_options.children = [info_api, rest_api()]
elif sources.value == 1:
view_options.children = [info_direct, direct()]
config.update(['set', 'data_source'], str(sources.value))
sources.observe(on_source_change, 'value')
wbox_sources = VBox([sources_box, view_options],
layout=Layout(border='1px solid black'))
info_general = Label(value="General settings:")
wbox = VBox([wbox_sources, info_general, settings.widget_box()])
return wbox
def rest_api(mode=None):
""""""
values = config.read()
wt_url = Text(
value=values['api']['url'],
placeholder='Add URL',
description='API URL:',
disabled=False
)
wt_user = Text(
value=values['api']['user'],
placeholder='Username',
description='API User:',
disabled=False
)
wt_pass = Password(
value=values['api']['pass'],
placeholder='******',
description='API Password:',
disabled=False
)
wb_save = Button(
description='Save',
disabled=False,
icon='save'
)
progress = Output()
def outlog(*text):
with progress:
print(*text)
@wb_save.on_click
def wb_save_on_click(b):
config.update(['api', 'url'], str(wt_url.value))
config.update(['api', 'user'], str(wt_user.value))
if wt_pass.value != '':
config.update(['api', 'pass'], str(wt_pass.value))
outlog("API information is updated")
wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])
return wbox
def direct():
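    """Tab widget grouping the direct-access connection settings and the database configuration panel."""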
# try:
tab_box = Tab(children=[settings.direct_conn(), direct_settings()])
tab_box.set_title(0, 'Connection')
tab_box.set_title(1, 'db Configuration')
# except:
# tab_box = Tab(children=[direct_conn()])
# tab_box.set_title(0, 'Connection')
# print("!WARNING! Can not load direct configuration settings.")
return tab_box
def direct_settings():
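    """Widgets to manage dataset configurations (AOI code, year, tables, columns, map defaults) for direct database access."""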
values = config.read()
ds_def = values['set']['ds_conf']
ds_dye = values['set']['ds_year']
if ds_def not in [d for d in values['ds_conf']]:
ds_def = [d for d in values['ds_conf']][0]
dsc = Dropdown(
options=[d for d in values['ds_conf']],
value=ds_def,
description='Default:',
disabled=False,
layout=Layout(width='200px')
)
dsy = Dropdown(
options=[int(y) for y in values['ds_conf'][dsc.value]['years']],
value=int(ds_dye),
description='Dataset year:',
disabled=False,
layout=Layout(width='180px')
)
btn_refresh = Button(
layout=Layout(width='35px'),
icon='fa-refresh')
@btn_refresh.on_click
def btn_refresh_on_click(b):
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
def on_dsc_change(change):
config.update(['set', 'ds_conf'], dsc.value)
values = config.read()
ds_c = values['set']['ds_conf']
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.observe(on_dsc_change, 'value')
def on_dsy_change(change):
config.update(['set', 'ds_year'], str(dsy.value))
dsy.observe(on_dsy_change, 'value')
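    # Buttons to configure the selected dataset, add a new configuration, or delete a configuration/year.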
bt_set = Button(layout=Layout(width='40px'), icon='cogs',
tooltip="Configure this dataset")
bt_new = Button(layout=Layout(width='40px'), icon='plus',
tooltip="Add new dataset configuration")
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt',
tooltip='Delete dataset configuration')
bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt',
tooltip='Delete only the selected year.')
dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
progress = Output()
def outlog(*text):
with progress:
print(*text)
def dsc_config(dsc_value):
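        """Build and return the configuration panel (VBox) for the given dataset configuration key."""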
values = config.read()
ds_db = Dropdown(
options=["1"],
value="1",
description='Database:',
disabled=False,
layout=Layout(width='140px')
)
try:
with open(f"{config.get_value(['paths','temp'])}tb_prefix", 'r') as f:
code_value = f.read()
except Exception:
code_value = dsc_value
ds_code = Combobox(
value=code_value,
placeholder='abc',
options=[m for m in data_options.eu_ms()]+[''],
description='AOI code:',
ensure_option=False,
disabled=False,
layout=Layout(width='200px'),
tooltip='Lowercase AOI code name for the dataset (5chr max).'
)
ds_year = BoundedIntText(
value=int(dsy.value),
min=1980,
max=2100,
step=1,
description='Dataset year:',
disabled=False,
layout=Layout(width='180px')
)
ds_desc = Text(
value=values['ds_conf'][dsc_value]['desc'],
description='Description:',
disabled=False
)
info_map_text = ["Set default map view options. ",
"You can get automatically the dataset ",
"center coordinates."]
lat, lon = values['ds_conf'][dsc_value]['center'].split(",")
map_cent_lat = FloatText(
value=float(lat),
description='Lat:',
disabled=False,
layout=Layout(width='160px')
)
map_cent_lon = FloatText(
value=float(lon),
description='Lon:',
disabled=False,
layout=Layout(width='160px')
)
map_zoom = BoundedIntText(
value=values['ds_conf'][dsc_value]['zoom'],
min=0,
max=20,
step=1,
description='Zoom:',
disabled=False,
layout=Layout(width='140px')
)
bt_get_center = Button(
layout=Layout(width='40px'),
icon='bullseye',
tooltip='Get center point from database.'
)
ds_box = HBox([ds_code, ds_year, ds_desc])
map_box = HBox([Label("Map center: "), map_cent_lat,
map_cent_lon, bt_get_center, map_zoom])
info_config = Label(
"""Change 'AOI code' value to create a new configuration set or
leave the same 'AOI code' value to configure the selected one.""")
db = int(values['ds_conf'][dsc_value]['db'])
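        # List the tables available in the selected database; used to populate the table dropdowns below.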
def get_tb_list():
tbls = database.tables(db, None, False)
if tbls is None:
return []
else:
return tbls
tb_dc = Dropdown(
options=get_tb_list(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['tables']['dias_catalog'],
get_tb_list(), False),
description='DIAS catalog:',
disabled=False
)
tb_pr = Dropdown(
options=get_tb_list(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['tables']['parcels'],
get_tb_list(), False),
description='Parcels:',
disabled=False
)
def get_pr_columns():
try:
colms = database.table_columns(tb_pr.value, 1, None)
if colms is None:
return []
else:
return colms
except Exception:
return []
tc_id = Dropdown(
options=get_pr_columns(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['columns']['parcels_id'],
get_pr_columns(), False),
description='Parcels ID:',
disabled=False,
layout=Layout(width='180px')
)
tc_cn = Dropdown(
options=get_pr_columns(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['columns']['crop_names'],
get_pr_columns(), False),
description='Crop names:',
disabled=False,
layout=Layout(width='180px')
)
tc_cc = Dropdown(
options=get_pr_columns(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['columns']['crop_codes'],
get_pr_columns(), False),
description='Crop codes:',
disabled=False,
layout=Layout(width='180px')
)
def on_tb_pr_change(change):
tc_id.options = get_pr_columns()
tc_cn.options = get_pr_columns()
tc_cc.options = get_pr_columns()
tb_pr.observe(on_tb_pr_change, 'value')
parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
tb_s2 = Dropdown(
options=get_tb_list(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['tables']['s2'],
get_tb_list(), False),
description='S2 signatures:',
disabled=False
)
tb_bs = Dropdown(
options=get_tb_list(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['tables']['bs'],
get_tb_list(), False),
description='Backscattering:',
disabled=False
)
tb_6c = Dropdown(
options=get_tb_list(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['tables']['c6'],
get_tb_list(), False),
description='6 day coherence:',
disabled=False
)
wb_save = Button(
description='Save',
disabled=False,
icon='save'
)
@bt_get_center.on_click
def bt_get_center_on_click(b):
import json
center_json = json.loads(
database.getTableCentroid(tb_pr.value)['center'][0])
map_cent_lat.value = round(center_json['coordinates'][1], 2)
map_cent_lon.value = round(center_json['coordinates'][0], 2)
map_zoom.value = 10
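        # Persist all table, column and map selections for this AOI code and year to the configuration file.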
@wb_save.on_click
def wb_save_on_click(b):
progress.clear_output()
dscode = ds_code.value
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'dias_catalog'], str(tb_dc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'parcels'], str(tb_pr.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'parcels_id'], str(tc_id.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_names'], str(tc_cn.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_codes'], str(tc_cc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 's2'], str(tb_s2.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'bs'], str(tb_bs.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'c6'], str(tb_6c.value))
config.update(['ds_conf', dscode,
'db'], str(ds_db.value))
config.update(['ds_conf', dscode,
'desc'], str(ds_desc.value))
config.update(['ds_conf', dscode, 'center'],
f"{map_cent_lat.value},{map_cent_lon.value}")
config.update(['ds_conf', dscode,
'zoom'], str(map_zoom.value))
config.update(['set', 'ds_conf'], str(dscode))
config.update(['set', 'ds_year'], str(ds_year.value))
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
outlog("The configurations are saved.")
return VBox([info_config, ds_box, parcel_box,
tb_dc, tb_s2, tb_bs, tb_6c,
Label(''.join(info_map_text)), map_box, wb_save])
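    # Container for the dataset configuration panel; shown or hidden by the configure/new buttons.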
dsc_new_box = HBox([])
@bt_set.on_click
def bt_set_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_new.on_click
def bt_new_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_rec.on_click
def bt_rec_on_click(b):
progress.clear_output()
if len(dsc.options) > 1:
config.delete(['ds_conf', dsc.value])
outlog(f"Dataset configuration '{dsc.value}' is deleted.")
values = config.read()
dsc.options = [d for d in values['ds_conf']]
else:
outlog("Can not remove last configuration.")
@bt_rey.on_click
def bt_rey_on_click(b):
progress.clear_output()
if len(dsy.options) > 1:
config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
values = config.read()
dsy.options = [int(y) for y in values['ds_conf']
[str(dsc.value)]['years']]
else:
outlog("Can not remove last configuration.")
wbox = VBox([Label("Datasets configurations."), dsc_box,
dsc_new_box, progress])
return wbox
|
flexible
|
{
"blob_id": "22afc6b9df87ef1eba284da20a807366278c24d4",
"index": 1343,
"step-1": "<mask token>\n\n\ndef rest_api(mode=None):\n \"\"\"\"\"\"\n values = config.read()\n wt_url = Text(value=values['api']['url'], placeholder='Add URL',\n description='API URL:', disabled=False)\n wt_user = Text(value=values['api']['user'], placeholder='Username',\n description='API User:', disabled=False)\n wt_pass = Password(value=values['api']['pass'], placeholder='******',\n description='API Password:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n @wb_save.on_click\n def wb_save_on_click(b):\n config.update(['api', 'url'], str(wt_url.value))\n config.update(['api', 'user'], str(wt_user.value))\n if wt_pass.value != '':\n config.update(['api', 'pass'], str(wt_pass.value))\n outlog('API information is updated')\n wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])\n return wbox\n\n\n<mask token>\n\n\ndef direct_settings():\n values = config.read()\n ds_def = values['set']['ds_conf']\n ds_dye = values['set']['ds_year']\n if ds_def not in [d for d in values['ds_conf']]:\n ds_def = [d for d in values['ds_conf']][0]\n dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,\n description='Default:', disabled=False, layout=Layout(width='200px'))\n dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][\n 'years']], value=int(ds_dye), description='Dataset year:', disabled\n =False, layout=Layout(width='180px'))\n btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')\n\n @btn_refresh.on_click\n def btn_refresh_on_click(b):\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n\n def on_dsc_change(change):\n config.update(['set', 'ds_conf'], dsc.value)\n values = config.read()\n ds_c = values['set']['ds_conf']\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.observe(on_dsc_change, 'value')\n\n def on_dsy_change(change):\n config.update(['set', 'ds_year'], str(dsy.value))\n dsy.observe(on_dsy_change, 'value')\n bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=\n 'Configure this dataset')\n bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=\n 'Add new dataset configuration')\n bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete dataset configuration')\n bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete only the selected year.')\n dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n def dsc_config(dsc_value):\n values = config.read()\n ds_db = Dropdown(options=['1'], value='1', description='Database:',\n disabled=False, layout=Layout(width='140px'))\n try:\n with open(f\"{config.get_value(['paths', 'temp'])}tb_prefix\", 'r'\n ) as f:\n code_value = f.read()\n except Exception:\n code_value = dsc_value\n ds_code = Combobox(value=code_value, placeholder='abc', options=[m for\n m in data_options.eu_ms()] + [''], description='AOI code:',\n ensure_option=False, disabled=False, layout=Layout(width=\n '200px'), tooltip=\n 'Lowercase AOI code name for the dataset (5chr max).')\n ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,\n step=1, description='Dataset year:', disabled=False, layout=\n 
Layout(width='180px'))\n ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],\n description='Description:', disabled=False)\n info_map_text = ['Set default map view options. ',\n 'You can get automatically the dataset ', 'center coordinates.']\n lat, lon = values['ds_conf'][dsc_value]['center'].split(',')\n map_cent_lat = FloatText(value=float(lat), description='Lat:',\n disabled=False, layout=Layout(width='160px'))\n map_cent_lon = FloatText(value=float(lon), description='Lon:',\n disabled=False, layout=Layout(width='160px'))\n map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'\n ], min=0, max=20, step=1, description='Zoom:', disabled=False,\n layout=Layout(width='140px'))\n bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',\n tooltip='Get center point from database.')\n ds_box = HBox([ds_code, ds_year, ds_desc])\n map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,\n bt_get_center, map_zoom])\n info_config = Label(\n \"\"\"Change 'AOI code' value to create a new configuration set or \n leave the same 'AOI code' value to configure the selected one.\"\"\"\n )\n db = int(values['ds_conf'][dsc_value]['db'])\n\n def get_tb_list():\n tbls = database.tables(db, None, False)\n if tbls is None:\n return []\n else:\n return tbls\n tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['dias_catalog'], get_tb_list(), False), description=\n 'DIAS catalog:', disabled=False)\n tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['parcels'], get_tb_list(), False), description=\n 'Parcels:', disabled=False)\n\n def get_pr_columns():\n try:\n colms = database.table_columns(tb_pr.value, 1, None)\n if colms is None:\n return []\n else:\n return colms\n except Exception:\n return []\n tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['parcels_id'], get_pr_columns(), False), description\n ='Parcels ID:', disabled=False, layout=Layout(width='180px'))\n tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_names'], get_pr_columns(), False), description\n ='Crop names:', disabled=False, layout=Layout(width='180px'))\n tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_codes'], get_pr_columns(), False), description\n ='Crop codes:', disabled=False, layout=Layout(width='180px'))\n\n def on_tb_pr_change(change):\n tc_id.options = get_pr_columns()\n tc_cn.options = get_pr_columns()\n tc_cc.options = get_pr_columns()\n tb_pr.observe(on_tb_pr_change, 'value')\n parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])\n tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['s2'], get_tb_list(), False), description=\n 'S2 signatures:', disabled=False)\n tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['bs'], get_tb_list(), False), description=\n 'Backscattering:', disabled=False)\n tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['c6'], get_tb_list(), False), description=\n 
'6 day coherence:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n\n @bt_get_center.on_click\n def bt_get_center_on_click(b):\n import json\n center_json = json.loads(database.getTableCentroid(tb_pr.value)\n ['center'][0])\n map_cent_lat.value = round(center_json['coordinates'][1], 2)\n map_cent_lon.value = round(center_json['coordinates'][0], 2)\n map_zoom.value = 10\n\n @wb_save.on_click\n def wb_save_on_click(b):\n progress.clear_output()\n dscode = ds_code.value\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'dias_catalog'], str(tb_dc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'parcels'], str(tb_pr.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'parcels_id'], str(tc_id.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_names'], str(tc_cn.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_codes'], str(tc_cc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 's2'], str(tb_s2.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'bs'], str(tb_bs.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'c6'], str(tb_6c.value))\n config.update(['ds_conf', dscode, 'db'], str(ds_db.value))\n config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))\n config.update(['ds_conf', dscode, 'center'],\n f'{map_cent_lat.value},{map_cent_lon.value}')\n config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))\n config.update(['set', 'ds_conf'], str(dscode))\n config.update(['set', 'ds_year'], str(ds_year.value))\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n outlog('The configurations are saved.')\n return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,\n tb_6c, Label(''.join(info_map_text)), map_box, wb_save])\n dsc_new_box = HBox([])\n\n @bt_set.on_click\n def bt_set_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_new.on_click\n def bt_new_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_rec.on_click\n def bt_rec_on_click(b):\n progress.clear_output()\n if len(dsc.options) > 1:\n config.delete(['ds_conf', dsc.value])\n outlog(f\"Dataset configuration '{dsc.value}' is deleted.\")\n values = config.read()\n dsc.options = [d for d in values['ds_conf']]\n else:\n outlog('Can not remove last configuration.')\n\n @bt_rey.on_click\n def bt_rey_on_click(b):\n progress.clear_output()\n if len(dsy.options) > 1:\n config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])\n outlog(f\"Year {dsy.value} of dataset '{dsc.value}' is deleted.\")\n values = config.read()\n dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)\n ]['years']]\n else:\n outlog('Can not remove last configuration.')\n wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,\n progress])\n return wbox\n",
"step-2": "<mask token>\n\n\ndef widget_box():\n source = int(config.get_value(['set', 'data_source']))\n sources = RadioButtons(options=[('JRC RESTful API.', 0), (\n 'Direct access to database and object storage.', 1)], value=source,\n layout={'width': 'max-content'})\n sources_box = Box([Label(value='Data sources:'), sources])\n info_api = Label('RESTful API Settings.')\n info_direct = Label('Direct access settings')\n view_options = VBox([info_direct])\n if source == 0:\n view_options.children = [info_api, rest_api()]\n elif source == 1:\n view_options.children = [info_direct, direct()]\n\n def on_source_change(change):\n view_options.children = []\n if sources.value == 0:\n view_options.children = [info_api, rest_api()]\n elif sources.value == 1:\n view_options.children = [info_direct, direct()]\n config.update(['set', 'data_source'], str(sources.value))\n sources.observe(on_source_change, 'value')\n wbox_sources = VBox([sources_box, view_options], layout=Layout(border=\n '1px solid black'))\n info_general = Label(value='General settings:')\n wbox = VBox([wbox_sources, info_general, settings.widget_box()])\n return wbox\n\n\ndef rest_api(mode=None):\n \"\"\"\"\"\"\n values = config.read()\n wt_url = Text(value=values['api']['url'], placeholder='Add URL',\n description='API URL:', disabled=False)\n wt_user = Text(value=values['api']['user'], placeholder='Username',\n description='API User:', disabled=False)\n wt_pass = Password(value=values['api']['pass'], placeholder='******',\n description='API Password:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n @wb_save.on_click\n def wb_save_on_click(b):\n config.update(['api', 'url'], str(wt_url.value))\n config.update(['api', 'user'], str(wt_user.value))\n if wt_pass.value != '':\n config.update(['api', 'pass'], str(wt_pass.value))\n outlog('API information is updated')\n wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])\n return wbox\n\n\n<mask token>\n\n\ndef direct_settings():\n values = config.read()\n ds_def = values['set']['ds_conf']\n ds_dye = values['set']['ds_year']\n if ds_def not in [d for d in values['ds_conf']]:\n ds_def = [d for d in values['ds_conf']][0]\n dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,\n description='Default:', disabled=False, layout=Layout(width='200px'))\n dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][\n 'years']], value=int(ds_dye), description='Dataset year:', disabled\n =False, layout=Layout(width='180px'))\n btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')\n\n @btn_refresh.on_click\n def btn_refresh_on_click(b):\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n\n def on_dsc_change(change):\n config.update(['set', 'ds_conf'], dsc.value)\n values = config.read()\n ds_c = values['set']['ds_conf']\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.observe(on_dsc_change, 'value')\n\n def on_dsy_change(change):\n config.update(['set', 'ds_year'], str(dsy.value))\n dsy.observe(on_dsy_change, 'value')\n bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=\n 'Configure this dataset')\n bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=\n 'Add new dataset configuration')\n 
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete dataset configuration')\n bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete only the selected year.')\n dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n def dsc_config(dsc_value):\n values = config.read()\n ds_db = Dropdown(options=['1'], value='1', description='Database:',\n disabled=False, layout=Layout(width='140px'))\n try:\n with open(f\"{config.get_value(['paths', 'temp'])}tb_prefix\", 'r'\n ) as f:\n code_value = f.read()\n except Exception:\n code_value = dsc_value\n ds_code = Combobox(value=code_value, placeholder='abc', options=[m for\n m in data_options.eu_ms()] + [''], description='AOI code:',\n ensure_option=False, disabled=False, layout=Layout(width=\n '200px'), tooltip=\n 'Lowercase AOI code name for the dataset (5chr max).')\n ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,\n step=1, description='Dataset year:', disabled=False, layout=\n Layout(width='180px'))\n ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],\n description='Description:', disabled=False)\n info_map_text = ['Set default map view options. ',\n 'You can get automatically the dataset ', 'center coordinates.']\n lat, lon = values['ds_conf'][dsc_value]['center'].split(',')\n map_cent_lat = FloatText(value=float(lat), description='Lat:',\n disabled=False, layout=Layout(width='160px'))\n map_cent_lon = FloatText(value=float(lon), description='Lon:',\n disabled=False, layout=Layout(width='160px'))\n map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'\n ], min=0, max=20, step=1, description='Zoom:', disabled=False,\n layout=Layout(width='140px'))\n bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',\n tooltip='Get center point from database.')\n ds_box = HBox([ds_code, ds_year, ds_desc])\n map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,\n bt_get_center, map_zoom])\n info_config = Label(\n \"\"\"Change 'AOI code' value to create a new configuration set or \n leave the same 'AOI code' value to configure the selected one.\"\"\"\n )\n db = int(values['ds_conf'][dsc_value]['db'])\n\n def get_tb_list():\n tbls = database.tables(db, None, False)\n if tbls is None:\n return []\n else:\n return tbls\n tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['dias_catalog'], get_tb_list(), False), description=\n 'DIAS catalog:', disabled=False)\n tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['parcels'], get_tb_list(), False), description=\n 'Parcels:', disabled=False)\n\n def get_pr_columns():\n try:\n colms = database.table_columns(tb_pr.value, 1, None)\n if colms is None:\n return []\n else:\n return colms\n except Exception:\n return []\n tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['parcels_id'], get_pr_columns(), False), description\n ='Parcels ID:', disabled=False, layout=Layout(width='180px'))\n tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_names'], get_pr_columns(), False), description\n ='Crop names:', disabled=False, layout=Layout(width='180px'))\n tc_cc = 
Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_codes'], get_pr_columns(), False), description\n ='Crop codes:', disabled=False, layout=Layout(width='180px'))\n\n def on_tb_pr_change(change):\n tc_id.options = get_pr_columns()\n tc_cn.options = get_pr_columns()\n tc_cc.options = get_pr_columns()\n tb_pr.observe(on_tb_pr_change, 'value')\n parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])\n tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['s2'], get_tb_list(), False), description=\n 'S2 signatures:', disabled=False)\n tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['bs'], get_tb_list(), False), description=\n 'Backscattering:', disabled=False)\n tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['c6'], get_tb_list(), False), description=\n '6 day coherence:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n\n @bt_get_center.on_click\n def bt_get_center_on_click(b):\n import json\n center_json = json.loads(database.getTableCentroid(tb_pr.value)\n ['center'][0])\n map_cent_lat.value = round(center_json['coordinates'][1], 2)\n map_cent_lon.value = round(center_json['coordinates'][0], 2)\n map_zoom.value = 10\n\n @wb_save.on_click\n def wb_save_on_click(b):\n progress.clear_output()\n dscode = ds_code.value\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'dias_catalog'], str(tb_dc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'parcels'], str(tb_pr.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'parcels_id'], str(tc_id.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_names'], str(tc_cn.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_codes'], str(tc_cc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 's2'], str(tb_s2.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'bs'], str(tb_bs.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'c6'], str(tb_6c.value))\n config.update(['ds_conf', dscode, 'db'], str(ds_db.value))\n config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))\n config.update(['ds_conf', dscode, 'center'],\n f'{map_cent_lat.value},{map_cent_lon.value}')\n config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))\n config.update(['set', 'ds_conf'], str(dscode))\n config.update(['set', 'ds_year'], str(ds_year.value))\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n outlog('The configurations are saved.')\n return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,\n tb_6c, Label(''.join(info_map_text)), map_box, wb_save])\n dsc_new_box = HBox([])\n\n @bt_set.on_click\n def bt_set_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_new.on_click\n def 
bt_new_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_rec.on_click\n def bt_rec_on_click(b):\n progress.clear_output()\n if len(dsc.options) > 1:\n config.delete(['ds_conf', dsc.value])\n outlog(f\"Dataset configuration '{dsc.value}' is deleted.\")\n values = config.read()\n dsc.options = [d for d in values['ds_conf']]\n else:\n outlog('Can not remove last configuration.')\n\n @bt_rey.on_click\n def bt_rey_on_click(b):\n progress.clear_output()\n if len(dsy.options) > 1:\n config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])\n outlog(f\"Year {dsy.value} of dataset '{dsc.value}' is deleted.\")\n values = config.read()\n dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)\n ]['years']]\n else:\n outlog('Can not remove last configuration.')\n wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,\n progress])\n return wbox\n",
"step-3": "<mask token>\n\n\ndef widget_box():\n source = int(config.get_value(['set', 'data_source']))\n sources = RadioButtons(options=[('JRC RESTful API.', 0), (\n 'Direct access to database and object storage.', 1)], value=source,\n layout={'width': 'max-content'})\n sources_box = Box([Label(value='Data sources:'), sources])\n info_api = Label('RESTful API Settings.')\n info_direct = Label('Direct access settings')\n view_options = VBox([info_direct])\n if source == 0:\n view_options.children = [info_api, rest_api()]\n elif source == 1:\n view_options.children = [info_direct, direct()]\n\n def on_source_change(change):\n view_options.children = []\n if sources.value == 0:\n view_options.children = [info_api, rest_api()]\n elif sources.value == 1:\n view_options.children = [info_direct, direct()]\n config.update(['set', 'data_source'], str(sources.value))\n sources.observe(on_source_change, 'value')\n wbox_sources = VBox([sources_box, view_options], layout=Layout(border=\n '1px solid black'))\n info_general = Label(value='General settings:')\n wbox = VBox([wbox_sources, info_general, settings.widget_box()])\n return wbox\n\n\ndef rest_api(mode=None):\n \"\"\"\"\"\"\n values = config.read()\n wt_url = Text(value=values['api']['url'], placeholder='Add URL',\n description='API URL:', disabled=False)\n wt_user = Text(value=values['api']['user'], placeholder='Username',\n description='API User:', disabled=False)\n wt_pass = Password(value=values['api']['pass'], placeholder='******',\n description='API Password:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n @wb_save.on_click\n def wb_save_on_click(b):\n config.update(['api', 'url'], str(wt_url.value))\n config.update(['api', 'user'], str(wt_user.value))\n if wt_pass.value != '':\n config.update(['api', 'pass'], str(wt_pass.value))\n outlog('API information is updated')\n wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])\n return wbox\n\n\ndef direct():\n tab_box = Tab(children=[settings.direct_conn(), direct_settings()])\n tab_box.set_title(0, 'Connection')\n tab_box.set_title(1, 'db Configuration')\n return tab_box\n\n\ndef direct_settings():\n values = config.read()\n ds_def = values['set']['ds_conf']\n ds_dye = values['set']['ds_year']\n if ds_def not in [d for d in values['ds_conf']]:\n ds_def = [d for d in values['ds_conf']][0]\n dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,\n description='Default:', disabled=False, layout=Layout(width='200px'))\n dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][\n 'years']], value=int(ds_dye), description='Dataset year:', disabled\n =False, layout=Layout(width='180px'))\n btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')\n\n @btn_refresh.on_click\n def btn_refresh_on_click(b):\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n\n def on_dsc_change(change):\n config.update(['set', 'ds_conf'], dsc.value)\n values = config.read()\n ds_c = values['set']['ds_conf']\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.observe(on_dsc_change, 'value')\n\n def on_dsy_change(change):\n config.update(['set', 'ds_year'], str(dsy.value))\n dsy.observe(on_dsy_change, 'value')\n bt_set = 
Button(layout=Layout(width='40px'), icon='cogs', tooltip=\n 'Configure this dataset')\n bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=\n 'Add new dataset configuration')\n bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete dataset configuration')\n bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete only the selected year.')\n dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n def dsc_config(dsc_value):\n values = config.read()\n ds_db = Dropdown(options=['1'], value='1', description='Database:',\n disabled=False, layout=Layout(width='140px'))\n try:\n with open(f\"{config.get_value(['paths', 'temp'])}tb_prefix\", 'r'\n ) as f:\n code_value = f.read()\n except Exception:\n code_value = dsc_value\n ds_code = Combobox(value=code_value, placeholder='abc', options=[m for\n m in data_options.eu_ms()] + [''], description='AOI code:',\n ensure_option=False, disabled=False, layout=Layout(width=\n '200px'), tooltip=\n 'Lowercase AOI code name for the dataset (5chr max).')\n ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,\n step=1, description='Dataset year:', disabled=False, layout=\n Layout(width='180px'))\n ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],\n description='Description:', disabled=False)\n info_map_text = ['Set default map view options. ',\n 'You can get automatically the dataset ', 'center coordinates.']\n lat, lon = values['ds_conf'][dsc_value]['center'].split(',')\n map_cent_lat = FloatText(value=float(lat), description='Lat:',\n disabled=False, layout=Layout(width='160px'))\n map_cent_lon = FloatText(value=float(lon), description='Lon:',\n disabled=False, layout=Layout(width='160px'))\n map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'\n ], min=0, max=20, step=1, description='Zoom:', disabled=False,\n layout=Layout(width='140px'))\n bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',\n tooltip='Get center point from database.')\n ds_box = HBox([ds_code, ds_year, ds_desc])\n map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,\n bt_get_center, map_zoom])\n info_config = Label(\n \"\"\"Change 'AOI code' value to create a new configuration set or \n leave the same 'AOI code' value to configure the selected one.\"\"\"\n )\n db = int(values['ds_conf'][dsc_value]['db'])\n\n def get_tb_list():\n tbls = database.tables(db, None, False)\n if tbls is None:\n return []\n else:\n return tbls\n tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['dias_catalog'], get_tb_list(), False), description=\n 'DIAS catalog:', disabled=False)\n tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['parcels'], get_tb_list(), False), description=\n 'Parcels:', disabled=False)\n\n def get_pr_columns():\n try:\n colms = database.table_columns(tb_pr.value, 1, None)\n if colms is None:\n return []\n else:\n return colms\n except Exception:\n return []\n tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['parcels_id'], get_pr_columns(), False), description\n ='Parcels ID:', disabled=False, layout=Layout(width='180px'))\n tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(\n 
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_names'], get_pr_columns(), False), description\n ='Crop names:', disabled=False, layout=Layout(width='180px'))\n tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_codes'], get_pr_columns(), False), description\n ='Crop codes:', disabled=False, layout=Layout(width='180px'))\n\n def on_tb_pr_change(change):\n tc_id.options = get_pr_columns()\n tc_cn.options = get_pr_columns()\n tc_cc.options = get_pr_columns()\n tb_pr.observe(on_tb_pr_change, 'value')\n parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])\n tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['s2'], get_tb_list(), False), description=\n 'S2 signatures:', disabled=False)\n tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['bs'], get_tb_list(), False), description=\n 'Backscattering:', disabled=False)\n tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['c6'], get_tb_list(), False), description=\n '6 day coherence:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n\n @bt_get_center.on_click\n def bt_get_center_on_click(b):\n import json\n center_json = json.loads(database.getTableCentroid(tb_pr.value)\n ['center'][0])\n map_cent_lat.value = round(center_json['coordinates'][1], 2)\n map_cent_lon.value = round(center_json['coordinates'][0], 2)\n map_zoom.value = 10\n\n @wb_save.on_click\n def wb_save_on_click(b):\n progress.clear_output()\n dscode = ds_code.value\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'dias_catalog'], str(tb_dc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'parcels'], str(tb_pr.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'parcels_id'], str(tc_id.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_names'], str(tc_cn.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_codes'], str(tc_cc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 's2'], str(tb_s2.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'bs'], str(tb_bs.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'c6'], str(tb_6c.value))\n config.update(['ds_conf', dscode, 'db'], str(ds_db.value))\n config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))\n config.update(['ds_conf', dscode, 'center'],\n f'{map_cent_lat.value},{map_cent_lon.value}')\n config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))\n config.update(['set', 'ds_conf'], str(dscode))\n config.update(['set', 'ds_year'], str(ds_year.value))\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n outlog('The configurations are saved.')\n return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,\n tb_6c, Label(''.join(info_map_text)), map_box, wb_save])\n dsc_new_box = HBox([])\n\n @bt_set.on_click\n def 
bt_set_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_new.on_click\n def bt_new_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_rec.on_click\n def bt_rec_on_click(b):\n progress.clear_output()\n if len(dsc.options) > 1:\n config.delete(['ds_conf', dsc.value])\n outlog(f\"Dataset configuration '{dsc.value}' is deleted.\")\n values = config.read()\n dsc.options = [d for d in values['ds_conf']]\n else:\n outlog('Can not remove last configuration.')\n\n @bt_rey.on_click\n def bt_rey_on_click(b):\n progress.clear_output()\n if len(dsy.options) > 1:\n config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])\n outlog(f\"Year {dsy.value} of dataset '{dsc.value}' is deleted.\")\n values = config.read()\n dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)\n ]['years']]\n else:\n outlog('Can not remove last configuration.')\n wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,\n progress])\n return wbox\n",
"step-4": "from ipywidgets import Text, VBox, HBox, Label, Password, RadioButtons, Button, Layout, Box, Tab, Output, Dropdown, FloatText, BoundedIntText, Combobox\nfrom cbm.utils import config, data_options\nfrom cbm.ipycbm.utils import settings\nfrom cbm.sources import database\n\n\ndef widget_box():\n source = int(config.get_value(['set', 'data_source']))\n sources = RadioButtons(options=[('JRC RESTful API.', 0), (\n 'Direct access to database and object storage.', 1)], value=source,\n layout={'width': 'max-content'})\n sources_box = Box([Label(value='Data sources:'), sources])\n info_api = Label('RESTful API Settings.')\n info_direct = Label('Direct access settings')\n view_options = VBox([info_direct])\n if source == 0:\n view_options.children = [info_api, rest_api()]\n elif source == 1:\n view_options.children = [info_direct, direct()]\n\n def on_source_change(change):\n view_options.children = []\n if sources.value == 0:\n view_options.children = [info_api, rest_api()]\n elif sources.value == 1:\n view_options.children = [info_direct, direct()]\n config.update(['set', 'data_source'], str(sources.value))\n sources.observe(on_source_change, 'value')\n wbox_sources = VBox([sources_box, view_options], layout=Layout(border=\n '1px solid black'))\n info_general = Label(value='General settings:')\n wbox = VBox([wbox_sources, info_general, settings.widget_box()])\n return wbox\n\n\ndef rest_api(mode=None):\n \"\"\"\"\"\"\n values = config.read()\n wt_url = Text(value=values['api']['url'], placeholder='Add URL',\n description='API URL:', disabled=False)\n wt_user = Text(value=values['api']['user'], placeholder='Username',\n description='API User:', disabled=False)\n wt_pass = Password(value=values['api']['pass'], placeholder='******',\n description='API Password:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n @wb_save.on_click\n def wb_save_on_click(b):\n config.update(['api', 'url'], str(wt_url.value))\n config.update(['api', 'user'], str(wt_user.value))\n if wt_pass.value != '':\n config.update(['api', 'pass'], str(wt_pass.value))\n outlog('API information is updated')\n wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])\n return wbox\n\n\ndef direct():\n tab_box = Tab(children=[settings.direct_conn(), direct_settings()])\n tab_box.set_title(0, 'Connection')\n tab_box.set_title(1, 'db Configuration')\n return tab_box\n\n\ndef direct_settings():\n values = config.read()\n ds_def = values['set']['ds_conf']\n ds_dye = values['set']['ds_year']\n if ds_def not in [d for d in values['ds_conf']]:\n ds_def = [d for d in values['ds_conf']][0]\n dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,\n description='Default:', disabled=False, layout=Layout(width='200px'))\n dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][\n 'years']], value=int(ds_dye), description='Dataset year:', disabled\n =False, layout=Layout(width='180px'))\n btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')\n\n @btn_refresh.on_click\n def btn_refresh_on_click(b):\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n\n def on_dsc_change(change):\n config.update(['set', 'ds_conf'], dsc.value)\n values = config.read()\n ds_c = values['set']['ds_conf']\n 
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.observe(on_dsc_change, 'value')\n\n def on_dsy_change(change):\n config.update(['set', 'ds_year'], str(dsy.value))\n dsy.observe(on_dsy_change, 'value')\n bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=\n 'Configure this dataset')\n bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=\n 'Add new dataset configuration')\n bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete dataset configuration')\n bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete only the selected year.')\n dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n def dsc_config(dsc_value):\n values = config.read()\n ds_db = Dropdown(options=['1'], value='1', description='Database:',\n disabled=False, layout=Layout(width='140px'))\n try:\n with open(f\"{config.get_value(['paths', 'temp'])}tb_prefix\", 'r'\n ) as f:\n code_value = f.read()\n except Exception:\n code_value = dsc_value\n ds_code = Combobox(value=code_value, placeholder='abc', options=[m for\n m in data_options.eu_ms()] + [''], description='AOI code:',\n ensure_option=False, disabled=False, layout=Layout(width=\n '200px'), tooltip=\n 'Lowercase AOI code name for the dataset (5chr max).')\n ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,\n step=1, description='Dataset year:', disabled=False, layout=\n Layout(width='180px'))\n ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],\n description='Description:', disabled=False)\n info_map_text = ['Set default map view options. ',\n 'You can get automatically the dataset ', 'center coordinates.']\n lat, lon = values['ds_conf'][dsc_value]['center'].split(',')\n map_cent_lat = FloatText(value=float(lat), description='Lat:',\n disabled=False, layout=Layout(width='160px'))\n map_cent_lon = FloatText(value=float(lon), description='Lon:',\n disabled=False, layout=Layout(width='160px'))\n map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'\n ], min=0, max=20, step=1, description='Zoom:', disabled=False,\n layout=Layout(width='140px'))\n bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',\n tooltip='Get center point from database.')\n ds_box = HBox([ds_code, ds_year, ds_desc])\n map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,\n bt_get_center, map_zoom])\n info_config = Label(\n \"\"\"Change 'AOI code' value to create a new configuration set or \n leave the same 'AOI code' value to configure the selected one.\"\"\"\n )\n db = int(values['ds_conf'][dsc_value]['db'])\n\n def get_tb_list():\n tbls = database.tables(db, None, False)\n if tbls is None:\n return []\n else:\n return tbls\n tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['dias_catalog'], get_tb_list(), False), description=\n 'DIAS catalog:', disabled=False)\n tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['parcels'], get_tb_list(), False), description=\n 'Parcels:', disabled=False)\n\n def get_pr_columns():\n try:\n colms = database.table_columns(tb_pr.value, 1, None)\n if colms is None:\n return []\n else:\n return colms\n except Exception:\n return []\n tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(\n 
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['parcels_id'], get_pr_columns(), False), description\n ='Parcels ID:', disabled=False, layout=Layout(width='180px'))\n tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_names'], get_pr_columns(), False), description\n ='Crop names:', disabled=False, layout=Layout(width='180px'))\n tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_codes'], get_pr_columns(), False), description\n ='Crop codes:', disabled=False, layout=Layout(width='180px'))\n\n def on_tb_pr_change(change):\n tc_id.options = get_pr_columns()\n tc_cn.options = get_pr_columns()\n tc_cc.options = get_pr_columns()\n tb_pr.observe(on_tb_pr_change, 'value')\n parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])\n tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['s2'], get_tb_list(), False), description=\n 'S2 signatures:', disabled=False)\n tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['bs'], get_tb_list(), False), description=\n 'Backscattering:', disabled=False)\n tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['c6'], get_tb_list(), False), description=\n '6 day coherence:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n\n @bt_get_center.on_click\n def bt_get_center_on_click(b):\n import json\n center_json = json.loads(database.getTableCentroid(tb_pr.value)\n ['center'][0])\n map_cent_lat.value = round(center_json['coordinates'][1], 2)\n map_cent_lon.value = round(center_json['coordinates'][0], 2)\n map_zoom.value = 10\n\n @wb_save.on_click\n def wb_save_on_click(b):\n progress.clear_output()\n dscode = ds_code.value\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'dias_catalog'], str(tb_dc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'parcels'], str(tb_pr.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'parcels_id'], str(tc_id.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_names'], str(tc_cn.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_codes'], str(tc_cc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 's2'], str(tb_s2.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'bs'], str(tb_bs.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'c6'], str(tb_6c.value))\n config.update(['ds_conf', dscode, 'db'], str(ds_db.value))\n config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))\n config.update(['ds_conf', dscode, 'center'],\n f'{map_cent_lat.value},{map_cent_lon.value}')\n config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))\n config.update(['set', 'ds_conf'], str(dscode))\n config.update(['set', 'ds_year'], str(ds_year.value))\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n 
dsy.value = int(ds_y)\n outlog('The configurations are saved.')\n return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,\n tb_6c, Label(''.join(info_map_text)), map_box, wb_save])\n dsc_new_box = HBox([])\n\n @bt_set.on_click\n def bt_set_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_new.on_click\n def bt_new_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_rec.on_click\n def bt_rec_on_click(b):\n progress.clear_output()\n if len(dsc.options) > 1:\n config.delete(['ds_conf', dsc.value])\n outlog(f\"Dataset configuration '{dsc.value}' is deleted.\")\n values = config.read()\n dsc.options = [d for d in values['ds_conf']]\n else:\n outlog('Can not remove last configuration.')\n\n @bt_rey.on_click\n def bt_rey_on_click(b):\n progress.clear_output()\n if len(dsy.options) > 1:\n config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])\n outlog(f\"Year {dsy.value} of dataset '{dsc.value}' is deleted.\")\n values = config.read()\n dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)\n ]['years']]\n else:\n outlog('Can not remove last configuration.')\n wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,\n progress])\n return wbox\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This file is part of CbM (https://github.com/ec-jrc/cbm).\n# Author : Konstantinos Anastasakis\n# Credits : GTCAP Team\n# Copyright : 2021 European Commission, Joint Research Centre\n# License : 3-Clause BSD\n\n\nfrom ipywidgets import (Text, VBox, HBox, Label, Password, RadioButtons,\n Button, Layout, Box, Tab, Output, Dropdown,\n FloatText, BoundedIntText, Combobox)\n\nfrom cbm.utils import config, data_options\nfrom cbm.ipycbm.utils import settings\nfrom cbm.sources import database\n\n\ndef widget_box():\n\n source = int(config.get_value(['set', 'data_source']))\n\n sources = RadioButtons(\n options=[\n (\"JRC RESTful API.\", 0),\n (\"Direct access to database and object storage.\", 1)\n ],\n value=source,\n layout={'width': 'max-content'}\n )\n\n sources_box = Box([\n Label(value=\"Data sources:\"),\n sources]\n )\n\n info_api = Label(\"RESTful API Settings.\")\n info_direct = Label(\"Direct access settings\")\n\n view_options = VBox([info_direct])\n\n if source == 0:\n view_options.children = [info_api, rest_api()]\n elif source == 1:\n view_options.children = [info_direct, direct()]\n\n def on_source_change(change):\n view_options.children = []\n if sources.value == 0:\n view_options.children = [info_api, rest_api()]\n elif sources.value == 1:\n view_options.children = [info_direct, direct()]\n config.update(['set', 'data_source'], str(sources.value))\n\n sources.observe(on_source_change, 'value')\n\n wbox_sources = VBox([sources_box, view_options],\n layout=Layout(border='1px solid black'))\n\n info_general = Label(value=\"General settings:\")\n\n wbox = VBox([wbox_sources, info_general, settings.widget_box()])\n\n return wbox\n\n\ndef rest_api(mode=None):\n \"\"\"\"\"\"\n values = config.read()\n\n wt_url = Text(\n value=values['api']['url'],\n placeholder='Add URL',\n description='API URL:',\n disabled=False\n )\n wt_user = Text(\n value=values['api']['user'],\n placeholder='Username',\n description='API User:',\n disabled=False\n )\n wt_pass = Password(\n value=values['api']['pass'],\n placeholder='******',\n description='API Password:',\n disabled=False\n )\n\n wb_save = Button(\n description='Save',\n disabled=False,\n icon='save'\n )\n\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n @wb_save.on_click\n def wb_save_on_click(b):\n config.update(['api', 'url'], str(wt_url.value))\n config.update(['api', 'user'], str(wt_user.value))\n if wt_pass.value != '':\n config.update(['api', 'pass'], str(wt_pass.value))\n outlog(\"API information is updated\")\n\n wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])\n\n return wbox\n\n\ndef direct():\n # try:\n tab_box = Tab(children=[settings.direct_conn(), direct_settings()])\n\n tab_box.set_title(0, 'Connection')\n tab_box.set_title(1, 'db Configuration')\n# except:\n# tab_box = Tab(children=[direct_conn()])\n# tab_box.set_title(0, 'Connection')\n# print(\"!WARNING! 
Can not load direct configuration settings.\")\n return tab_box\n\n\ndef direct_settings():\n values = config.read()\n ds_def = values['set']['ds_conf']\n ds_dye = values['set']['ds_year']\n if ds_def not in [d for d in values['ds_conf']]:\n ds_def = [d for d in values['ds_conf']][0]\n\n dsc = Dropdown(\n options=[d for d in values['ds_conf']],\n value=ds_def,\n description='Default:',\n disabled=False,\n layout=Layout(width='200px')\n )\n\n dsy = Dropdown(\n options=[int(y) for y in values['ds_conf'][dsc.value]['years']],\n value=int(ds_dye),\n description='Dataset year:',\n disabled=False,\n layout=Layout(width='180px')\n )\n\n btn_refresh = Button(\n layout=Layout(width='35px'),\n icon='fa-refresh')\n\n @btn_refresh.on_click\n def btn_refresh_on_click(b):\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n\n def on_dsc_change(change):\n config.update(['set', 'ds_conf'], dsc.value)\n values = config.read()\n ds_c = values['set']['ds_conf']\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.observe(on_dsc_change, 'value')\n\n def on_dsy_change(change):\n config.update(['set', 'ds_year'], str(dsy.value))\n dsy.observe(on_dsy_change, 'value')\n\n bt_set = Button(layout=Layout(width='40px'), icon='cogs',\n tooltip=\"Configure this dataset\")\n bt_new = Button(layout=Layout(width='40px'), icon='plus',\n tooltip=\"Add new dataset configuration\")\n bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt',\n tooltip='Delete dataset configuration')\n bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt',\n tooltip='Delete only the selected year.')\n dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])\n\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n def dsc_config(dsc_value):\n values = config.read()\n ds_db = Dropdown(\n options=[\"1\"],\n value=\"1\",\n description='Database:',\n disabled=False,\n layout=Layout(width='140px')\n )\n\n try:\n with open(f\"{config.get_value(['paths','temp'])}tb_prefix\", 'r') as f:\n code_value = f.read()\n except Exception:\n code_value = dsc_value\n\n ds_code = Combobox(\n value=code_value,\n placeholder='abc',\n options=[m for m in data_options.eu_ms()]+[''],\n description='AOI code:',\n ensure_option=False,\n disabled=False,\n layout=Layout(width='200px'),\n tooltip='Lowercase AOI code name for the dataset (5chr max).'\n )\n ds_year = BoundedIntText(\n value=int(dsy.value),\n min=1980,\n max=2100,\n step=1,\n description='Dataset year:',\n disabled=False,\n layout=Layout(width='180px')\n\n )\n ds_desc = Text(\n value=values['ds_conf'][dsc_value]['desc'],\n description='Description:',\n disabled=False\n )\n\n info_map_text = [\"Set default map view options. 
\",\n \"You can get automatically the dataset \",\n \"center coordinates.\"]\n\n lat, lon = values['ds_conf'][dsc_value]['center'].split(\",\")\n map_cent_lat = FloatText(\n value=float(lat),\n description='Lat:',\n disabled=False,\n layout=Layout(width='160px')\n )\n map_cent_lon = FloatText(\n value=float(lon),\n description='Lon:',\n disabled=False,\n layout=Layout(width='160px')\n )\n map_zoom = BoundedIntText(\n value=values['ds_conf'][dsc_value]['zoom'],\n min=0,\n max=20,\n step=1,\n description='Zoom:',\n disabled=False,\n layout=Layout(width='140px')\n )\n bt_get_center = Button(\n layout=Layout(width='40px'),\n icon='bullseye',\n tooltip='Get center point from database.'\n )\n\n ds_box = HBox([ds_code, ds_year, ds_desc])\n map_box = HBox([Label(\"Map center: \"), map_cent_lat,\n map_cent_lon, bt_get_center, map_zoom])\n\n info_config = Label(\n \"\"\"Change 'AOI code' value to create a new configuration set or \n leave the same 'AOI code' value to configure the selected one.\"\"\")\n\n db = int(values['ds_conf'][dsc_value]['db'])\n\n def get_tb_list():\n tbls = database.tables(db, None, False)\n if tbls is None:\n return []\n else:\n return tbls\n\n tb_dc = Dropdown(\n options=get_tb_list(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['tables']['dias_catalog'],\n get_tb_list(), False),\n description='DIAS catalog:',\n disabled=False\n )\n tb_pr = Dropdown(\n options=get_tb_list(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['tables']['parcels'],\n get_tb_list(), False),\n description='Parcels:',\n disabled=False\n )\n\n def get_pr_columns():\n try:\n colms = database.table_columns(tb_pr.value, 1, None)\n if colms is None:\n return []\n else:\n return colms\n except Exception:\n return []\n\n tc_id = Dropdown(\n options=get_pr_columns(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['columns']['parcels_id'],\n get_pr_columns(), False),\n description='Parcels ID:',\n disabled=False,\n layout=Layout(width='180px')\n )\n tc_cn = Dropdown(\n options=get_pr_columns(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['columns']['crop_names'],\n get_pr_columns(), False),\n description='Crop names:',\n disabled=False,\n layout=Layout(width='180px')\n )\n tc_cc = Dropdown(\n options=get_pr_columns(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['columns']['crop_codes'],\n get_pr_columns(), False),\n description='Crop codes:',\n disabled=False,\n layout=Layout(width='180px')\n )\n\n def on_tb_pr_change(change):\n tc_id.options = get_pr_columns()\n tc_cn.options = get_pr_columns()\n tc_cc.options = get_pr_columns()\n tb_pr.observe(on_tb_pr_change, 'value')\n\n parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])\n\n tb_s2 = Dropdown(\n options=get_tb_list(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['tables']['s2'],\n get_tb_list(), False),\n description='S2 signatures:',\n disabled=False\n )\n tb_bs = Dropdown(\n options=get_tb_list(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['tables']['bs'],\n get_tb_list(), False),\n description='Backscattering:',\n disabled=False\n )\n tb_6c = Dropdown(\n options=get_tb_list(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['tables']['c6'],\n get_tb_list(), False),\n description='6 day coherence:',\n 
disabled=False\n )\n\n wb_save = Button(\n description='Save',\n disabled=False,\n icon='save'\n )\n\n @bt_get_center.on_click\n def bt_get_center_on_click(b):\n import json\n center_json = json.loads(\n database.getTableCentroid(tb_pr.value)['center'][0])\n map_cent_lat.value = round(center_json['coordinates'][1], 2)\n map_cent_lon.value = round(center_json['coordinates'][0], 2)\n map_zoom.value = 10\n\n @wb_save.on_click\n def wb_save_on_click(b):\n progress.clear_output()\n dscode = ds_code.value\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'dias_catalog'], str(tb_dc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'parcels'], str(tb_pr.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'parcels_id'], str(tc_id.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_names'], str(tc_cn.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_codes'], str(tc_cc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 's2'], str(tb_s2.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'bs'], str(tb_bs.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'c6'], str(tb_6c.value))\n config.update(['ds_conf', dscode,\n 'db'], str(ds_db.value))\n config.update(['ds_conf', dscode,\n 'desc'], str(ds_desc.value))\n config.update(['ds_conf', dscode, 'center'],\n f\"{map_cent_lat.value},{map_cent_lon.value}\")\n config.update(['ds_conf', dscode,\n 'zoom'], str(map_zoom.value))\n config.update(['set', 'ds_conf'], str(dscode))\n config.update(['set', 'ds_year'], str(ds_year.value))\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n outlog(\"The configurations are saved.\")\n\n return VBox([info_config, ds_box, parcel_box,\n tb_dc, tb_s2, tb_bs, tb_6c,\n Label(''.join(info_map_text)), map_box, wb_save])\n\n dsc_new_box = HBox([])\n\n @bt_set.on_click\n def bt_set_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_new.on_click\n def bt_new_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_rec.on_click\n def bt_rec_on_click(b):\n progress.clear_output()\n if len(dsc.options) > 1:\n config.delete(['ds_conf', dsc.value])\n outlog(f\"Dataset configuration '{dsc.value}' is deleted.\")\n values = config.read()\n dsc.options = [d for d in values['ds_conf']]\n else:\n outlog(\"Can not remove last configuration.\")\n\n @bt_rey.on_click\n def bt_rey_on_click(b):\n progress.clear_output()\n if len(dsy.options) > 1:\n config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])\n outlog(f\"Year {dsy.value} of dataset '{dsc.value}' is deleted.\")\n values = config.read()\n dsy.options = [int(y) for y in values['ds_conf']\n [str(dsc.value)]['years']]\n else:\n outlog(\"Can not remove last configuration.\")\n\n wbox = VBox([Label(\"Datasets configurations.\"), dsc_box,\n dsc_new_box, progress])\n\n return wbox\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
log = logging.getLogger(__name__)
dir_path = os.path.dirname(os.path.realpath(__file__))
TEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import logging
import os
log = logging.getLogger(__name__)
dir_path = os.path.dirname(os.path.realpath(__file__))
TEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""Testing constants for Bio2BEL FlyBase."""
import logging
import os
log = logging.getLogger(__name__)
dir_path = os.path.dirname(os.path.realpath(__file__))
TEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')
|
flexible
|
{
"blob_id": "bad719d968b4e358f863b7ef13bc12127f726806",
"index": 682,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlog = logging.getLogger(__name__)\ndir_path = os.path.dirname(os.path.realpath(__file__))\nTEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')\n",
"step-3": "<mask token>\nimport logging\nimport os\nlog = logging.getLogger(__name__)\ndir_path = os.path.dirname(os.path.realpath(__file__))\nTEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')\n",
"step-4": "# -*- coding: utf-8 -*-\n\n\"\"\"Testing constants for Bio2BEL FlyBase.\"\"\"\n\nimport logging\nimport os\n\nlog = logging.getLogger(__name__)\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nTEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def some_func():
CFG.start_clock_module = datetime.datetime.now()
LOG.write_me('\tSTART - CLEAN.py (' + datetime.datetime.now().strftime(
'%y-%m-%d | %H:%M') + ')')
my_root_dir = os.getcwd()
list_output_dir = list()
list_of_files = list()
LOG.write_me("\t\tList of the files deleted from the 'OUTPUT' folders:")
for root, dirs, files in os.walk(my_root_dir):
if not str(root).endswith('ABACUS'):
if 'OUTPUT_' in str(root):
for file in files:
if str(file).endswith('.txt'):
rel_path_file = os.path.relpath(root, my_root_dir
) + '/' + file
LOG.write_me('\t\t- ' + rel_path_file)
path_file = root + '\\' + file
os.remove(path_file)
list_of_files.append(rel_path_file)
if len(list_of_files) == 0:
LOG.write_me('\t\t\t- No output file to clean')
elapsed_formatted = UTL.format_elapsed(CFG.start_clock_module)
LOG.write_me('\tEND - CLEAN.py (' + datetime.datetime.now().strftime(
'%y-%m-%d | %H:%M') + ' | hh.mm.ss.ms ' + elapsed_formatted + ')')
LOG.write_me('')
LOG.write_me('')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def some_func():
CFG.start_clock_module = datetime.datetime.now()
LOG.write_me('\tSTART - CLEAN.py (' + datetime.datetime.now().strftime(
'%y-%m-%d | %H:%M') + ')')
my_root_dir = os.getcwd()
list_output_dir = list()
list_of_files = list()
LOG.write_me("\t\tList of the files deleted from the 'OUTPUT' folders:")
for root, dirs, files in os.walk(my_root_dir):
if not str(root).endswith('ABACUS'):
if 'OUTPUT_' in str(root):
for file in files:
if str(file).endswith('.txt'):
rel_path_file = os.path.relpath(root, my_root_dir
) + '/' + file
LOG.write_me('\t\t- ' + rel_path_file)
path_file = root + '\\' + file
os.remove(path_file)
list_of_files.append(rel_path_file)
if len(list_of_files) == 0:
LOG.write_me('\t\t\t- No output file to clean')
elapsed_formatted = UTL.format_elapsed(CFG.start_clock_module)
LOG.write_me('\tEND - CLEAN.py (' + datetime.datetime.now().strftime(
'%y-%m-%d | %H:%M') + ' | hh.mm.ss.ms ' + elapsed_formatted + ')')
LOG.write_me('')
LOG.write_me('')
if __name__ == '__main__':
some_func()
<|reserved_special_token_1|>
import _cfg_GLOBAL as CFG
import os
import LOG
import UTILITY as UTL
import datetime
def some_func():
CFG.start_clock_module = datetime.datetime.now()
LOG.write_me('\tSTART - CLEAN.py (' + datetime.datetime.now().strftime(
'%y-%m-%d | %H:%M') + ')')
my_root_dir = os.getcwd()
list_output_dir = list()
list_of_files = list()
LOG.write_me("\t\tList of the files deleted from the 'OUTPUT' folders:")
for root, dirs, files in os.walk(my_root_dir):
if not str(root).endswith('ABACUS'):
if 'OUTPUT_' in str(root):
for file in files:
if str(file).endswith('.txt'):
rel_path_file = os.path.relpath(root, my_root_dir
) + '/' + file
LOG.write_me('\t\t- ' + rel_path_file)
path_file = root + '\\' + file
os.remove(path_file)
list_of_files.append(rel_path_file)
if len(list_of_files) == 0:
LOG.write_me('\t\t\t- No output file to clean')
elapsed_formatted = UTL.format_elapsed(CFG.start_clock_module)
LOG.write_me('\tEND - CLEAN.py (' + datetime.datetime.now().strftime(
'%y-%m-%d | %H:%M') + ' | hh.mm.ss.ms ' + elapsed_formatted + ')')
LOG.write_me('')
LOG.write_me('')
if __name__ == '__main__':
some_func()
<|reserved_special_token_1|>
############################################-############################################
################################ F I L E A U T H O R S ################################
# MIKE - see contacts in _doc_PACKAGE_DESCRIPTION
####################################### A B O U T #######################################
# In this module:
# I clean the out put directories
####################################### S T A R T #######################################
import _cfg_GLOBAL as CFG
import os
import LOG
import UTILITY as UTL
import datetime
def some_func():
CFG.start_clock_module = datetime.datetime.now()
LOG.write_me("\tSTART - CLEAN.py (" + datetime.datetime.now().strftime("%y-%m-%d | %H:%M") + ")")
my_root_dir = os.getcwd()
list_output_dir = list()
list_of_files = list()
LOG.write_me("\t\tList of the files deleted from the 'OUTPUT' folders:")
for root, dirs, files in os.walk(my_root_dir):
if not str(root).endswith("ABACUS"):
if "OUTPUT_" in str(root):
for file in files:
if str(file).endswith(".txt"):
rel_path_file = os.path.relpath(root, my_root_dir) + "/" + file
LOG.write_me("\t\t- " + rel_path_file )
path_file = root + "\\" + file
os.remove(path_file)
list_of_files.append(rel_path_file)
if len(list_of_files) == 0:
LOG.write_me("\t\t\t- No output file to clean")
elapsed_formatted = UTL.format_elapsed(CFG.start_clock_module)
LOG.write_me("\tEND - CLEAN.py (" + datetime.datetime.now().strftime("%y-%m-%d | %H:%M") + " | hh.mm.ss.ms " + elapsed_formatted + ")")
LOG.write_me("")
LOG.write_me("")
if __name__ == '__main__':
some_func()
|
flexible
|
{
"blob_id": "58667da8898c2277ecc3d9d738d6553dd3416436",
"index": 7323,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef some_func():\n CFG.start_clock_module = datetime.datetime.now()\n LOG.write_me('\\tSTART - CLEAN.py (' + datetime.datetime.now().strftime(\n '%y-%m-%d | %H:%M') + ')')\n my_root_dir = os.getcwd()\n list_output_dir = list()\n list_of_files = list()\n LOG.write_me(\"\\t\\tList of the files deleted from the 'OUTPUT' folders:\")\n for root, dirs, files in os.walk(my_root_dir):\n if not str(root).endswith('ABACUS'):\n if 'OUTPUT_' in str(root):\n for file in files:\n if str(file).endswith('.txt'):\n rel_path_file = os.path.relpath(root, my_root_dir\n ) + '/' + file\n LOG.write_me('\\t\\t- ' + rel_path_file)\n path_file = root + '\\\\' + file\n os.remove(path_file)\n list_of_files.append(rel_path_file)\n if len(list_of_files) == 0:\n LOG.write_me('\\t\\t\\t- No output file to clean')\n elapsed_formatted = UTL.format_elapsed(CFG.start_clock_module)\n LOG.write_me('\\tEND - CLEAN.py (' + datetime.datetime.now().strftime(\n '%y-%m-%d | %H:%M') + ' | hh.mm.ss.ms ' + elapsed_formatted + ')')\n LOG.write_me('')\n LOG.write_me('')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef some_func():\n CFG.start_clock_module = datetime.datetime.now()\n LOG.write_me('\\tSTART - CLEAN.py (' + datetime.datetime.now().strftime(\n '%y-%m-%d | %H:%M') + ')')\n my_root_dir = os.getcwd()\n list_output_dir = list()\n list_of_files = list()\n LOG.write_me(\"\\t\\tList of the files deleted from the 'OUTPUT' folders:\")\n for root, dirs, files in os.walk(my_root_dir):\n if not str(root).endswith('ABACUS'):\n if 'OUTPUT_' in str(root):\n for file in files:\n if str(file).endswith('.txt'):\n rel_path_file = os.path.relpath(root, my_root_dir\n ) + '/' + file\n LOG.write_me('\\t\\t- ' + rel_path_file)\n path_file = root + '\\\\' + file\n os.remove(path_file)\n list_of_files.append(rel_path_file)\n if len(list_of_files) == 0:\n LOG.write_me('\\t\\t\\t- No output file to clean')\n elapsed_formatted = UTL.format_elapsed(CFG.start_clock_module)\n LOG.write_me('\\tEND - CLEAN.py (' + datetime.datetime.now().strftime(\n '%y-%m-%d | %H:%M') + ' | hh.mm.ss.ms ' + elapsed_formatted + ')')\n LOG.write_me('')\n LOG.write_me('')\n\n\nif __name__ == '__main__':\n some_func()\n",
"step-4": "import _cfg_GLOBAL as CFG\nimport os\nimport LOG\nimport UTILITY as UTL\nimport datetime\n\n\ndef some_func():\n CFG.start_clock_module = datetime.datetime.now()\n LOG.write_me('\\tSTART - CLEAN.py (' + datetime.datetime.now().strftime(\n '%y-%m-%d | %H:%M') + ')')\n my_root_dir = os.getcwd()\n list_output_dir = list()\n list_of_files = list()\n LOG.write_me(\"\\t\\tList of the files deleted from the 'OUTPUT' folders:\")\n for root, dirs, files in os.walk(my_root_dir):\n if not str(root).endswith('ABACUS'):\n if 'OUTPUT_' in str(root):\n for file in files:\n if str(file).endswith('.txt'):\n rel_path_file = os.path.relpath(root, my_root_dir\n ) + '/' + file\n LOG.write_me('\\t\\t- ' + rel_path_file)\n path_file = root + '\\\\' + file\n os.remove(path_file)\n list_of_files.append(rel_path_file)\n if len(list_of_files) == 0:\n LOG.write_me('\\t\\t\\t- No output file to clean')\n elapsed_formatted = UTL.format_elapsed(CFG.start_clock_module)\n LOG.write_me('\\tEND - CLEAN.py (' + datetime.datetime.now().strftime(\n '%y-%m-%d | %H:%M') + ' | hh.mm.ss.ms ' + elapsed_formatted + ')')\n LOG.write_me('')\n LOG.write_me('')\n\n\nif __name__ == '__main__':\n some_func()\n",
"step-5": "############################################-############################################\n################################ F I L E A U T H O R S ################################\n# MIKE - see contacts in _doc_PACKAGE_DESCRIPTION\n\n####################################### A B O U T #######################################\n# In this module:\n# I clean the out put directories\n\n####################################### S T A R T #######################################\n\nimport _cfg_GLOBAL as CFG\nimport os\nimport LOG\nimport UTILITY as UTL\nimport datetime\n\n\ndef some_func():\n CFG.start_clock_module = datetime.datetime.now()\n LOG.write_me(\"\\tSTART - CLEAN.py (\" + datetime.datetime.now().strftime(\"%y-%m-%d | %H:%M\") + \")\")\n\n my_root_dir = os.getcwd()\n list_output_dir = list()\n list_of_files = list()\n\n LOG.write_me(\"\\t\\tList of the files deleted from the 'OUTPUT' folders:\")\n for root, dirs, files in os.walk(my_root_dir):\n if not str(root).endswith(\"ABACUS\"):\n if \"OUTPUT_\" in str(root):\n for file in files:\n if str(file).endswith(\".txt\"):\n rel_path_file = os.path.relpath(root, my_root_dir) + \"/\" + file\n LOG.write_me(\"\\t\\t- \" + rel_path_file )\n path_file = root + \"\\\\\" + file\n os.remove(path_file)\n list_of_files.append(rel_path_file)\n if len(list_of_files) == 0:\n LOG.write_me(\"\\t\\t\\t- No output file to clean\")\n elapsed_formatted = UTL.format_elapsed(CFG.start_clock_module)\n LOG.write_me(\"\\tEND - CLEAN.py (\" + datetime.datetime.now().strftime(\"%y-%m-%d | %H:%M\") + \" | hh.mm.ss.ms \" + elapsed_formatted + \")\")\n LOG.write_me(\"\")\n LOG.write_me(\"\")\nif __name__ == '__main__':\n some_func()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask, render_template, request
import matplotlib.pyplot as plt
import numpy as np
import sympy
from DerivTest import diff, diff2, trapz
from sympy.parsing.sympy_parser import parse_expr
from sympy import Symbol
#from ParsingClass import Parser
#from scitools.StringFunction import StringFunction
#from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
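# Keeping SEND_FILE_MAX_AGE_DEFAULT at 1 second effectively disables static-file
# caching, so the regenerated PNGs under static/images are picked up on the next request.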
def functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):
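    # function: expression string in x entered by the user;
    # dVal1/dVal2: x-axis (domain) bounds; dVal3/dVal4: y-axis (range) bounds;
    # ftcVal1/ftcVal2: extra bounds passed from the form (not used in the plotting code shown here).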
print("printing user input from functionGraph - " + function)
print(dVal1, dVal2, dVal3, dVal4)
#parser = Parser()
#x=np.array(range(10))
    x1 = -5
    x2 = 5
print("1st input:")
y=function
def f(x):
return eval(y)
'''print("Domain Val 1:")
x1 = float(input())
print("Domain Val 2:")
x2 = float(input())
print("Range Val 1:")
y1 = float(input())
print("Range Val 2:")
y2 = float(input())
'''
    x1 = int(dVal1)
    x2 = int(dVal2)
    y1 = int(dVal3)
    y2 = int(dVal4)
print("Processing...")
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
yParsed = parse_expr(y, evaluate=False)
n, d = yParsed.as_numer_denom()
#s = Symbol('s', real = True)
undef = sympy.solve(d)
numzero = sympy.solve(n)
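    # undef: zeros of the denominator (candidate vertical asymptotes or holes);
    # numzero: zeros of the numerator, used below to tell holes apart from asymptotes.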
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
plt.xlim(x1, x2)
plt.ylim(y1, y2)
plt.autoscale(False)
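    # Sample the expression on a 0.01-step grid over the requested domain.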
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count+1
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2,2,1)
ax1.plot(xVal1, yVal1, 'g')
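    # For each zero of the denominator draw a dashed vertical asymptote, unless the
    # numerator also vanishes there, in which case mark a hole with an open marker
    # evaluated just to the right of the point (x + 0.01) to avoid dividing by zero.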
for x in undef:
if x not in numzero:
try:
ax1.axvline(x=x, linestyle = '--')
except:
pass
else:
x=x+0.01
ax1.plot(x, eval(y), "o", markersize=7, markeredgewidth=1, markeredgecolor='g',markerfacecolor='None')
count = 0
'''for zero in numzero:
if zero in undef:
ax1.plot(zero, f(zero), marker='s', color='green')
count = count + 1'''
#ax1.set_aspect('equal')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
#plt.axis([0,6,0,30])
plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png', bbox_inches = 'tight')
#############################################
# Relative Extrema
#############################################
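    # Overlay a faint numerical first derivative (diff from DerivTest) and mark relative
    # maxima where it changes sign from positive to negative; saved as relmax.png.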
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
# ax2.set_aspect('equal')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
count = 1
limit = len(yVal2) - 1
for z in yVal2:
if count == limit:
break
if (yVal2[count - 1]>0 and yVal2[count + 1]<0):
ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')
ax1.axvline(x=xVal1[count], linestyle='--')
count = count + 1
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png', bbox_inches='tight')
plt.clf()
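    # Re-plot the function with the faint derivative overlay and mark relative minima
    # (derivative sign change from negative to positive); saved as relmin.png.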
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1,'g')
# ax1.set_aspect('equal')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
# ax2.set_aspect('equal')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
count = 1
limit = len(yVal2) - 1
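    # Mirror of the maximum test: a negative-to-positive sign change in the
    # derivative marks a relative minimum.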
for z in yVal2:
if count == limit:
break
if (yVal2[count - 1] < 0 and yVal2[count + 1] > 0):
ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')
ax1.axvline(x=xVal1[count], linestyle='--')
count = count + 1
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png', bbox_inches='tight')
plt.clf()
#############################################
# First Derivative
#############################################
xRange1 = np.arange(x1,x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count+1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2,2,1)
ax1.plot(xVal1, yVal1, 'g')
#ax1.set_aspect('equal')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y,x)
count = count+1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r')
#ax2.set_aspect('equal')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png', bbox_inches = 'tight')
#############################################
# SECOND DERIVATIVE
#############################################
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
# ax1.set_aspect('equal')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange3 = np.arange(x1, x2, 0.01)
yRange3 = np.empty(xRange3.size)
'''for x in np.nditer(xRange3):
yRange3[count] = diff2(y, x)
count = count + 1'''
count = 1
limit = yRange2.size-1
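    # diff2() from DerivTest is assumed to approximate the second derivative from
    # the two neighbouring first-derivative samples (a central-difference step).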
for x in np.nditer(xRange3):
if count == limit:
break
yRange3[count] = diff2(yRange2[count-1], yRange2[count+1])
count = count + 1
    yRange3[0] = yRange3[1]    # the loop above never fills the first sample
    yRange3[-1] = yRange3[-2]  # ...or the last one; np.delete() only returned a discarded copy
xVal3 = xRange3.tolist()
yVal3 = yRange3.tolist()
print("XXXXXXXXXX")
for x in xVal3:
print (x)
print("YYYYYYYYYY")
for yVal in yVal3:
print (yVal)
ax1.plot(xVal3, yVal3, 'b')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png', bbox_inches='tight')
    plt.clf()
#############################################
#POINTS OF INFLECTION
#############################################
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange2 = np.arange(x1, x2, 0.01)
count = 0
yRange2 = np.empty(xRange2.size)
for x in np.nditer(xRange2):
yRange2[count] = diff(y, x)
count = count + 1
xVal2 = xRange2.tolist()
yVal2 = yRange2.tolist()
ax1.plot(xVal2, yVal2, 'r', alpha=0.2)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
xRange3 = np.arange(x1, x2, 0.01)
yRange3 = np.empty(xRange3.size)
count = 1
limit = yRange2.size - 1
for x in np.nditer(xRange3):
if count == limit:
break
yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])
count = count + 1
    yRange3[0] = yRange3[1]    # the loop above never fills the first sample
    yRange3[-1] = yRange3[-2]  # ...or the last one; np.delete() only returned a discarded copy
xVal3 = xRange3.tolist()
yVal3 = yRange3.tolist()
ax1.plot(xVal3, yVal3, 'b', alpha=0.2)
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
count = 1
limit = len(yVal2) - 1
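    # An inflection point is flagged wherever the second-derivative samples change
    # sign; the two loops below catch both directions of the sign change.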
for z in yVal3:
if count == limit:
break
if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:
points1 = ax1.plot(xVal2[count], yVal1[count], marker='s', color='c')
ax1.axvline(x=xVal2[count], linestyle='--')
count = count + 1
count = 1
limit = len(yVal2) - 1
for z in yVal3:
if count == limit:
break
if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:
points1 = ax1.plot(xVal2[count], yVal1[count], marker='s', color='c')
ax1.axvline(x=xVal2[count], linestyle='--')
count = count + 1
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png', bbox_inches='tight')
plt.clf()
#############################################
# FTC
#############################################
xRange1 = np.arange(x1, x2, 0.01)
yRange1 = np.empty(xRange1.size)
count = 0
n, d = yParsed.as_numer_denom()
undef = sympy.solve(d)
plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
plt.xlim(x1, x2)
plt.ylim(y1, y2)
plt.autoscale(False)
for x in np.nditer(xRange1):
yRange1[count] = eval(y)
count = count + 1
xVal1 = xRange1.tolist()
yVal1 = yRange1.tolist()
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xVal1, yVal1, 'g')
n, d = yParsed.as_numer_denom()
    undef = sympy.solve(d)  # solve the denominator in x; solving for a brand-new Symbol('s') always returned []
for xc in undef:
ax1.axvline(x=xc, linestyle='--')
'''
print("Integration x1:")
x1int = float(input())
print("Integration x2:")
x2int = float(input())
'''
x1int = int(ftcVal1)
x2int = int(ftcVal2)
print("Processing...")
sectionx = np.arange(x1int, x2int, 0.00001)
sectiony = np.empty(sectionx.size)
count = 0
for x in np.nditer(sectionx):
sectiony[count] = eval(y)
count = count+1
plt.fill_between(sectionx, sectiony)
global area
area = 0
count = 0
limit = sectionx.size-1
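    # Accumulate the signed area with a trapezoidal sum; trapz(y0, y1) from
    # DerivTest is assumed to return the area of one trapezoid over the fixed
    # 1e-05 step used for sectionx.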
for x in np.nditer(sectionx):
if(count == limit):
break
trapSum = trapz(sectiony[count], sectiony[count+1])
area = area + trapSum
count = count + 1
print(area)
# ax1.set_aspect('equal')
ax1.grid(True, which='both')
ax1.axhline(y=0, color='k')
ax1.axvline(x=0, color='k')
if d == 1:
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.xlim(left=x1, right=x2)
plt.ylim(top=y2, bottom=y1)
plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png', bbox_inches='tight')
area = 0  # module-level default so the /ftc route works before any graph is computed
x1 = -5
x2 = 5
xRange1 = np.arange(x1,x2, 0.01)
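# Module-level scratch values; functionGraph() and the views build their own ranges.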
#print("1st input")
#y=input()
#yParsed = parse_expr(y, evaluate=False)
#functionGraph(y)
def testFunc(inp):
print("printing user input from testFunc - " +inp)
pass
##############################################
#works on CHROME ONLY, caching issue in Safari
##############################################
@app.route('/', methods=['GET', 'POST'])
@app.route('/graph', methods=['GET', 'POST'])
def graph():
if request.method == 'POST':
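        # Pull the function string, the viewing-window bounds, and the FTC
        # integration bounds straight from the submitted form.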
func = request.form['Function']
dVal1 = request.form['dVal1']
dVal2 = request.form['dVal2']
dVal3 = request.form['dVal3']
dVal4 = request.form['dVal4']
ftcVal1 = request.form['ftcVal1']
ftcVal2 = request.form['ftcVal2']
functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)
print("user input = " +str(input))
#testFunc(input)
return render_template("graph.html")
#return render_template("graph.html", result=input)
@app.route('/home', methods=['GET', 'POST'])
def home():
return render_template('home.html')
@app.route('/input', methods=['GET', 'POST'])
def input():
return render_template('input.html')
'''@app.route('/input', methods=['GET', 'POST'])
def input_post():
if request.method == 'POST':
result = request.form['Function']
print(result)
return render_template("graph.html", result=result)'''
@app.route('/der', methods=['GET', 'POST'])
def derGraph():
return render_template('graph2.html')
@app.route('/der2', methods=['GET', 'POST'])
def der2Graph():
return render_template('graph3.html')
@app.route('/relmax', methods=['GET', 'POST'])
def relmax():
return render_template('relmax.html')
@app.route('/relmin', methods=['GET', 'POST'])
def relmin():
return render_template('relmin.html')
@app.route('/poi', methods=['GET', 'POST'])
def poi():
return render_template('poi.html')
@app.route('/ftc', methods=['GET', 'POST'])
def ftc():
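    # Reports the area computed by the most recent functionGraph() call.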
global area
return render_template('ftc.html', result = str(area))
@app.route('/in1', methods=['GET', 'POST'])
def in1():
return render_template('in1.html')
@app.route('/out1', methods=['GET', 'POST'])
def out1():
return render_template('out1.html')
@app.after_request
def add_header(response):
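    # Disable client-side caching so the freshly written PNGs are reloaded on
    # every request (complements SEND_FILE_MAX_AGE_DEFAULT = 1 above).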
response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
response.headers['Cache-Control'] = 'public, max-age=0'
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=False)
|
normal
|
{
"blob_id": "9dc8449bcc0c6c6ffb5ced5724ca632b6578bf1b",
"index": 9170,
"step-1": "<mask token>\n\n\ndef functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):\n print('printing user input from functionGraph - ' + function)\n print(dVal1, dVal2, dVal3, dVal4)\n x1 = -5\n x2 = 5\n print('1st input:')\n y = function\n\n def f(x):\n return eval(y)\n \"\"\"print(\"Domain Val 1:\")\n x1 = float(input())\n print(\"Domain Val 2:\")\n x2 = float(input())\n print(\"Range Val 1:\")\n y1 = float(input())\n print(\"Range Val 2:\")\n y2 = float(input())\n \"\"\"\n x1 = int(dVal1)\n x2 = int(dVal2)\n y1 = int(dVal3)\n y2 = int(dVal4)\n print('Processing...')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n yParsed = parse_expr(y, evaluate=False)\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n numzero = sympy.solve(n)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n for x in undef:\n if x not in numzero:\n try:\n ax1.axvline(x=x, linestyle='--')\n except:\n pass\n else:\n x = x + 0.01\n ax1.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,\n markeredgecolor='g', markerfacecolor='None')\n count = 0\n \"\"\"for zero in numzero:\n if zero in undef:\n ax1.plot(zero, f(zero), marker='s', color='green')\n count = count + 1\"\"\"\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png'\n , bbox_inches='tight')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] > 0 and yVal2[count + 1] < 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] < 0 and yVal2[count + 1] > 0:\n 
ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png'\n , bbox_inches='tight')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n \"\"\"for x in np.nditer(xRange3):\n yRange3[count] = diff2(y, x)\n count = count + 1\"\"\"\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n print('XXXXXXXXXX')\n for x in xVal3:\n print(x)\n print('YYYYYYYYYY')\n for yVal in yVal3:\n print(yVal)\n ax1.plot(xVal3, yVal3, 'b')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png'\n , bbox_inches='tight')\n plt.clf\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n 
ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n ax1.plot(xVal3, yVal3, 'b', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n n, d = yParsed.as_numer_denom()\n s = Symbol('s', real=True)\n undef = sympy.solve(d, s)\n for xc in undef:\n ax1.axvline(x=xc, linestyle='--')\n \"\"\"\n print(\"Integration x1:\")\n x1int = float(input())\n print(\"Integration x2:\")\n x2int = float(input())\n \"\"\"\n x1int = int(ftcVal1)\n x2int = int(ftcVal2)\n print('Processing...')\n sectionx = np.arange(x1int, x2int, 1e-05)\n sectiony = np.empty(sectionx.size)\n count = 0\n for x in np.nditer(sectionx):\n sectiony[count] = eval(y)\n count = count + 1\n plt.fill_between(sectionx, sectiony)\n global area\n area = 0\n count = 0\n limit = sectionx.size - 1\n for x in np.nditer(sectionx):\n if count == limit:\n break\n trapSum = trapz(sectiony[count], sectiony[count + 1])\n area = area + trapSum\n count = count + 1\n print(area)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png'\n , bbox_inches='tight')\n\n\n<mask token>\n\n\ndef testFunc(inp):\n print('printing user input from testFunc - ' + inp)\n pass\n\n\[email protected]('/', methods=['GET', 'POST'])\[email 
protected]('/graph', methods=['GET', 'POST'])\ndef graph():\n if request.method == 'POST':\n func = request.form['Function']\n dVal1 = request.form['dVal1']\n dVal2 = request.form['dVal2']\n dVal3 = request.form['dVal3']\n dVal4 = request.form['dVal4']\n ftcVal1 = request.form['ftcVal1']\n ftcVal2 = request.form['ftcVal2']\n functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)\n print('user input = ' + str(input))\n return render_template('graph.html')\n\n\n<mask token>\n\n\[email protected]('/input', methods=['GET', 'POST'])\ndef input():\n return render_template('input.html')\n\n\n<mask token>\n\n\[email protected]('/der2', methods=['GET', 'POST'])\ndef der2Graph():\n return render_template('graph3.html')\n\n\[email protected]('/relmax', methods=['GET', 'POST'])\ndef relmax():\n return render_template('relmax.html')\n\n\[email protected]('/relmin', methods=['GET', 'POST'])\ndef relmin():\n return render_template('relmin.html')\n\n\n<mask token>\n\n\[email protected]('/ftc', methods=['GET', 'POST'])\ndef ftc():\n global area\n return render_template('ftc.html', result=str(area))\n\n\[email protected]('/in1', methods=['GET', 'POST'])\ndef in1():\n return render_template('in1.html')\n\n\n<mask token>\n\n\[email protected]_request\ndef add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):\n print('printing user input from functionGraph - ' + function)\n print(dVal1, dVal2, dVal3, dVal4)\n x1 = -5\n x2 = 5\n print('1st input:')\n y = function\n\n def f(x):\n return eval(y)\n \"\"\"print(\"Domain Val 1:\")\n x1 = float(input())\n print(\"Domain Val 2:\")\n x2 = float(input())\n print(\"Range Val 1:\")\n y1 = float(input())\n print(\"Range Val 2:\")\n y2 = float(input())\n \"\"\"\n x1 = int(dVal1)\n x2 = int(dVal2)\n y1 = int(dVal3)\n y2 = int(dVal4)\n print('Processing...')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n yParsed = parse_expr(y, evaluate=False)\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n numzero = sympy.solve(n)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n for x in undef:\n if x not in numzero:\n try:\n ax1.axvline(x=x, linestyle='--')\n except:\n pass\n else:\n x = x + 0.01\n ax1.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,\n markeredgecolor='g', markerfacecolor='None')\n count = 0\n \"\"\"for zero in numzero:\n if zero in undef:\n ax1.plot(zero, f(zero), marker='s', color='green')\n count = count + 1\"\"\"\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png'\n , bbox_inches='tight')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] > 0 and yVal2[count + 1] < 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] < 0 and yVal2[count + 1] > 0:\n 
ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png'\n , bbox_inches='tight')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n \"\"\"for x in np.nditer(xRange3):\n yRange3[count] = diff2(y, x)\n count = count + 1\"\"\"\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n print('XXXXXXXXXX')\n for x in xVal3:\n print(x)\n print('YYYYYYYYYY')\n for yVal in yVal3:\n print(yVal)\n ax1.plot(xVal3, yVal3, 'b')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png'\n , bbox_inches='tight')\n plt.clf\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n 
ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n ax1.plot(xVal3, yVal3, 'b', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n n, d = yParsed.as_numer_denom()\n s = Symbol('s', real=True)\n undef = sympy.solve(d, s)\n for xc in undef:\n ax1.axvline(x=xc, linestyle='--')\n \"\"\"\n print(\"Integration x1:\")\n x1int = float(input())\n print(\"Integration x2:\")\n x2int = float(input())\n \"\"\"\n x1int = int(ftcVal1)\n x2int = int(ftcVal2)\n print('Processing...')\n sectionx = np.arange(x1int, x2int, 1e-05)\n sectiony = np.empty(sectionx.size)\n count = 0\n for x in np.nditer(sectionx):\n sectiony[count] = eval(y)\n count = count + 1\n plt.fill_between(sectionx, sectiony)\n global area\n area = 0\n count = 0\n limit = sectionx.size - 1\n for x in np.nditer(sectionx):\n if count == limit:\n break\n trapSum = trapz(sectiony[count], sectiony[count + 1])\n area = area + trapSum\n count = count + 1\n print(area)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png'\n , bbox_inches='tight')\n\n\n<mask token>\n\n\ndef testFunc(inp):\n print('printing user input from testFunc - ' + inp)\n pass\n\n\[email protected]('/', methods=['GET', 'POST'])\[email 
protected]('/graph', methods=['GET', 'POST'])\ndef graph():\n if request.method == 'POST':\n func = request.form['Function']\n dVal1 = request.form['dVal1']\n dVal2 = request.form['dVal2']\n dVal3 = request.form['dVal3']\n dVal4 = request.form['dVal4']\n ftcVal1 = request.form['ftcVal1']\n ftcVal2 = request.form['ftcVal2']\n functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)\n print('user input = ' + str(input))\n return render_template('graph.html')\n\n\n<mask token>\n\n\[email protected]('/input', methods=['GET', 'POST'])\ndef input():\n return render_template('input.html')\n\n\n<mask token>\n\n\[email protected]('/der', methods=['GET', 'POST'])\ndef derGraph():\n return render_template('graph2.html')\n\n\[email protected]('/der2', methods=['GET', 'POST'])\ndef der2Graph():\n return render_template('graph3.html')\n\n\[email protected]('/relmax', methods=['GET', 'POST'])\ndef relmax():\n return render_template('relmax.html')\n\n\[email protected]('/relmin', methods=['GET', 'POST'])\ndef relmin():\n return render_template('relmin.html')\n\n\[email protected]('/poi', methods=['GET', 'POST'])\ndef poi():\n return render_template('poi.html')\n\n\[email protected]('/ftc', methods=['GET', 'POST'])\ndef ftc():\n global area\n return render_template('ftc.html', result=str(area))\n\n\[email protected]('/in1', methods=['GET', 'POST'])\ndef in1():\n return render_template('in1.html')\n\n\[email protected]('/out1', methods=['GET', 'POST'])\ndef out1():\n return render_template('out1.html')\n\n\[email protected]_request\ndef add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):\n print('printing user input from functionGraph - ' + function)\n print(dVal1, dVal2, dVal3, dVal4)\n x1 = -5\n x2 = 5\n print('1st input:')\n y = function\n\n def f(x):\n return eval(y)\n \"\"\"print(\"Domain Val 1:\")\n x1 = float(input())\n print(\"Domain Val 2:\")\n x2 = float(input())\n print(\"Range Val 1:\")\n y1 = float(input())\n print(\"Range Val 2:\")\n y2 = float(input())\n \"\"\"\n x1 = int(dVal1)\n x2 = int(dVal2)\n y1 = int(dVal3)\n y2 = int(dVal4)\n print('Processing...')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n yParsed = parse_expr(y, evaluate=False)\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n numzero = sympy.solve(n)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n for x in undef:\n if x not in numzero:\n try:\n ax1.axvline(x=x, linestyle='--')\n except:\n pass\n else:\n x = x + 0.01\n ax1.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,\n markeredgecolor='g', markerfacecolor='None')\n count = 0\n \"\"\"for zero in numzero:\n if zero in undef:\n ax1.plot(zero, f(zero), marker='s', color='green')\n count = count + 1\"\"\"\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png'\n , bbox_inches='tight')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] > 0 and yVal2[count + 1] < 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] < 0 and yVal2[count + 1] > 0:\n 
ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png'\n , bbox_inches='tight')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n \"\"\"for x in np.nditer(xRange3):\n yRange3[count] = diff2(y, x)\n count = count + 1\"\"\"\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n print('XXXXXXXXXX')\n for x in xVal3:\n print(x)\n print('YYYYYYYYYY')\n for yVal in yVal3:\n print(yVal)\n ax1.plot(xVal3, yVal3, 'b')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png'\n , bbox_inches='tight')\n plt.clf\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n 
ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n ax1.plot(xVal3, yVal3, 'b', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n n, d = yParsed.as_numer_denom()\n s = Symbol('s', real=True)\n undef = sympy.solve(d, s)\n for xc in undef:\n ax1.axvline(x=xc, linestyle='--')\n \"\"\"\n print(\"Integration x1:\")\n x1int = float(input())\n print(\"Integration x2:\")\n x2int = float(input())\n \"\"\"\n x1int = int(ftcVal1)\n x2int = int(ftcVal2)\n print('Processing...')\n sectionx = np.arange(x1int, x2int, 1e-05)\n sectiony = np.empty(sectionx.size)\n count = 0\n for x in np.nditer(sectionx):\n sectiony[count] = eval(y)\n count = count + 1\n plt.fill_between(sectionx, sectiony)\n global area\n area = 0\n count = 0\n limit = sectionx.size - 1\n for x in np.nditer(sectionx):\n if count == limit:\n break\n trapSum = trapz(sectiony[count], sectiony[count + 1])\n area = area + trapSum\n count = count + 1\n print(area)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png'\n , bbox_inches='tight')\n\n\nglobal area\n<mask token>\n\n\ndef testFunc(inp):\n print('printing user input from testFunc - ' + inp)\n pass\n\n\[email protected]('/', methods=['GET', 
'POST'])\[email protected]('/graph', methods=['GET', 'POST'])\ndef graph():\n if request.method == 'POST':\n func = request.form['Function']\n dVal1 = request.form['dVal1']\n dVal2 = request.form['dVal2']\n dVal3 = request.form['dVal3']\n dVal4 = request.form['dVal4']\n ftcVal1 = request.form['ftcVal1']\n ftcVal2 = request.form['ftcVal2']\n functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)\n print('user input = ' + str(input))\n return render_template('graph.html')\n\n\[email protected]('/home', methods=['GET', 'POST'])\ndef home():\n return render_template('home.html')\n\n\[email protected]('/input', methods=['GET', 'POST'])\ndef input():\n return render_template('input.html')\n\n\n<mask token>\n\n\[email protected]('/der', methods=['GET', 'POST'])\ndef derGraph():\n return render_template('graph2.html')\n\n\[email protected]('/der2', methods=['GET', 'POST'])\ndef der2Graph():\n return render_template('graph3.html')\n\n\[email protected]('/relmax', methods=['GET', 'POST'])\ndef relmax():\n return render_template('relmax.html')\n\n\[email protected]('/relmin', methods=['GET', 'POST'])\ndef relmin():\n return render_template('relmin.html')\n\n\[email protected]('/poi', methods=['GET', 'POST'])\ndef poi():\n return render_template('poi.html')\n\n\[email protected]('/ftc', methods=['GET', 'POST'])\ndef ftc():\n global area\n return render_template('ftc.html', result=str(area))\n\n\[email protected]('/in1', methods=['GET', 'POST'])\ndef in1():\n return render_template('in1.html')\n\n\[email protected]('/out1', methods=['GET', 'POST'])\ndef out1():\n return render_template('out1.html')\n\n\[email protected]_request\ndef add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=False)\n",
"step-4": "<mask token>\napp = Flask(__name__)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1\n\n\ndef functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):\n print('printing user input from functionGraph - ' + function)\n print(dVal1, dVal2, dVal3, dVal4)\n x1 = -5\n x2 = 5\n print('1st input:')\n y = function\n\n def f(x):\n return eval(y)\n \"\"\"print(\"Domain Val 1:\")\n x1 = float(input())\n print(\"Domain Val 2:\")\n x2 = float(input())\n print(\"Range Val 1:\")\n y1 = float(input())\n print(\"Range Val 2:\")\n y2 = float(input())\n \"\"\"\n x1 = int(dVal1)\n x2 = int(dVal2)\n y1 = int(dVal3)\n y2 = int(dVal4)\n print('Processing...')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n yParsed = parse_expr(y, evaluate=False)\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n numzero = sympy.solve(n)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n for x in undef:\n if x not in numzero:\n try:\n ax1.axvline(x=x, linestyle='--')\n except:\n pass\n else:\n x = x + 0.01\n ax1.plot(x, eval(y), 'o', markersize=7, markeredgewidth=1,\n markeredgecolor='g', markerfacecolor='None')\n count = 0\n \"\"\"for zero in numzero:\n if zero in undef:\n ax1.plot(zero, f(zero), marker='s', color='green')\n count = count + 1\"\"\"\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png'\n , bbox_inches='tight')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if yVal2[count - 1] > 0 and yVal2[count + 1] < 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n 
break\n if yVal2[count - 1] < 0 and yVal2[count + 1] > 0:\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png'\n , bbox_inches='tight')\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n \"\"\"for x in np.nditer(xRange3):\n yRange3[count] = diff2(y, x)\n count = count + 1\"\"\"\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n print('XXXXXXXXXX')\n for x in xVal3:\n print(x)\n print('YYYYYYYYYY')\n for yVal in yVal3:\n print(yVal)\n ax1.plot(xVal3, yVal3, 'b')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png'\n , bbox_inches='tight')\n plt.clf\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n 
ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n ax1.plot(xVal3, yVal3, 'b', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s',\n color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png'\n , bbox_inches='tight')\n plt.clf()\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k'\n )\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n n, d = yParsed.as_numer_denom()\n s = Symbol('s', real=True)\n undef = sympy.solve(d, s)\n for xc in undef:\n ax1.axvline(x=xc, linestyle='--')\n \"\"\"\n print(\"Integration x1:\")\n x1int = float(input())\n print(\"Integration x2:\")\n x2int = float(input())\n \"\"\"\n x1int = int(ftcVal1)\n x2int = int(ftcVal2)\n print('Processing...')\n sectionx = np.arange(x1int, x2int, 1e-05)\n sectiony = np.empty(sectionx.size)\n count = 0\n for x in np.nditer(sectionx):\n sectiony[count] = eval(y)\n count = count + 1\n plt.fill_between(sectionx, sectiony)\n global area\n area = 0\n count = 0\n limit = sectionx.size - 1\n for x in np.nditer(sectionx):\n if count == limit:\n break\n trapSum = trapz(sectiony[count], sectiony[count + 1])\n area = area + trapSum\n count = count + 1\n print(area)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig(\n '/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png'\n , bbox_inches='tight')\n\n\nglobal area\nx1 = -5\nx2 = 5\nxRange1 = np.arange(x1, x2, 0.01)\n\n\ndef testFunc(inp):\n 
print('printing user input from testFunc - ' + inp)\n pass\n\n\[email protected]('/', methods=['GET', 'POST'])\[email protected]('/graph', methods=['GET', 'POST'])\ndef graph():\n if request.method == 'POST':\n func = request.form['Function']\n dVal1 = request.form['dVal1']\n dVal2 = request.form['dVal2']\n dVal3 = request.form['dVal3']\n dVal4 = request.form['dVal4']\n ftcVal1 = request.form['ftcVal1']\n ftcVal2 = request.form['ftcVal2']\n functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)\n print('user input = ' + str(input))\n return render_template('graph.html')\n\n\[email protected]('/home', methods=['GET', 'POST'])\ndef home():\n return render_template('home.html')\n\n\[email protected]('/input', methods=['GET', 'POST'])\ndef input():\n return render_template('input.html')\n\n\n<mask token>\n\n\[email protected]('/der', methods=['GET', 'POST'])\ndef derGraph():\n return render_template('graph2.html')\n\n\[email protected]('/der2', methods=['GET', 'POST'])\ndef der2Graph():\n return render_template('graph3.html')\n\n\[email protected]('/relmax', methods=['GET', 'POST'])\ndef relmax():\n return render_template('relmax.html')\n\n\[email protected]('/relmin', methods=['GET', 'POST'])\ndef relmin():\n return render_template('relmin.html')\n\n\[email protected]('/poi', methods=['GET', 'POST'])\ndef poi():\n return render_template('poi.html')\n\n\[email protected]('/ftc', methods=['GET', 'POST'])\ndef ftc():\n global area\n return render_template('ftc.html', result=str(area))\n\n\[email protected]('/in1', methods=['GET', 'POST'])\ndef in1():\n return render_template('in1.html')\n\n\[email protected]('/out1', methods=['GET', 'POST'])\ndef out1():\n return render_template('out1.html')\n\n\[email protected]_request\ndef add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=False)\n",
"step-5": "from flask import Flask, render_template, request\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sympy\nfrom DerivTest import diff, diff2, trapz\nfrom sympy.parsing.sympy_parser import parse_expr\nfrom sympy import Symbol\n#from ParsingClass import Parser\n#from scitools.StringFunction import StringFunction\n#from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField\n\napp = Flask(__name__)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1\n\ndef functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2):\n print(\"printing user input from functionGraph - \" + function)\n print(dVal1, dVal2, dVal3, dVal4)\n #parser = Parser()\n #x=np.array(range(10))\n x1 = -5;\n x2 = 5;\n print(\"1st input:\")\n y=function\n def f(x):\n return eval(y)\n '''print(\"Domain Val 1:\")\n x1 = float(input())\n print(\"Domain Val 2:\")\n x2 = float(input())\n print(\"Range Val 1:\")\n y1 = float(input())\n print(\"Range Val 2:\")\n y2 = float(input())\n '''\n\n x1=int(dVal1)\n x2=int(dVal2)\n y1=int(dVal3)\n y2=int(dVal4)\n\n print(\"Processing...\")\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n yParsed = parse_expr(y, evaluate=False)\n n, d = yParsed.as_numer_denom()\n #s = Symbol('s', real = True)\n undef = sympy.solve(d)\n numzero = sympy.solve(n)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count+1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2,2,1)\n ax1.plot(xVal1, yVal1, 'g')\n for x in undef:\n if x not in numzero:\n try:\n ax1.axvline(x=x, linestyle = '--')\n except:\n pass\n else:\n x=x+0.01\n ax1.plot(x, eval(y), \"o\", markersize=7, markeredgewidth=1, markeredgecolor='g',markerfacecolor='None')\n count = 0\n '''for zero in numzero:\n if zero in undef:\n ax1.plot(zero, f(zero), marker='s', color='green')\n count = count + 1'''\n #ax1.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n #plt.axis([0,6,0,30])\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png', bbox_inches = 'tight')\n\n #############################################\n # Relative Extrema\n #############################################\n\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n # ax2.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if (yVal2[count - 1]>0 and yVal2[count + 1]<0):\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png', bbox_inches='tight')\n plt.clf()\n\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', 
edgecolor='k')\n\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1,'g')\n # ax1.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n # ax2.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n count = 1\n limit = len(yVal2) - 1\n for z in yVal2:\n if count == limit:\n break\n if (yVal2[count - 1] < 0 and yVal2[count + 1] > 0):\n ax1.plot(xVal1[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal1[count], linestyle='--')\n count = count + 1\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png', bbox_inches='tight')\n plt.clf()\n\n\n #############################################\n # First Derivative\n #############################################\n\n xRange1 = np.arange(x1,x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count+1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2,2,1)\n ax1.plot(xVal1, yVal1, 'g')\n #ax1.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y,x)\n count = count+1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n #ax2.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png', bbox_inches = 'tight')\n\n #############################################\n # SECOND DERIVATIVE\n #############################################\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n # ax1.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n '''for x in np.nditer(xRange3):\n yRange3[count] = diff2(y, x)\n count = count + 1'''\n count = 1\n limit = yRange2.size-1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count-1], yRange2[count+1])\n 
count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n print(\"XXXXXXXXXX\")\n for x in xVal3:\n print (x)\n print(\"YYYYYYYYYY\")\n for yVal in yVal3:\n print (yVal)\n ax1.plot(xVal3, yVal3, 'b')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png', bbox_inches='tight')\n plt.clf\n #############################################\n #POINTS OF INFLECTION\n #############################################\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange2 = np.arange(x1, x2, 0.01)\n count = 0\n yRange2 = np.empty(xRange2.size)\n for x in np.nditer(xRange2):\n yRange2[count] = diff(y, x)\n count = count + 1\n xVal2 = xRange2.tolist()\n yVal2 = yRange2.tolist()\n ax1.plot(xVal2, yVal2, 'r', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n xRange3 = np.arange(x1, x2, 0.01)\n yRange3 = np.empty(xRange3.size)\n count = 1\n limit = yRange2.size - 1\n for x in np.nditer(xRange3):\n if count == limit:\n break\n yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1])\n count = count + 1\n np.delete(xRange3, -1)\n np.delete(yRange3, -1)\n xVal3 = xRange3.tolist()\n yVal3 = yRange3.tolist()\n ax1.plot(xVal3, yVal3, 'b', alpha=0.2)\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] < 0 and yVal3[count + 1] > 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n count = 1\n limit = len(yVal2) - 1\n for z in yVal3:\n if count == limit:\n break\n if yVal3[count - 1] > 0 and yVal3[count + 1] < 0:\n points1 = ax1.plot(xVal2[count], yVal1[count], marker='s', color='c')\n ax1.axvline(x=xVal2[count], linestyle='--')\n count = count + 1\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png', bbox_inches='tight')\n plt.clf()\n\n #############################################\n # FTC\n #############################################\n xRange1 = np.arange(x1, x2, 0.01)\n yRange1 = np.empty(xRange1.size)\n count = 0\n n, d = yParsed.as_numer_denom()\n undef = sympy.solve(d)\n plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.autoscale(False)\n for x in np.nditer(xRange1):\n yRange1[count] = eval(y)\n count = count + 1\n xVal1 = xRange1.tolist()\n yVal1 = yRange1.tolist()\n ax1 = plt.subplot(2, 2, 1)\n ax1.plot(xVal1, yVal1, 'g')\n n, d = yParsed.as_numer_denom()\n s = Symbol('s', 
real=True)\n undef = sympy.solve(d, s)\n for xc in undef:\n ax1.axvline(x=xc, linestyle='--')\n '''\n print(\"Integration x1:\")\n x1int = float(input())\n print(\"Integration x2:\")\n x2int = float(input())\n '''\n x1int = int(ftcVal1)\n x2int = int(ftcVal2)\n print(\"Processing...\")\n sectionx = np.arange(x1int, x2int, 0.00001)\n sectiony = np.empty(sectionx.size)\n count = 0\n for x in np.nditer(sectionx):\n sectiony[count] = eval(y)\n count = count+1\n plt.fill_between(sectionx, sectiony)\n global area\n area = 0\n count = 0\n limit = sectionx.size-1\n for x in np.nditer(sectionx):\n if(count == limit):\n break\n trapSum = trapz(sectiony[count], sectiony[count+1])\n area = area + trapSum\n count = count + 1\n print(area)\n # ax1.set_aspect('equal')\n ax1.grid(True, which='both')\n ax1.axhline(y=0, color='k')\n ax1.axvline(x=0, color='k')\n if d == 1:\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.xlim(left=x1, right=x2)\n plt.ylim(top=y2, bottom=y1)\n plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png', bbox_inches='tight')\n\nglobal area\n\nx1 = -5;\nx2 = 5;\nxRange1 = np.arange(x1,x2, 0.01)\n#print(\"1st input\")\n#y=input()\n#yParsed = parse_expr(y, evaluate=False)\n#functionGraph(y)\n\ndef testFunc(inp):\n print(\"printing user input from testFunc - \" +inp)\n pass\n\n##############################################\n#works on CHROME ONLY, caching issue in Safari\n##############################################\n\[email protected]('/', methods=['GET', 'POST'])\[email protected]('/graph', methods=['GET', 'POST'])\ndef graph():\n if request.method == 'POST':\n func = request.form['Function']\n dVal1 = request.form['dVal1']\n dVal2 = request.form['dVal2']\n dVal3 = request.form['dVal3']\n dVal4 = request.form['dVal4']\n\n ftcVal1 = request.form['ftcVal1']\n ftcVal2 = request.form['ftcVal2']\n\n functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)\n\n print(\"user input = \" +str(input))\n\n\n #testFunc(input)\n return render_template(\"graph.html\")\n #return render_template(\"graph.html\", result=input)\n\n\[email protected]('/home', methods=['GET', 'POST'])\ndef home():\n return render_template('home.html')\n\[email protected]('/input', methods=['GET', 'POST'])\ndef input():\n return render_template('input.html')\n\n'''@app.route('/input', methods=['GET', 'POST'])\ndef input_post():\n if request.method == 'POST':\n result = request.form['Function']\n print(result)\n return render_template(\"graph.html\", result=result)'''\n\[email protected]('/der', methods=['GET', 'POST'])\ndef derGraph():\n return render_template('graph2.html')\n\[email protected]('/der2', methods=['GET', 'POST'])\ndef der2Graph():\n return render_template('graph3.html')\n\[email protected]('/relmax', methods=['GET', 'POST'])\ndef relmax():\n return render_template('relmax.html')\n\[email protected]('/relmin', methods=['GET', 'POST'])\ndef relmin():\n return render_template('relmin.html')\n\[email protected]('/poi', methods=['GET', 'POST'])\ndef poi():\n return render_template('poi.html')\n\[email protected]('/ftc', methods=['GET', 'POST'])\ndef ftc():\n global area\n return render_template('ftc.html', result = str(area))\n\[email protected]('/in1', methods=['GET', 'POST'])\ndef in1():\n return render_template('in1.html')\n\[email protected]('/out1', methods=['GET', 'POST'])\ndef out1():\n return render_template('out1.html')\n\[email protected]_request\ndef add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n 
response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=False)\n\n\n",
"step-ids": [
10,
13,
15,
16,
18
]
}
|
[
10,
13,
15,
16,
18
] |
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
import json
from typing import Dict
from pandas import DataFrame, concat, Series
from edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM
ACTIVITY_TYPE_STATE = "Submission State Change"
ACTIVITY_TYPE_GRADE = "Submission Grade Change"
def submissions_to_user_submission_activities_dfs(
submissions_df: DataFrame,
) -> Dict[str, DataFrame]:
"""
Convert a Submission API DataFrame to a Dict of UserActivity
UDM DataFrames grouped by source system section id.
Parameters
----------
submissions_df: DataFrame
is a Submission API DataFrame
Returns
-------
Dict[str, DataFrame] LMS UDM UserActivity DataFrames
grouped by source system section id
Notes
-----
UserActivity DataFrame columns are:
ActivityDateTime: The date/time the activity occurred
ActivityStatus: The activity status
ActivityTimeInMinutes: The total activity time in minutes
ActivityType: The type of activity, here "Submission" or "Grade"
AssignmentIdentifier: A unique numeric identifier assigned to the assignment
Content: Content associated with the activity
LMSSectionIdentifier: A unique numeric identifier assigned to the section
SourceSystem: The system code or name providing the user activity data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a
user activity by the source system
LMSUserIdentifier: A unique numeric identifier assigned to the user
CreateDate: Date this record was created in the extractor
LastModifiedDate: Date this record was last updated in the extractor
"""
assert "submissionHistory" in submissions_df.columns
assert "id" in submissions_df.columns
assert "courseId" in submissions_df.columns
assert "courseWorkId" in submissions_df.columns
# convert json-like submissionHistory string to list of dicts
submissions_df["submissionHistory"] = submissions_df["submissionHistory"].apply(lambda json_like: json.loads(json_like.replace("'", '"')))
submissions_df["AssignmentIdentifier"] = submissions_df[
["courseId", "courseWorkId"]
].agg("-".join, axis=1)
submissions_df = submissions_df[["id", "courseId", "courseWorkId", "submissionHistory", "AssignmentIdentifier", "CreateDate", "LastModifiedDate"]]
# explode submissionHistory lists into rows with other columns duplicated
history_df = submissions_df.explode(column="submissionHistory") # type: ignore
# expand submissionHistory dicts (stateHistory and gradeHistory) into their own columns
history_df = history_df["submissionHistory"].apply(Series).merge(history_df, left_index=True, right_index=True, how='outer')
history_df.drop(columns=["submissionHistory"], inplace=True)
# expand stateHistory (can assume exists, should always have at least one "CREATED" entry)
user_submission_df = concat([history_df, history_df["stateHistory"].apply(Series)], axis=1)
user_submission_df.dropna(subset=["stateHistory"], inplace=True)
# enrich stateHistory
user_submission_df["SourceSystemIdentifier"] = "S-" + user_submission_df[
["courseId", "courseWorkId", "id", "stateTimestamp"]
].agg("-".join, axis=1)
user_submission_df = user_submission_df[
[
"SourceSystemIdentifier",
"AssignmentIdentifier",
"stateTimestamp",
"state",
"courseId",
"actorUserId",
"CreateDate",
"LastModifiedDate"
]
]
user_submission_df = user_submission_df.rename(
columns={
"stateTimestamp": "ActivityDateTime",
"state": "ActivityStatus",
"courseId": "LMSSectionIdentifier",
"actorUserId": "LMSUserIdentifier",
}
)
user_submission_df["ActivityType"] = ACTIVITY_TYPE_STATE
# expand gradeHistory if exists
if "gradeHistory" in history_df:
grade_history_df = concat([history_df, history_df["gradeHistory"].apply(Series)], axis=1)
grade_history_df.dropna(subset=["gradeHistory"], inplace=True)
# enrich gradeHistory
grade_history_df["SourceSystemIdentifier"] = "G-" + grade_history_df[
["courseId", "courseWorkId", "id", "gradeTimestamp"]
].agg("-".join, axis=1)
grade_history_df = grade_history_df[
[
"SourceSystemIdentifier",
"AssignmentIdentifier",
"gradeTimestamp",
"gradeChangeType",
"courseId",
"actorUserId",
"CreateDate",
"LastModifiedDate"
]
]
grade_history_df = grade_history_df.rename(
columns={
"gradeTimestamp": "ActivityDateTime",
"gradeChangeType": "ActivityStatus",
"courseId": "LMSSectionIdentifier",
"actorUserId": "LMSUserIdentifier",
}
)
grade_history_df["ActivityType"] = ACTIVITY_TYPE_GRADE
# combine with stateHistory
user_submission_df = user_submission_df.append(grade_history_df)
# teacher actions can show up on student histories and vice-versa
user_submission_df.drop_duplicates(subset=["SourceSystemIdentifier"], inplace=True)
# finish with common columns
user_submission_df["ActivityTimeInMinutes"] = ""
user_submission_df["Content"] = ""
user_submission_df["SourceSystem"] = SOURCE_SYSTEM
user_submission_df["SourceCreateDate"] = "" # No create date available from API
user_submission_df["SourceLastModifiedDate"] = "" # No modified date available from API
# group by section id as a Dict of DataFrames
result: Dict[str, DataFrame] = dict(
tuple(user_submission_df.groupby(["LMSSectionIdentifier"]))
)
return result
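# Illustrative usage sketch (not part of the extractor; the file names below are
# assumptions): write one UserActivity CSV per section returned by the mapper.
# activities_by_section = submissions_to_user_submission_activities_dfs(submissions_df)
# for section_id, activity_df in activities_by_section.items():
#     activity_df.to_csv(f"user-activity-section-{section_id}.csv", index=False)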
|
normal
|
{
"blob_id": "d6a760774b45454c959c2932d7b28deee7f81872",
"index": 318,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = 
grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n",
"step-3": "<mask token>\nACTIVITY_TYPE_STATE = 'Submission State Change'\nACTIVITY_TYPE_GRADE = 'Submission Grade Change'\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 
'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n",
"step-4": "import json\nfrom typing import Dict\nfrom pandas import DataFrame, concat, Series\nfrom edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM\nACTIVITY_TYPE_STATE = 'Submission State Change'\nACTIVITY_TYPE_GRADE = 'Submission Grade Change'\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n 
join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n",
"step-5": "# SPDX-License-Identifier: Apache-2.0\n# Licensed to the Ed-Fi Alliance under one or more agreements.\n# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.\n# See the LICENSE and NOTICES files in the project root for more information.\n\nimport json\nfrom typing import Dict\nfrom pandas import DataFrame, concat, Series\nfrom edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM\n\nACTIVITY_TYPE_STATE = \"Submission State Change\"\nACTIVITY_TYPE_GRADE = \"Submission Grade Change\"\n\n\ndef submissions_to_user_submission_activities_dfs(\n submissions_df: DataFrame,\n) -> Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert \"submissionHistory\" in submissions_df.columns\n assert \"id\" in submissions_df.columns\n assert \"courseId\" in submissions_df.columns\n assert \"courseWorkId\" in submissions_df.columns\n\n # convert json-like submissionHistory string to list of dicts\n submissions_df[\"submissionHistory\"] = submissions_df[\"submissionHistory\"].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df[\"AssignmentIdentifier\"] = submissions_df[\n [\"courseId\", \"courseWorkId\"]\n ].agg(\"-\".join, axis=1)\n\n submissions_df = submissions_df[[\"id\", \"courseId\", \"courseWorkId\", \"submissionHistory\", \"AssignmentIdentifier\", \"CreateDate\", \"LastModifiedDate\"]]\n\n # explode submissionHistory lists into rows with other columns duplicated\n history_df = submissions_df.explode(column=\"submissionHistory\") # type: ignore\n\n # expand submissionHistory dicts (stateHistory and gradeHistory) into their own columns\n history_df = history_df[\"submissionHistory\"].apply(Series).merge(history_df, left_index=True, right_index=True, how='outer')\n history_df.drop(columns=[\"submissionHistory\"], inplace=True)\n\n # expand stateHistory (can assume exists, should always have at least one \"CREATED\" entry)\n user_submission_df = concat([history_df, history_df[\"stateHistory\"].apply(Series)], axis=1)\n user_submission_df.dropna(subset=[\"stateHistory\"], inplace=True)\n\n # enrich stateHistory\n user_submission_df[\"SourceSystemIdentifier\"] = \"S-\" + user_submission_df[\n [\"courseId\", \"courseWorkId\", \"id\", \"stateTimestamp\"]\n ].agg(\"-\".join, axis=1)\n\n user_submission_df = user_submission_df[\n [\n 
\"SourceSystemIdentifier\",\n \"AssignmentIdentifier\",\n \"stateTimestamp\",\n \"state\",\n \"courseId\",\n \"actorUserId\",\n \"CreateDate\",\n \"LastModifiedDate\"\n ]\n ]\n\n user_submission_df = user_submission_df.rename(\n columns={\n \"stateTimestamp\": \"ActivityDateTime\",\n \"state\": \"ActivityStatus\",\n \"courseId\": \"LMSSectionIdentifier\",\n \"actorUserId\": \"LMSUserIdentifier\",\n }\n )\n\n user_submission_df[\"ActivityType\"] = ACTIVITY_TYPE_STATE\n\n # expand gradeHistory if exists\n if \"gradeHistory\" in history_df:\n grade_history_df = concat([history_df, history_df[\"gradeHistory\"].apply(Series)], axis=1)\n grade_history_df.dropna(subset=[\"gradeHistory\"], inplace=True)\n\n # enrich gradeHistory\n grade_history_df[\"SourceSystemIdentifier\"] = \"G-\" + grade_history_df[\n [\"courseId\", \"courseWorkId\", \"id\", \"gradeTimestamp\"]\n ].agg(\"-\".join, axis=1)\n\n grade_history_df = grade_history_df[\n [\n \"SourceSystemIdentifier\",\n \"AssignmentIdentifier\",\n \"gradeTimestamp\",\n \"gradeChangeType\",\n \"courseId\",\n \"actorUserId\",\n \"CreateDate\",\n \"LastModifiedDate\"\n ]\n ]\n\n grade_history_df = grade_history_df.rename(\n columns={\n \"gradeTimestamp\": \"ActivityDateTime\",\n \"gradeChangeType\": \"ActivityStatus\",\n \"courseId\": \"LMSSectionIdentifier\",\n \"actorUserId\": \"LMSUserIdentifier\",\n }\n )\n\n grade_history_df[\"ActivityType\"] = ACTIVITY_TYPE_GRADE\n\n # combine with stateHistory\n user_submission_df = user_submission_df.append(grade_history_df)\n\n # teacher actions can show up on student histories and vice-versa\n user_submission_df.drop_duplicates(subset=[\"SourceSystemIdentifier\"], inplace=True)\n\n # finish with common columns\n user_submission_df[\"ActivityTimeInMinutes\"] = \"\"\n user_submission_df[\"Content\"] = \"\"\n user_submission_df[\"SourceSystem\"] = SOURCE_SYSTEM\n user_submission_df[\"SourceCreateDate\"] = \"\" # No create date available from API\n user_submission_df[\"SourceLastModifiedDate\"] = \"\" # No modified date available from API\n\n # group by section id as a Dict of DataFrames\n result: Dict[str, DataFrame] = dict(\n tuple(user_submission_df.groupby([\"LMSSectionIdentifier\"]))\n )\n\n return result\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class NVCComparator:
""" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on their NVCness. Only returns true if both
responses are in agreement with either responding NVC or not NVC.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True only if both objects agree on whether the response is NVC or not.
"""
return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==
'NVC')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EqualityComparator:
""" Equality comparator. Checks if both responses are equal.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on equality.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True if both objects are equal, false otherwise.
"""
return tuple_to_string(obj_a) == tuple_to_string(obj_b)
class NVCComparator:
""" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on their NVCness. Only returns true if both
responses are in agreement with either responding NVC or not NVC.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True only if both objects agree on whether the response is NVC or not.
"""
return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==
'NVC')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Comparator:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class EqualityComparator:
""" Equality comparator. Checks if both responses are equal.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on equality.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True if both objects are equal, false otherwise.
"""
return tuple_to_string(obj_a) == tuple_to_string(obj_b)
class NVCComparator:
""" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on their NVCness. Only returns true if both
responses are in agreement with either responding NVC or not NVC.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True only if both objects agree on whether the response is NVC or not.
"""
return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==
'NVC')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def tuple_to_string(tuptup):
""" Converts a tuple to its string representation. Uses different separators (;, /, |) for
different depths of the representation.
Parameters
----------
tuptup : list
Tuple to convert to its string representation.
Returns
-------
str
String representation of the input tuple.
"""
def join_deepest(tup, sep=';'):
""" Recursive function to create the string representation for the deepest level of the
tuptup list.
Parameters
----------
tup : object
Element to join if list or list of lists.
sep : str, optional
Separation character to join the list elements by.
Returns
-------
object
List containing joined string in max depth. Str if input depth = 1.
"""
if not isinstance(tup, list):
return tup
if not isinstance(tup[0], list):
return sep.join(tup)
for idx, val in enumerate(tup):
tup[idx] = join_deepest(val, sep)
return tup
tup = copy.deepcopy(tuptup)
tup = join_deepest(tup, ';')
tup = join_deepest(tup, '/')
tup = join_deepest(tup, '|')
return tup
class Comparator:
""" Comparator base class.
"""
def compare(self, obj_a, obj_b):
""" Base comparison method.
Parameters
----------
obj_a : object
Object A for comparison.
obj_b : object
Object B for comparison.
Returns
-------
object
Comparison result.
"""
raise NotImplementedError()
class EqualityComparator:
""" Equality comparator. Checks if both responses are equal.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on equality.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True if both objects are equal, false otherwise.
"""
return tuple_to_string(obj_a) == tuple_to_string(obj_b)
class NVCComparator:
""" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on their NVCness. Only returns true if both
responses are in agreement with either responding NVC or not NVC.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True only if both objects agree on whether the response is NVC or not.
"""
return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==
'NVC')
<|reserved_special_token_1|>
""" Contains different comparator classes for model output data structures.
"""
import copy
def tuple_to_string(tuptup):
""" Converts a tuple to its string representation. Uses different separators (;, /, |) for
different depths of the representation.
Parameters
----------
tuptup : list
Tuple to convert to its string representation.
Returns
-------
str
String representation of the input tuple.
"""
def join_deepest(tup, sep=';'):
""" Recursive function to create the string representation for the deepest level of the
tuptup list.
Parameters
----------
tup : object
Element to join if list or list of lists.
sep : str, optional
Separation character to join the list elements by.
Returns
-------
object
List containing joined string in max depth. Str if input depth = 1.
"""
if not isinstance(tup, list):
return tup
if not isinstance(tup[0], list):
return sep.join(tup)
for idx, val in enumerate(tup):
tup[idx] = join_deepest(val, sep)
return tup
tup = copy.deepcopy(tuptup)
tup = join_deepest(tup, ';')
tup = join_deepest(tup, '/')
tup = join_deepest(tup, '|')
return tup
class Comparator():
""" Comparator base class.
"""
def compare(self, obj_a, obj_b):
""" Base comparison method.
Parameters
----------
obj_a : object
Object A for comparison.
obj_b : object
Object B for comparison.
Returns
-------
object
Comparison result.
"""
raise NotImplementedError()
class EqualityComparator():
""" Equality comparator. Checks if both responses are equal.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on equality.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True if both objects are equal, false otherwise.
"""
return tuple_to_string(obj_a) == tuple_to_string(obj_b)
class NVCComparator():
""" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on their NVCness. Only returns true if both
responses are in agreement with either responding NVC or not NVC.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True only if both objects agree on whether the response is NVC or not.
"""
return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) == 'NVC')
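# Illustrative usage sketch (the response tuples below are made-up examples, not from
# the original test suite):
# EqualityComparator.compare(['All;x;y'], ['All;x;y'])   # -> True
# NVCComparator.compare(['NVC'], ['All;x;y'])            # -> False (only one side is NVC)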
|
flexible
|
{
"blob_id": "9c935e9ef298484d565256a420b867e800c3df55",
"index": 3243,
"step-1": "<mask token>\n\n\nclass NVCComparator:\n \"\"\" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on their NVCness. Only returns true if both\n responses are in agreement with either responding NVC or not NVC.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True only if both objects agree on whether the response is NVC or not.\n\n \"\"\"\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==\n 'NVC')\n",
"step-2": "<mask token>\n\n\nclass EqualityComparator:\n \"\"\" Equality comparator. Checks if both responses are equal.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on equality.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True if both objects are equal, false otherwise.\n\n \"\"\"\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)\n\n\nclass NVCComparator:\n \"\"\" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on their NVCness. Only returns true if both\n responses are in agreement with either responding NVC or not NVC.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True only if both objects agree on whether the response is NVC or not.\n\n \"\"\"\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==\n 'NVC')\n",
"step-3": "<mask token>\n\n\nclass Comparator:\n <mask token>\n <mask token>\n\n\nclass EqualityComparator:\n \"\"\" Equality comparator. Checks if both responses are equal.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on equality.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True if both objects are equal, false otherwise.\n\n \"\"\"\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)\n\n\nclass NVCComparator:\n \"\"\" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on their NVCness. Only returns true if both\n responses are in agreement with either responding NVC or not NVC.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True only if both objects agree on whether the response is NVC or not.\n\n \"\"\"\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==\n 'NVC')\n",
"step-4": "<mask token>\n\n\ndef tuple_to_string(tuptup):\n \"\"\" Converts a tuple to its string representation. Uses different separators (;, /, |) for\n different depths of the representation.\n\n Parameters\n ----------\n tuptup : list\n Tuple to convert to its string representation.\n\n Returns\n -------\n str\n String representation of the input tuple.\n\n \"\"\"\n\n def join_deepest(tup, sep=';'):\n \"\"\" Recursive function to create the string representation for the deepest level of the\n tuptup list.\n\n Parameters\n ----------\n tup : object\n Element to join if list or list of lists.\n\n sep : str, optional\n Separation character to join the list elements by.\n\n Returns\n -------\n object\n List containing joined string in max depth. Str if input depth = 1.\n\n \"\"\"\n if not isinstance(tup, list):\n return tup\n if not isinstance(tup[0], list):\n return sep.join(tup)\n for idx, val in enumerate(tup):\n tup[idx] = join_deepest(val, sep)\n return tup\n tup = copy.deepcopy(tuptup)\n tup = join_deepest(tup, ';')\n tup = join_deepest(tup, '/')\n tup = join_deepest(tup, '|')\n return tup\n\n\nclass Comparator:\n \"\"\" Comparator base class.\n\n \"\"\"\n\n def compare(self, obj_a, obj_b):\n \"\"\" Base comparison method.\n\n Parameters\n ----------\n obj_a : object\n Object A for comparison.\n\n obj_b : object\n Object B for comparison.\n\n Returns\n -------\n object\n Comparison result.\n\n \"\"\"\n raise NotImplementedError()\n\n\nclass EqualityComparator:\n \"\"\" Equality comparator. Checks if both responses are equal.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on equality.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True if both objects are equal, false otherwise.\n\n \"\"\"\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)\n\n\nclass NVCComparator:\n \"\"\" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on their NVCness. Only returns true if both\n responses are in agreement with either responding NVC or not NVC.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True only if both objects agree on whether the response is NVC or not.\n\n \"\"\"\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==\n 'NVC')\n",
"step-5": "\"\"\" Contains different comparator classes for model output data structures.\n\n\"\"\"\n\nimport copy\n\ndef tuple_to_string(tuptup):\n \"\"\" Converts a tuple to its string representation. Uses different separators (;, /, |) for\n different depths of the representation.\n\n Parameters\n ----------\n tuptup : list\n Tuple to convert to its string representation.\n\n Returns\n -------\n str\n String representation of the input tuple.\n\n \"\"\"\n\n def join_deepest(tup, sep=';'):\n \"\"\" Recursive function to create the string representation for the deepest level of the\n tuptup list.\n\n Parameters\n ----------\n tup : object\n Element to join if list or list of lists.\n\n sep : str, optional\n Separation character to join the list elements by.\n\n Returns\n -------\n object\n List containing joined string in max depth. Str if input depth = 1.\n\n \"\"\"\n\n if not isinstance(tup, list):\n return tup\n if not isinstance(tup[0], list):\n return sep.join(tup)\n\n for idx, val in enumerate(tup):\n tup[idx] = join_deepest(val, sep)\n return tup\n\n tup = copy.deepcopy(tuptup)\n tup = join_deepest(tup, ';')\n tup = join_deepest(tup, '/')\n tup = join_deepest(tup, '|')\n return tup\n\nclass Comparator():\n \"\"\" Comparator base class.\n\n \"\"\"\n\n def compare(self, obj_a, obj_b):\n \"\"\" Base comparison method.\n\n Parameters\n ----------\n obj_a : object\n Object A for comparison.\n\n obj_b : object\n Object B for comparison.\n\n Returns\n -------\n object\n Comparison result.\n\n \"\"\"\n\n raise NotImplementedError()\n\nclass EqualityComparator():\n \"\"\" Equality comparator. Checks if both responses are equal.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on equality.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True if both objects are equal, false otherwise.\n\n \"\"\"\n\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)\n\nclass NVCComparator():\n \"\"\" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on their NVCness. Only returns true if both\n responses are in agreement with either responding NVC or not NVC.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True only if both objects agree on whether the response is NVC or not.\n\n \"\"\"\n\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) == 'NVC')\n",
"step-ids": [
3,
6,
7,
10,
12
]
}
|
[
3,
6,
7,
10,
12
] |
import math
import random
import pygame
pygame.init()
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
clock = pygame.time.Clock()
pygame.display.set_caption('space invaders')
background = pygame.image.load('background.png')
score = 0
previous_score = 0
score_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)
textX = 10
testY = 10
# intro
intro = True
intro_text = "SpaceInvaders"
intro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)
intro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)
# PlayButton
play_button = pygame.image.load('play-button.png')
play_button_X = (SCREEN_WIDTH / 2) - play_button.get_width()
play_button_Y = (SCREEN_HEIGHT / (4 / 3)) - play_button.get_height()
# GameOver
gameover = False
gameover_text = "Game Over"
replay_button = pygame.image.load('replay.png')
# player
player_image = pygame.image.load('spaceship.png')
player_X = 370
player_Y = 480
player_movement = 0
# bullet
bullet_image = pygame.image.load('hot.png')
bullet_X = []
bullet_Y = []
bullet_movement = 0.7
bullet_fired = []
num_bullet = 1
for i in range(num_bullet):
bullet_X.append(0)
bullet_Y.append(player_Y)
bullet_fired.append(False)
# enemy
enemy_image = pygame.image.load('ufo.png')
enemy_X = []
enemy_Y = []
enemy_X_movement = []
enemy_Y_movement = 40
num_enemies = 2
# gamespeedincrement
gamespeed = 0
gamespeed_increment = 0.05
for i in range(num_enemies):
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(0.2)
def player(x, y):
screen.blit(player_image, (x, y))
def fire_bullet(x, y, n):
global bullet_fired
bullet_fired[n] = True
screen.blit(bullet_image, (x + 16, y + 10))
def add_bullet():
global num_bullet
num_bullet += 1
bullet_X.append(0)
bullet_Y.append(player_Y)
bullet_fired.append(False)
def spawn_enemy(x, y):
screen.blit(enemy_image, (x, y))
def add_enemy():
global num_enemies
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(0.2)
num_enemies += 1
def reset_enemy(index):
enemy_X[index] = random.randint(0, 736)
enemy_Y[index] = random.randint(50, 150)
enemy_X_movement[index] = 0.2
def reset_bullet(n):
global bullet_fired, bullet_Y
bullet_fired[n] = False
bullet_Y[n] = player_Y
def isCollion(eX, eY, bX, bY):
distance = math.sqrt(math.pow(eX - bX, 2) + (math.pow(eY - bY, 2)))
if distance < 27:
return True
else:
return False
def show_score():
text = score_font.render("Score: " + str(score), True, (255, 255, 255))
screen.blit(text, (textX, testY))
def show_intro():
show_big_text(intro_text)
show_play_button()
def show_big_text(s):
text = intro_font.render(s, True, (89, 203, 255))
text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))
screen.blit(text, text_rect)
text2 = intro_font2.render(s, True, (250, 50, 183))
text_rect2 = text.get_rect(center=((SCREEN_WIDTH / 2) + 3, (SCREEN_HEIGHT / 2) + 3))
screen.blit(text2, text_rect2)
def show_play_button():
screen.blit(play_button, (play_button_X, play_button_Y))
def show_replay_button():
screen.blit(replay_button, (play_button_X, play_button_Y))
def play_button_clicked():
click = pygame.mouse.get_pressed()
if click[0] == 1:
pos = pygame.mouse.get_pos()
if play_button_X < pos[0] < play_button_X + play_button.get_width():
if play_button_Y < pos[1] < play_button_Y + play_button.get_height():
return True
return False
def game_over_screen():
show_big_text(gameover_text)
show_score()
show_replay_button()
def reset():
global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y
num_enemies = 2
enemy_X = []
enemy_Y = []
for i in range(num_enemies):
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(2)
player_X = 370
player_Y = 480
score = 0
bullet_fired = []
bullet_fired.append(False)
gamespeed = 0
num_bullet = 1
bullet_X = []
bullet_X.append(0)
bullet_Y = []
bullet_Y.append(player_Y)
running = True
while running:
screen.fill((0, 0, 0))
screen.blit(background, (0, 0))
dt = clock.tick(60)
while intro:
show_intro()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if play_button_clicked():
intro = False
pygame.display.update()
while gameover:
game_over_screen()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if play_button_clicked():
reset()
gameover = False
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player_movement = -0.2 - gamespeed
if event.key == pygame.K_RIGHT:
player_movement = 0.2 + gamespeed
if event.key == pygame.K_SPACE:
for i in range(num_bullet):
if not bullet_fired[i]:
bullet_X[i] = player_X
fire_bullet(bullet_X[i], bullet_Y[i], i)
break
if event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
player_movement = 0
# playermovement
player_X += player_movement * dt
if player_X <= 1:
player_X = 1
elif player_X >= 735:
player_X = 735
# bulletmovement
for i in range(num_bullet):
if bullet_Y[i] <= 1:
reset_bullet(i)
if bullet_fired[i]:
bullet_Y[i] -= bullet_movement * dt
fire_bullet(bullet_X[i], bullet_Y[i], i)
# enemy_movement
for i in range(num_enemies):
if enemy_Y[i] >= 440:
gameover = True
for j in range(num_bullet):
if bullet_fired[j]:
collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j], bullet_Y[j])
if collision:
reset_enemy(i)
reset_bullet(j)
score += 1
if score != 0 and previous_score != score:
if score % 3 == 0:
add_enemy()
print("added enemy")
if score % 10 == 0:
gamespeed += gamespeed_increment
print("increased gamespeed")
if score % 20 == 0:
add_bullet()
print("added bullet")
previous_score = score
if enemy_X_movement[i] < 0:
enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt
else:
enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt
if enemy_X[i] <= 1:
enemy_X[i] = 2
enemy_X_movement[i] = -enemy_X_movement[i]
enemy_Y[i] += (enemy_Y_movement + gamespeed)
elif enemy_X[i] >= 735:
enemy_X[i] = 734
enemy_X_movement[i] = -enemy_X_movement[i]
enemy_Y[i] += (enemy_Y_movement + gamespeed)
spawn_enemy(enemy_X[i], enemy_Y[i])
player(player_X, player_Y)
show_score()
pygame.display.update()
|
normal
|
{
"blob_id": "f5dffa3c22bb35ed07cb5ca28f2ba02ea3c07dda",
"index": 1083,
"step-1": "<mask token>\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\ndef add_bullet():\n global num_bullet\n num_bullet += 1\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n\ndef spawn_enemy(x, y):\n screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\ndef add_bullet():\n global num_bullet\n num_bullet += 1\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n\ndef spawn_enemy(x, y):\n screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\n<mask token>\n",
"step-3": "<mask token>\npygame.init()\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nclock = pygame.time.Clock()\npygame.display.set_caption('space invaders')\nbackground = pygame.image.load('background.png')\nscore = 0\nprevious_score = 0\nscore_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)\ntextX = 10\ntestY = 10\nintro = True\nintro_text = 'SpaceInvaders'\nintro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\nintro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\nplay_button = pygame.image.load('play-button.png')\nplay_button_X = SCREEN_WIDTH / 2 - play_button.get_width()\nplay_button_Y = SCREEN_HEIGHT / (4 / 3) - play_button.get_height()\ngameover = False\ngameover_text = 'Game Over'\nreplay_button = pygame.image.load('replay.png')\nplayer_image = pygame.image.load('spaceship.png')\nplayer_X = 370\nplayer_Y = 480\nplayer_movement = 0\nbullet_image = pygame.image.load('hot.png')\nbullet_X = []\nbullet_Y = []\nbullet_movement = 0.7\nbullet_fired = []\nnum_bullet = 1\nfor i in range(num_bullet):\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\nenemy_image = pygame.image.load('ufo.png')\nenemy_X = []\nenemy_Y = []\nenemy_X_movement = []\nenemy_Y_movement = 40\nnum_enemies = 2\ngamespeed = 0\ngamespeed_increment = 0.05\nfor i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\ndef add_bullet():\n global num_bullet\n num_bullet += 1\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n\ndef spawn_enemy(x, y):\n screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n 
show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\nrunning = True\nwhile running:\n screen.fill((0, 0, 0))\n screen.blit(background, (0, 0))\n dt = clock.tick(60)\n while intro:\n show_intro()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if play_button_clicked():\n intro = False\n pygame.display.update()\n while gameover:\n game_over_screen()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if play_button_clicked():\n reset()\n gameover = False\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n player_movement = -0.2 - gamespeed\n if event.key == pygame.K_RIGHT:\n player_movement = 0.2 + gamespeed\n if event.key == pygame.K_SPACE:\n for i in range(num_bullet):\n if not bullet_fired[i]:\n bullet_X[i] = player_X\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n break\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:\n player_movement = 0\n player_X += player_movement * dt\n if player_X <= 1:\n player_X = 1\n elif player_X >= 735:\n player_X = 735\n for i in range(num_bullet):\n if bullet_Y[i] <= 1:\n reset_bullet(i)\n if bullet_fired[i]:\n bullet_Y[i] -= bullet_movement * dt\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n for i in range(num_enemies):\n if enemy_Y[i] >= 440:\n gameover = True\n for j in range(num_bullet):\n if bullet_fired[j]:\n collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j],\n bullet_Y[j])\n if collision:\n reset_enemy(i)\n reset_bullet(j)\n score += 1\n if score != 0 and previous_score != score:\n if score % 3 == 0:\n add_enemy()\n print('added enemy')\n if score % 10 == 0:\n gamespeed += gamespeed_increment\n print('increased gamespeed')\n if score % 20 == 0:\n add_bullet()\n print('added bullet')\n previous_score = score\n if enemy_X_movement[i] < 0:\n enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt\n else:\n enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt\n if enemy_X[i] <= 1:\n enemy_X[i] = 2\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += enemy_Y_movement + gamespeed\n elif enemy_X[i] >= 735:\n enemy_X[i] = 734\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += enemy_Y_movement + gamespeed\n spawn_enemy(enemy_X[i], enemy_Y[i])\n player(player_X, player_Y)\n show_score()\n pygame.display.update()\n",
"step-4": "import math\nimport random\nimport pygame\npygame.init()\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nclock = pygame.time.Clock()\npygame.display.set_caption('space invaders')\nbackground = pygame.image.load('background.png')\nscore = 0\nprevious_score = 0\nscore_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)\ntextX = 10\ntestY = 10\nintro = True\nintro_text = 'SpaceInvaders'\nintro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\nintro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\nplay_button = pygame.image.load('play-button.png')\nplay_button_X = SCREEN_WIDTH / 2 - play_button.get_width()\nplay_button_Y = SCREEN_HEIGHT / (4 / 3) - play_button.get_height()\ngameover = False\ngameover_text = 'Game Over'\nreplay_button = pygame.image.load('replay.png')\nplayer_image = pygame.image.load('spaceship.png')\nplayer_X = 370\nplayer_Y = 480\nplayer_movement = 0\nbullet_image = pygame.image.load('hot.png')\nbullet_X = []\nbullet_Y = []\nbullet_movement = 0.7\nbullet_fired = []\nnum_bullet = 1\nfor i in range(num_bullet):\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\nenemy_image = pygame.image.load('ufo.png')\nenemy_X = []\nenemy_Y = []\nenemy_X_movement = []\nenemy_Y_movement = 40\nnum_enemies = 2\ngamespeed = 0\ngamespeed_increment = 0.05\nfor i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\ndef add_bullet():\n global num_bullet\n num_bullet += 1\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n\ndef spawn_enemy(x, y):\n screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef 
game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\nrunning = True\nwhile running:\n screen.fill((0, 0, 0))\n screen.blit(background, (0, 0))\n dt = clock.tick(60)\n while intro:\n show_intro()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if play_button_clicked():\n intro = False\n pygame.display.update()\n while gameover:\n game_over_screen()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if play_button_clicked():\n reset()\n gameover = False\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n player_movement = -0.2 - gamespeed\n if event.key == pygame.K_RIGHT:\n player_movement = 0.2 + gamespeed\n if event.key == pygame.K_SPACE:\n for i in range(num_bullet):\n if not bullet_fired[i]:\n bullet_X[i] = player_X\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n break\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:\n player_movement = 0\n player_X += player_movement * dt\n if player_X <= 1:\n player_X = 1\n elif player_X >= 735:\n player_X = 735\n for i in range(num_bullet):\n if bullet_Y[i] <= 1:\n reset_bullet(i)\n if bullet_fired[i]:\n bullet_Y[i] -= bullet_movement * dt\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n for i in range(num_enemies):\n if enemy_Y[i] >= 440:\n gameover = True\n for j in range(num_bullet):\n if bullet_fired[j]:\n collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j],\n bullet_Y[j])\n if collision:\n reset_enemy(i)\n reset_bullet(j)\n score += 1\n if score != 0 and previous_score != score:\n if score % 3 == 0:\n add_enemy()\n print('added enemy')\n if score % 10 == 0:\n gamespeed += gamespeed_increment\n print('increased gamespeed')\n if score % 20 == 0:\n add_bullet()\n print('added bullet')\n previous_score = score\n if enemy_X_movement[i] < 0:\n enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt\n else:\n enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt\n if enemy_X[i] <= 1:\n enemy_X[i] = 2\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += enemy_Y_movement + gamespeed\n elif enemy_X[i] >= 735:\n enemy_X[i] = 734\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += enemy_Y_movement + gamespeed\n spawn_enemy(enemy_X[i], enemy_Y[i])\n player(player_X, player_Y)\n show_score()\n pygame.display.update()\n",
"step-5": "import math\nimport random\n\nimport pygame\n\npygame.init()\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\nclock = pygame.time.Clock()\n\npygame.display.set_caption('space invaders')\n\nbackground = pygame.image.load('background.png')\n\nscore = 0\nprevious_score = 0\nscore_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)\ntextX = 10\ntestY = 10\n\n# intro\nintro = True\nintro_text = \"SpaceInvaders\"\nintro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\nintro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\n\n# PlayButton\nplay_button = pygame.image.load('play-button.png')\nplay_button_X = (SCREEN_WIDTH / 2) - play_button.get_width()\nplay_button_Y = (SCREEN_HEIGHT / (4 / 3)) - play_button.get_height()\n\n# GameOver\ngameover = False\ngameover_text = \"Game Over\"\nreplay_button = pygame.image.load('replay.png')\n\n# player\nplayer_image = pygame.image.load('spaceship.png')\nplayer_X = 370\nplayer_Y = 480\nplayer_movement = 0\n\n# bullet\nbullet_image = pygame.image.load('hot.png')\nbullet_X = []\nbullet_Y = []\nbullet_movement = 0.7\nbullet_fired = []\nnum_bullet = 1\nfor i in range(num_bullet):\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n# enemy\nenemy_image = pygame.image.load('ufo.png')\nenemy_X = []\nenemy_Y = []\nenemy_X_movement = []\nenemy_Y_movement = 40\nnum_enemies = 2\n\n# gamespeedincrement\ngamespeed = 0\ngamespeed_increment = 0.05\n\nfor i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\ndef add_bullet():\n global num_bullet\n num_bullet += 1\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n\ndef spawn_enemy(x, y):\n screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + (math.pow(eY - bY, 2)))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render(\"Score: \" + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=((SCREEN_WIDTH / 2) + 3, (SCREEN_HEIGHT / 2) + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + 
play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height():\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\nrunning = True\nwhile running:\n\n screen.fill((0, 0, 0))\n screen.blit(background, (0, 0))\n dt = clock.tick(60)\n\n while intro:\n show_intro()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if play_button_clicked():\n intro = False\n\n pygame.display.update()\n\n while gameover:\n game_over_screen()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if play_button_clicked():\n reset()\n gameover = False\n\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n player_movement = -0.2 - gamespeed\n if event.key == pygame.K_RIGHT:\n player_movement = 0.2 + gamespeed\n if event.key == pygame.K_SPACE:\n for i in range(num_bullet):\n if not bullet_fired[i]:\n bullet_X[i] = player_X\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n break\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:\n player_movement = 0\n\n # playermovement\n player_X += player_movement * dt\n if player_X <= 1:\n player_X = 1\n elif player_X >= 735:\n player_X = 735\n\n # bulletmovement\n for i in range(num_bullet):\n if bullet_Y[i] <= 1:\n reset_bullet(i)\n if bullet_fired[i]:\n bullet_Y[i] -= bullet_movement * dt\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n\n # enemy_movement\n for i in range(num_enemies):\n if enemy_Y[i] >= 440:\n gameover = True\n\n for j in range(num_bullet):\n if bullet_fired[j]:\n collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j], bullet_Y[j])\n if collision:\n reset_enemy(i)\n reset_bullet(j)\n score += 1\n\n if score != 0 and previous_score != score:\n if score % 3 == 0:\n add_enemy()\n print(\"added enemy\")\n if score % 10 == 0:\n gamespeed += gamespeed_increment\n print(\"increased gamespeed\")\n if score % 20 == 0:\n add_bullet()\n print(\"added bullet\")\n previous_score = score\n\n if enemy_X_movement[i] < 0:\n enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt\n else:\n enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt\n if enemy_X[i] <= 1:\n enemy_X[i] = 2\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += (enemy_Y_movement + gamespeed)\n elif enemy_X[i] >= 735:\n enemy_X[i] = 734\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += (enemy_Y_movement + gamespeed)\n\n spawn_enemy(enemy_X[i], enemy_Y[i])\n\n player(player_X, player_Y)\n show_score()\n pygame.display.update()\n",
"step-ids": [
15,
16,
18,
19,
20
]
}
|
[
15,
16,
18,
19,
20
] |
from collections import Counter
N = int(input())
lst = list(map(int, input().split()))
ans = []
for i in range(N):
ans.append(abs(i + 1 - lst[i]))
s = Counter(ans)
rst = []
for i in s:
rst.append([i, s[i]])
rst.sort(key=lambda x: x[0], reverse=True)
for i in rst:
if i[1] > 1:
print(i[0], i[1])
|
normal
|
{
"blob_id": "decd5d50025fc3b639be2f803d917ff313cf7219",
"index": 8838,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(N):\n ans.append(abs(i + 1 - lst[i]))\n<mask token>\nfor i in s:\n rst.append([i, s[i]])\nrst.sort(key=lambda x: x[0], reverse=True)\nfor i in rst:\n if i[1] > 1:\n print(i[0], i[1])\n",
"step-3": "<mask token>\nN = int(input())\nlst = list(map(int, input().split()))\nans = []\nfor i in range(N):\n ans.append(abs(i + 1 - lst[i]))\ns = Counter(ans)\nrst = []\nfor i in s:\n rst.append([i, s[i]])\nrst.sort(key=lambda x: x[0], reverse=True)\nfor i in rst:\n if i[1] > 1:\n print(i[0], i[1])\n",
"step-4": "from collections import Counter\nN = int(input())\nlst = list(map(int, input().split()))\nans = []\nfor i in range(N):\n ans.append(abs(i + 1 - lst[i]))\ns = Counter(ans)\nrst = []\nfor i in s:\n rst.append([i, s[i]])\nrst.sort(key=lambda x: x[0], reverse=True)\nfor i in rst:\n if i[1] > 1:\n print(i[0], i[1])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
RANGES = {
# Intervalles de la gamme majeure
0: [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1],
# Intervalles de la gamme mineure naturelle
1: [1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0],
# Intervalles de la gamme mineure harmonique
2: [1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1]
}
RANGES_NAMES = {
'fr': ['Majeur', 'Mineur naturel', 'Mineur harmonique']
}
# Nombre total de notes
N = 12
# Nombre de nombre par gamme
N_T = 7
NOTES = {
'fr': ['DO', 'DO#', 'RE', 'RE#', 'MI', 'FA', 'FA#', 'SOL', 'SOL#', 'LA', 'LA#', 'SI']
}
CHORDS = {
'fr': {
0: ['', 'm', 'm', '', '', 'm', 'dim'],
1: ['m', 'dim', '', 'm', 'm', '', ''],
2: ['', 'm', 'm', '', '', 'm', 'dim']
}
}
def get_notes_from_range(r, t):
""" Return all notes from a given range"""
# calcul du tableau de notes
tab = []
for i in range(N):
n = (i - t)%N
tab.append(RANGES[r][n])
return tab
def get_range_chords(r):
return []
def export_range(res, lg):
notes = [NOTES[lg][(n + res['keynote'] )% 12] for n in range(N) if res['notes'][(n + res['keynote'] )% 12]]
return {
'keynote': NOTES[lg][res['keynote']],
'range': RANGES_NAMES[lg][res['range']],
'notes': notes,
'pourcentage': res['pourcentage']
# 'Accords': [notes[i] + CHORDS[lg][res['range']][i] for i in range(N_T)]
}
def print_range(r):
    print(r['Tonique'] + ' ' + r['Gamme'])
    print(r['Accords'])
    print()
## traitement
def range_ranking(given_notes):
result = []
# pour chaque tonique:
for t in range(N):
# pour chaque mode:
#for m in range(0, 12):
# pour chaque gamme:
for r in range(len(RANGES)):
# re-initialisation du pourcentage
pourcentage = 0.0
# obtention de toutes les notes de la gamme consideree
range_notes = get_notes_from_range(r, t)
# pour chaque note connue:
for i in given_notes:
# si la note connue est dans la gamme:
if range_notes[i] == 1:
#alors pourcentage += 1
pourcentage += 1
else:
pourcentage -= 1
pourcentage = (pourcentage/len(given_notes)) * 100
result.append({'keynote': t,
# 'mode': m,
'range': r,
'notes': range_notes,
'pourcentage': pourcentage})
return result
def main(notes, lg):
# Compute pourcentage for every registered ranges
unsorted_ranking = range_ranking(notes)
sorted_ranking = sorted(unsorted_ranking, key=lambda g: g['pourcentage'], reverse=True)
best_results = [r for r in sorted_ranking if r['pourcentage'] == sorted_ranking[0]['pourcentage']]
return best_results
def get_ranges(given_notes, lg='fr'):
errors = {}
results = []
# Clean user entry
    print('g' + str(given_notes))
    notes = [NOTES['fr'].index(n) for n in given_notes]
    print('n' + str(notes))
try:
best_results = main(notes, lg)
except Exception as e:
errors['status'] = 'error'
errors['message'] = e
return errors
errors['status'] = 'success'
errors['message'] = ''
errors['result'] = [export_range(r, lg) for r in best_results]
return errors
if __name__ == '__main__':
#TODO: Test that arrays have consistents length
# Get entry from user
notes = [0, 2, 4, 5, 7, 9, 11]
lg = 'fr'
    print([NOTES[lg][i] for i in notes])
    print()
    print("Ces notes correspondent a la gamme:")
#TODO: Clean user entry
best_results = main(notes, lg)
for r in best_results:
        print(export_range(r, lg))
|
normal
|
{
"blob_id": "18bad56ff6d230e63e83174672b8aa8625c1ebb4",
"index": 994,
"step-1": "\nRANGES = {\n # Intervalles de la gamme majeure\n 0: [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1], \n # Intervalles de la gamme mineure naturelle\n 1: [1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0],\n # Intervalles de la gamme mineure harmonique \n 2: [1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1] \n}\n\nRANGES_NAMES = {\n 'fr': ['Majeur', 'Mineur naturel', 'Mineur harmonique']\n}\n\n# Nombre total de notes\nN = 12\n\n# Nombre de nombre par gamme\nN_T = 7\n\nNOTES = {\n 'fr': ['DO', 'DO#', 'RE', 'RE#', 'MI', 'FA', 'FA#', 'SOL', 'SOL#', 'LA', 'LA#', 'SI']\n}\n\nCHORDS = {\n 'fr': {\n 0: ['', 'm', 'm', '', '', 'm', 'dim'],\n 1: ['m', 'dim', '', 'm', 'm', '', ''], \n 2: ['', 'm', 'm', '', '', 'm', 'dim']\n }\n}\n\ndef get_notes_from_range(r, t):\n \"\"\" Return all notes from a given range\"\"\"\n # calcul du tableau de notes\n tab = []\n for i in range(N): \n n = (i - t)%N\n tab.append(RANGES[r][n])\n \n return tab \n \ndef get_range_chords(r):\n return []\n \n\ndef export_range(res, lg):\n notes = [NOTES[lg][(n + res['keynote'] )% 12] for n in range(N) if res['notes'][(n + res['keynote'] )% 12]]\n return {\n 'keynote': NOTES[lg][res['keynote']], \n 'range': RANGES_NAMES[lg][res['range']], \n 'notes': notes, \n 'pourcentage': res['pourcentage']\n # 'Accords': [notes[i] + CHORDS[lg][res['range']][i] for i in range(N_T)]\n }\n \n \ndef print_range(r):\n print r['Tonique'] + ' ' + r['Gamme']\n print r['Accords']\n print \n \n\n## traitement\ndef range_ranking(given_notes):\n result = []\n\n # pour chaque tonique:\n for t in range(N):\n # pour chaque mode:\n #for m in range(0, 12):\n # pour chaque gamme:\n for r in range(len(RANGES)):\n # re-initialisation du pourcentage\n pourcentage = 0.0\n # obtention de toutes les notes de la gamme consideree\n range_notes = get_notes_from_range(r, t) \n # pour chaque note connue:\n for i in given_notes:\n # si la note connue est dans la gamme:\n if range_notes[i] == 1:\n #alors pourcentage += 1\n pourcentage += 1\n else:\n pourcentage -= 1\n \n pourcentage = (pourcentage/len(given_notes)) * 100\n result.append({'keynote': t, \n # 'mode': m,\n 'range': r,\n 'notes': range_notes,\n 'pourcentage': pourcentage})\n\n return result\n\ndef main(notes, lg):\n # Compute pourcentage for every registered ranges\n unsorted_ranking = range_ranking(notes)\n sorted_ranking = sorted(unsorted_ranking, key=lambda g: g['pourcentage'], reverse=True)\n \n best_results = [r for r in sorted_ranking if r['pourcentage'] == sorted_ranking[0]['pourcentage']]\n return best_results\n\n\ndef get_ranges(given_notes, lg='fr'):\n \n errors = {}\n results = []\n # Clean user entry\n print 'g' + str(given_notes)\n notes = [NOTES['fr'].index(n) for n in given_notes]\n\n print 'n' + str(notes)\n\n try:\n best_results = main(notes, lg)\n except Exception as e:\n errors['status'] = 'error'\n errors['message'] = e\n return errors\n\n errors['status'] = 'success'\n errors['message'] = ''\n errors['result'] = [export_range(r, lg) for r in best_results]\n\n return errors\n\n\nif __name__ == '__main__':\n\n #TODO: Test that arrays have consistents length\n \n # Get entry from user\n notes = [0, 2, 4, 5, 7, 9, 11]\n lg = 'fr'\n print [NOTES[lg][i] for i in notes]\n print\n print \"Ces notes correspondent a la gamme:\"\n \n #TODO: Clean user entry\n\n best_results = main(notes, lg)\n \n for r in best_results:\n print export_range(r, lg)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
"""
demo_mininet_topo.py
Sample topology class with Mininet.
G = {V, E}
V = {h1, h2, h3, h4, h51, h52, s0, s1, s4, s5}
# of hosts = 6
# of switches = 4
E = {
(h1, s1), (h2, s1), (h3, s1),
(h4, s4),
(h51, s5), (h52, s5),
(s0, s1), (s0, s4), (s5, s4)
}
"""
from mininet.topo import Topo
class DemoTopology(Topo):
def __init__(self):
Topo.__init__(self)
# Add some hosts
h1 = self.h1 = self.addHost('h1')
h2 = self.h2 = self.addHost('h2')
h3 = self.h3 = self.addHost('h3')
h4 = self.h4 = self.addHost('h4')
h51 = self.h51 = self.addHost('h51')
h52 = self.h52 = self.addHost('h52')
# Add switches
s0 = self.s0 = self.addSwitch('s0')
s1 = self.s1 = self.addSwitch('s1')
s4 = self.s4 = self.addSwitch('s4')
s5 = self.s5 = self.addSwitch('s5')
# Link hosts with switches
self.addLink(h1, s1)
self.addLink(h2, s1)
self.addLink(h3, s1)
self.addLink(h4, s4)
self.addLink(h51, s5)
self.addLink(h52, s5)
# Link switches with switches
self.addLink(s0, s1)
self.addLink(s0, s4)
self.addLink(s5, s4)
topos = {
'demo': lambda: DemoTopology()
}
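# A minimal direct-run sketch, assuming Mininet is installed and this file is
# saved as demo_mininet_topo.py (per the docstring); the usual route is
# `sudo mn --custom demo_mininet_topo.py --topo demo`, where 'demo' is the key
# registered in the topos dict above.
if __name__ == '__main__':
    from mininet.net import Mininet
    from mininet.cli import CLI
    net = Mininet(topo=DemoTopology())
    net.start()
    CLI(net)  # interactive prompt; `pingall` exercises all six hosts
    net.stop()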
|
normal
|
{
"blob_id": "8c69813bc576a56c25c828fe24e2707e65ac0d0d",
"index": 5628,
"step-1": "<mask token>\n\n\nclass DemoTopology(Topo):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DemoTopology(Topo):\n\n def __init__(self):\n Topo.__init__(self)\n h1 = self.h1 = self.addHost('h1')\n h2 = self.h2 = self.addHost('h2')\n h3 = self.h3 = self.addHost('h3')\n h4 = self.h4 = self.addHost('h4')\n h51 = self.h51 = self.addHost('h51')\n h52 = self.h52 = self.addHost('h52')\n s0 = self.s0 = self.addSwitch('s0')\n s1 = self.s1 = self.addSwitch('s1')\n s4 = self.s4 = self.addSwitch('s4')\n s5 = self.s5 = self.addSwitch('s5')\n self.addLink(h1, s1)\n self.addLink(h2, s1)\n self.addLink(h3, s1)\n self.addLink(h4, s4)\n self.addLink(h51, s5)\n self.addLink(h52, s5)\n self.addLink(s0, s1)\n self.addLink(s0, s4)\n self.addLink(s5, s4)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DemoTopology(Topo):\n\n def __init__(self):\n Topo.__init__(self)\n h1 = self.h1 = self.addHost('h1')\n h2 = self.h2 = self.addHost('h2')\n h3 = self.h3 = self.addHost('h3')\n h4 = self.h4 = self.addHost('h4')\n h51 = self.h51 = self.addHost('h51')\n h52 = self.h52 = self.addHost('h52')\n s0 = self.s0 = self.addSwitch('s0')\n s1 = self.s1 = self.addSwitch('s1')\n s4 = self.s4 = self.addSwitch('s4')\n s5 = self.s5 = self.addSwitch('s5')\n self.addLink(h1, s1)\n self.addLink(h2, s1)\n self.addLink(h3, s1)\n self.addLink(h4, s4)\n self.addLink(h51, s5)\n self.addLink(h52, s5)\n self.addLink(s0, s1)\n self.addLink(s0, s4)\n self.addLink(s5, s4)\n\n\ntopos = {'demo': lambda : DemoTopology()}\n",
"step-4": "<mask token>\nfrom mininet.topo import Topo\n\n\nclass DemoTopology(Topo):\n\n def __init__(self):\n Topo.__init__(self)\n h1 = self.h1 = self.addHost('h1')\n h2 = self.h2 = self.addHost('h2')\n h3 = self.h3 = self.addHost('h3')\n h4 = self.h4 = self.addHost('h4')\n h51 = self.h51 = self.addHost('h51')\n h52 = self.h52 = self.addHost('h52')\n s0 = self.s0 = self.addSwitch('s0')\n s1 = self.s1 = self.addSwitch('s1')\n s4 = self.s4 = self.addSwitch('s4')\n s5 = self.s5 = self.addSwitch('s5')\n self.addLink(h1, s1)\n self.addLink(h2, s1)\n self.addLink(h3, s1)\n self.addLink(h4, s4)\n self.addLink(h51, s5)\n self.addLink(h52, s5)\n self.addLink(s0, s1)\n self.addLink(s0, s4)\n self.addLink(s5, s4)\n\n\ntopos = {'demo': lambda : DemoTopology()}\n",
"step-5": "#!/usr/bin/python\n\n\"\"\"\ndemo_mininet_topo.py\n\nSample topology class with Mininet.\n\nG = {V, E}\nV = {h1, h2, h3, h4, h51, h52, s0, s1, s4, s5}\n\t# of hosts = 6\n\t# of switches = 4\nE = {\n\t\t(h1, s1), (h2, s1), (h3, s1), \n\t \t(h4, s4), \n\t\t(h51, s5), (h52, s5), \n\t\t(s0, s1), (s0, s4), (s5, s4)\n\t}\n\"\"\"\n\nfrom mininet.topo import Topo\n\nclass DemoTopology(Topo):\n\t\n\tdef __init__(self):\n\t\t\n\t\tTopo.__init__(self)\n\t\t\n\t\t# Add some hosts\n\t\th1 = self.h1 = self.addHost('h1')\n\t\th2 = self.h2 = self.addHost('h2')\n\t\th3 = self.h3 = self.addHost('h3')\n\t\th4 = self.h4 = self.addHost('h4')\n\t\th51 = self.h51 = self.addHost('h51')\n\t\th52 = self.h52 = self.addHost('h52')\n\t\t\n\t\t# Add switches\n\t\ts0 = self.s0 = self.addSwitch('s0')\n\t\ts1 = self.s1 = self.addSwitch('s1')\n\t\ts4 = self.s4 = self.addSwitch('s4')\n\t\ts5 = self.s5 = self.addSwitch('s5')\n\t\t\n\t\t# Link hosts with switches\n\t\tself.addLink(h1, s1)\n\t\tself.addLink(h2, s1)\n\t\tself.addLink(h3, s1)\n\t\tself.addLink(h4, s4)\n\t\tself.addLink(h51, s5)\n\t\tself.addLink(h52, s5)\n\t\t\n\t\t# Link switches with switches\n\t\tself.addLink(s0, s1)\n\t\tself.addLink(s0, s4)\n\t\tself.addLink(s5, s4)\n\t\ntopos = {\n\t'demo': lambda: DemoTopology()\n}\t",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class ChatRoomScreen(Screen):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def schedule_update_display_info(self, *args):
Clock.schedule_interval(self.update_display_info, 1)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update_display_info(self, *args):
if (self.chat_history != self.parent.client_protocol.chat_history.
history_string):
self.chat_history = (self.parent.client_protocol.chat_history.
history_string)
if self.user_list != self.parent.client_protocol.user_list:
print('User List mismatch')
self.user_list = self.parent.client_protocol.user_list
self.update_user_list_buttons()
if self.parent.client_protocol.server_shutdown:
self.server_shutdown()
def next_message_private(self, user):
current_text = self.ids.message.text
self.ids.message.text = ''
current_text = '@{}, '.format(user) + current_text
self.ids.message.text = current_text
def server_shutdown(self):
print('SERVER SHUTDOWN')
self.popup = ServerShutdownPopup()
self.popup.open()
def schedule_clear_input_box(self):
Clock.schedule_once(self.clear_input_box, 0.25)
def clear_input_box(self, *args):
self.ids.message.text = ''
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StartScreen(Screen):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ChatRoomScreen(Screen):
chat_history = StringProperty('')
user_list = StringProperty('')
def on_enter(self):
Clock.schedule_once(self.schedule_update_display_info)
def schedule_update_display_info(self, *args):
Clock.schedule_interval(self.update_display_info, 1)
def update_user_list_buttons(self):
self.clear_user_list_display()
for user in self.user_list.split('\n'):
button = ModalPopupButton(text=user)
self.ids.user_list.add_widget(button)
self.ids.user_list.add_widget(Widget())
def clear_user_list_display(self):
self.ids.user_list.clear_widgets()
def update_display_info(self, *args):
if (self.chat_history != self.parent.client_protocol.chat_history.
history_string):
self.chat_history = (self.parent.client_protocol.chat_history.
history_string)
if self.user_list != self.parent.client_protocol.user_list:
print('User List mismatch')
self.user_list = self.parent.client_protocol.user_list
self.update_user_list_buttons()
if self.parent.client_protocol.server_shutdown:
self.server_shutdown()
def next_message_private(self, user):
current_text = self.ids.message.text
self.ids.message.text = ''
current_text = '@{}, '.format(user) + current_text
self.ids.message.text = current_text
def server_shutdown(self):
print('SERVER SHUTDOWN')
self.popup = ServerShutdownPopup()
self.popup.open()
def schedule_clear_input_box(self):
Clock.schedule_once(self.clear_input_box, 0.25)
def clear_input_box(self, *args):
self.ids.message.text = ''
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StartScreen(Screen):
def attempt_to_connect(self, server_ip, username, password):
self.parent.client_protocol.start_connection(server_ip, username,
password)
self.open_connecting_popup()
self.timeout = 0
self.wait_For_server_response_event = Clock.schedule_interval(self.
wait_for_server_response, 1)
def wait_for_server_response(self, *args):
print(self.timeout)
if self.parent.client_protocol.login_success:
self.popup.dismiss()
self.wait_For_server_response_event.cancel()
self.parent.current = 'ChatRoomScreen'
elif self.timeout == 5:
self.failed_to_connect(message=
'Failed to connect to server. Please try again or check your network connection.'
)
elif self.parent.client_protocol.invalid_credentials:
self.parent.client_protocol.invalid_credentials = False
self.failed_to_connect(message=
'Invalid username/password combination. Please try again.')
else:
self.timeout += 1
def failed_to_connect(self, message):
print('FAILED TO CONNECT')
self.popup.dismiss()
self.open_failed_popup(message=message)
self.wait_For_server_response_event.cancel()
def open_connecting_popup(self):
self.popup = SubmissionPopup()
self.popup.open()
def open_failed_popup(self, message):
self.popup = FailedSubmissionPopup(message=message)
self.popup.open()
class ChatRoomScreen(Screen):
chat_history = StringProperty('')
user_list = StringProperty('')
def on_enter(self):
Clock.schedule_once(self.schedule_update_display_info)
def schedule_update_display_info(self, *args):
Clock.schedule_interval(self.update_display_info, 1)
def update_user_list_buttons(self):
self.clear_user_list_display()
for user in self.user_list.split('\n'):
button = ModalPopupButton(text=user)
self.ids.user_list.add_widget(button)
self.ids.user_list.add_widget(Widget())
def clear_user_list_display(self):
self.ids.user_list.clear_widgets()
def update_display_info(self, *args):
if (self.chat_history != self.parent.client_protocol.chat_history.
history_string):
self.chat_history = (self.parent.client_protocol.chat_history.
history_string)
if self.user_list != self.parent.client_protocol.user_list:
print('User List mismatch')
self.user_list = self.parent.client_protocol.user_list
self.update_user_list_buttons()
if self.parent.client_protocol.server_shutdown:
self.server_shutdown()
def next_message_private(self, user):
current_text = self.ids.message.text
self.ids.message.text = ''
current_text = '@{}, '.format(user) + current_text
self.ids.message.text = current_text
def server_shutdown(self):
print('SERVER SHUTDOWN')
self.popup = ServerShutdownPopup()
self.popup.open()
def schedule_clear_input_box(self):
Clock.schedule_once(self.clear_input_box, 0.25)
def clear_input_box(self, *args):
self.ids.message.text = ''
<|reserved_special_token_1|>
<|reserved_special_token_0|>
PORT = 1776
TIME_UNIT = 'MINUTES'
class RootScreen(ScreenManager):
def __init__(self, client_protocol, **kwargs):
super().__init__(**kwargs)
self.client_protocol = client_protocol
class StartScreen(Screen):
def attempt_to_connect(self, server_ip, username, password):
self.parent.client_protocol.start_connection(server_ip, username,
password)
self.open_connecting_popup()
self.timeout = 0
self.wait_For_server_response_event = Clock.schedule_interval(self.
wait_for_server_response, 1)
def wait_for_server_response(self, *args):
print(self.timeout)
if self.parent.client_protocol.login_success:
self.popup.dismiss()
self.wait_For_server_response_event.cancel()
self.parent.current = 'ChatRoomScreen'
elif self.timeout == 5:
self.failed_to_connect(message=
'Failed to connect to server. Please try again or check your network connection.'
)
elif self.parent.client_protocol.invalid_credentials:
self.parent.client_protocol.invalid_credentials = False
self.failed_to_connect(message=
'Invalid username/password combination. Please try again.')
else:
self.timeout += 1
def failed_to_connect(self, message):
print('FAILED TO CONNECT')
self.popup.dismiss()
self.open_failed_popup(message=message)
self.wait_For_server_response_event.cancel()
def open_connecting_popup(self):
self.popup = SubmissionPopup()
self.popup.open()
def open_failed_popup(self, message):
self.popup = FailedSubmissionPopup(message=message)
self.popup.open()
class ChatRoomScreen(Screen):
chat_history = StringProperty('')
user_list = StringProperty('')
def on_enter(self):
Clock.schedule_once(self.schedule_update_display_info)
def schedule_update_display_info(self, *args):
Clock.schedule_interval(self.update_display_info, 1)
def update_user_list_buttons(self):
self.clear_user_list_display()
for user in self.user_list.split('\n'):
button = ModalPopupButton(text=user)
self.ids.user_list.add_widget(button)
self.ids.user_list.add_widget(Widget())
def clear_user_list_display(self):
self.ids.user_list.clear_widgets()
def update_display_info(self, *args):
if (self.chat_history != self.parent.client_protocol.chat_history.
history_string):
self.chat_history = (self.parent.client_protocol.chat_history.
history_string)
if self.user_list != self.parent.client_protocol.user_list:
print('User List mismatch')
self.user_list = self.parent.client_protocol.user_list
self.update_user_list_buttons()
if self.parent.client_protocol.server_shutdown:
self.server_shutdown()
def next_message_private(self, user):
current_text = self.ids.message.text
self.ids.message.text = ''
current_text = '@{}, '.format(user) + current_text
self.ids.message.text = current_text
def server_shutdown(self):
print('SERVER SHUTDOWN')
self.popup = ServerShutdownPopup()
self.popup.open()
def schedule_clear_input_box(self):
Clock.schedule_once(self.clear_input_box, 0.25)
def clear_input_box(self, *args):
self.ids.message.text = ''
<|reserved_special_token_1|>
# Standard Library Imports
# Third Party Imports
from kivy.clock import Clock
from kivy.properties import StringProperty
from kivy.uix.screenmanager import Screen, ScreenManager
from kivy.uix.widget import Widget
# Local Imports
from client.source.ui.kv_widgets import ModalPopupButton, SubmissionPopup, FailedSubmissionPopup, ServerShutdownPopup
# ====================================
# CONSTANTS
# ====================================
PORT = 1776
# ====================================
# PARAMETERS
# ====================================
TIME_UNIT = 'MINUTES'
class RootScreen(ScreenManager):
def __init__(self, client_protocol, **kwargs):
super().__init__(**kwargs)
self.client_protocol = client_protocol
class StartScreen(Screen):
def attempt_to_connect(self, server_ip, username, password):
self.parent.client_protocol.start_connection(server_ip, username, password)
self.open_connecting_popup()
self.timeout = 0
self.wait_For_server_response_event = Clock.schedule_interval(self.wait_for_server_response, 1)
def wait_for_server_response(self, *args):
print(self.timeout)
# Login success
if self.parent.client_protocol.login_success:
self.popup.dismiss()
self.wait_For_server_response_event.cancel()
self.parent.current = 'ChatRoomScreen'
# Timeout
elif self.timeout == 5:
self.failed_to_connect(message='Failed to connect to server. Please try again or check your network connection.')
# Invalid credentials
elif self.parent.client_protocol.invalid_credentials:
self.parent.client_protocol.invalid_credentials = False
self.failed_to_connect(message='Invalid username/password combination. Please try again.')
else:
self.timeout += 1
def failed_to_connect(self, message):
print("FAILED TO CONNECT")
self.popup.dismiss()
self.open_failed_popup(message=message)
self.wait_For_server_response_event.cancel()
def open_connecting_popup(self):
self.popup = SubmissionPopup()
self.popup.open()
def open_failed_popup(self, message):
self.popup = FailedSubmissionPopup(message=message)
self.popup.open()
class ChatRoomScreen(Screen):
chat_history = StringProperty('')
user_list = StringProperty('')
def on_enter(self):
Clock.schedule_once(self.schedule_update_display_info)
def schedule_update_display_info(self, *args):
Clock.schedule_interval(self.update_display_info, 1)
def update_user_list_buttons(self):
self.clear_user_list_display()
for user in self.user_list.split("\n"):
button = ModalPopupButton(text=user)
self.ids.user_list.add_widget(button)
self.ids.user_list.add_widget(Widget())
def clear_user_list_display(self):
self.ids.user_list.clear_widgets()
def update_display_info(self, *args):
if self.chat_history != self.parent.client_protocol.chat_history.history_string:
self.chat_history = self.parent.client_protocol.chat_history.history_string
if self.user_list != self.parent.client_protocol.user_list:
print("User List mismatch")
self.user_list = self.parent.client_protocol.user_list
self.update_user_list_buttons()
if self.parent.client_protocol.server_shutdown:
self.server_shutdown()
def next_message_private(self, user):
current_text = self.ids.message.text
self.ids.message.text = ''
current_text = "@{}, ".format(user) + current_text
self.ids.message.text = current_text
def server_shutdown(self):
print("SERVER SHUTDOWN")
self.popup = ServerShutdownPopup()
self.popup.open()
def schedule_clear_input_box(self):
Clock.schedule_once(self.clear_input_box, 0.25)
def clear_input_box(self, *args):
self.ids.message.text = ''
|
flexible
|
{
"blob_id": "327e9dcba49419b8a8c320940e333765c1d9b980",
"index": 5997,
"step-1": "<mask token>\n\n\nclass ChatRoomScreen(Screen):\n <mask token>\n <mask token>\n <mask token>\n\n def schedule_update_display_info(self, *args):\n Clock.schedule_interval(self.update_display_info, 1)\n <mask token>\n <mask token>\n\n def update_display_info(self, *args):\n if (self.chat_history != self.parent.client_protocol.chat_history.\n history_string):\n self.chat_history = (self.parent.client_protocol.chat_history.\n history_string)\n if self.user_list != self.parent.client_protocol.user_list:\n print('User List mismatch')\n self.user_list = self.parent.client_protocol.user_list\n self.update_user_list_buttons()\n if self.parent.client_protocol.server_shutdown:\n self.server_shutdown()\n\n def next_message_private(self, user):\n current_text = self.ids.message.text\n self.ids.message.text = ''\n current_text = '@{}, '.format(user) + current_text\n self.ids.message.text = current_text\n\n def server_shutdown(self):\n print('SERVER SHUTDOWN')\n self.popup = ServerShutdownPopup()\n self.popup.open()\n\n def schedule_clear_input_box(self):\n Clock.schedule_once(self.clear_input_box, 0.25)\n\n def clear_input_box(self, *args):\n self.ids.message.text = ''\n",
"step-2": "<mask token>\n\n\nclass StartScreen(Screen):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ChatRoomScreen(Screen):\n chat_history = StringProperty('')\n user_list = StringProperty('')\n\n def on_enter(self):\n Clock.schedule_once(self.schedule_update_display_info)\n\n def schedule_update_display_info(self, *args):\n Clock.schedule_interval(self.update_display_info, 1)\n\n def update_user_list_buttons(self):\n self.clear_user_list_display()\n for user in self.user_list.split('\\n'):\n button = ModalPopupButton(text=user)\n self.ids.user_list.add_widget(button)\n self.ids.user_list.add_widget(Widget())\n\n def clear_user_list_display(self):\n self.ids.user_list.clear_widgets()\n\n def update_display_info(self, *args):\n if (self.chat_history != self.parent.client_protocol.chat_history.\n history_string):\n self.chat_history = (self.parent.client_protocol.chat_history.\n history_string)\n if self.user_list != self.parent.client_protocol.user_list:\n print('User List mismatch')\n self.user_list = self.parent.client_protocol.user_list\n self.update_user_list_buttons()\n if self.parent.client_protocol.server_shutdown:\n self.server_shutdown()\n\n def next_message_private(self, user):\n current_text = self.ids.message.text\n self.ids.message.text = ''\n current_text = '@{}, '.format(user) + current_text\n self.ids.message.text = current_text\n\n def server_shutdown(self):\n print('SERVER SHUTDOWN')\n self.popup = ServerShutdownPopup()\n self.popup.open()\n\n def schedule_clear_input_box(self):\n Clock.schedule_once(self.clear_input_box, 0.25)\n\n def clear_input_box(self, *args):\n self.ids.message.text = ''\n",
"step-3": "<mask token>\n\n\nclass StartScreen(Screen):\n\n def attempt_to_connect(self, server_ip, username, password):\n self.parent.client_protocol.start_connection(server_ip, username,\n password)\n self.open_connecting_popup()\n self.timeout = 0\n self.wait_For_server_response_event = Clock.schedule_interval(self.\n wait_for_server_response, 1)\n\n def wait_for_server_response(self, *args):\n print(self.timeout)\n if self.parent.client_protocol.login_success:\n self.popup.dismiss()\n self.wait_For_server_response_event.cancel()\n self.parent.current = 'ChatRoomScreen'\n elif self.timeout == 5:\n self.failed_to_connect(message=\n 'Failed to connect to server. Please try again or check your network connection.'\n )\n elif self.parent.client_protocol.invalid_credentials:\n self.parent.client_protocol.invalid_credentials = False\n self.failed_to_connect(message=\n 'Invalid username/password combination. Please try again.')\n else:\n self.timeout += 1\n\n def failed_to_connect(self, message):\n print('FAILED TO CONNECT')\n self.popup.dismiss()\n self.open_failed_popup(message=message)\n self.wait_For_server_response_event.cancel()\n\n def open_connecting_popup(self):\n self.popup = SubmissionPopup()\n self.popup.open()\n\n def open_failed_popup(self, message):\n self.popup = FailedSubmissionPopup(message=message)\n self.popup.open()\n\n\nclass ChatRoomScreen(Screen):\n chat_history = StringProperty('')\n user_list = StringProperty('')\n\n def on_enter(self):\n Clock.schedule_once(self.schedule_update_display_info)\n\n def schedule_update_display_info(self, *args):\n Clock.schedule_interval(self.update_display_info, 1)\n\n def update_user_list_buttons(self):\n self.clear_user_list_display()\n for user in self.user_list.split('\\n'):\n button = ModalPopupButton(text=user)\n self.ids.user_list.add_widget(button)\n self.ids.user_list.add_widget(Widget())\n\n def clear_user_list_display(self):\n self.ids.user_list.clear_widgets()\n\n def update_display_info(self, *args):\n if (self.chat_history != self.parent.client_protocol.chat_history.\n history_string):\n self.chat_history = (self.parent.client_protocol.chat_history.\n history_string)\n if self.user_list != self.parent.client_protocol.user_list:\n print('User List mismatch')\n self.user_list = self.parent.client_protocol.user_list\n self.update_user_list_buttons()\n if self.parent.client_protocol.server_shutdown:\n self.server_shutdown()\n\n def next_message_private(self, user):\n current_text = self.ids.message.text\n self.ids.message.text = ''\n current_text = '@{}, '.format(user) + current_text\n self.ids.message.text = current_text\n\n def server_shutdown(self):\n print('SERVER SHUTDOWN')\n self.popup = ServerShutdownPopup()\n self.popup.open()\n\n def schedule_clear_input_box(self):\n Clock.schedule_once(self.clear_input_box, 0.25)\n\n def clear_input_box(self, *args):\n self.ids.message.text = ''\n",
"step-4": "<mask token>\nPORT = 1776\nTIME_UNIT = 'MINUTES'\n\n\nclass RootScreen(ScreenManager):\n\n def __init__(self, client_protocol, **kwargs):\n super().__init__(**kwargs)\n self.client_protocol = client_protocol\n\n\nclass StartScreen(Screen):\n\n def attempt_to_connect(self, server_ip, username, password):\n self.parent.client_protocol.start_connection(server_ip, username,\n password)\n self.open_connecting_popup()\n self.timeout = 0\n self.wait_For_server_response_event = Clock.schedule_interval(self.\n wait_for_server_response, 1)\n\n def wait_for_server_response(self, *args):\n print(self.timeout)\n if self.parent.client_protocol.login_success:\n self.popup.dismiss()\n self.wait_For_server_response_event.cancel()\n self.parent.current = 'ChatRoomScreen'\n elif self.timeout == 5:\n self.failed_to_connect(message=\n 'Failed to connect to server. Please try again or check your network connection.'\n )\n elif self.parent.client_protocol.invalid_credentials:\n self.parent.client_protocol.invalid_credentials = False\n self.failed_to_connect(message=\n 'Invalid username/password combination. Please try again.')\n else:\n self.timeout += 1\n\n def failed_to_connect(self, message):\n print('FAILED TO CONNECT')\n self.popup.dismiss()\n self.open_failed_popup(message=message)\n self.wait_For_server_response_event.cancel()\n\n def open_connecting_popup(self):\n self.popup = SubmissionPopup()\n self.popup.open()\n\n def open_failed_popup(self, message):\n self.popup = FailedSubmissionPopup(message=message)\n self.popup.open()\n\n\nclass ChatRoomScreen(Screen):\n chat_history = StringProperty('')\n user_list = StringProperty('')\n\n def on_enter(self):\n Clock.schedule_once(self.schedule_update_display_info)\n\n def schedule_update_display_info(self, *args):\n Clock.schedule_interval(self.update_display_info, 1)\n\n def update_user_list_buttons(self):\n self.clear_user_list_display()\n for user in self.user_list.split('\\n'):\n button = ModalPopupButton(text=user)\n self.ids.user_list.add_widget(button)\n self.ids.user_list.add_widget(Widget())\n\n def clear_user_list_display(self):\n self.ids.user_list.clear_widgets()\n\n def update_display_info(self, *args):\n if (self.chat_history != self.parent.client_protocol.chat_history.\n history_string):\n self.chat_history = (self.parent.client_protocol.chat_history.\n history_string)\n if self.user_list != self.parent.client_protocol.user_list:\n print('User List mismatch')\n self.user_list = self.parent.client_protocol.user_list\n self.update_user_list_buttons()\n if self.parent.client_protocol.server_shutdown:\n self.server_shutdown()\n\n def next_message_private(self, user):\n current_text = self.ids.message.text\n self.ids.message.text = ''\n current_text = '@{}, '.format(user) + current_text\n self.ids.message.text = current_text\n\n def server_shutdown(self):\n print('SERVER SHUTDOWN')\n self.popup = ServerShutdownPopup()\n self.popup.open()\n\n def schedule_clear_input_box(self):\n Clock.schedule_once(self.clear_input_box, 0.25)\n\n def clear_input_box(self, *args):\n self.ids.message.text = ''\n",
"step-5": "# Standard Library Imports\n\n# Third Party Imports\nfrom kivy.clock import Clock\nfrom kivy.properties import StringProperty\nfrom kivy.uix.screenmanager import Screen, ScreenManager\nfrom kivy.uix.widget import Widget\n\n# Local Imports\nfrom client.source.ui.kv_widgets import ModalPopupButton, SubmissionPopup, FailedSubmissionPopup, ServerShutdownPopup\n\n# ====================================\n# CONSTANTS\n# ====================================\nPORT = 1776\n\n# ====================================\n# PARAMETERS\n# ====================================\nTIME_UNIT = 'MINUTES'\n\n\nclass RootScreen(ScreenManager):\n def __init__(self, client_protocol, **kwargs):\n super().__init__(**kwargs)\n self.client_protocol = client_protocol\n\n\nclass StartScreen(Screen):\n\n def attempt_to_connect(self, server_ip, username, password):\n self.parent.client_protocol.start_connection(server_ip, username, password)\n self.open_connecting_popup()\n self.timeout = 0\n self.wait_For_server_response_event = Clock.schedule_interval(self.wait_for_server_response, 1)\n\n def wait_for_server_response(self, *args):\n print(self.timeout)\n # Login success\n if self.parent.client_protocol.login_success:\n self.popup.dismiss()\n self.wait_For_server_response_event.cancel()\n self.parent.current = 'ChatRoomScreen'\n # Timeout\n elif self.timeout == 5:\n self.failed_to_connect(message='Failed to connect to server. Please try again or check your network connection.')\n # Invalid credentials\n elif self.parent.client_protocol.invalid_credentials:\n self.parent.client_protocol.invalid_credentials = False\n self.failed_to_connect(message='Invalid username/password combination. Please try again.')\n else:\n self.timeout += 1\n\n def failed_to_connect(self, message):\n print(\"FAILED TO CONNECT\")\n self.popup.dismiss()\n self.open_failed_popup(message=message)\n self.wait_For_server_response_event.cancel()\n\n def open_connecting_popup(self):\n self.popup = SubmissionPopup()\n self.popup.open()\n\n def open_failed_popup(self, message):\n self.popup = FailedSubmissionPopup(message=message)\n self.popup.open()\n\n\nclass ChatRoomScreen(Screen):\n chat_history = StringProperty('')\n user_list = StringProperty('')\n\n def on_enter(self):\n Clock.schedule_once(self.schedule_update_display_info)\n\n def schedule_update_display_info(self, *args):\n Clock.schedule_interval(self.update_display_info, 1)\n\n def update_user_list_buttons(self):\n self.clear_user_list_display()\n for user in self.user_list.split(\"\\n\"):\n button = ModalPopupButton(text=user)\n self.ids.user_list.add_widget(button)\n self.ids.user_list.add_widget(Widget())\n\n def clear_user_list_display(self):\n self.ids.user_list.clear_widgets()\n\n def update_display_info(self, *args):\n if self.chat_history != self.parent.client_protocol.chat_history.history_string:\n self.chat_history = self.parent.client_protocol.chat_history.history_string\n\n if self.user_list != self.parent.client_protocol.user_list:\n print(\"User List mismatch\")\n self.user_list = self.parent.client_protocol.user_list\n self.update_user_list_buttons()\n\n if self.parent.client_protocol.server_shutdown:\n self.server_shutdown()\n\n def next_message_private(self, user):\n current_text = self.ids.message.text\n self.ids.message.text = ''\n current_text = \"@{}, \".format(user) + current_text\n self.ids.message.text = current_text\n\n def server_shutdown(self):\n print(\"SERVER SHUTDOWN\")\n self.popup = ServerShutdownPopup()\n self.popup.open()\n\n def 
schedule_clear_input_box(self):\n Clock.schedule_once(self.clear_input_box, 0.25)\n\n def clear_input_box(self, *args):\n self.ids.message.text = ''\n",
"step-ids": [
7,
12,
17,
20,
22
]
}
|
[
7,
12,
17,
20,
22
] |
import face_recognition
from glob import glob
import os.path as osp
class FaceRecognitionLib(object):
"""
face_recognition library を利用した顔認証検証
"""
# クラス変数設定
__data_set_dir = './../../dataset/japanese' # データ・セットディレクトリ
__known_image_idx = (1,) # 既存画像のインデックス
__unknown_image_idx = (2, 3, 4, 5) # 検証画像のインデックス
__tolerance = 0.4 # Recognitionの距離threshold
def __init__(self):
# get sub directory
sub_dirs = glob(FaceRecognitionLib.__data_set_dir + '/*/')
# get list of name
self.__people = [sub_dir.split('/')[-2] for sub_dir in sub_dirs]
# 既存画像と検証画像のファイルリストを生成する。
known_images_path = []
unknown_images_path = []
for img_idx in self.__known_image_idx:
known_images_path.extend(
[osp.join(sub_dir, sub_dir.split('/')[-2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])
for img_idx in self.__unknown_image_idx:
unknown_images_path.extend(
[osp.join(sub_dir, sub_dir.split('/')[-2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])
self.__unknown_images_paths = unknown_images_path
# set face encodings for known faces
self.__known_face_encodings = self.__make_face_encodings(images_path=known_images_path)
print('shape of known_face_encodings = ({}, {})'.format(len(self.__known_face_encodings),
len(self.__known_face_encodings[0])))
@staticmethod
def __make_face_encodings(images_path):
"""
face encode情報を生成する。
"""
face_encodings = []
for img_path in images_path:
img = face_recognition.load_image_file(img_path)
face_encodings.append(face_recognition.face_encodings(img)[0])
return face_encodings
def recognition(self):
"""
Recognition
"""
unknown_face_encodings = self.__make_face_encodings(images_path=self.__unknown_images_paths)
print('shape of unknown_face_encodings = ({}, {})'.format(len(unknown_face_encodings),
len(unknown_face_encodings[0])))
accuracy = 0
wrong = 0
for face_to_compare in self.__known_face_encodings:
print(face_recognition.face_distance(unknown_face_encodings, face_to_compare))
for i, unknown_face_encoding in enumerate(unknown_face_encodings):
img_file = osp.basename(self.__unknown_images_paths[i])
results = face_recognition.compare_faces(self.__known_face_encodings,
unknown_face_encoding,
tolerance=FaceRecognitionLib.__tolerance)
name = "Unknown"
for person in range(len(self.__people)):
if results[person]:
name = self.__people[person]
break
if name in img_file:
accuracy += 1
else:
wrong += 1
print("Found {} in the photo {}".format(name, img_file))
print('accuracy = {}, wrong = {}'.format(accuracy, wrong))
if __name__ == "__main__":
face_recognition_lib = FaceRecognitionLib()
face_recognition_lib.recognition()
|
normal
|
{
"blob_id": "2d69a39be3931aa4c62cadff4cdfad76f6b32c59",
"index": 6473,
"step-1": "<mask token>\n\n\nclass FaceRecognitionLib(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n sub_dirs = glob(FaceRecognitionLib.__data_set_dir + '/*/')\n self.__people = [sub_dir.split('/')[-2] for sub_dir in sub_dirs]\n known_images_path = []\n unknown_images_path = []\n for img_idx in self.__known_image_idx:\n known_images_path.extend([osp.join(sub_dir, sub_dir.split('/')[\n -2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])\n for img_idx in self.__unknown_image_idx:\n unknown_images_path.extend([osp.join(sub_dir, sub_dir.split('/'\n )[-2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])\n self.__unknown_images_paths = unknown_images_path\n self.__known_face_encodings = self.__make_face_encodings(images_path\n =known_images_path)\n print('shape of known_face_encodings = ({}, {})'.format(len(self.\n __known_face_encodings), len(self.__known_face_encodings[0])))\n <mask token>\n\n def recognition(self):\n \"\"\"\n Recognition\n \"\"\"\n unknown_face_encodings = self.__make_face_encodings(images_path=\n self.__unknown_images_paths)\n print('shape of unknown_face_encodings = ({}, {})'.format(len(\n unknown_face_encodings), len(unknown_face_encodings[0])))\n accuracy = 0\n wrong = 0\n for face_to_compare in self.__known_face_encodings:\n print(face_recognition.face_distance(unknown_face_encodings,\n face_to_compare))\n for i, unknown_face_encoding in enumerate(unknown_face_encodings):\n img_file = osp.basename(self.__unknown_images_paths[i])\n results = face_recognition.compare_faces(self.\n __known_face_encodings, unknown_face_encoding, tolerance=\n FaceRecognitionLib.__tolerance)\n name = 'Unknown'\n for person in range(len(self.__people)):\n if results[person]:\n name = self.__people[person]\n break\n if name in img_file:\n accuracy += 1\n else:\n wrong += 1\n print('Found {} in the photo {}'.format(name, img_file))\n print('accuracy = {}, wrong = {}'.format(accuracy, wrong))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FaceRecognitionLib(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n sub_dirs = glob(FaceRecognitionLib.__data_set_dir + '/*/')\n self.__people = [sub_dir.split('/')[-2] for sub_dir in sub_dirs]\n known_images_path = []\n unknown_images_path = []\n for img_idx in self.__known_image_idx:\n known_images_path.extend([osp.join(sub_dir, sub_dir.split('/')[\n -2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])\n for img_idx in self.__unknown_image_idx:\n unknown_images_path.extend([osp.join(sub_dir, sub_dir.split('/'\n )[-2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])\n self.__unknown_images_paths = unknown_images_path\n self.__known_face_encodings = self.__make_face_encodings(images_path\n =known_images_path)\n print('shape of known_face_encodings = ({}, {})'.format(len(self.\n __known_face_encodings), len(self.__known_face_encodings[0])))\n\n @staticmethod\n def __make_face_encodings(images_path):\n \"\"\"\n face encode情報を生成する。\n \"\"\"\n face_encodings = []\n for img_path in images_path:\n img = face_recognition.load_image_file(img_path)\n face_encodings.append(face_recognition.face_encodings(img)[0])\n return face_encodings\n\n def recognition(self):\n \"\"\"\n Recognition\n \"\"\"\n unknown_face_encodings = self.__make_face_encodings(images_path=\n self.__unknown_images_paths)\n print('shape of unknown_face_encodings = ({}, {})'.format(len(\n unknown_face_encodings), len(unknown_face_encodings[0])))\n accuracy = 0\n wrong = 0\n for face_to_compare in self.__known_face_encodings:\n print(face_recognition.face_distance(unknown_face_encodings,\n face_to_compare))\n for i, unknown_face_encoding in enumerate(unknown_face_encodings):\n img_file = osp.basename(self.__unknown_images_paths[i])\n results = face_recognition.compare_faces(self.\n __known_face_encodings, unknown_face_encoding, tolerance=\n FaceRecognitionLib.__tolerance)\n name = 'Unknown'\n for person in range(len(self.__people)):\n if results[person]:\n name = self.__people[person]\n break\n if name in img_file:\n accuracy += 1\n else:\n wrong += 1\n print('Found {} in the photo {}'.format(name, img_file))\n print('accuracy = {}, wrong = {}'.format(accuracy, wrong))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FaceRecognitionLib(object):\n <mask token>\n __data_set_dir = './../../dataset/japanese'\n __known_image_idx = 1,\n __unknown_image_idx = 2, 3, 4, 5\n __tolerance = 0.4\n\n def __init__(self):\n sub_dirs = glob(FaceRecognitionLib.__data_set_dir + '/*/')\n self.__people = [sub_dir.split('/')[-2] for sub_dir in sub_dirs]\n known_images_path = []\n unknown_images_path = []\n for img_idx in self.__known_image_idx:\n known_images_path.extend([osp.join(sub_dir, sub_dir.split('/')[\n -2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])\n for img_idx in self.__unknown_image_idx:\n unknown_images_path.extend([osp.join(sub_dir, sub_dir.split('/'\n )[-2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])\n self.__unknown_images_paths = unknown_images_path\n self.__known_face_encodings = self.__make_face_encodings(images_path\n =known_images_path)\n print('shape of known_face_encodings = ({}, {})'.format(len(self.\n __known_face_encodings), len(self.__known_face_encodings[0])))\n\n @staticmethod\n def __make_face_encodings(images_path):\n \"\"\"\n face encode情報を生成する。\n \"\"\"\n face_encodings = []\n for img_path in images_path:\n img = face_recognition.load_image_file(img_path)\n face_encodings.append(face_recognition.face_encodings(img)[0])\n return face_encodings\n\n def recognition(self):\n \"\"\"\n Recognition\n \"\"\"\n unknown_face_encodings = self.__make_face_encodings(images_path=\n self.__unknown_images_paths)\n print('shape of unknown_face_encodings = ({}, {})'.format(len(\n unknown_face_encodings), len(unknown_face_encodings[0])))\n accuracy = 0\n wrong = 0\n for face_to_compare in self.__known_face_encodings:\n print(face_recognition.face_distance(unknown_face_encodings,\n face_to_compare))\n for i, unknown_face_encoding in enumerate(unknown_face_encodings):\n img_file = osp.basename(self.__unknown_images_paths[i])\n results = face_recognition.compare_faces(self.\n __known_face_encodings, unknown_face_encoding, tolerance=\n FaceRecognitionLib.__tolerance)\n name = 'Unknown'\n for person in range(len(self.__people)):\n if results[person]:\n name = self.__people[person]\n break\n if name in img_file:\n accuracy += 1\n else:\n wrong += 1\n print('Found {} in the photo {}'.format(name, img_file))\n print('accuracy = {}, wrong = {}'.format(accuracy, wrong))\n\n\n<mask token>\n",
"step-4": "import face_recognition\nfrom glob import glob\nimport os.path as osp\n\n\nclass FaceRecognitionLib(object):\n \"\"\"\n face_recognition library を利用した顔認証検証\n \"\"\"\n __data_set_dir = './../../dataset/japanese'\n __known_image_idx = 1,\n __unknown_image_idx = 2, 3, 4, 5\n __tolerance = 0.4\n\n def __init__(self):\n sub_dirs = glob(FaceRecognitionLib.__data_set_dir + '/*/')\n self.__people = [sub_dir.split('/')[-2] for sub_dir in sub_dirs]\n known_images_path = []\n unknown_images_path = []\n for img_idx in self.__known_image_idx:\n known_images_path.extend([osp.join(sub_dir, sub_dir.split('/')[\n -2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])\n for img_idx in self.__unknown_image_idx:\n unknown_images_path.extend([osp.join(sub_dir, sub_dir.split('/'\n )[-2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])\n self.__unknown_images_paths = unknown_images_path\n self.__known_face_encodings = self.__make_face_encodings(images_path\n =known_images_path)\n print('shape of known_face_encodings = ({}, {})'.format(len(self.\n __known_face_encodings), len(self.__known_face_encodings[0])))\n\n @staticmethod\n def __make_face_encodings(images_path):\n \"\"\"\n face encode情報を生成する。\n \"\"\"\n face_encodings = []\n for img_path in images_path:\n img = face_recognition.load_image_file(img_path)\n face_encodings.append(face_recognition.face_encodings(img)[0])\n return face_encodings\n\n def recognition(self):\n \"\"\"\n Recognition\n \"\"\"\n unknown_face_encodings = self.__make_face_encodings(images_path=\n self.__unknown_images_paths)\n print('shape of unknown_face_encodings = ({}, {})'.format(len(\n unknown_face_encodings), len(unknown_face_encodings[0])))\n accuracy = 0\n wrong = 0\n for face_to_compare in self.__known_face_encodings:\n print(face_recognition.face_distance(unknown_face_encodings,\n face_to_compare))\n for i, unknown_face_encoding in enumerate(unknown_face_encodings):\n img_file = osp.basename(self.__unknown_images_paths[i])\n results = face_recognition.compare_faces(self.\n __known_face_encodings, unknown_face_encoding, tolerance=\n FaceRecognitionLib.__tolerance)\n name = 'Unknown'\n for person in range(len(self.__people)):\n if results[person]:\n name = self.__people[person]\n break\n if name in img_file:\n accuracy += 1\n else:\n wrong += 1\n print('Found {} in the photo {}'.format(name, img_file))\n print('accuracy = {}, wrong = {}'.format(accuracy, wrong))\n\n\nif __name__ == '__main__':\n face_recognition_lib = FaceRecognitionLib()\n face_recognition_lib.recognition()\n",
"step-5": "import face_recognition\r\nfrom glob import glob\r\nimport os.path as osp\r\n\r\n\r\nclass FaceRecognitionLib(object):\r\n \"\"\"\r\n face_recognition library を利用した顔認証検証\r\n \"\"\"\r\n # クラス変数設定\r\n __data_set_dir = './../../dataset/japanese' # データ・セットディレクトリ\r\n __known_image_idx = (1,) # 既存画像のインデックス\r\n __unknown_image_idx = (2, 3, 4, 5) # 検証画像のインデックス\r\n __tolerance = 0.4 # Recognitionの距離threshold\r\n\r\n def __init__(self):\r\n # get sub directory\r\n sub_dirs = glob(FaceRecognitionLib.__data_set_dir + '/*/')\r\n\r\n # get list of name\r\n self.__people = [sub_dir.split('/')[-2] for sub_dir in sub_dirs]\r\n\r\n # 既存画像と検証画像のファイルリストを生成する。\r\n known_images_path = []\r\n unknown_images_path = []\r\n for img_idx in self.__known_image_idx:\r\n known_images_path.extend(\r\n [osp.join(sub_dir, sub_dir.split('/')[-2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])\r\n\r\n for img_idx in self.__unknown_image_idx:\r\n unknown_images_path.extend(\r\n [osp.join(sub_dir, sub_dir.split('/')[-2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])\r\n\r\n self.__unknown_images_paths = unknown_images_path\r\n\r\n # set face encodings for known faces\r\n self.__known_face_encodings = self.__make_face_encodings(images_path=known_images_path)\r\n print('shape of known_face_encodings = ({}, {})'.format(len(self.__known_face_encodings),\r\n len(self.__known_face_encodings[0])))\r\n\r\n @staticmethod\r\n def __make_face_encodings(images_path):\r\n \"\"\"\r\n face encode情報を生成する。\r\n \"\"\"\r\n face_encodings = []\r\n\r\n for img_path in images_path:\r\n img = face_recognition.load_image_file(img_path)\r\n face_encodings.append(face_recognition.face_encodings(img)[0])\r\n\r\n return face_encodings\r\n\r\n def recognition(self):\r\n \"\"\"\r\n Recognition\r\n \"\"\"\r\n unknown_face_encodings = self.__make_face_encodings(images_path=self.__unknown_images_paths)\r\n print('shape of unknown_face_encodings = ({}, {})'.format(len(unknown_face_encodings),\r\n len(unknown_face_encodings[0])))\r\n\r\n accuracy = 0\r\n wrong = 0\r\n\r\n for face_to_compare in self.__known_face_encodings:\r\n print(face_recognition.face_distance(unknown_face_encodings, face_to_compare))\r\n\r\n for i, unknown_face_encoding in enumerate(unknown_face_encodings):\r\n img_file = osp.basename(self.__unknown_images_paths[i])\r\n results = face_recognition.compare_faces(self.__known_face_encodings,\r\n unknown_face_encoding,\r\n tolerance=FaceRecognitionLib.__tolerance)\r\n\r\n name = \"Unknown\"\r\n\r\n for person in range(len(self.__people)):\r\n if results[person]:\r\n name = self.__people[person]\r\n break\r\n\r\n if name in img_file:\r\n accuracy += 1\r\n else:\r\n wrong += 1\r\n\r\n print(\"Found {} in the photo {}\".format(name, img_file))\r\n\r\n print('accuracy = {}, wrong = {}'.format(accuracy, wrong))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n face_recognition_lib = FaceRecognitionLib()\r\n face_recognition_lib.recognition()\r\n\r\n\r\n\r\n",
"step-ids": [
3,
4,
5,
8,
9
]
}
|
[
3,
4,
5,
8,
9
] |
from keras.preprocessing.text import text_to_word_sequence
import os
# keras NLP tools filter out certain tokens by default
# this function replaces the default with a smaller set of things to filter out
def filter_not_punctuation():
return '"#$%&()*+-/:;<=>@[\\]^_`{|}~\t\n'
def get_first_n_words(text, n):
string_sequence = text_to_word_sequence(text, filters=filter_not_punctuation())
truncated_string = ''
for word in string_sequence[:n]:
truncated_string = truncated_string + word + ' '
return truncated_string
# gets text data from files with only maxlen words from each file. Gets whole file if maxlen is None
def get_labelled_data_from_directories(data_dir, maxlen=None):
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(data_dir)):
path = os.path.join(data_dir, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in os.listdir(path):
fpath = os.path.join(path, fname)
f = open(fpath)
t = f.read()
if maxlen is not None:
t = get_first_n_words(t, maxlen)
texts.append(t)
f.close()
labels.append(label_id)
return texts, labels_index, labels
|
normal
|
{
"blob_id": "365e2059d5ed3d7f8d9dbb4e44f563b79d68b087",
"index": 1856,
"step-1": "<mask token>\n\n\ndef get_labelled_data_from_directories(data_dir, maxlen=None):\n texts = []\n labels_index = {}\n labels = []\n for name in sorted(os.listdir(data_dir)):\n path = os.path.join(data_dir, name)\n if os.path.isdir(path):\n label_id = len(labels_index)\n labels_index[name] = label_id\n for fname in os.listdir(path):\n fpath = os.path.join(path, fname)\n f = open(fpath)\n t = f.read()\n if maxlen is not None:\n t = get_first_n_words(t, maxlen)\n texts.append(t)\n f.close()\n labels.append(label_id)\n return texts, labels_index, labels\n",
"step-2": "<mask token>\n\n\ndef filter_not_punctuation():\n return '\"#$%&()*+-/:;<=>@[\\\\]^_`{|}~\\t\\n'\n\n\n<mask token>\n\n\ndef get_labelled_data_from_directories(data_dir, maxlen=None):\n texts = []\n labels_index = {}\n labels = []\n for name in sorted(os.listdir(data_dir)):\n path = os.path.join(data_dir, name)\n if os.path.isdir(path):\n label_id = len(labels_index)\n labels_index[name] = label_id\n for fname in os.listdir(path):\n fpath = os.path.join(path, fname)\n f = open(fpath)\n t = f.read()\n if maxlen is not None:\n t = get_first_n_words(t, maxlen)\n texts.append(t)\n f.close()\n labels.append(label_id)\n return texts, labels_index, labels\n",
"step-3": "<mask token>\n\n\ndef filter_not_punctuation():\n return '\"#$%&()*+-/:;<=>@[\\\\]^_`{|}~\\t\\n'\n\n\ndef get_first_n_words(text, n):\n string_sequence = text_to_word_sequence(text, filters=\n filter_not_punctuation())\n truncated_string = ''\n for word in string_sequence[:n]:\n truncated_string = truncated_string + word + ' '\n return truncated_string\n\n\ndef get_labelled_data_from_directories(data_dir, maxlen=None):\n texts = []\n labels_index = {}\n labels = []\n for name in sorted(os.listdir(data_dir)):\n path = os.path.join(data_dir, name)\n if os.path.isdir(path):\n label_id = len(labels_index)\n labels_index[name] = label_id\n for fname in os.listdir(path):\n fpath = os.path.join(path, fname)\n f = open(fpath)\n t = f.read()\n if maxlen is not None:\n t = get_first_n_words(t, maxlen)\n texts.append(t)\n f.close()\n labels.append(label_id)\n return texts, labels_index, labels\n",
"step-4": "from keras.preprocessing.text import text_to_word_sequence\nimport os\n\n\ndef filter_not_punctuation():\n return '\"#$%&()*+-/:;<=>@[\\\\]^_`{|}~\\t\\n'\n\n\ndef get_first_n_words(text, n):\n string_sequence = text_to_word_sequence(text, filters=\n filter_not_punctuation())\n truncated_string = ''\n for word in string_sequence[:n]:\n truncated_string = truncated_string + word + ' '\n return truncated_string\n\n\ndef get_labelled_data_from_directories(data_dir, maxlen=None):\n texts = []\n labels_index = {}\n labels = []\n for name in sorted(os.listdir(data_dir)):\n path = os.path.join(data_dir, name)\n if os.path.isdir(path):\n label_id = len(labels_index)\n labels_index[name] = label_id\n for fname in os.listdir(path):\n fpath = os.path.join(path, fname)\n f = open(fpath)\n t = f.read()\n if maxlen is not None:\n t = get_first_n_words(t, maxlen)\n texts.append(t)\n f.close()\n labels.append(label_id)\n return texts, labels_index, labels\n",
"step-5": "from keras.preprocessing.text import text_to_word_sequence\nimport os\n\n\n# keras NLP tools filter out certain tokens by default\n# this function replaces the default with a smaller set of things to filter out\ndef filter_not_punctuation():\n return '\"#$%&()*+-/:;<=>@[\\\\]^_`{|}~\\t\\n'\n\n\ndef get_first_n_words(text, n):\n string_sequence = text_to_word_sequence(text, filters=filter_not_punctuation())\n truncated_string = ''\n for word in string_sequence[:n]:\n truncated_string = truncated_string + word + ' '\n return truncated_string\n\n\n\n\n# gets text data from files with only maxlen words from each file. Gets whole file if maxlen is None\ndef get_labelled_data_from_directories(data_dir, maxlen=None):\n texts = [] # list of text samples\n labels_index = {} # dictionary mapping label name to numeric id\n labels = [] # list of label ids\n for name in sorted(os.listdir(data_dir)):\n path = os.path.join(data_dir, name)\n if os.path.isdir(path):\n label_id = len(labels_index)\n labels_index[name] = label_id\n for fname in os.listdir(path):\n fpath = os.path.join(path, fname)\n f = open(fpath)\n t = f.read()\n if maxlen is not None:\n t = get_first_n_words(t, maxlen)\n texts.append(t)\n f.close()\n labels.append(label_id)\n return texts, labels_index, labels\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import datetime
import time
import rfc822
from django.conf import settings
from urllib2 import Request, urlopen, URLError, HTTPError
from urllib import urlencode
import re
import string
try:
import django.utils.simplejson as json
except:
import json
from django.core.cache import cache
from tagging.models import Tag
from foodtruck.models import *
from foodtruck.tokens import *
import oauth2 as oauth
def fetch_json(url, service, list_key=None):
fetched = urlopen(url).read()
data = json.loads(fetched)
if list_key:
data = data[list_key]
return data
def oauth_req(url, key, secret, http_method="GET", post_body=None,http_headers=None):
consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
token = oauth.Token(key=key, secret=secret)
client = oauth.Client(consumer, token)
resp, content = client.request(
url,
method=http_method,
body=post_body,
headers=http_headers,
force_auth_header=True
)
return content
def get_all_tweets():
from dateutil.parser import parse, tz
url = LIST_URL
HERE = tz.tzlocal()
if cache.get('truck_tweets'):
tweets = cache.get('truck_tweets')
else:
tweets = []
all_tweets = oauth_req(url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
data = json.loads(all_tweets)
for t in data:
m = dict(
name = t['user']['screen_name'],
pic_url = t['user']['profile_image_url'],
text = t['text'],
timestamp = parse(t['created_at']).astimezone(HERE),
url = 'http://twitter.com/'+t['user']['screen_name']+'/statuses/'+str(t['id']),
)
tweets += [m]
cache.set('truck_tweets',tweets, 62)
return tweets
def filter_trucks(hood):
tweets = get_all_tweets()
n = Hood.objects.get(id=hood)
tags = n.tags.all()
filtered = {'hood':n.name, 'tags':tags}
filtered['tweets'] = []
for t in tweets:
for w in tags:
if string.find(t['text'].lower(), w.name.lower()) > 0:
filtered['tweets'] += [t]
break
cache.set((('filtered_%s' % hood)), filtered, 62)
return filtered
def get_truck_names():
p = open('truck.cursor','r')
try: last_cursor = int(p.read())
except: last_cursor=1353949495935930905 # this is just the last cursor number i looked up, to save on API calls -- can change.
p.close()
url = LIST_MEMBERS_URL
get_truck_list = oauth_req(url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
truck_list = json.loads(get_truck_list)
all_trucks = truck_list['users']
cursor = truck_list['next_cursor']
f = open('truck.cursor','w')
f.write(str(cursor))
f.close
while cursor > last_cursor:
truck_url = LIST_MEMBERS_URL +'?cursor=' + str(cursor)
get_truck_list = oauth_req(truck_url,OAUTH_TOKEN,OAUTH_TOKEN_SECRET)
truck_list = json.loads(get_truck_list)
all_trucks += truck_list['users']
cursor = truck_list['next_cursor']
for truck in all_trucks:
description=truck['description'] or ''
truck_url= truck['url'] or 'http://twitter.com/'+truck['screen_name']
profile_icon= truck['profile_image_url'] or ''
real_name=truck['name'] or truck['screen_name']
t = Truck.objects.get_or_create(id_str__exact=truck['id_str'], defaults = {'name':truck['screen_name'], 'description':description, 'profile_icon':profile_icon, 'truck_url':truck_url, 'geo_enabled':truck['geo_enabled'], 'real_name':real_name, 'id_str':truck['id_str']})
if __name__=='__main__':
import sys
try:
func = sys.argv[1]
except: func = None
if func:
try:
exec 'print %s' % func
except:
print "Error: incorrect syntax '%s'" % func
else: print "Please name your function"
|
normal
|
{
"blob_id": "f720eaf1ea96ccc70730e8ba1513e1a2bb95d29d",
"index": 4842,
"step-1": "import datetime\nimport time\nimport rfc822\nfrom django.conf import settings\nfrom urllib2 import Request, urlopen, URLError, HTTPError\nfrom urllib import urlencode\nimport re \nimport string\ntry:\n import django.utils.simplejson as json\nexcept:\n import json\nfrom django.core.cache import cache\n\nfrom tagging.models import Tag\n\nfrom foodtruck.models import *\nfrom foodtruck.tokens import *\n\nimport oauth2 as oauth\n\ndef fetch_json(url, service, list_key=None):\n fetched = urlopen(url).read()\n data = json.loads(fetched)\n if list_key:\n data = data[list_key]\n return data\n \ndef oauth_req(url, key, secret, http_method=\"GET\", post_body=None,http_headers=None):\n\tconsumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)\n\ttoken = oauth.Token(key=key, secret=secret)\n\tclient = oauth.Client(consumer, token)\n\tresp, content = client.request(\n\t\turl,\n\t\tmethod=http_method,\n\t\tbody=post_body,\n\t\theaders=http_headers,\n\t\tforce_auth_header=True\n\t)\n\treturn content\n\ndef get_all_tweets():\n from dateutil.parser import parse, tz\n url = LIST_URL\n HERE = tz.tzlocal()\n if cache.get('truck_tweets'):\n tweets = cache.get('truck_tweets')\n else:\n tweets = []\n all_tweets = oauth_req(url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n data = json.loads(all_tweets)\n for t in data:\n m = dict(\n name = t['user']['screen_name'],\n pic_url = t['user']['profile_image_url'],\n text = t['text'],\n timestamp = parse(t['created_at']).astimezone(HERE),\n url = 'http://twitter.com/'+t['user']['screen_name']+'/statuses/'+str(t['id']),\n ) \n tweets += [m]\n cache.set('truck_tweets',tweets, 62)\n return tweets \n\n\ndef filter_trucks(hood):\n tweets = get_all_tweets() \n n = Hood.objects.get(id=hood)\n tags = n.tags.all()\n filtered = {'hood':n.name, 'tags':tags}\n filtered['tweets'] = []\n for t in tweets:\n for w in tags:\n if string.find(t['text'].lower(), w.name.lower()) > 0: \n filtered['tweets'] += [t]\n break\n cache.set((('filtered_%s' % hood)), filtered, 62)\n return filtered\n \n \ndef get_truck_names():\n p = open('truck.cursor','r')\n try: last_cursor = int(p.read())\n except: last_cursor=1353949495935930905 # this is just the last cursor number i looked up, to save on API calls -- can change.\n p.close()\n\n url = LIST_MEMBERS_URL\n get_truck_list = oauth_req(url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n truck_list = json.loads(get_truck_list)\n all_trucks = truck_list['users']\n cursor = truck_list['next_cursor']\n f = open('truck.cursor','w')\n f.write(str(cursor))\n f.close\n\n while cursor > last_cursor:\n truck_url = LIST_MEMBERS_URL +'?cursor=' + str(cursor)\n get_truck_list = oauth_req(truck_url,OAUTH_TOKEN,OAUTH_TOKEN_SECRET)\n truck_list = json.loads(get_truck_list)\n all_trucks += truck_list['users']\n cursor = truck_list['next_cursor']\n for truck in all_trucks:\n description=truck['description'] or ''\n truck_url= truck['url'] or 'http://twitter.com/'+truck['screen_name']\n profile_icon= truck['profile_image_url'] or ''\n real_name=truck['name'] or truck['screen_name']\n t = Truck.objects.get_or_create(id_str__exact=truck['id_str'], defaults = {'name':truck['screen_name'], 'description':description, 'profile_icon':profile_icon, 'truck_url':truck_url, 'geo_enabled':truck['geo_enabled'], 'real_name':real_name, 'id_str':truck['id_str']})\n\n\nif __name__=='__main__':\n import sys\n try:\n func = sys.argv[1]\n except: func = None\n if func:\n try:\n exec 'print %s' % func\n except:\n print \"Error: incorrect syntax '%s'\" % func\n else: print \"Please name your 
function\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# Copyright (c) 2019, University of Stuttgart
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright
# notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
# Jim Mainprice on Wed January 22 2019
from demos_common_imports import *
from pyrieef.geometry.workspace import *
from pyrieef.geometry import heat_diffusion
from pyrieef.rendering.workspace_planar import WorkspaceDrawer
import matplotlib.pyplot as plt
ROWS = 1
COLS = 2
heat_diffusion.NB_POINTS = 101
heat_diffusion.TIME_FACTOR = 50
heat_diffusion.ALGORITHM = "forward"
iterations = 10
workspace = Workspace()
source = [0, 0]
renderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)
U = heat_diffusion.heat_diffusion(workspace, source, iterations)
U_e = heat_diffusion.compare_with_kernel(U[-1], 9.020E-03, workspace)
for i in range(2):
renderer.set_drawing_axis(i)
renderer.draw_ws_obstacles()
renderer.draw_ws_point(source, color='k', shape='o')
renderer.background_matrix_eval = False
renderer.draw_ws_img(
U[-1] if i == 0 else U_e,
interpolate="none", color_style=plt.cm.gray)
renderer.show()
|
normal
|
{
"blob_id": "007cce815f3ad4e47593ff00ff2e73d5d9961d9e",
"index": 3211,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2):\n renderer.set_drawing_axis(i)\n renderer.draw_ws_obstacles()\n renderer.draw_ws_point(source, color='k', shape='o')\n renderer.background_matrix_eval = False\n renderer.draw_ws_img(U[-1] if i == 0 else U_e, interpolate='none',\n color_style=plt.cm.gray)\nrenderer.show()\n",
"step-3": "<mask token>\nROWS = 1\nCOLS = 2\nheat_diffusion.NB_POINTS = 101\nheat_diffusion.TIME_FACTOR = 50\nheat_diffusion.ALGORITHM = 'forward'\niterations = 10\nworkspace = Workspace()\nsource = [0, 0]\nrenderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)\nU = heat_diffusion.heat_diffusion(workspace, source, iterations)\nU_e = heat_diffusion.compare_with_kernel(U[-1], 0.00902, workspace)\nfor i in range(2):\n renderer.set_drawing_axis(i)\n renderer.draw_ws_obstacles()\n renderer.draw_ws_point(source, color='k', shape='o')\n renderer.background_matrix_eval = False\n renderer.draw_ws_img(U[-1] if i == 0 else U_e, interpolate='none',\n color_style=plt.cm.gray)\nrenderer.show()\n",
"step-4": "from demos_common_imports import *\nfrom pyrieef.geometry.workspace import *\nfrom pyrieef.geometry import heat_diffusion\nfrom pyrieef.rendering.workspace_planar import WorkspaceDrawer\nimport matplotlib.pyplot as plt\nROWS = 1\nCOLS = 2\nheat_diffusion.NB_POINTS = 101\nheat_diffusion.TIME_FACTOR = 50\nheat_diffusion.ALGORITHM = 'forward'\niterations = 10\nworkspace = Workspace()\nsource = [0, 0]\nrenderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)\nU = heat_diffusion.heat_diffusion(workspace, source, iterations)\nU_e = heat_diffusion.compare_with_kernel(U[-1], 0.00902, workspace)\nfor i in range(2):\n renderer.set_drawing_axis(i)\n renderer.draw_ws_obstacles()\n renderer.draw_ws_point(source, color='k', shape='o')\n renderer.background_matrix_eval = False\n renderer.draw_ws_img(U[-1] if i == 0 else U_e, interpolate='none',\n color_style=plt.cm.gray)\nrenderer.show()\n",
"step-5": "#!/usr/bin/env python\n\n# Copyright (c) 2019, University of Stuttgart\n# All rights reserved.\n#\n# Permission to use, copy, modify, and distribute this software for any purpose\n# with or without fee is hereby granted, provided that the above copyright\n# notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\n# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\n# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n#\n# Jim Mainprice on Wed January 22 2019\n\nfrom demos_common_imports import *\nfrom pyrieef.geometry.workspace import *\nfrom pyrieef.geometry import heat_diffusion\nfrom pyrieef.rendering.workspace_planar import WorkspaceDrawer\nimport matplotlib.pyplot as plt\n\nROWS = 1\nCOLS = 2\nheat_diffusion.NB_POINTS = 101\nheat_diffusion.TIME_FACTOR = 50\nheat_diffusion.ALGORITHM = \"forward\"\niterations = 10\nworkspace = Workspace()\nsource = [0, 0]\nrenderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)\nU = heat_diffusion.heat_diffusion(workspace, source, iterations)\nU_e = heat_diffusion.compare_with_kernel(U[-1], 9.020E-03, workspace)\nfor i in range(2):\n renderer.set_drawing_axis(i)\n renderer.draw_ws_obstacles()\n renderer.draw_ws_point(source, color='k', shape='o')\n renderer.background_matrix_eval = False\n renderer.draw_ws_img(\n U[-1] if i == 0 else U_e,\n interpolate=\"none\", color_style=plt.cm.gray)\nrenderer.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_all_words():
words = []
with open('poem.txt') as poem:
for line in poem:
line = line.strip().split(' ')
for word in line:
if len(word) < 6:
words.append(word)
return words
def game(words):
while True:
random_word_index = random.randint(0, len(words))
word_as_list = []
random_word_normal = words[random_word_index]
for x in random_word_normal:
word_as_list.insert(random.randint(0, len(word_as_list)), x)
random_word_funky = ''.join(word_as_list)
print(
f'გამოიცანიი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'
)
answer = input(
'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყეტად: ')
if answer.strip().upper() == 'Q':
print(
"""მადლობა თამაშისთვის და გახსოვდეს:
'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'"""
)
break
if random_word_normal == answer.strip():
print(f"ყოჩაღ, '{answer}' სწორი პასუხია!")
else:
print(
f"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!"
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_all_words():
words = []
with open('poem.txt') as poem:
for line in poem:
line = line.strip().split(' ')
for word in line:
if len(word) < 6:
words.append(word)
return words
def game(words):
while True:
random_word_index = random.randint(0, len(words))
word_as_list = []
random_word_normal = words[random_word_index]
for x in random_word_normal:
word_as_list.insert(random.randint(0, len(word_as_list)), x)
random_word_funky = ''.join(word_as_list)
print(
f'გამოიცანიი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'
)
answer = input(
'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყეტად: ')
if answer.strip().upper() == 'Q':
print(
"""მადლობა თამაშისთვის და გახსოვდეს:
'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'"""
)
break
if random_word_normal == answer.strip():
print(f"ყოჩაღ, '{answer}' სწორი პასუხია!")
else:
print(
f"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!"
)
def main():
words_to_play = get_all_words()
print(
"""ეკრანზე გამოისახება "ვეფხისტყაოსნიდან" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.
შენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით.
"""
)
game(words_to_play)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_all_words():
words = []
with open('poem.txt') as poem:
for line in poem:
line = line.strip().split(' ')
for word in line:
if len(word) < 6:
words.append(word)
return words
def game(words):
while True:
random_word_index = random.randint(0, len(words))
word_as_list = []
random_word_normal = words[random_word_index]
for x in random_word_normal:
word_as_list.insert(random.randint(0, len(word_as_list)), x)
random_word_funky = ''.join(word_as_list)
print(
f'გამოიცანიი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'
)
answer = input(
'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყეტად: ')
if answer.strip().upper() == 'Q':
print(
"""მადლობა თამაშისთვის და გახსოვდეს:
'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'"""
)
break
if random_word_normal == answer.strip():
print(f"ყოჩაღ, '{answer}' სწორი პასუხია!")
else:
print(
f"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!"
)
def main():
words_to_play = get_all_words()
print(
"""ეკრანზე გამოისახება "ვეფხისტყაოსნიდან" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.
შენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით.
"""
)
game(words_to_play)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import random
def get_all_words():
words = []
with open('poem.txt') as poem:
for line in poem:
line = line.strip().split(' ')
for word in line:
if len(word) < 6:
words.append(word)
return words
def game(words):
while True:
random_word_index = random.randint(0, len(words))
word_as_list = []
random_word_normal = words[random_word_index]
for x in random_word_normal:
word_as_list.insert(random.randint(0, len(word_as_list)), x)
random_word_funky = ''.join(word_as_list)
print(
f'გამოიცანიი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'
)
answer = input(
'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყეტად: ')
if answer.strip().upper() == 'Q':
print(
"""მადლობა თამაშისთვის და გახსოვდეს:
'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'"""
)
break
if random_word_normal == answer.strip():
print(f"ყოჩაღ, '{answer}' სწორი პასუხია!")
else:
print(
f"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!"
)
def main():
words_to_play = get_all_words()
print(
"""ეკრანზე გამოისახება "ვეფხისტყაოსნიდან" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.
შენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით.
"""
)
game(words_to_play)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
# ეს არის კოდი, რომელიც ქმნის აბსურდს
import random
def get_all_words():
words = [] # ეს არის ლისტი ყველა ისეთი სიტყვის
with open("poem.txt") as poem: # რომლის ასოების სიმრავლეც 6-ზე ნაკლებია
for line in poem: # გრძელ სიტყვებთან თამაში რთული აღმოჩნდა
line = line.strip().split(" ")
for word in line:
if len(word) < 6:
words.append(word)
return words
def game(words):
while True:
# რენდომად ავარჩიოთ სიტყვა, რომელსაც მომხმარებელი გამოიცნობს
random_word_index = random.randint(0, len(words))
word_as_list = []
random_word_normal = words[random_word_index]
# რენდომად არჩეული სიტყვა გადავაქციოთ ლისტად და ლისტში შემავალი ელემენტები რენდომად დავაგენერიროთ
for x in random_word_normal:
word_as_list.insert(random.randint(0, len(word_as_list)), x)
random_word_funky = "".join(word_as_list)
print(f'გამოიცანიი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}')
answer = input("შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყეტად: ")
if answer.strip().upper() == "Q":
print("მადლობა თამაშისთვის და გახსოვდეს:"
"\n'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'")
break
if random_word_normal == answer.strip():
print(f"ყოჩაღ, '{answer}' სწორი პასუხია!")
else:
print(f"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!")
def main():
words_to_play = get_all_words()
print('ეკრანზე გამოისახება "ვეფხისტყაოსნიდან" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.'
'\nშენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით. \n')
game(words_to_play)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "881d0c0808d8c0e656cdbf49450367553c100630",
"index": 2100,
"step-1": "<mask token>\n\n\ndef get_all_words():\n words = []\n with open('poem.txt') as poem:\n for line in poem:\n line = line.strip().split(' ')\n for word in line:\n if len(word) < 6:\n words.append(word)\n return words\n\n\ndef game(words):\n while True:\n random_word_index = random.randint(0, len(words))\n word_as_list = []\n random_word_normal = words[random_word_index]\n for x in random_word_normal:\n word_as_list.insert(random.randint(0, len(word_as_list)), x)\n random_word_funky = ''.join(word_as_list)\n print(\n f'გამოიცანიი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'\n )\n answer = input(\n 'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყეტად: ')\n if answer.strip().upper() == 'Q':\n print(\n \"\"\"მადლობა თამაშისთვის და გახსოვდეს:\n'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'\"\"\"\n )\n break\n if random_word_normal == answer.strip():\n print(f\"ყოჩაღ, '{answer}' სწორი პასუხია!\")\n else:\n print(\n f\"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!\"\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_all_words():\n words = []\n with open('poem.txt') as poem:\n for line in poem:\n line = line.strip().split(' ')\n for word in line:\n if len(word) < 6:\n words.append(word)\n return words\n\n\ndef game(words):\n while True:\n random_word_index = random.randint(0, len(words))\n word_as_list = []\n random_word_normal = words[random_word_index]\n for x in random_word_normal:\n word_as_list.insert(random.randint(0, len(word_as_list)), x)\n random_word_funky = ''.join(word_as_list)\n print(\n f'გამოიცანიი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'\n )\n answer = input(\n 'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყეტად: ')\n if answer.strip().upper() == 'Q':\n print(\n \"\"\"მადლობა თამაშისთვის და გახსოვდეს:\n'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'\"\"\"\n )\n break\n if random_word_normal == answer.strip():\n print(f\"ყოჩაღ, '{answer}' სწორი პასუხია!\")\n else:\n print(\n f\"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!\"\n )\n\n\ndef main():\n words_to_play = get_all_words()\n print(\n \"\"\"ეკრანზე გამოისახება \"ვეფხისტყაოსნიდან\" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.\nშენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით. \n\"\"\"\n )\n game(words_to_play)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_all_words():\n words = []\n with open('poem.txt') as poem:\n for line in poem:\n line = line.strip().split(' ')\n for word in line:\n if len(word) < 6:\n words.append(word)\n return words\n\n\ndef game(words):\n while True:\n random_word_index = random.randint(0, len(words))\n word_as_list = []\n random_word_normal = words[random_word_index]\n for x in random_word_normal:\n word_as_list.insert(random.randint(0, len(word_as_list)), x)\n random_word_funky = ''.join(word_as_list)\n print(\n f'გამოიცანიი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'\n )\n answer = input(\n 'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყეტად: ')\n if answer.strip().upper() == 'Q':\n print(\n \"\"\"მადლობა თამაშისთვის და გახსოვდეს:\n'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'\"\"\"\n )\n break\n if random_word_normal == answer.strip():\n print(f\"ყოჩაღ, '{answer}' სწორი პასუხია!\")\n else:\n print(\n f\"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!\"\n )\n\n\ndef main():\n words_to_play = get_all_words()\n print(\n \"\"\"ეკრანზე გამოისახება \"ვეფხისტყაოსნიდან\" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.\nშენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით. \n\"\"\"\n )\n game(words_to_play)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import random\n\n\ndef get_all_words():\n words = []\n with open('poem.txt') as poem:\n for line in poem:\n line = line.strip().split(' ')\n for word in line:\n if len(word) < 6:\n words.append(word)\n return words\n\n\ndef game(words):\n while True:\n random_word_index = random.randint(0, len(words))\n word_as_list = []\n random_word_normal = words[random_word_index]\n for x in random_word_normal:\n word_as_list.insert(random.randint(0, len(word_as_list)), x)\n random_word_funky = ''.join(word_as_list)\n print(\n f'გამოიცანიი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'\n )\n answer = input(\n 'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყეტად: ')\n if answer.strip().upper() == 'Q':\n print(\n \"\"\"მადლობა თამაშისთვის და გახსოვდეს:\n'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'\"\"\"\n )\n break\n if random_word_normal == answer.strip():\n print(f\"ყოჩაღ, '{answer}' სწორი პასუხია!\")\n else:\n print(\n f\"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!\"\n )\n\n\ndef main():\n words_to_play = get_all_words()\n print(\n \"\"\"ეკრანზე გამოისახება \"ვეფხისტყაოსნიდან\" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.\nშენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით. \n\"\"\"\n )\n game(words_to_play)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# ეს არის კოდი, რომელიც ქმნის აბსურდს\nimport random\n\n\ndef get_all_words():\n words = [] # ეს არის ლისტი ყველა ისეთი სიტყვის\n with open(\"poem.txt\") as poem: # რომლის ასოების სიმრავლეც 6-ზე ნაკლებია\n for line in poem: # გრძელ სიტყვებთან თამაში რთული აღმოჩნდა\n line = line.strip().split(\" \")\n for word in line:\n if len(word) < 6:\n words.append(word)\n return words\n\n\ndef game(words):\n while True:\n # რენდომად ავარჩიოთ სიტყვა, რომელსაც მომხმარებელი გამოიცნობს\n random_word_index = random.randint(0, len(words))\n word_as_list = []\n random_word_normal = words[random_word_index]\n\n # რენდომად არჩეული სიტყვა გადავაქციოთ ლისტად და ლისტში შემავალი ელემენტები რენდომად დავაგენერიროთ\n for x in random_word_normal:\n word_as_list.insert(random.randint(0, len(word_as_list)), x)\n random_word_funky = \"\".join(word_as_list)\n\n print(f'გამოიცანიი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}')\n answer = input(\"შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყეტად: \")\n\n if answer.strip().upper() == \"Q\":\n print(\"მადლობა თამაშისთვის და გახსოვდეს:\"\n \"\\n'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'\")\n break\n if random_word_normal == answer.strip():\n print(f\"ყოჩაღ, '{answer}' სწორი პასუხია!\")\n else:\n print(f\"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!\")\n\n\ndef main():\n words_to_play = get_all_words()\n print('ეკრანზე გამოისახება \"ვეფხისტყაოსნიდან\" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.'\n '\\nშენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით. \\n')\n game(words_to_play)\n\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
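A small correctness note on the word-guessing record above: random.randint(0, len(words)) is inclusive at both ends, so the chosen index can equal len(words) and raise an IndexError. A minimal sketch of a safer pick, using stand-in data rather than the record's poem.txt:

import random

words = ["apple", "grape", "melon"]  # stand-in data; the record reads its words from poem.txt

# random.choice avoids the off-by-one of randint(0, len(words)),
# whose upper bound is inclusive and can index past the end of the list
picked = random.choice(words)

letters = list(picked)
random.shuffle(letters)          # scramble the letters in place
print(picked, "".join(letters))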
<|reserved_special_token_0|>
def lambda_handler(event, context):
if event['function'] == 'tasklist':
msg = tasklist(name)
if event['function'] == 'activity':
msg = activity(name)
return
<|reserved_special_token_0|>
def tasklist(name):
pjts = TDIAPI.state['projects']
items = TDIAPI.state['items']
labels = TDIAPI.state['labels']
sects = TDIAPI.state['sections']
inbox_list = []
doing_list = []
review_list = []
any_list = []
for projects_id in list:
if projects_id['name'] == name:
tasks_project_id = projects_id['id']
break
try:
tasks_project_id
except NameError:
print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')
return
print(labels)
sys.exit()
for item in items:
l_content = item['content']
l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==
pjt['id']]
l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==
sect['id']]
if l_sec_name is not None and l_sec_name[0] == 'ToDo':
print(l_sec_name)
return
def slack_notify():
title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\n'
slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',
'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{
'value': msg}]}]}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TDIAPI.sync()
<|reserved_special_token_0|>
def lambda_handler(event, context):
if event['function'] == 'tasklist':
msg = tasklist(name)
if event['function'] == 'activity':
msg = activity(name)
return
def activity(name):
actlogs = TDIAPI.activity.get()
pjts = TDIAPI.state['projects']
for projects_id in pjts:
if projects_id['name'] == name:
tasks_project_id = projects_id['id']
break
else:
print('[INFO] Not match project name')
event_list = []
for events in actlogs['events']:
today = datetime.datetime.now().strftime('%Y-%m-%d')
"""
todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する
そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック
"""
todoist_times = datetime.datetime.strptime(events['event_date'],
'%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)
todoist_date = str(todoist_times.strftime('%Y-%m-%d'))
if events['event_type'
] == 'completed' and todoist_date == today and events[
'parent_project_id'] == tasks_project_id:
event_list.append(events['extra_data']['content'])
print(event_list)
return event_list
def tasklist(name):
pjts = TDIAPI.state['projects']
items = TDIAPI.state['items']
labels = TDIAPI.state['labels']
sects = TDIAPI.state['sections']
inbox_list = []
doing_list = []
review_list = []
any_list = []
for projects_id in list:
if projects_id['name'] == name:
tasks_project_id = projects_id['id']
break
try:
tasks_project_id
except NameError:
print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')
return
print(labels)
sys.exit()
for item in items:
l_content = item['content']
l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==
pjt['id']]
l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==
sect['id']]
if l_sec_name is not None and l_sec_name[0] == 'ToDo':
print(l_sec_name)
return
def slack_notify():
title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\n'
slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',
'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{
'value': msg}]}]}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)
TDIAPI.sync()
name = os.environ['TODOIST_PJT']
def lambda_handler(event, context):
if event['function'] == 'tasklist':
msg = tasklist(name)
if event['function'] == 'activity':
msg = activity(name)
return
def activity(name):
actlogs = TDIAPI.activity.get()
pjts = TDIAPI.state['projects']
for projects_id in pjts:
if projects_id['name'] == name:
tasks_project_id = projects_id['id']
break
else:
print('[INFO] Not match project name')
event_list = []
for events in actlogs['events']:
today = datetime.datetime.now().strftime('%Y-%m-%d')
"""
todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する
そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック
"""
todoist_times = datetime.datetime.strptime(events['event_date'],
'%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)
todoist_date = str(todoist_times.strftime('%Y-%m-%d'))
if events['event_type'
] == 'completed' and todoist_date == today and events[
'parent_project_id'] == tasks_project_id:
event_list.append(events['extra_data']['content'])
print(event_list)
return event_list
def tasklist(name):
pjts = TDIAPI.state['projects']
items = TDIAPI.state['items']
labels = TDIAPI.state['labels']
sects = TDIAPI.state['sections']
inbox_list = []
doing_list = []
review_list = []
any_list = []
for projects_id in list:
if projects_id['name'] == name:
tasks_project_id = projects_id['id']
break
try:
tasks_project_id
except NameError:
print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')
return
print(labels)
sys.exit()
for item in items:
l_content = item['content']
l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==
pjt['id']]
l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==
sect['id']]
if l_sec_name is not None and l_sec_name[0] == 'ToDo':
print(l_sec_name)
return
def slack_notify():
title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\n'
slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',
'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{
'value': msg}]}]}
<|reserved_special_token_1|>
import datetime
import json
import requests
import os
import re
import sys
from todoist.api import TodoistAPI
TDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)
TDIAPI.sync()
name = os.environ['TODOIST_PJT']
def lambda_handler(event, context):
if event['function'] == 'tasklist':
msg = tasklist(name)
if event['function'] == 'activity':
msg = activity(name)
return
def activity(name):
actlogs = TDIAPI.activity.get()
pjts = TDIAPI.state['projects']
for projects_id in pjts:
if projects_id['name'] == name:
tasks_project_id = projects_id['id']
break
else:
print('[INFO] Not match project name')
event_list = []
for events in actlogs['events']:
today = datetime.datetime.now().strftime('%Y-%m-%d')
"""
todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する
そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック
"""
todoist_times = datetime.datetime.strptime(events['event_date'],
'%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)
todoist_date = str(todoist_times.strftime('%Y-%m-%d'))
if events['event_type'
] == 'completed' and todoist_date == today and events[
'parent_project_id'] == tasks_project_id:
event_list.append(events['extra_data']['content'])
print(event_list)
return event_list
def tasklist(name):
pjts = TDIAPI.state['projects']
items = TDIAPI.state['items']
labels = TDIAPI.state['labels']
sects = TDIAPI.state['sections']
inbox_list = []
doing_list = []
review_list = []
any_list = []
for projects_id in list:
if projects_id['name'] == name:
tasks_project_id = projects_id['id']
break
try:
tasks_project_id
except NameError:
print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')
return
print(labels)
sys.exit()
for item in items:
l_content = item['content']
l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==
pjt['id']]
l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==
sect['id']]
if l_sec_name is not None and l_sec_name[0] == 'ToDo':
print(l_sec_name)
return
def slack_notify():
title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\n'
slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',
'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{
'value': msg}]}]}
<|reserved_special_token_1|>
# coding: utf-8
import datetime
import json
import requests
import os
import re
import sys
from todoist.api import TodoistAPI
#SLACK_CHANNEL = os.environ['SLACK_CHANNEL']
#SLACK_POSTURL = os.environ['SLACK_POSTURL']
TDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)
TDIAPI.sync()
name = os.environ['TODOIST_PJT']
def lambda_handler(event, context):
if event["function"] == 'tasklist':
msg = tasklist(name)
if event["function"] == 'activity':
msg = activity(name)
return
def activity(name):
actlogs = TDIAPI.activity.get()
pjts = TDIAPI.state['projects']
for projects_id in pjts:
if projects_id['name'] == name:
tasks_project_id = projects_id['id']
break
else:
print('[INFO] Not match project name')
event_list = []
for events in actlogs['events']:
today = datetime.datetime.now().strftime("%Y-%m-%d")
'''
todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する
そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック
'''
todoist_times = datetime.datetime.strptime(events['event_date'], '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours = 9)
todoist_date = str(todoist_times.strftime("%Y-%m-%d"))
if events['event_type'] == 'completed' and todoist_date == today and events['parent_project_id'] == tasks_project_id:
event_list.append(events['extra_data']['content'])
print(event_list)
return event_list
def tasklist(name):
pjts = TDIAPI.state['projects']
items = TDIAPI.state['items']
labels = TDIAPI.state['labels']
sects = TDIAPI.state['sections']
inbox_list = []
doing_list = []
review_list = []
any_list = []
for projects_id in list:
if projects_id['name'] == name:
tasks_project_id = projects_id['id']
break
try:
tasks_project_id
except NameError:
print("プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。")
return
print(labels)
sys.exit()
for item in items:
l_content = item['content']
l_pjt_name = [ pjt['name'] for pjt in pjts if item['project_id'] == pjt['id'] ]
l_sec_name = [ sect['name'] for sect in sects if item['section_id'] == sect['id']]
#print('+++')
#print(l_pjt_id)
#print(l_content)
#print(l_sec_name[0])
if l_sec_name is not None and l_sec_name[0] == 'ToDo':
print(l_sec_name)
#if item['checked'] == 0 and item['project_id'] == tasks_project_id:
#taskcontent = '- ' + item['content']
#slackmessage.append(taskcontent)
#print(taskcontent)
#print(slackmessage)
#message = '\n'.join(slackmessage)
return
def slack_notify():
title = "*[定期通知] プロジェクト " + name + " のタスクリスト*\n"
slack_message = {
'channel': SLACK_CHANNEL,
'icon_emoji': ":todoist:",
'text': title,
"attachments": [
{
"color": "#36a64f",
"fields": [
{
"value": msg,
},
],
}
]
}
#requests.post(SLACK_POSTURL, data=json.dumps(slack_message))
|
flexible
|
{
"blob_id": "3c3d45f0844496b8d623286b36a4935a154f410a",
"index": 4133,
"step-1": "<mask token>\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\n<mask token>\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n",
"step-2": "<mask token>\nTDIAPI.sync()\n<mask token>\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n \"\"\"\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n \"\"\"\n todoist_times = datetime.datetime.strptime(events['event_date'],\n '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)\n todoist_date = str(todoist_times.strftime('%Y-%m-%d'))\n if events['event_type'\n ] == 'completed' and todoist_date == today and events[\n 'parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n print(event_list)\n return event_list\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n",
"step-3": "<mask token>\nTDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)\nTDIAPI.sync()\nname = os.environ['TODOIST_PJT']\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n \"\"\"\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n \"\"\"\n todoist_times = datetime.datetime.strptime(events['event_date'],\n '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)\n todoist_date = str(todoist_times.strftime('%Y-%m-%d'))\n if events['event_type'\n ] == 'completed' and todoist_date == today and events[\n 'parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n print(event_list)\n return event_list\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n",
"step-4": "import datetime\nimport json\nimport requests\nimport os\nimport re\nimport sys\nfrom todoist.api import TodoistAPI\nTDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)\nTDIAPI.sync()\nname = os.environ['TODOIST_PJT']\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n \"\"\"\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n \"\"\"\n todoist_times = datetime.datetime.strptime(events['event_date'],\n '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)\n todoist_date = str(todoist_times.strftime('%Y-%m-%d'))\n if events['event_type'\n ] == 'completed' and todoist_date == today and events[\n 'parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n print(event_list)\n return event_list\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n",
"step-5": "# coding: utf-8\n\nimport datetime\nimport json\nimport requests\nimport os\nimport re\nimport sys\nfrom todoist.api import TodoistAPI\n\n#SLACK_CHANNEL = os.environ['SLACK_CHANNEL']\n#SLACK_POSTURL = os.environ['SLACK_POSTURL']\nTDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)\nTDIAPI.sync()\nname = os.environ['TODOIST_PJT']\n\ndef lambda_handler(event, context):\n if event[\"function\"] == 'tasklist':\n msg = tasklist(name)\n if event[\"function\"] == 'activity':\n msg = activity(name)\n return\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n '''\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n '''\n todoist_times = datetime.datetime.strptime(events['event_date'], '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours = 9)\n todoist_date = str(todoist_times.strftime(\"%Y-%m-%d\"))\n\n if events['event_type'] == 'completed' and todoist_date == today and events['parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n\n print(event_list)\n return event_list\n\ndef tasklist(name):\n\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n\n try:\n tasks_project_id\n except NameError:\n print(\"プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。\")\n return\n\n print(labels)\n sys.exit()\n\n for item in items:\n l_content = item['content']\n l_pjt_name = [ pjt['name'] for pjt in pjts if item['project_id'] == pjt['id'] ]\n l_sec_name = [ sect['name'] for sect in sects if item['section_id'] == sect['id']]\n #print('+++')\n #print(l_pjt_id)\n #print(l_content)\n #print(l_sec_name[0])\n\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n #if item['checked'] == 0 and item['project_id'] == tasks_project_id:\n\n #taskcontent = '- ' + item['content']\n #slackmessage.append(taskcontent)\n #print(taskcontent)\n #print(slackmessage)\n #message = '\\n'.join(slackmessage)\n return\n\ndef slack_notify():\n title = \"*[定期通知] プロジェクト \" + name + \" のタスクリスト*\\n\"\n slack_message = {\n 'channel': SLACK_CHANNEL,\n 'icon_emoji': \":todoist:\",\n 'text': title,\n \"attachments\": [\n {\n \"color\": \"#36a64f\",\n \"fields\": [\n {\n \"value\": msg,\n },\n ],\n }\n ]\n }\n #requests.post(SLACK_POSTURL, data=json.dumps(slack_message))\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
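Worth flagging in the tasklist() code of the record above: the loop `for projects_id in list:` iterates over the built-in list type instead of the synced pjts data and fails with a TypeError before any task is collected. A minimal sketch of the intended project lookup, assuming pjts is the list of project dicts held in TDIAPI.state['projects']:

def find_project_id(pjts, name):
    # scan the synced projects for a matching name; None if absent
    for project in pjts:
        if project['name'] == name:
            return project['id']
    return None

# usage sketch with stand-in data
pjts = [{'id': 1, 'name': 'Inbox'}, {'id': 2, 'name': 'Work'}]
print(find_project_id(pjts, 'Work'))  # -> 2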
import pandas as pd
import numpy as np
import math
from sklearn.datasets import load_digits, load_iris, load_boston, load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import pairwise_distances
class KMeans():
def __init__(self, k = 5, max_iters = 100, random_seed = 42):
self.k = k
self.max_iters = max_iters
# Set random seed
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
def _euclidien_distance(self, x):
return np.sum((x - self.centroids)**2, axis = 1)
def _assign_clusters(self, X):
cluster_distances = pairwise_distances(X, self.centroids, metric = 'euclidean')
cluster_labels = np.argmin(cluster_distances, axis = 1)
return cluster_labels
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
# Get all data points of a cluster
X_cluster = X[cluster_labels == cluster]
# Update the cluster's centroid
cluster_mean = np.mean(X_cluster, axis = 0)
self.centroids[cluster] = cluster_mean
def fit(self, X):
# Initialise random centroids
self._initialise_centroids(X)
iterations = 0
while iterations <= self.max_iters:
iterations += 1
# Assign clusters to data
cluster_labels = self._assign_clusters(X)
# Update centroids
self._update_centroids(X, cluster_labels)
def predict(self, X):
return self._assign_clusters(X)
# Load data
data = load_breast_cancer()
X, y = data.data, data.target
X_train, X_test = train_test_split(X, test_size = 0.1)
# Fit model
model = KMeans(k = 5)
model.fit(X_train)
# Predict
y_pred = model.predict(X_test)
print(y_pred)
|
normal
|
{
"blob_id": "d267c8cbe51fb1bacc9404a1385f1daa4a0db7f2",
"index": 884,
"step-1": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n <mask token>\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n <mask token>\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n <mask token>\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n self._initialise_centroids(X)\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n cluster_labels = self._assign_clusters(X)\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n\n def _assign_clusters(self, X):\n cluster_distances = pairwise_distances(X, self.centroids, metric=\n 'euclidean')\n cluster_labels = np.argmin(cluster_distances, axis=1)\n return cluster_labels\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n self._initialise_centroids(X)\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n cluster_labels = self._assign_clusters(X)\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n\n def _assign_clusters(self, X):\n cluster_distances = pairwise_distances(X, self.centroids, metric=\n 'euclidean')\n cluster_labels = np.argmin(cluster_distances, axis=1)\n return cluster_labels\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n self._initialise_centroids(X)\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n cluster_labels = self._assign_clusters(X)\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\ndata = load_breast_cancer()\nX, y = data.data, data.target\nX_train, X_test = train_test_split(X, test_size=0.1)\nmodel = KMeans(k=5)\nmodel.fit(X_train)\ny_pred = model.predict(X_test)\nprint(y_pred)\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport math\nfrom sklearn.datasets import load_digits, load_iris, load_boston, load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import pairwise_distances\n\n\nclass KMeans():\n\n def __init__(self, k = 5, max_iters = 100, random_seed = 42):\n self.k = k\n self.max_iters = max_iters\n\n # Set random seed\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids)**2, axis = 1)\n\n def _assign_clusters(self, X):\n cluster_distances = pairwise_distances(X, self.centroids, metric = 'euclidean')\n cluster_labels = np.argmin(cluster_distances, axis = 1)\n return cluster_labels\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n\n # Get all data points of a cluster\n X_cluster = X[cluster_labels == cluster]\n\n # Update the cluster's centroid\n cluster_mean = np.mean(X_cluster, axis = 0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n\n # Initialise random centroids\n self._initialise_centroids(X)\n\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n\n # Assign clusters to data\n cluster_labels = self._assign_clusters(X)\n\n # Update centroids\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n# Load data\ndata = load_breast_cancer()\nX, y = data.data, data.target\nX_train, X_test = train_test_split(X, test_size = 0.1)\n\n# Fit model\nmodel = KMeans(k = 5)\nmodel.fit(X_train)\n\n# Predict\ny_pred = model.predict(X_test)\nprint(y_pred)\n",
"step-ids": [
6,
7,
8,
10,
12
]
}
|
[
6,
7,
8,
10,
12
] |
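One edge case in the KMeans record above: _update_centroids averages whatever points fall in a cluster, and an empty cluster makes that mean NaN. A minimal standalone guard assuming the same array layout (this is a sketch, not the record's own code):

import numpy as np

def update_centroids(X, centroids, cluster_labels):
    # recompute each centroid; keep the previous one when a cluster is empty,
    # since np.mean over an empty slice would produce NaN
    new_centroids = centroids.copy()
    for cluster in range(len(centroids)):
        members = X[cluster_labels == cluster]
        if len(members) > 0:
            new_centroids[cluster] = members.mean(axis=0)
    return new_centroids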
from Smooth import smoothing
def n_grams(unigramsFile, bigramsFile, parameterization, sentences):
words = []
param = []
unigrams = []
bigrams = []
with open(parameterization) as p: #Parametrization file
data = p.read().split()
word = data[0]
param.append(data[1])
param.append(data[2])
param.append(data[4])
#print("PARAM: ", param)# Debug print
with open(unigramsFile) as u: #Unigrams and respective values file
for line in u.readlines():
values = line.split()
if (values[0] in param):
unigrams.append(values)
#print("UNIGRAMS: ", unigrams)# Debug print
with open(bigramsFile) as b: #Bigrams and respective values file
for line in b.readlines():
values = line.split()
if (values[0] in param or values[1] in param):
bigrams.append(values)
#print("BIGRAMS: ", bigrams)# Debug print
with open(sentences) as f: #Text with sentences file
for line in f.readlines():
sentence = line.split()
index = sentence.index(word)
aux = []
if (index > 0):
aux.append(sentence[index-1])
aux.append(sentence[index])
if (index + 1 < len(sentences)):
aux.append(sentence[index+1])
words.append(aux)
#print("WORDS: ", words)# Debug print
for w in words:
bigram1 = 0
bigram2 = 0
option1 = w
print(w)
index = option1.index(word)
option1[index] = param[1]
option2 = w
index = option2.index(word)
option2[index] = param[2]
for unigram in unigrams:
if((option1[0] or option1[1] or option1[2]) in unigram):
unigram1 += float(unigram[1])
elif((option2[0] or option2[1] or option2[2]) in unigram):
unigram2 += float(unigram[1])
for bigram in bigrams:
if ((option1[0:1] or option1[1:2]) in bigram):
bigram1 += float(bigram[2])
elif (option2[0:1] in bigram or option2[1:2] in bigram):
bigram2 += float(bigram[2])
if (((unigram1 > unigram2) and (unigram1 > bigram2)) or ((bigram1 > unigram2) and (bigram1 > bigram2))):
lema = option1
elif (((unigram2 > unigram1) and (unigram2 > bigram1)) or ((bigram2 > unigram1) and (bigram2 > bigram1))):
lema = option2
print("O lema mais provavel para" + str(w) + "e: " + str(lema)) #lema
#print("SENTENCE: ", sentence)# Debug print
|
normal
|
{
"blob_id": "87c200796e1fac508a43e899c0ed53878b8c1d88",
"index": 5244,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef n_grams(unigramsFile, bigramsFile, parameterization, sentences):\n words = []\n param = []\n unigrams = []\n bigrams = []\n with open(parameterization) as p:\n data = p.read().split()\n word = data[0]\n param.append(data[1])\n param.append(data[2])\n param.append(data[4])\n with open(unigramsFile) as u:\n for line in u.readlines():\n values = line.split()\n if values[0] in param:\n unigrams.append(values)\n with open(bigramsFile) as b:\n for line in b.readlines():\n values = line.split()\n if values[0] in param or values[1] in param:\n bigrams.append(values)\n with open(sentences) as f:\n for line in f.readlines():\n sentence = line.split()\n index = sentence.index(word)\n aux = []\n if index > 0:\n aux.append(sentence[index - 1])\n aux.append(sentence[index])\n if index + 1 < len(sentences):\n aux.append(sentence[index + 1])\n words.append(aux)\n for w in words:\n bigram1 = 0\n bigram2 = 0\n option1 = w\n print(w)\n index = option1.index(word)\n option1[index] = param[1]\n option2 = w\n index = option2.index(word)\n option2[index] = param[2]\n for unigram in unigrams:\n if (option1[0] or option1[1] or option1[2]) in unigram:\n unigram1 += float(unigram[1])\n elif (option2[0] or option2[1] or option2[2]) in unigram:\n unigram2 += float(unigram[1])\n for bigram in bigrams:\n if (option1[0:1] or option1[1:2]) in bigram:\n bigram1 += float(bigram[2])\n elif option2[0:1] in bigram or option2[1:2] in bigram:\n bigram2 += float(bigram[2])\n if (unigram1 > unigram2 and unigram1 > bigram2 or bigram1 >\n unigram2 and bigram1 > bigram2):\n lema = option1\n elif unigram2 > unigram1 and unigram2 > bigram1 or bigram2 > unigram1 and bigram2 > bigram1:\n lema = option2\n print('O lema mais provavel para' + str(w) + 'e: ' + str(lema))\n",
"step-3": "from Smooth import smoothing\n\n\ndef n_grams(unigramsFile, bigramsFile, parameterization, sentences):\n words = []\n param = []\n unigrams = []\n bigrams = []\n with open(parameterization) as p:\n data = p.read().split()\n word = data[0]\n param.append(data[1])\n param.append(data[2])\n param.append(data[4])\n with open(unigramsFile) as u:\n for line in u.readlines():\n values = line.split()\n if values[0] in param:\n unigrams.append(values)\n with open(bigramsFile) as b:\n for line in b.readlines():\n values = line.split()\n if values[0] in param or values[1] in param:\n bigrams.append(values)\n with open(sentences) as f:\n for line in f.readlines():\n sentence = line.split()\n index = sentence.index(word)\n aux = []\n if index > 0:\n aux.append(sentence[index - 1])\n aux.append(sentence[index])\n if index + 1 < len(sentences):\n aux.append(sentence[index + 1])\n words.append(aux)\n for w in words:\n bigram1 = 0\n bigram2 = 0\n option1 = w\n print(w)\n index = option1.index(word)\n option1[index] = param[1]\n option2 = w\n index = option2.index(word)\n option2[index] = param[2]\n for unigram in unigrams:\n if (option1[0] or option1[1] or option1[2]) in unigram:\n unigram1 += float(unigram[1])\n elif (option2[0] or option2[1] or option2[2]) in unigram:\n unigram2 += float(unigram[1])\n for bigram in bigrams:\n if (option1[0:1] or option1[1:2]) in bigram:\n bigram1 += float(bigram[2])\n elif option2[0:1] in bigram or option2[1:2] in bigram:\n bigram2 += float(bigram[2])\n if (unigram1 > unigram2 and unigram1 > bigram2 or bigram1 >\n unigram2 and bigram1 > bigram2):\n lema = option1\n elif unigram2 > unigram1 and unigram2 > bigram1 or bigram2 > unigram1 and bigram2 > bigram1:\n lema = option2\n print('O lema mais provavel para' + str(w) + 'e: ' + str(lema))\n",
"step-4": "from Smooth import smoothing\n\ndef n_grams(unigramsFile, bigramsFile, parameterization, sentences):\n words = []\n param = []\n unigrams = []\n bigrams = []\n\n with open(parameterization) as p: #Parametrization file\n data = p.read().split()\n word = data[0]\n param.append(data[1])\n param.append(data[2])\n param.append(data[4])\n #print(\"PARAM: \", param)# Debug print\n\n with open(unigramsFile) as u: #Unigrams and respective values file\n for line in u.readlines():\n values = line.split()\n if (values[0] in param):\n unigrams.append(values)\n #print(\"UNIGRAMS: \", unigrams)# Debug print\n\n with open(bigramsFile) as b: #Bigrams and respective values file\n for line in b.readlines():\n values = line.split()\n if (values[0] in param or values[1] in param):\n bigrams.append(values)\n #print(\"BIGRAMS: \", bigrams)# Debug print\n\n with open(sentences) as f: #Text with sentences file\n for line in f.readlines():\n sentence = line.split()\n index = sentence.index(word)\n aux = []\n if (index > 0):\n aux.append(sentence[index-1])\n aux.append(sentence[index])\n if (index + 1 < len(sentences)):\n aux.append(sentence[index+1])\n words.append(aux)\n #print(\"WORDS: \", words)# Debug print\n\n for w in words:\n bigram1 = 0\n bigram2 = 0\n option1 = w\n print(w)\n index = option1.index(word)\n option1[index] = param[1]\n option2 = w\n index = option2.index(word)\n option2[index] = param[2]\n for unigram in unigrams:\n if((option1[0] or option1[1] or option1[2]) in unigram):\n unigram1 += float(unigram[1])\n elif((option2[0] or option2[1] or option2[2]) in unigram):\n unigram2 += float(unigram[1])\n for bigram in bigrams:\n if ((option1[0:1] or option1[1:2]) in bigram):\n bigram1 += float(bigram[2])\n elif (option2[0:1] in bigram or option2[1:2] in bigram):\n bigram2 += float(bigram[2])\n if (((unigram1 > unigram2) and (unigram1 > bigram2)) or ((bigram1 > unigram2) and (bigram1 > bigram2))):\n lema = option1\n elif (((unigram2 > unigram1) and (unigram2 > bigram1)) or ((bigram2 > unigram1) and (bigram2 > bigram1))):\n lema = option2\n print(\"O lema mais provavel para\" + str(w) + \"e: \" + str(lema)) #lema\n #print(\"SENTENCE: \", sentence)# Debug print\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
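In the n_grams record above, unigram1 and unigram2 are incremented without ever being initialised (only the bigram counters are set to 0), and lema stays unbound whenever neither branch wins. A minimal sketch of that scoring step with all counters initialised and a tie fallback; the (word, count) and (word, word, count) shapes are assumptions, not the record's exact file format:

def choose_option(option1, option2, unigrams, bigrams):
    # all four scores start at zero, so the += updates below cannot raise NameError
    unigram1 = unigram2 = 0.0
    bigram1 = bigram2 = 0.0
    for word, value in unigrams:          # assumed (word, count) pairs
        if word in option1:
            unigram1 += float(value)
        if word in option2:
            unigram2 += float(value)
    for w1, w2, value in bigrams:         # assumed (word, word, count) triples
        if (w1, w2) in zip(option1, option1[1:]):
            bigram1 += float(value)
        if (w1, w2) in zip(option2, option2[1:]):
            bigram2 += float(value)
    # fall back to option1 on a tie so the return value is always bound
    return option1 if unigram1 + bigram1 >= unigram2 + bigram2 else option2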
# Goal: Let's Review
# Enter your code here. Read input from STDIN. Print output to STDOUT
T = int(input())
# Iterate through each inputted string
for i in range(T):
even = ''
odd = ''
s = str(input())
for i in range(len(s)):
if (i % 2 == 0):
even = even + s[i]
else:
odd = odd + s[i]
print(even, odd)
|
normal
|
{
"blob_id": "f45313e4e8f3ecba0c7dc0288d9d5ec4e26f0ba6",
"index": 5284,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(T):\n even = ''\n odd = ''\n s = str(input())\n for i in range(len(s)):\n if i % 2 == 0:\n even = even + s[i]\n else:\n odd = odd + s[i]\n print(even, odd)\n",
"step-3": "T = int(input())\nfor i in range(T):\n even = ''\n odd = ''\n s = str(input())\n for i in range(len(s)):\n if i % 2 == 0:\n even = even + s[i]\n else:\n odd = odd + s[i]\n print(even, odd)\n",
"step-4": "# Goal: Let's Review\n\n# Enter your code here. Read input from STDIN. Print output to STDOUT\n\nT = int(input())\n\n# Iterate through each inputted string\n\nfor i in range(T):\n even = ''\n odd = ''\n s = str(input())\n\n for i in range(len(s)):\n if (i % 2 == 0):\n even = even + s[i]\n else:\n odd = odd + s[i]\n\n print(even, odd)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.dispatch import Signal
from djangochannelsrestframework.observer.base_observer import BaseObserver
class Observer(BaseObserver):
def __init__(self, func, signal: Signal = None, kwargs=None):
super().__init__(func)
if kwargs is None:
kwargs = {}
self.signal = signal
self.signal_kwargs = kwargs
self._serializer = None
self.signal.connect(self.handle, **self.signal_kwargs)
def handle(self, signal, *args, **kwargs):
message = self.serialize(signal, *args, **kwargs)
channel_layer = get_channel_layer()
for group_name in self.group_names_for_signal(*args, message=message, **kwargs):
async_to_sync(channel_layer.group_send)(group_name, message)
def group_names(self, *args, **kwargs):
yield "{}-{}-signal-{}".format(
self._uuid,
self.func.__name__.replace("_", "."),
".".join(
arg.lower().replace("_", ".") for arg in self.signal.providing_args
),
)
|
normal
|
{
"blob_id": "66e93295d2797ca9e08100a0a1f28619acb72aa4",
"index": 3397,
"step-1": "<mask token>\n\n\nclass Observer(BaseObserver):\n <mask token>\n\n def handle(self, signal, *args, **kwargs):\n message = self.serialize(signal, *args, **kwargs)\n channel_layer = get_channel_layer()\n for group_name in self.group_names_for_signal(*args, message=\n message, **kwargs):\n async_to_sync(channel_layer.group_send)(group_name, message)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Observer(BaseObserver):\n\n def __init__(self, func, signal: Signal=None, kwargs=None):\n super().__init__(func)\n if kwargs is None:\n kwargs = {}\n self.signal = signal\n self.signal_kwargs = kwargs\n self._serializer = None\n self.signal.connect(self.handle, **self.signal_kwargs)\n\n def handle(self, signal, *args, **kwargs):\n message = self.serialize(signal, *args, **kwargs)\n channel_layer = get_channel_layer()\n for group_name in self.group_names_for_signal(*args, message=\n message, **kwargs):\n async_to_sync(channel_layer.group_send)(group_name, message)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Observer(BaseObserver):\n\n def __init__(self, func, signal: Signal=None, kwargs=None):\n super().__init__(func)\n if kwargs is None:\n kwargs = {}\n self.signal = signal\n self.signal_kwargs = kwargs\n self._serializer = None\n self.signal.connect(self.handle, **self.signal_kwargs)\n\n def handle(self, signal, *args, **kwargs):\n message = self.serialize(signal, *args, **kwargs)\n channel_layer = get_channel_layer()\n for group_name in self.group_names_for_signal(*args, message=\n message, **kwargs):\n async_to_sync(channel_layer.group_send)(group_name, message)\n\n def group_names(self, *args, **kwargs):\n yield '{}-{}-signal-{}'.format(self._uuid, self.func.__name__.\n replace('_', '.'), '.'.join(arg.lower().replace('_', '.') for\n arg in self.signal.providing_args))\n",
"step-4": "from asgiref.sync import async_to_sync\nfrom channels.layers import get_channel_layer\nfrom django.dispatch import Signal\nfrom djangochannelsrestframework.observer.base_observer import BaseObserver\n\n\nclass Observer(BaseObserver):\n\n def __init__(self, func, signal: Signal=None, kwargs=None):\n super().__init__(func)\n if kwargs is None:\n kwargs = {}\n self.signal = signal\n self.signal_kwargs = kwargs\n self._serializer = None\n self.signal.connect(self.handle, **self.signal_kwargs)\n\n def handle(self, signal, *args, **kwargs):\n message = self.serialize(signal, *args, **kwargs)\n channel_layer = get_channel_layer()\n for group_name in self.group_names_for_signal(*args, message=\n message, **kwargs):\n async_to_sync(channel_layer.group_send)(group_name, message)\n\n def group_names(self, *args, **kwargs):\n yield '{}-{}-signal-{}'.format(self._uuid, self.func.__name__.\n replace('_', '.'), '.'.join(arg.lower().replace('_', '.') for\n arg in self.signal.providing_args))\n",
"step-5": "from asgiref.sync import async_to_sync\nfrom channels.layers import get_channel_layer\nfrom django.dispatch import Signal\n\nfrom djangochannelsrestframework.observer.base_observer import BaseObserver\n\n\nclass Observer(BaseObserver):\n def __init__(self, func, signal: Signal = None, kwargs=None):\n super().__init__(func)\n if kwargs is None:\n kwargs = {}\n self.signal = signal\n self.signal_kwargs = kwargs\n self._serializer = None\n self.signal.connect(self.handle, **self.signal_kwargs)\n\n def handle(self, signal, *args, **kwargs):\n message = self.serialize(signal, *args, **kwargs)\n channel_layer = get_channel_layer()\n for group_name in self.group_names_for_signal(*args, message=message, **kwargs):\n async_to_sync(channel_layer.group_send)(group_name, message)\n\n def group_names(self, *args, **kwargs):\n yield \"{}-{}-signal-{}\".format(\n self._uuid,\n self.func.__name__.replace(\"_\", \".\"),\n \".\".join(\n arg.lower().replace(\"_\", \".\") for arg in self.signal.providing_args\n ),\n )\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
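A portability note on the Observer record above: Signal.providing_args was deprecated in Django 3.0 and removed in Django 4.0, so its group_names generator would fail on current Django. A minimal sketch of the same naming scheme with the argument names supplied explicitly (the args default below is an assumption, not part of the library API):

def group_names(uuid, func_name, args=("instance",)):
    # same dotted naming scheme, but with the argument names passed in
    # explicitly instead of read from Signal.providing_args
    suffix = ".".join(arg.lower().replace("_", ".") for arg in args)
    return "{}-{}-signal-{}".format(uuid, func_name.replace("_", "."), suffix)

print(group_names("abc123", "model_change"))  # -> abc123-model.change-signal-instance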
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def play_file(name, loop=0, time=0.0):
try:
file = 'data/audio/' + name
pygame.mixer.music.load(file)
pygame.mixer.music.play(loop, time)
except ZeroDivisionError:
print('AudioLoading: failed to load ' + name)
try:
file = 'data/audio/error.aud'
pygame.mixer.music.load(file)
pygame.mixer.music.play(loop, time)
except ZeroDivisionError:
print('Can not load file: ' + name)
raise SystemExit()
<|reserved_special_token_1|>
import pygame
def play_file(name, loop=0, time=0.0):
try:
file = 'data/audio/' + name
pygame.mixer.music.load(file)
pygame.mixer.music.play(loop, time)
except ZeroDivisionError:
print('AudioLoading: failed to load ' + name)
try:
file = 'data/audio/error.aud'
pygame.mixer.music.load(file)
pygame.mixer.music.play(loop, time)
except ZeroDivisionError:
print('Can not load file: ' + name)
raise SystemExit()
<|reserved_special_token_1|>
import pygame
def play_file(name,loop=0,time=0.0):
try: #if image exists
file='data/audio/'+name
pygame.mixer.music.load(file)
pygame.mixer.music.play(loop, time)
except ZeroDivisionError: #if image doesn't exist
print('AudioLoading: failed to load ' + name)
try:
file = 'data/audio/error.aud'
pygame.mixer.music.load(file)
pygame.mixer.music.play(loop, time)
except ZeroDivisionError:
print( 'Can not load file: '+name)
raise SystemExit()
|
flexible
|
{
"blob_id": "98940c898d58917e652fe1514ea758768b048dbc",
"index": 9601,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef play_file(name, loop=0, time=0.0):\n try:\n file = 'data/audio/' + name\n pygame.mixer.music.load(file)\n pygame.mixer.music.play(loop, time)\n except ZeroDivisionError:\n print('AudioLoading: failed to load ' + name)\n try:\n file = 'data/audio/error.aud'\n pygame.mixer.music.load(file)\n pygame.mixer.music.play(loop, time)\n except ZeroDivisionError:\n print('Can not load file: ' + name)\n raise SystemExit()\n",
"step-3": "import pygame\n\n\ndef play_file(name, loop=0, time=0.0):\n try:\n file = 'data/audio/' + name\n pygame.mixer.music.load(file)\n pygame.mixer.music.play(loop, time)\n except ZeroDivisionError:\n print('AudioLoading: failed to load ' + name)\n try:\n file = 'data/audio/error.aud'\n pygame.mixer.music.load(file)\n pygame.mixer.music.play(loop, time)\n except ZeroDivisionError:\n print('Can not load file: ' + name)\n raise SystemExit()\n",
"step-4": "import pygame\n\n\ndef play_file(name,loop=0,time=0.0):\n try: #if image exists\n file='data/audio/'+name\n pygame.mixer.music.load(file)\n pygame.mixer.music.play(loop, time)\n except ZeroDivisionError: #if image doesn't exist\n print('AudioLoading: failed to load ' + name)\n try:\n file = 'data/audio/error.aud'\n pygame.mixer.music.load(file)\n pygame.mixer.music.play(loop, time)\n except ZeroDivisionError:\n print( 'Can not load file: '+name)\n raise SystemExit()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
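The fallback in the play_file record above catches ZeroDivisionError, which the mixer never raises for a bad file; pygame signals load failures with pygame.error. A minimal sketch using that exception instead, keeping the record's file paths as assumptions (mixer initialisation is left out):

import pygame

def play_file(name, loop=0, start=0.0):
    try:
        pygame.mixer.music.load('data/audio/' + name)
        pygame.mixer.music.play(loop, start)
    except pygame.error:
        # fall back to the error sound; give up if that also fails
        print('AudioLoading: failed to load ' + name)
        try:
            pygame.mixer.music.load('data/audio/error.aud')
            pygame.mixer.music.play(loop, start)
        except pygame.error:
            raise SystemExit('Can not load file: ' + name)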
import sys
import vector
import matrix
def convert_arg_to_list(arg):
try:
return [float(elem) for elem in arg]
except:
sys.exit("Invalid content inside {}".format(arg))
if __name__ == "__main__":
try:
vector1 = sys.argv[1].split(' ')
vector2 = sys.argv[2].split(' ')
except:
sys.exit("Invalid vectors")
try:
matrix1 = sys.argv[1].split(' ')
matrix2 = sys.argv[2].split(' ')
except:
sys.exit("Invalid Matricies")
print("\nVector tests : ", end='\n\n')
v = vector.Vector(convert_arg_to_list(vector1))
v2 = vector.Vector(convert_arg_to_list(vector2))
#--------------------------------------------#
# Vector part #
v.add(v2)
print("Add :", v)
v.sub(v2)
print("Sub :",v)
v.scale(v2)
print("Scale :",v)
# #
#--------------------------------------------#
print("\nMatrix tests : ", end='\n\n')
#--------------------------------------------#
# Matrix part #
m = matrix.Matrix(convert_arg_to_list(matrix1))
m2 = matrix.Matrix(convert_arg_to_list(matrix2))
m.add(m2)
print("Add :\n", m)
m.sub(m2)
print("\nSub :\n", m)
m.scale(m2)
print("\nScale :\n", m)
#--------------------------------------------#
|
normal
|
{
"blob_id": "347bfb2d8809b55046f698620a690099cc83fb56",
"index": 6433,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef convert_arg_to_list(arg):\n try:\n return [float(elem) for elem in arg]\n except:\n sys.exit('Invalid content inside {}'.format(arg))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef convert_arg_to_list(arg):\n try:\n return [float(elem) for elem in arg]\n except:\n sys.exit('Invalid content inside {}'.format(arg))\n\n\nif __name__ == '__main__':\n try:\n vector1 = sys.argv[1].split(' ')\n vector2 = sys.argv[2].split(' ')\n except:\n sys.exit('Invalid vectors')\n try:\n matrix1 = sys.argv[1].split(' ')\n matrix2 = sys.argv[2].split(' ')\n except:\n sys.exit('Invalid Matricies')\n print('\\nVector tests : ', end='\\n\\n')\n v = vector.Vector(convert_arg_to_list(vector1))\n v2 = vector.Vector(convert_arg_to_list(vector2))\n v.add(v2)\n print('Add :', v)\n v.sub(v2)\n print('Sub :', v)\n v.scale(v2)\n print('Scale :', v)\n print('\\nMatrix tests : ', end='\\n\\n')\n m = matrix.Matrix(convert_arg_to_list(matrix1))\n m2 = matrix.Matrix(convert_arg_to_list(matrix2))\n m.add(m2)\n print('Add :\\n', m)\n m.sub(m2)\n print('\\nSub :\\n', m)\n m.scale(m2)\n print('\\nScale :\\n', m)\n",
"step-4": "import sys\nimport vector\nimport matrix\n\n\ndef convert_arg_to_list(arg):\n try:\n return [float(elem) for elem in arg]\n except:\n sys.exit('Invalid content inside {}'.format(arg))\n\n\nif __name__ == '__main__':\n try:\n vector1 = sys.argv[1].split(' ')\n vector2 = sys.argv[2].split(' ')\n except:\n sys.exit('Invalid vectors')\n try:\n matrix1 = sys.argv[1].split(' ')\n matrix2 = sys.argv[2].split(' ')\n except:\n sys.exit('Invalid Matricies')\n print('\\nVector tests : ', end='\\n\\n')\n v = vector.Vector(convert_arg_to_list(vector1))\n v2 = vector.Vector(convert_arg_to_list(vector2))\n v.add(v2)\n print('Add :', v)\n v.sub(v2)\n print('Sub :', v)\n v.scale(v2)\n print('Scale :', v)\n print('\\nMatrix tests : ', end='\\n\\n')\n m = matrix.Matrix(convert_arg_to_list(matrix1))\n m2 = matrix.Matrix(convert_arg_to_list(matrix2))\n m.add(m2)\n print('Add :\\n', m)\n m.sub(m2)\n print('\\nSub :\\n', m)\n m.scale(m2)\n print('\\nScale :\\n', m)\n",
"step-5": "import sys\nimport vector\nimport matrix\n\ndef convert_arg_to_list(arg):\n try:\n return [float(elem) for elem in arg]\n except:\n sys.exit(\"Invalid content inside {}\".format(arg))\n\nif __name__ == \"__main__\":\n try:\n vector1 = sys.argv[1].split(' ')\n vector2 = sys.argv[2].split(' ')\n except:\n sys.exit(\"Invalid vectors\")\n try:\n matrix1 = sys.argv[1].split(' ')\n matrix2 = sys.argv[2].split(' ')\n except:\n sys.exit(\"Invalid Matricies\")\n\n print(\"\\nVector tests : \", end='\\n\\n')\n v = vector.Vector(convert_arg_to_list(vector1))\n v2 = vector.Vector(convert_arg_to_list(vector2))\n\n #--------------------------------------------#\n # Vector part #\n v.add(v2)\n print(\"Add :\", v)\n v.sub(v2)\n print(\"Sub :\",v)\n v.scale(v2)\n print(\"Scale :\",v)\n # #\n #--------------------------------------------#\n\n print(\"\\nMatrix tests : \", end='\\n\\n')\n #--------------------------------------------#\n # Matrix part #\n m = matrix.Matrix(convert_arg_to_list(matrix1))\n m2 = matrix.Matrix(convert_arg_to_list(matrix2))\n m.add(m2)\n print(\"Add :\\n\", m)\n m.sub(m2)\n print(\"\\nSub :\\n\", m)\n m.scale(m2)\n print(\"\\nScale :\\n\", m)\n\n #--------------------------------------------#\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""API - Files endpoints."""
import os
import click
import cloudsmith_api
import requests
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from .. import ratelimits
from ..rest import create_requests_session
from ..utils import calculate_file_md5
from .exceptions import ApiException, catch_raise_api_exception
from .init import get_api_client
def get_files_api():
"""Get the files API client."""
return get_api_client(cloudsmith_api.FilesApi)
def validate_request_file_upload(owner, repo, filepath, md5_checksum=None):
"""Validate parameters for requesting a file upload."""
client = get_files_api()
md5_checksum = md5_checksum or calculate_file_md5(filepath)
with catch_raise_api_exception():
_, _, headers = client.files_validate_with_http_info(
owner=owner,
repo=repo,
data={"filename": os.path.basename(filepath), "md5_checksum": md5_checksum},
)
ratelimits.maybe_rate_limit(client, headers)
return md5_checksum
def request_file_upload(owner, repo, filepath, md5_checksum=None):
"""Request a new package file upload (for creating packages)."""
client = get_files_api()
md5_checksum = md5_checksum or calculate_file_md5(filepath)
with catch_raise_api_exception():
data, _, headers = client.files_create_with_http_info(
owner=owner,
repo=repo,
data={"filename": os.path.basename(filepath), "md5_checksum": md5_checksum},
)
# pylint: disable=no-member
# Pylint detects the returned value as a tuple
ratelimits.maybe_rate_limit(client, headers)
return data.identifier, data.upload_url, data.upload_fields
def upload_file(upload_url, upload_fields, filepath, callback=None):
"""Upload a pre-signed file to Cloudsmith."""
upload_fields = list(upload_fields.items())
upload_fields.append(
("file", (os.path.basename(filepath), click.open_file(filepath, "rb")))
)
encoder = MultipartEncoder(upload_fields)
monitor = MultipartEncoderMonitor(encoder, callback=callback)
config = cloudsmith_api.Configuration()
if config.proxy:
proxies = {"http": config.proxy, "https": config.proxy}
else:
proxies = None
headers = {"content-type": monitor.content_type}
client = get_files_api()
headers["user-agent"] = client.api_client.user_agent
session = create_requests_session()
resp = session.post(upload_url, data=monitor, headers=headers, proxies=proxies)
try:
resp.raise_for_status()
except requests.RequestException as exc:
raise ApiException(
resp.status_code, headers=exc.response.headers, body=exc.response.content
)
|
normal
|
{
"blob_id": "ee03263d92372899ec1feaf3a8ea48677b053676",
"index": 6281,
"step-1": "<mask token>\n\n\ndef get_files_api():\n \"\"\"Get the files API client.\"\"\"\n return get_api_client(cloudsmith_api.FilesApi)\n\n\ndef validate_request_file_upload(owner, repo, filepath, md5_checksum=None):\n \"\"\"Validate parameters for requesting a file upload.\"\"\"\n client = get_files_api()\n md5_checksum = md5_checksum or calculate_file_md5(filepath)\n with catch_raise_api_exception():\n _, _, headers = client.files_validate_with_http_info(owner=owner,\n repo=repo, data={'filename': os.path.basename(filepath),\n 'md5_checksum': md5_checksum})\n ratelimits.maybe_rate_limit(client, headers)\n return md5_checksum\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_files_api():\n \"\"\"Get the files API client.\"\"\"\n return get_api_client(cloudsmith_api.FilesApi)\n\n\ndef validate_request_file_upload(owner, repo, filepath, md5_checksum=None):\n \"\"\"Validate parameters for requesting a file upload.\"\"\"\n client = get_files_api()\n md5_checksum = md5_checksum or calculate_file_md5(filepath)\n with catch_raise_api_exception():\n _, _, headers = client.files_validate_with_http_info(owner=owner,\n repo=repo, data={'filename': os.path.basename(filepath),\n 'md5_checksum': md5_checksum})\n ratelimits.maybe_rate_limit(client, headers)\n return md5_checksum\n\n\n<mask token>\n\n\ndef upload_file(upload_url, upload_fields, filepath, callback=None):\n \"\"\"Upload a pre-signed file to Cloudsmith.\"\"\"\n upload_fields = list(upload_fields.items())\n upload_fields.append(('file', (os.path.basename(filepath), click.\n open_file(filepath, 'rb'))))\n encoder = MultipartEncoder(upload_fields)\n monitor = MultipartEncoderMonitor(encoder, callback=callback)\n config = cloudsmith_api.Configuration()\n if config.proxy:\n proxies = {'http': config.proxy, 'https': config.proxy}\n else:\n proxies = None\n headers = {'content-type': monitor.content_type}\n client = get_files_api()\n headers['user-agent'] = client.api_client.user_agent\n session = create_requests_session()\n resp = session.post(upload_url, data=monitor, headers=headers, proxies=\n proxies)\n try:\n resp.raise_for_status()\n except requests.RequestException as exc:\n raise ApiException(resp.status_code, headers=exc.response.headers,\n body=exc.response.content)\n",
"step-3": "<mask token>\n\n\ndef get_files_api():\n \"\"\"Get the files API client.\"\"\"\n return get_api_client(cloudsmith_api.FilesApi)\n\n\ndef validate_request_file_upload(owner, repo, filepath, md5_checksum=None):\n \"\"\"Validate parameters for requesting a file upload.\"\"\"\n client = get_files_api()\n md5_checksum = md5_checksum or calculate_file_md5(filepath)\n with catch_raise_api_exception():\n _, _, headers = client.files_validate_with_http_info(owner=owner,\n repo=repo, data={'filename': os.path.basename(filepath),\n 'md5_checksum': md5_checksum})\n ratelimits.maybe_rate_limit(client, headers)\n return md5_checksum\n\n\ndef request_file_upload(owner, repo, filepath, md5_checksum=None):\n \"\"\"Request a new package file upload (for creating packages).\"\"\"\n client = get_files_api()\n md5_checksum = md5_checksum or calculate_file_md5(filepath)\n with catch_raise_api_exception():\n data, _, headers = client.files_create_with_http_info(owner=owner,\n repo=repo, data={'filename': os.path.basename(filepath),\n 'md5_checksum': md5_checksum})\n ratelimits.maybe_rate_limit(client, headers)\n return data.identifier, data.upload_url, data.upload_fields\n\n\ndef upload_file(upload_url, upload_fields, filepath, callback=None):\n \"\"\"Upload a pre-signed file to Cloudsmith.\"\"\"\n upload_fields = list(upload_fields.items())\n upload_fields.append(('file', (os.path.basename(filepath), click.\n open_file(filepath, 'rb'))))\n encoder = MultipartEncoder(upload_fields)\n monitor = MultipartEncoderMonitor(encoder, callback=callback)\n config = cloudsmith_api.Configuration()\n if config.proxy:\n proxies = {'http': config.proxy, 'https': config.proxy}\n else:\n proxies = None\n headers = {'content-type': monitor.content_type}\n client = get_files_api()\n headers['user-agent'] = client.api_client.user_agent\n session = create_requests_session()\n resp = session.post(upload_url, data=monitor, headers=headers, proxies=\n proxies)\n try:\n resp.raise_for_status()\n except requests.RequestException as exc:\n raise ApiException(resp.status_code, headers=exc.response.headers,\n body=exc.response.content)\n",
"step-4": "<mask token>\nimport os\nimport click\nimport cloudsmith_api\nimport requests\nfrom requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor\nfrom .. import ratelimits\nfrom ..rest import create_requests_session\nfrom ..utils import calculate_file_md5\nfrom .exceptions import ApiException, catch_raise_api_exception\nfrom .init import get_api_client\n\n\ndef get_files_api():\n \"\"\"Get the files API client.\"\"\"\n return get_api_client(cloudsmith_api.FilesApi)\n\n\ndef validate_request_file_upload(owner, repo, filepath, md5_checksum=None):\n \"\"\"Validate parameters for requesting a file upload.\"\"\"\n client = get_files_api()\n md5_checksum = md5_checksum or calculate_file_md5(filepath)\n with catch_raise_api_exception():\n _, _, headers = client.files_validate_with_http_info(owner=owner,\n repo=repo, data={'filename': os.path.basename(filepath),\n 'md5_checksum': md5_checksum})\n ratelimits.maybe_rate_limit(client, headers)\n return md5_checksum\n\n\ndef request_file_upload(owner, repo, filepath, md5_checksum=None):\n \"\"\"Request a new package file upload (for creating packages).\"\"\"\n client = get_files_api()\n md5_checksum = md5_checksum or calculate_file_md5(filepath)\n with catch_raise_api_exception():\n data, _, headers = client.files_create_with_http_info(owner=owner,\n repo=repo, data={'filename': os.path.basename(filepath),\n 'md5_checksum': md5_checksum})\n ratelimits.maybe_rate_limit(client, headers)\n return data.identifier, data.upload_url, data.upload_fields\n\n\ndef upload_file(upload_url, upload_fields, filepath, callback=None):\n \"\"\"Upload a pre-signed file to Cloudsmith.\"\"\"\n upload_fields = list(upload_fields.items())\n upload_fields.append(('file', (os.path.basename(filepath), click.\n open_file(filepath, 'rb'))))\n encoder = MultipartEncoder(upload_fields)\n monitor = MultipartEncoderMonitor(encoder, callback=callback)\n config = cloudsmith_api.Configuration()\n if config.proxy:\n proxies = {'http': config.proxy, 'https': config.proxy}\n else:\n proxies = None\n headers = {'content-type': monitor.content_type}\n client = get_files_api()\n headers['user-agent'] = client.api_client.user_agent\n session = create_requests_session()\n resp = session.post(upload_url, data=monitor, headers=headers, proxies=\n proxies)\n try:\n resp.raise_for_status()\n except requests.RequestException as exc:\n raise ApiException(resp.status_code, headers=exc.response.headers,\n body=exc.response.content)\n",
"step-5": "\"\"\"API - Files endpoints.\"\"\"\n\nimport os\n\nimport click\nimport cloudsmith_api\nimport requests\nfrom requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor\n\nfrom .. import ratelimits\nfrom ..rest import create_requests_session\nfrom ..utils import calculate_file_md5\nfrom .exceptions import ApiException, catch_raise_api_exception\nfrom .init import get_api_client\n\n\ndef get_files_api():\n \"\"\"Get the files API client.\"\"\"\n return get_api_client(cloudsmith_api.FilesApi)\n\n\ndef validate_request_file_upload(owner, repo, filepath, md5_checksum=None):\n \"\"\"Validate parameters for requesting a file upload.\"\"\"\n client = get_files_api()\n md5_checksum = md5_checksum or calculate_file_md5(filepath)\n\n with catch_raise_api_exception():\n _, _, headers = client.files_validate_with_http_info(\n owner=owner,\n repo=repo,\n data={\"filename\": os.path.basename(filepath), \"md5_checksum\": md5_checksum},\n )\n\n ratelimits.maybe_rate_limit(client, headers)\n return md5_checksum\n\n\ndef request_file_upload(owner, repo, filepath, md5_checksum=None):\n \"\"\"Request a new package file upload (for creating packages).\"\"\"\n client = get_files_api()\n md5_checksum = md5_checksum or calculate_file_md5(filepath)\n\n with catch_raise_api_exception():\n data, _, headers = client.files_create_with_http_info(\n owner=owner,\n repo=repo,\n data={\"filename\": os.path.basename(filepath), \"md5_checksum\": md5_checksum},\n )\n\n # pylint: disable=no-member\n # Pylint detects the returned value as a tuple\n ratelimits.maybe_rate_limit(client, headers)\n return data.identifier, data.upload_url, data.upload_fields\n\n\ndef upload_file(upload_url, upload_fields, filepath, callback=None):\n \"\"\"Upload a pre-signed file to Cloudsmith.\"\"\"\n upload_fields = list(upload_fields.items())\n upload_fields.append(\n (\"file\", (os.path.basename(filepath), click.open_file(filepath, \"rb\")))\n )\n encoder = MultipartEncoder(upload_fields)\n monitor = MultipartEncoderMonitor(encoder, callback=callback)\n\n config = cloudsmith_api.Configuration()\n if config.proxy:\n proxies = {\"http\": config.proxy, \"https\": config.proxy}\n else:\n proxies = None\n\n headers = {\"content-type\": monitor.content_type}\n\n client = get_files_api()\n headers[\"user-agent\"] = client.api_client.user_agent\n\n session = create_requests_session()\n resp = session.post(upload_url, data=monitor, headers=headers, proxies=proxies)\n\n try:\n resp.raise_for_status()\n except requests.RequestException as exc:\n raise ApiException(\n resp.status_code, headers=exc.response.headers, body=exc.response.content\n )\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def fromTen():
global fin
fin = num
nnum = num
base = base2
if count == 1:
nnum = sum(milst) + sum(mdlst)
Ipart = int(nnum)
Dpart = Decimal(nnum - Ipart)
strDpart = str(Dpart)
Ilist = []
Dlist = []
print('digits before . (dot) is {} '.format(Ipart))
if strDpart == '0':
print('digits after . (dot) is 0')
else:
print('digits after . (dot) is {}'.format(strDpart[2:]))
print(' --------------------------------------------------')
print('| INTEGRAL PART |')
print(' --------------------------------------------------')
print(' {}|_{}'.format(base, Ipart))
while nnum >= base:
rem = int(nnum % base)
srem = str(rem)
nnum = int(nnum / base)
Ilist.append(rem)
if nnum >= base:
print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)
)
else:
print(' ' + str(nnum) + ' --->{}'.format(srem))
Ilist.append(nnum)
print(' --------------------------------------------------')
IIlist = Ilist
for i in range(len(IIlist)):
try:
a = int(IIlist[i]) + 55
if a > 64:
IIlist[i] = chr(a)
except:
pass
print(Ilist[::-1])
print()
print(' --------------------------------------------------')
print('| DECIMAL PART |')
print(' --------------------------------------------------')
k = 0
while k < (len(strDpart) - 2) * 2:
print('{} x {} = '.format(Dpart, base), end='')
a = Dpart * base
Dpart = a - int(a)
print(a)
a1 = int(a)
Dlist.append(a1)
k = k + 1
print(' --------------------------------------------------')
print('integer part:')
print(Ilist[::-1])
print('decimal part:')
print(Dlist)
dot = ['.']
y = Ilist[::-1]
y1 = y + dot + Dlist
for i in range(len(y1)):
y1[i] = str(y1[i])
print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)
<|reserved_special_token_0|>
def forBoth():
toTen()
global count
count = 1
fromTen()
def main():
global num, base1, base2, count, fin
count = 0
num = Decimal(input('Enter a number :'))
base1 = int(input('Enter base of {} :'.format(num)))
base2 = int(input('Enter the base of resulting number:'))
print(num)
if base1 == 10:
fromTen()
elif base2 == 10:
toTen()
else:
forBoth()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fromTen():
global fin
fin = num
nnum = num
base = base2
if count == 1:
nnum = sum(milst) + sum(mdlst)
Ipart = int(nnum)
Dpart = Decimal(nnum - Ipart)
strDpart = str(Dpart)
Ilist = []
Dlist = []
print('digits before . (dot) is {} '.format(Ipart))
if strDpart == '0':
print('digits after . (dot) is 0')
else:
print('digits after . (dot) is {}'.format(strDpart[2:]))
print(' --------------------------------------------------')
print('| INTEGRAL PART |')
print(' --------------------------------------------------')
print(' {}|_{}'.format(base, Ipart))
while nnum >= base:
rem = int(nnum % base)
srem = str(rem)
nnum = int(nnum / base)
Ilist.append(rem)
if nnum >= base:
print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)
)
else:
print(' ' + str(nnum) + ' --->{}'.format(srem))
Ilist.append(nnum)
print(' --------------------------------------------------')
IIlist = Ilist
for i in range(len(IIlist)):
try:
a = int(IIlist[i]) + 55
if a > 64:
IIlist[i] = chr(a)
except:
pass
print(Ilist[::-1])
print()
print(' --------------------------------------------------')
print('| DECIMAL PART |')
print(' --------------------------------------------------')
k = 0
while k < (len(strDpart) - 2) * 2:
print('{} x {} = '.format(Dpart, base), end='')
a = Dpart * base
Dpart = a - int(a)
print(a)
a1 = int(a)
Dlist.append(a1)
k = k + 1
print(' --------------------------------------------------')
print('integer part:')
print(Ilist[::-1])
print('decimal part:')
print(Dlist)
dot = ['.']
y = Ilist[::-1]
y1 = y + dot + Dlist
for i in range(len(y1)):
y1[i] = str(y1[i])
print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)
def toTen():
mnum = num
mbase = base1
global fin
mdnum = mnum - int(mnum)
minum = int(mnum)
strmdnum = str(mdnum)[2:]
mdlen = len(strmdnum)
strminum = str(minum)[::-1]
milen = len(strminum)
strnum = strmdnum + strminum
con = 0
for i in range(len(strnum)):
a = int(strnum[i])
if a >= mbase:
con = con + 1
if con == 0:
p = 0
global milst, mdlst
milst = []
mdlst = []
print(' --------------------------------------------------')
print('| INTEGRAL PART |')
print(' --------------------------------------------------')
for ii in range(milen):
minum = int(strminum[ii])
power1 = pow(mbase, p)
print('{} power {} is "{}" '.format(mbase, p, power1),
' --> {} x {} = {}'.format(power1, minum, minum * power1)
)
p = p + 1
milst.append(minum * power1)
print('___________________________________________________')
print()
print('ADDITION OF INTEGRAL PART ===> ', end='')
for i in range(milen):
if i + 1 < milen:
print(' {} +'.format(milst[i]), end='')
if i + 1 == milen:
print('{} = '.format(milst[i]), end='')
print(sum(milst))
print()
print('___________________________________________________')
print(' --------------------------------------------------')
print('| DECIMAL PART |')
print(' --------------------------------------------------')
print()
mbase = Decimal(mbase)
for jj in range(mdlen):
q = Decimal(pow(mbase, -(jj + 1)))
print('{} power {} = {} ---> '.format(mbase, -(jj + 1), q)
)
print(' ', strmdnum[jj], ' x ', q,
' = ', q * int(strmdnum[jj]))
mdlst.append(float(q * int(strmdnum[jj])))
print(' --------------------------------------------------')
print(sum(mdlst))
print('___________________________________________________')
print()
print('ADDITION OF DECIMAL PART ===> ', end='')
for i in range(mdlen):
if i + 1 < mdlen:
print(' {} +'.format(mdlst[i]), end='')
if i + 1 == mdlen:
print('{} = '.format(mdlst[i]), end='')
print(sum(mdlst))
print('___________________________________________________')
print('SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = '.format(
sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))
print(' --------------------------------------------------')
else:
try:
print(' --------------------------------------------------')
print(' ---------------------')
print(' | INVALID |')
print(' ---------------------')
print()
print('all the digits should be less than the base ')
print('The base of {} should not be {}'.format(mnum, mbase))
print()
main()
except:
pass
def forBoth():
toTen()
global count
count = 1
fromTen()
def main():
global num, base1, base2, count, fin
count = 0
num = Decimal(input('Enter a number :'))
base1 = int(input('Enter base of {} :'.format(num)))
base2 = int(input('Enter the base of resulting number:'))
print(num)
if base1 == 10:
fromTen()
elif base2 == 10:
toTen()
else:
forBoth()
<|reserved_special_token_0|>
if s == 1:
main()
s = s + 1
while True:
print('\n')
condition = input('Do you want to continue ? (y/n):')
if condition == 'y':
main()
elif condition == 'n':
print()
quit()
else:
print('Invalid input')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fromTen():
global fin
fin = num
nnum = num
base = base2
if count == 1:
nnum = sum(milst) + sum(mdlst)
Ipart = int(nnum)
Dpart = Decimal(nnum - Ipart)
strDpart = str(Dpart)
Ilist = []
Dlist = []
print('digits before . (dot) is {} '.format(Ipart))
if strDpart == '0':
print('digits after . (dot) is 0')
else:
print('digits after . (dot) is {}'.format(strDpart[2:]))
print(' --------------------------------------------------')
print('| INTEGRAL PART |')
print(' --------------------------------------------------')
print(' {}|_{}'.format(base, Ipart))
while nnum >= base:
rem = int(nnum % base)
srem = str(rem)
nnum = int(nnum / base)
Ilist.append(rem)
if nnum >= base:
print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)
)
else:
print(' ' + str(nnum) + ' --->{}'.format(srem))
Ilist.append(nnum)
print(' --------------------------------------------------')
IIlist = Ilist
for i in range(len(IIlist)):
try:
a = int(IIlist[i]) + 55
if a > 64:
IIlist[i] = chr(a)
except:
pass
print(Ilist[::-1])
print()
print(' --------------------------------------------------')
print('| DECIMAL PART |')
print(' --------------------------------------------------')
k = 0
while k < (len(strDpart) - 2) * 2:
print('{} x {} = '.format(Dpart, base), end='')
a = Dpart * base
Dpart = a - int(a)
print(a)
a1 = int(a)
Dlist.append(a1)
k = k + 1
print(' --------------------------------------------------')
print('integer part:')
print(Ilist[::-1])
print('decimal part:')
print(Dlist)
dot = ['.']
y = Ilist[::-1]
y1 = y + dot + Dlist
for i in range(len(y1)):
y1[i] = str(y1[i])
print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)
def toTen():
mnum = num
mbase = base1
global fin
mdnum = mnum - int(mnum)
minum = int(mnum)
strmdnum = str(mdnum)[2:]
mdlen = len(strmdnum)
strminum = str(minum)[::-1]
milen = len(strminum)
strnum = strmdnum + strminum
con = 0
for i in range(len(strnum)):
a = int(strnum[i])
if a >= mbase:
con = con + 1
if con == 0:
p = 0
global milst, mdlst
milst = []
mdlst = []
print(' --------------------------------------------------')
print('| INTEGRAL PART |')
print(' --------------------------------------------------')
for ii in range(milen):
minum = int(strminum[ii])
power1 = pow(mbase, p)
print('{} power {} is "{}" '.format(mbase, p, power1),
' --> {} x {} = {}'.format(power1, minum, minum * power1)
)
p = p + 1
milst.append(minum * power1)
print('___________________________________________________')
print()
print('ADDITION OF INTEGRAL PART ===> ', end='')
for i in range(milen):
if i + 1 < milen:
print(' {} +'.format(milst[i]), end='')
if i + 1 == milen:
print('{} = '.format(milst[i]), end='')
print(sum(milst))
print()
print('___________________________________________________')
print(' --------------------------------------------------')
print('| DECIMAL PART |')
print(' --------------------------------------------------')
print()
mbase = Decimal(mbase)
for jj in range(mdlen):
q = Decimal(pow(mbase, -(jj + 1)))
print('{} power {} = {} ---> '.format(mbase, -(jj + 1), q)
)
print(' ', strmdnum[jj], ' x ', q,
' = ', q * int(strmdnum[jj]))
mdlst.append(float(q * int(strmdnum[jj])))
print(' --------------------------------------------------')
print(sum(mdlst))
print('___________________________________________________')
print()
print('ADDITION OF DECIMAL PART ===> ', end='')
for i in range(mdlen):
if i + 1 < mdlen:
print(' {} +'.format(mdlst[i]), end='')
if i + 1 == mdlen:
print('{} = '.format(mdlst[i]), end='')
print(sum(mdlst))
print('___________________________________________________')
print('SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = '.format(
sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))
print(' --------------------------------------------------')
else:
try:
print(' --------------------------------------------------')
print(' ---------------------')
print(' | INVALID |')
print(' ---------------------')
print()
print('all the digits should be less than the base ')
print('The base of {} should not be {}'.format(mnum, mbase))
print()
main()
except:
pass
def forBoth():
toTen()
global count
count = 1
fromTen()
def main():
global num, base1, base2, count, fin
count = 0
num = Decimal(input('Enter a number :'))
base1 = int(input('Enter base of {} :'.format(num)))
base2 = int(input('Enter the base of resulting number:'))
print(num)
if base1 == 10:
fromTen()
elif base2 == 10:
toTen()
else:
forBoth()
s = 1
if s == 1:
main()
s = s + 1
while True:
print('\n')
condition = input('Do you want to continue ? (y/n):')
if condition == 'y':
main()
elif condition == 'n':
print()
quit()
else:
print('Invalid input')
<|reserved_special_token_1|>
from decimal import Decimal
def fromTen():
global fin
fin = num
nnum = num
base = base2
if count == 1:
nnum = sum(milst) + sum(mdlst)
Ipart = int(nnum)
Dpart = Decimal(nnum - Ipart)
strDpart = str(Dpart)
Ilist = []
Dlist = []
print('digits before . (dot) is {} '.format(Ipart))
if strDpart == '0':
print('digits after . (dot) is 0')
else:
print('digits after . (dot) is {}'.format(strDpart[2:]))
print(' --------------------------------------------------')
print('| INTEGRAL PART |')
print(' --------------------------------------------------')
print(' {}|_{}'.format(base, Ipart))
while nnum >= base:
rem = int(nnum % base)
srem = str(rem)
nnum = int(nnum / base)
Ilist.append(rem)
if nnum >= base:
print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)
)
else:
print(' ' + str(nnum) + ' --->{}'.format(srem))
Ilist.append(nnum)
print(' --------------------------------------------------')
IIlist = Ilist
for i in range(len(IIlist)):
try:
a = int(IIlist[i]) + 55
if a > 64:
IIlist[i] = chr(a)
except:
pass
print(Ilist[::-1])
print()
print(' --------------------------------------------------')
print('| DECIMAL PART |')
print(' --------------------------------------------------')
k = 0
while k < (len(strDpart) - 2) * 2:
print('{} x {} = '.format(Dpart, base), end='')
a = Dpart * base
Dpart = a - int(a)
print(a)
a1 = int(a)
Dlist.append(a1)
k = k + 1
print(' --------------------------------------------------')
print('integer part:')
print(Ilist[::-1])
print('decimal part:')
print(Dlist)
dot = ['.']
y = Ilist[::-1]
y1 = y + dot + Dlist
for i in range(len(y1)):
y1[i] = str(y1[i])
print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)
def toTen():
mnum = num
mbase = base1
global fin
mdnum = mnum - int(mnum)
minum = int(mnum)
strmdnum = str(mdnum)[2:]
mdlen = len(strmdnum)
strminum = str(minum)[::-1]
milen = len(strminum)
strnum = strmdnum + strminum
con = 0
for i in range(len(strnum)):
a = int(strnum[i])
if a >= mbase:
con = con + 1
if con == 0:
p = 0
global milst, mdlst
milst = []
mdlst = []
print(' --------------------------------------------------')
print('| INTEGRAL PART |')
print(' --------------------------------------------------')
for ii in range(milen):
minum = int(strminum[ii])
power1 = pow(mbase, p)
print('{} power {} is "{}" '.format(mbase, p, power1),
' --> {} x {} = {}'.format(power1, minum, minum * power1)
)
p = p + 1
milst.append(minum * power1)
print('___________________________________________________')
print()
print('ADDITION OF INTEGRAL PART ===> ', end='')
for i in range(milen):
if i + 1 < milen:
print(' {} +'.format(milst[i]), end='')
if i + 1 == milen:
print('{} = '.format(milst[i]), end='')
print(sum(milst))
print()
print('___________________________________________________')
print(' --------------------------------------------------')
print('| DECIMAL PART |')
print(' --------------------------------------------------')
print()
mbase = Decimal(mbase)
for jj in range(mdlen):
q = Decimal(pow(mbase, -(jj + 1)))
print('{} power {} = {} ---> '.format(mbase, -(jj + 1), q)
)
print(' ', strmdnum[jj], ' x ', q,
' = ', q * int(strmdnum[jj]))
mdlst.append(float(q * int(strmdnum[jj])))
print(' --------------------------------------------------')
print(sum(mdlst))
print('___________________________________________________')
print()
print('ADDITION OF DECIMAL PART ===> ', end='')
for i in range(mdlen):
if i + 1 < mdlen:
print(' {} +'.format(mdlst[i]), end='')
if i + 1 == mdlen:
print('{} = '.format(mdlst[i]), end='')
print(sum(mdlst))
print('___________________________________________________')
print('SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = '.format(
sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))
print(' --------------------------------------------------')
else:
try:
print(' --------------------------------------------------')
print(' ---------------------')
print(' | INVALID |')
print(' ---------------------')
print()
print('all the digits should be less than the base ')
print('The base of {} should not be {}'.format(mnum, mbase))
print()
main()
except:
pass
def forBoth():
toTen()
global count
count = 1
fromTen()
def main():
global num, base1, base2, count, fin
count = 0
num = Decimal(input('Enter a number :'))
base1 = int(input('Enter base of {} :'.format(num)))
base2 = int(input('Enter the base of resulting number:'))
print(num)
if base1 == 10:
fromTen()
elif base2 == 10:
toTen()
else:
forBoth()
s = 1
if s == 1:
main()
s = s + 1
while True:
print('\n')
condition = input('Do you want to continue ? (y/n):')
if condition == 'y':
main()
elif condition == 'n':
print()
quit()
else:
print('Invalid input')
<|reserved_special_token_1|>
# created by ahmad on 17-07-2019
# last updated on 21-07-2019
#recommended font size of console in pydroid is 12
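# Interactive base converter: reads a number and two bases, then prints the
# step-by-step working (repeated division for the integer part, repeated
# multiplication for the fractional part), going through base 10 when needed.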
from decimal import Decimal
def fromTen():
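    # Convert the base-10 value held in the global `num` to base `base2`; when
    # count == 1 the value converted is the base-10 sum produced earlier by toTen().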
global fin
fin = num
nnum = num
base = base2
if count == 1:
nnum = sum(milst) + sum(mdlst)
Ipart = int(nnum)
Dpart = Decimal(nnum - Ipart)
strDpart = str(Dpart)
Ilist = []
Dlist = []
print("digits before . (dot) is {} ".format(Ipart))
if strDpart == "0":
print("digits after . (dot) is 0")
else:
print("digits after . (dot) is {}".format(strDpart[2:]))
print(" --------------------------------------------------")
print("| INTEGRAL PART |")
print(" --------------------------------------------------")
print(" {}|_{}".format(base, Ipart))
while nnum >= base:
rem = int(nnum % base)
srem = str(rem)
nnum = int(nnum / base)
Ilist.append(rem)
if nnum >= base:
print(" {}|_".format(base) + str(nnum) + " --->{}".format(srem))
else:
print(" " + str(nnum) + " --->{}".format(srem))
Ilist.append(nnum)
print(" --------------------------------------------------")
IIlist = Ilist
for i in range(len(IIlist)):
try:
a = int(IIlist[i]) + 55
if a > 64:
IIlist[i] = chr(a)
except:
pass
print(Ilist[::-1])
print()
print(" --------------------------------------------------")
print("| DECIMAL PART |")
print(" --------------------------------------------------")
k = 0
while k < (len(strDpart) - 2) * 2:
print("{} x {} = ".format(Dpart, base), end='')
a = Dpart * base
Dpart = a - int(a)
print(a)
a1 = int(a)
Dlist.append(a1)
k = k + 1
print(" --------------------------------------------------")
print("integer part:")
print(Ilist[::-1])
print("decimal part:")
print(Dlist)
dot = ["."]
y=Ilist[::-1]
y1=y+dot+ Dlist
for i in range(len(y1)):
y1[i]=str(y1[i])
print("Final Answer = ",'(' ,''.join(y1),')','base',base2)
def toTen():
mnum = num
mbase = base1
global fin
mdnum = mnum - int(mnum)
minum = int(mnum)
strmdnum = str(mdnum)[2:]
mdlen = len(strmdnum)
strminum = str(minum)[::-1]
milen = len(strminum)
strnum = strmdnum + strminum
con = 0
for i in range(len(strnum)):
a = int(strnum[i])
if a >= mbase:
con = con + 1
if con == 0:
p = 0
global milst, mdlst
milst = []
mdlst = []
print(" --------------------------------------------------")
print("| INTEGRAL PART |")
print(" --------------------------------------------------")
for ii in range(milen):
minum = int(strminum[ii])
power1 = pow(mbase, p)
print("""{} power {} is "{}" """.format(mbase, p, power1),
" --> {} x {} = {}".format(power1, minum, minum * power1))
p = p + 1
milst.append(minum * power1)
print("___________________________________________________")
print()
print("ADDITION OF INTEGRAL PART ===> ", end='')
for i in range(milen):
if (i + 1) < (milen):
print(" {} +".format(milst[i]), end='')
if i + 1 == milen:
print("{} = ".format(milst[i]), end='')
print(sum(milst))
print()
print("___________________________________________________")
print(" --------------------------------------------------")
print("| DECIMAL PART |")
print(" --------------------------------------------------")
print()
mbase = Decimal(mbase)
for jj in range(mdlen):
q = Decimal(pow(mbase, -(jj + 1)))
print("{} power {} = {} ---> ".format(mbase, -(jj + 1), q)) # ,end='')
print(" ", strmdnum[jj], " x ", q, " = ", q * int(strmdnum[jj]))
mdlst.append(float(q * int(strmdnum[jj])))
print(" --------------------------------------------------")
print(sum(mdlst))
print("___________________________________________________")
print()
print("ADDITION OF DECIMAL PART ===> ", end='')
for i in range(mdlen):
if (i + 1) < (mdlen):
print(" {} +".format(mdlst[i]), end='')
if i + 1 == mdlen:
print("{} = ".format(mdlst[i]), end='')
print(sum(mdlst))
print("___________________________________________________")
# print("---------------------------------------------------------------")
print("SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = ".format(sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))
print(" --------------------------------------------------")
else:
try:
print(" --------------------------------------------------")
print(" ---------------------")
print(" | INVALID |")
print(" ---------------------")
print()
print("all the digits should be less than the base ")
print("The base of {} should not be {}".format(mnum, mbase))
print()
main()
except:
pass
def forBoth():
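    # Neither base is 10: convert base1 -> 10 with toTen(), then let fromTen()
    # (signalled by count = 1) convert that base-10 sum to base2.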
toTen()
global count
count = 1
fromTen()
def main():
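    # Read the number and both bases, then dispatch to the matching conversion routine.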
global num, base1, base2, count, fin
count = 0
num = Decimal(input("Enter a number :"))
base1 = int(input("Enter base of {} :".format(num)))
base2 = int(input("Enter the base of resulting number:"))
print(num)
if base1 == 10:
fromTen()
elif base2 == 10:
toTen()
else:
forBoth()
s = 1
if s == 1:
main()
s = s + 1
while True:
print("\n")
condition = input("Do you want to continue ? (y/n):")
if condition == "y":
main()
elif condition == "n":
print()
quit()
else:
print("Invalid input")
|
flexible
|
{
"blob_id": "9cf32e127664cb4c3290e665e35245acc936e064",
"index": 4090,
"step-1": "<mask token>\n\n\ndef fromTen():\n global fin\n fin = num\n nnum = num\n base = base2\n if count == 1:\n nnum = sum(milst) + sum(mdlst)\n Ipart = int(nnum)\n Dpart = Decimal(nnum - Ipart)\n strDpart = str(Dpart)\n Ilist = []\n Dlist = []\n print('digits before . (dot) is {} '.format(Ipart))\n if strDpart == '0':\n print('digits after . (dot) is 0')\n else:\n print('digits after . (dot) is {}'.format(strDpart[2:]))\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n print(' {}|_{}'.format(base, Ipart))\n while nnum >= base:\n rem = int(nnum % base)\n srem = str(rem)\n nnum = int(nnum / base)\n Ilist.append(rem)\n if nnum >= base:\n print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)\n )\n else:\n print(' ' + str(nnum) + ' --->{}'.format(srem))\n Ilist.append(nnum)\n print(' --------------------------------------------------')\n IIlist = Ilist\n for i in range(len(IIlist)):\n try:\n a = int(IIlist[i]) + 55\n if a > 64:\n IIlist[i] = chr(a)\n except:\n pass\n print(Ilist[::-1])\n print()\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n k = 0\n while k < (len(strDpart) - 2) * 2:\n print('{} x {} = '.format(Dpart, base), end='')\n a = Dpart * base\n Dpart = a - int(a)\n print(a)\n a1 = int(a)\n Dlist.append(a1)\n k = k + 1\n print(' --------------------------------------------------')\n print('integer part:')\n print(Ilist[::-1])\n print('decimal part:')\n print(Dlist)\n dot = ['.']\n y = Ilist[::-1]\n y1 = y + dot + Dlist\n for i in range(len(y1)):\n y1[i] = str(y1[i])\n print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)\n\n\n<mask token>\n\n\ndef forBoth():\n toTen()\n global count\n count = 1\n fromTen()\n\n\ndef main():\n global num, base1, base2, count, fin\n count = 0\n num = Decimal(input('Enter a number :'))\n base1 = int(input('Enter base of {} :'.format(num)))\n base2 = int(input('Enter the base of resulting number:'))\n print(num)\n if base1 == 10:\n fromTen()\n elif base2 == 10:\n toTen()\n else:\n forBoth()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fromTen():\n global fin\n fin = num\n nnum = num\n base = base2\n if count == 1:\n nnum = sum(milst) + sum(mdlst)\n Ipart = int(nnum)\n Dpart = Decimal(nnum - Ipart)\n strDpart = str(Dpart)\n Ilist = []\n Dlist = []\n print('digits before . (dot) is {} '.format(Ipart))\n if strDpart == '0':\n print('digits after . (dot) is 0')\n else:\n print('digits after . (dot) is {}'.format(strDpart[2:]))\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n print(' {}|_{}'.format(base, Ipart))\n while nnum >= base:\n rem = int(nnum % base)\n srem = str(rem)\n nnum = int(nnum / base)\n Ilist.append(rem)\n if nnum >= base:\n print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)\n )\n else:\n print(' ' + str(nnum) + ' --->{}'.format(srem))\n Ilist.append(nnum)\n print(' --------------------------------------------------')\n IIlist = Ilist\n for i in range(len(IIlist)):\n try:\n a = int(IIlist[i]) + 55\n if a > 64:\n IIlist[i] = chr(a)\n except:\n pass\n print(Ilist[::-1])\n print()\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n k = 0\n while k < (len(strDpart) - 2) * 2:\n print('{} x {} = '.format(Dpart, base), end='')\n a = Dpart * base\n Dpart = a - int(a)\n print(a)\n a1 = int(a)\n Dlist.append(a1)\n k = k + 1\n print(' --------------------------------------------------')\n print('integer part:')\n print(Ilist[::-1])\n print('decimal part:')\n print(Dlist)\n dot = ['.']\n y = Ilist[::-1]\n y1 = y + dot + Dlist\n for i in range(len(y1)):\n y1[i] = str(y1[i])\n print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)\n\n\ndef toTen():\n mnum = num\n mbase = base1\n global fin\n mdnum = mnum - int(mnum)\n minum = int(mnum)\n strmdnum = str(mdnum)[2:]\n mdlen = len(strmdnum)\n strminum = str(minum)[::-1]\n milen = len(strminum)\n strnum = strmdnum + strminum\n con = 0\n for i in range(len(strnum)):\n a = int(strnum[i])\n if a >= mbase:\n con = con + 1\n if con == 0:\n p = 0\n global milst, mdlst\n milst = []\n mdlst = []\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n for ii in range(milen):\n minum = int(strminum[ii])\n power1 = pow(mbase, p)\n print('{} power {} is \"{}\" '.format(mbase, p, power1),\n ' --> {} x {} = {}'.format(power1, minum, minum * power1)\n )\n p = p + 1\n milst.append(minum * power1)\n print('___________________________________________________')\n print()\n print('ADDITION OF INTEGRAL PART ===> ', end='')\n for i in range(milen):\n if i + 1 < milen:\n print(' {} +'.format(milst[i]), end='')\n if i + 1 == milen:\n print('{} = '.format(milst[i]), end='')\n print(sum(milst))\n print()\n print('___________________________________________________')\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n print()\n mbase = Decimal(mbase)\n for jj in range(mdlen):\n q = Decimal(pow(mbase, -(jj + 1)))\n print('{} power {} = {} ---> '.format(mbase, -(jj + 1), q)\n )\n print(' ', strmdnum[jj], ' x ', q,\n ' = ', q * int(strmdnum[jj]))\n mdlst.append(float(q * int(strmdnum[jj])))\n print(' --------------------------------------------------')\n print(sum(mdlst))\n print('___________________________________________________')\n 
print()\n print('ADDITION OF DECIMAL PART ===> ', end='')\n for i in range(mdlen):\n if i + 1 < mdlen:\n print(' {} +'.format(mdlst[i]), end='')\n if i + 1 == mdlen:\n print('{} = '.format(mdlst[i]), end='')\n print(sum(mdlst))\n print('___________________________________________________')\n print('SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = '.format(\n sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))\n print(' --------------------------------------------------')\n else:\n try:\n print(' --------------------------------------------------')\n print(' ---------------------')\n print(' | INVALID |')\n print(' ---------------------')\n print()\n print('all the digits should be less than the base ')\n print('The base of {} should not be {}'.format(mnum, mbase))\n print()\n main()\n except:\n pass\n\n\ndef forBoth():\n toTen()\n global count\n count = 1\n fromTen()\n\n\ndef main():\n global num, base1, base2, count, fin\n count = 0\n num = Decimal(input('Enter a number :'))\n base1 = int(input('Enter base of {} :'.format(num)))\n base2 = int(input('Enter the base of resulting number:'))\n print(num)\n if base1 == 10:\n fromTen()\n elif base2 == 10:\n toTen()\n else:\n forBoth()\n\n\n<mask token>\nif s == 1:\n main()\n s = s + 1\nwhile True:\n print('\\n')\n condition = input('Do you want to continue ? (y/n):')\n if condition == 'y':\n main()\n elif condition == 'n':\n print()\n quit()\n else:\n print('Invalid input')\n",
"step-3": "<mask token>\n\n\ndef fromTen():\n global fin\n fin = num\n nnum = num\n base = base2\n if count == 1:\n nnum = sum(milst) + sum(mdlst)\n Ipart = int(nnum)\n Dpart = Decimal(nnum - Ipart)\n strDpart = str(Dpart)\n Ilist = []\n Dlist = []\n print('digits before . (dot) is {} '.format(Ipart))\n if strDpart == '0':\n print('digits after . (dot) is 0')\n else:\n print('digits after . (dot) is {}'.format(strDpart[2:]))\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n print(' {}|_{}'.format(base, Ipart))\n while nnum >= base:\n rem = int(nnum % base)\n srem = str(rem)\n nnum = int(nnum / base)\n Ilist.append(rem)\n if nnum >= base:\n print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)\n )\n else:\n print(' ' + str(nnum) + ' --->{}'.format(srem))\n Ilist.append(nnum)\n print(' --------------------------------------------------')\n IIlist = Ilist\n for i in range(len(IIlist)):\n try:\n a = int(IIlist[i]) + 55\n if a > 64:\n IIlist[i] = chr(a)\n except:\n pass\n print(Ilist[::-1])\n print()\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n k = 0\n while k < (len(strDpart) - 2) * 2:\n print('{} x {} = '.format(Dpart, base), end='')\n a = Dpart * base\n Dpart = a - int(a)\n print(a)\n a1 = int(a)\n Dlist.append(a1)\n k = k + 1\n print(' --------------------------------------------------')\n print('integer part:')\n print(Ilist[::-1])\n print('decimal part:')\n print(Dlist)\n dot = ['.']\n y = Ilist[::-1]\n y1 = y + dot + Dlist\n for i in range(len(y1)):\n y1[i] = str(y1[i])\n print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)\n\n\ndef toTen():\n mnum = num\n mbase = base1\n global fin\n mdnum = mnum - int(mnum)\n minum = int(mnum)\n strmdnum = str(mdnum)[2:]\n mdlen = len(strmdnum)\n strminum = str(minum)[::-1]\n milen = len(strminum)\n strnum = strmdnum + strminum\n con = 0\n for i in range(len(strnum)):\n a = int(strnum[i])\n if a >= mbase:\n con = con + 1\n if con == 0:\n p = 0\n global milst, mdlst\n milst = []\n mdlst = []\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n for ii in range(milen):\n minum = int(strminum[ii])\n power1 = pow(mbase, p)\n print('{} power {} is \"{}\" '.format(mbase, p, power1),\n ' --> {} x {} = {}'.format(power1, minum, minum * power1)\n )\n p = p + 1\n milst.append(minum * power1)\n print('___________________________________________________')\n print()\n print('ADDITION OF INTEGRAL PART ===> ', end='')\n for i in range(milen):\n if i + 1 < milen:\n print(' {} +'.format(milst[i]), end='')\n if i + 1 == milen:\n print('{} = '.format(milst[i]), end='')\n print(sum(milst))\n print()\n print('___________________________________________________')\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n print()\n mbase = Decimal(mbase)\n for jj in range(mdlen):\n q = Decimal(pow(mbase, -(jj + 1)))\n print('{} power {} = {} ---> '.format(mbase, -(jj + 1), q)\n )\n print(' ', strmdnum[jj], ' x ', q,\n ' = ', q * int(strmdnum[jj]))\n mdlst.append(float(q * int(strmdnum[jj])))\n print(' --------------------------------------------------')\n print(sum(mdlst))\n print('___________________________________________________')\n 
print()\n print('ADDITION OF DECIMAL PART ===> ', end='')\n for i in range(mdlen):\n if i + 1 < mdlen:\n print(' {} +'.format(mdlst[i]), end='')\n if i + 1 == mdlen:\n print('{} = '.format(mdlst[i]), end='')\n print(sum(mdlst))\n print('___________________________________________________')\n print('SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = '.format(\n sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))\n print(' --------------------------------------------------')\n else:\n try:\n print(' --------------------------------------------------')\n print(' ---------------------')\n print(' | INVALID |')\n print(' ---------------------')\n print()\n print('all the digits should be less than the base ')\n print('The base of {} should not be {}'.format(mnum, mbase))\n print()\n main()\n except:\n pass\n\n\ndef forBoth():\n toTen()\n global count\n count = 1\n fromTen()\n\n\ndef main():\n global num, base1, base2, count, fin\n count = 0\n num = Decimal(input('Enter a number :'))\n base1 = int(input('Enter base of {} :'.format(num)))\n base2 = int(input('Enter the base of resulting number:'))\n print(num)\n if base1 == 10:\n fromTen()\n elif base2 == 10:\n toTen()\n else:\n forBoth()\n\n\ns = 1\nif s == 1:\n main()\n s = s + 1\nwhile True:\n print('\\n')\n condition = input('Do you want to continue ? (y/n):')\n if condition == 'y':\n main()\n elif condition == 'n':\n print()\n quit()\n else:\n print('Invalid input')\n",
"step-4": "from decimal import Decimal\n\n\ndef fromTen():\n global fin\n fin = num\n nnum = num\n base = base2\n if count == 1:\n nnum = sum(milst) + sum(mdlst)\n Ipart = int(nnum)\n Dpart = Decimal(nnum - Ipart)\n strDpart = str(Dpart)\n Ilist = []\n Dlist = []\n print('digits before . (dot) is {} '.format(Ipart))\n if strDpart == '0':\n print('digits after . (dot) is 0')\n else:\n print('digits after . (dot) is {}'.format(strDpart[2:]))\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n print(' {}|_{}'.format(base, Ipart))\n while nnum >= base:\n rem = int(nnum % base)\n srem = str(rem)\n nnum = int(nnum / base)\n Ilist.append(rem)\n if nnum >= base:\n print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)\n )\n else:\n print(' ' + str(nnum) + ' --->{}'.format(srem))\n Ilist.append(nnum)\n print(' --------------------------------------------------')\n IIlist = Ilist\n for i in range(len(IIlist)):\n try:\n a = int(IIlist[i]) + 55\n if a > 64:\n IIlist[i] = chr(a)\n except:\n pass\n print(Ilist[::-1])\n print()\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n k = 0\n while k < (len(strDpart) - 2) * 2:\n print('{} x {} = '.format(Dpart, base), end='')\n a = Dpart * base\n Dpart = a - int(a)\n print(a)\n a1 = int(a)\n Dlist.append(a1)\n k = k + 1\n print(' --------------------------------------------------')\n print('integer part:')\n print(Ilist[::-1])\n print('decimal part:')\n print(Dlist)\n dot = ['.']\n y = Ilist[::-1]\n y1 = y + dot + Dlist\n for i in range(len(y1)):\n y1[i] = str(y1[i])\n print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)\n\n\ndef toTen():\n mnum = num\n mbase = base1\n global fin\n mdnum = mnum - int(mnum)\n minum = int(mnum)\n strmdnum = str(mdnum)[2:]\n mdlen = len(strmdnum)\n strminum = str(minum)[::-1]\n milen = len(strminum)\n strnum = strmdnum + strminum\n con = 0\n for i in range(len(strnum)):\n a = int(strnum[i])\n if a >= mbase:\n con = con + 1\n if con == 0:\n p = 0\n global milst, mdlst\n milst = []\n mdlst = []\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n for ii in range(milen):\n minum = int(strminum[ii])\n power1 = pow(mbase, p)\n print('{} power {} is \"{}\" '.format(mbase, p, power1),\n ' --> {} x {} = {}'.format(power1, minum, minum * power1)\n )\n p = p + 1\n milst.append(minum * power1)\n print('___________________________________________________')\n print()\n print('ADDITION OF INTEGRAL PART ===> ', end='')\n for i in range(milen):\n if i + 1 < milen:\n print(' {} +'.format(milst[i]), end='')\n if i + 1 == milen:\n print('{} = '.format(milst[i]), end='')\n print(sum(milst))\n print()\n print('___________________________________________________')\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n print()\n mbase = Decimal(mbase)\n for jj in range(mdlen):\n q = Decimal(pow(mbase, -(jj + 1)))\n print('{} power {} = {} ---> '.format(mbase, -(jj + 1), q)\n )\n print(' ', strmdnum[jj], ' x ', q,\n ' = ', q * int(strmdnum[jj]))\n mdlst.append(float(q * int(strmdnum[jj])))\n print(' --------------------------------------------------')\n print(sum(mdlst))\n 
print('___________________________________________________')\n print()\n print('ADDITION OF DECIMAL PART ===> ', end='')\n for i in range(mdlen):\n if i + 1 < mdlen:\n print(' {} +'.format(mdlst[i]), end='')\n if i + 1 == mdlen:\n print('{} = '.format(mdlst[i]), end='')\n print(sum(mdlst))\n print('___________________________________________________')\n print('SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = '.format(\n sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))\n print(' --------------------------------------------------')\n else:\n try:\n print(' --------------------------------------------------')\n print(' ---------------------')\n print(' | INVALID |')\n print(' ---------------------')\n print()\n print('all the digits should be less than the base ')\n print('The base of {} should not be {}'.format(mnum, mbase))\n print()\n main()\n except:\n pass\n\n\ndef forBoth():\n toTen()\n global count\n count = 1\n fromTen()\n\n\ndef main():\n global num, base1, base2, count, fin\n count = 0\n num = Decimal(input('Enter a number :'))\n base1 = int(input('Enter base of {} :'.format(num)))\n base2 = int(input('Enter the base of resulting number:'))\n print(num)\n if base1 == 10:\n fromTen()\n elif base2 == 10:\n toTen()\n else:\n forBoth()\n\n\ns = 1\nif s == 1:\n main()\n s = s + 1\nwhile True:\n print('\\n')\n condition = input('Do you want to continue ? (y/n):')\n if condition == 'y':\n main()\n elif condition == 'n':\n print()\n quit()\n else:\n print('Invalid input')\n",
"step-5": "# created by ahmad on 17-07-2019\n# last updated on 21-07-2019\n#recommended font size of console in pydroid is 12\n\nfrom decimal import Decimal\n\n\ndef fromTen():\n global fin\n fin = num\n nnum = num\n base = base2\n if count == 1:\n nnum = sum(milst) + sum(mdlst)\n \n Ipart = int(nnum)\n Dpart = Decimal(nnum - Ipart)\n strDpart = str(Dpart)\n Ilist = []\n Dlist = []\n print(\"digits before . (dot) is {} \".format(Ipart))\n if strDpart == \"0\":\n print(\"digits after . (dot) is 0\")\n else:\n print(\"digits after . (dot) is {}\".format(strDpart[2:])) \n print(\" --------------------------------------------------\")\n print(\"| INTEGRAL PART |\")\n print(\" --------------------------------------------------\")\n print(\" {}|_{}\".format(base, Ipart))\n while nnum >= base:\n rem = int(nnum % base)\n srem = str(rem)\n nnum = int(nnum / base)\n Ilist.append(rem)\n if nnum >= base:\n print(\" {}|_\".format(base) + str(nnum) + \" --->{}\".format(srem))\n else:\n print(\" \" + str(nnum) + \" --->{}\".format(srem))\n Ilist.append(nnum)\n print(\" --------------------------------------------------\")\n IIlist = Ilist\n for i in range(len(IIlist)):\n try:\n a = int(IIlist[i]) + 55\n if a > 64:\n IIlist[i] = chr(a)\n except:\n pass\n \n print(Ilist[::-1])\n print()\n print(\" --------------------------------------------------\")\n print(\"| DECIMAL PART |\")\n print(\" --------------------------------------------------\")\n k = 0\n while k < (len(strDpart) - 2) * 2:\n print(\"{} x {} = \".format(Dpart, base), end='')\n a = Dpart * base\n Dpart = a - int(a)\n print(a)\n a1 = int(a)\n Dlist.append(a1)\n k = k + 1\n\n print(\" --------------------------------------------------\")\n print(\"integer part:\")\n print(Ilist[::-1])\n print(\"decimal part:\")\n print(Dlist)\n dot = [\".\"]\n y=Ilist[::-1]\n y1=y+dot+ Dlist\n for i in range(len(y1)):\n \ty1[i]=str(y1[i])\n \n print(\"Final Answer = \",'(' ,''.join(y1),')','base',base2)\n\n\n\ndef toTen():\n mnum = num\n mbase = base1\n global fin\n mdnum = mnum - int(mnum)\n minum = int(mnum)\n\n strmdnum = str(mdnum)[2:]\n mdlen = len(strmdnum)\n\n strminum = str(minum)[::-1]\n milen = len(strminum)\n strnum = strmdnum + strminum\n con = 0\n for i in range(len(strnum)):\n a = int(strnum[i])\n if a >= mbase:\n con = con + 1\n if con == 0:\n p = 0\n global milst, mdlst\n milst = []\n mdlst = []\n print(\" --------------------------------------------------\")\n print(\"| INTEGRAL PART |\")\n print(\" --------------------------------------------------\")\n for ii in range(milen):\n minum = int(strminum[ii])\n power1 = pow(mbase, p)\n print(\"\"\"{} power {} is \"{}\" \"\"\".format(mbase, p, power1),\n \" --> {} x {} = {}\".format(power1, minum, minum * power1))\n p = p + 1\n milst.append(minum * power1)\n print(\"___________________________________________________\")\n print()\n print(\"ADDITION OF INTEGRAL PART ===> \", end='')\n for i in range(milen):\n if (i + 1) < (milen):\n print(\" {} +\".format(milst[i]), end='')\n if i + 1 == milen:\n print(\"{} = \".format(milst[i]), end='')\n print(sum(milst))\n print()\n print(\"___________________________________________________\")\n\n print(\" --------------------------------------------------\")\n print(\"| DECIMAL PART |\")\n print(\" --------------------------------------------------\")\n print()\n mbase = Decimal(mbase)\n \n for jj in range(mdlen):\n q = Decimal(pow(mbase, -(jj + 1)))\n print(\"{} power {} = {} ---> \".format(mbase, -(jj + 1), q)) # ,end='')\n print(\" \", strmdnum[jj], \" x \", 
q, \" = \", q * int(strmdnum[jj]))\n mdlst.append(float(q * int(strmdnum[jj])))\n print(\" --------------------------------------------------\")\n print(sum(mdlst))\n print(\"___________________________________________________\")\n print()\n print(\"ADDITION OF DECIMAL PART ===> \", end='')\n for i in range(mdlen):\n if (i + 1) < (mdlen):\n print(\" {} +\".format(mdlst[i]), end='')\n if i + 1 == mdlen:\n print(\"{} = \".format(mdlst[i]), end='')\n print(sum(mdlst))\n print(\"___________________________________________________\")\n # print(\"---------------------------------------------------------------\")\n print(\"SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = \".format(sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))\n print(\" --------------------------------------------------\")\n else:\n\n \ttry:\n \tprint(\" --------------------------------------------------\")\n \tprint(\" ---------------------\")\n \tprint(\" | INVALID |\")\n \tprint(\" ---------------------\")\n \tprint()\n \tprint(\"all the digits should be less than the base \")\n \tprint(\"The base of {} should not be {}\".format(mnum, mbase))\n \tprint()\n \tmain()\n \texcept:\n \tpass\n\n\ndef forBoth():\n toTen()\n global count\n count = 1\n fromTen()\n\n\ndef main():\n global num, base1, base2, count, fin\n count = 0\n \n num = Decimal(input(\"Enter a number :\"))\n base1 = int(input(\"Enter base of {} :\".format(num)))\n base2 = int(input(\"Enter the base of resulting number:\"))\n print(num)\n \n if base1 == 10:\n fromTen()\n elif base2 == 10:\n toTen()\n else:\n forBoth()\n\n\ns = 1\nif s == 1:\n main()\n s = s + 1\nwhile True:\n print(\"\\n\")\n condition = input(\"Do you want to continue ? (y/n):\")\n if condition == \"y\":\n main()\n elif condition == \"n\":\n print()\n \n quit()\n else:\n print(\"Invalid input\")\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
@app.task
def generate_static_index_html():
"""产生首页静态页面"""
types = GoodsType.objects.all()
goods_banners = IndexGoodsBanner.objects.all().order_by('index')
promotion_banners = IndexPromotionBanner.objects.all().order_by('index')
for type in types:
image_banners = IndexTypeGoodsBanner.objects.filter(type=type,
display_type=1).order_by('index')
title_banners = IndexTypeGoodsBanner.objects.filter(type=type,
display_type=0).order_by('index')
type.image_banners = image_banners
type.title_banners = title_banners
context = {'types': types, 'goods_banners': goods_banners,
'promotion_banners': promotion_banners}
temp = loader.get_template('static_index.html')
statoc_index_html = temp.render(context)
save_path = os.path.join(settings.BASE_DIR,
'static/static_index/index.html')
with open(save_path, 'w', encoding='utf-8') as f:
f.write(statoc_index_html)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.task
def send_register_active_email(to_email, username, token):
"""发送激活邮件"""
subject = '天天生鲜欢迎信息'
message = ''
sender = settings.EMAIL_FROM
receiver = [to_email]
html_message = (
'<h1>%s,欢迎</h1><br>请点击以下链接激活<br><a href="http://127.0.0.1:8000/user/active/%s">http://127.0.0.1:8000/user/active/%s</a>'
% (username, token, token))
send_mail(subject, message, sender, receiver, html_message=html_message)
@app.task
def generate_static_index_html():
"""产生首页静态页面"""
types = GoodsType.objects.all()
goods_banners = IndexGoodsBanner.objects.all().order_by('index')
promotion_banners = IndexPromotionBanner.objects.all().order_by('index')
for type in types:
image_banners = IndexTypeGoodsBanner.objects.filter(type=type,
display_type=1).order_by('index')
title_banners = IndexTypeGoodsBanner.objects.filter(type=type,
display_type=0).order_by('index')
type.image_banners = image_banners
type.title_banners = title_banners
context = {'types': types, 'goods_banners': goods_banners,
'promotion_banners': promotion_banners}
temp = loader.get_template('static_index.html')
statoc_index_html = temp.render(context)
save_path = os.path.join(settings.BASE_DIR,
'static/static_index/index.html')
with open(save_path, 'w', encoding='utf-8') as f:
f.write(statoc_index_html)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dailyfresh.settings')
django.setup()
<|reserved_special_token_0|>
app = Celery('celery_tasks.tasks', broker='redis://127.0.0.1:6379/8')
@app.task
def send_register_active_email(to_email, username, token):
"""发送激活邮件"""
subject = '天天生鲜欢迎信息'
message = ''
sender = settings.EMAIL_FROM
receiver = [to_email]
html_message = (
'<h1>%s,欢迎</h1><br>请点击以下链接激活<br><a href="http://127.0.0.1:8000/user/active/%s">http://127.0.0.1:8000/user/active/%s</a>'
% (username, token, token))
send_mail(subject, message, sender, receiver, html_message=html_message)
@app.task
def generate_static_index_html():
"""产生首页静态页面"""
types = GoodsType.objects.all()
goods_banners = IndexGoodsBanner.objects.all().order_by('index')
promotion_banners = IndexPromotionBanner.objects.all().order_by('index')
for type in types:
image_banners = IndexTypeGoodsBanner.objects.filter(type=type,
display_type=1).order_by('index')
title_banners = IndexTypeGoodsBanner.objects.filter(type=type,
display_type=0).order_by('index')
type.image_banners = image_banners
type.title_banners = title_banners
context = {'types': types, 'goods_banners': goods_banners,
'promotion_banners': promotion_banners}
temp = loader.get_template('static_index.html')
statoc_index_html = temp.render(context)
save_path = os.path.join(settings.BASE_DIR,
'static/static_index/index.html')
with open(save_path, 'w', encoding='utf-8') as f:
f.write(statoc_index_html)
<|reserved_special_token_1|>
from django.conf import settings
from django.core.mail import send_mail
from django.template import loader, RequestContext
from celery import Celery
import time
import os
import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dailyfresh.settings')
django.setup()
from goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner
app = Celery('celery_tasks.tasks', broker='redis://127.0.0.1:6379/8')
@app.task
def send_register_active_email(to_email, username, token):
"""发送激活邮件"""
subject = '天天生鲜欢迎信息'
message = ''
sender = settings.EMAIL_FROM
receiver = [to_email]
html_message = (
'<h1>%s,欢迎</h1><br>请点击以下链接激活<br><a href="http://127.0.0.1:8000/user/active/%s">http://127.0.0.1:8000/user/active/%s</a>'
% (username, token, token))
send_mail(subject, message, sender, receiver, html_message=html_message)
@app.task
def generate_static_index_html():
"""产生首页静态页面"""
types = GoodsType.objects.all()
goods_banners = IndexGoodsBanner.objects.all().order_by('index')
promotion_banners = IndexPromotionBanner.objects.all().order_by('index')
for type in types:
image_banners = IndexTypeGoodsBanner.objects.filter(type=type,
display_type=1).order_by('index')
title_banners = IndexTypeGoodsBanner.objects.filter(type=type,
display_type=0).order_by('index')
type.image_banners = image_banners
type.title_banners = title_banners
context = {'types': types, 'goods_banners': goods_banners,
'promotion_banners': promotion_banners}
temp = loader.get_template('static_index.html')
statoc_index_html = temp.render(context)
save_path = os.path.join(settings.BASE_DIR,
'static/static_index/index.html')
with open(save_path, 'w', encoding='utf-8') as f:
f.write(statoc_index_html)
<|reserved_special_token_1|>
# Asynchronous tasks handled with Celery
from django.conf import settings
from django.core.mail import send_mail
from django.template import loader, RequestContext
from celery import Celery
import time
# Code that has to be added on the task-worker (consumer) side
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dailyfresh.settings")
django.setup()
from goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner
# Create a Celery application instance
app = Celery('celery_tasks.tasks', broker='redis://127.0.0.1:6379/8')
# Define the task function that sends the email
@app.task
def send_register_active_email(to_email, username, token):
    '''Send the activation email'''
    # Compose the email contents
subject = '天天生鲜欢迎信息'
message = ''
sender = settings.EMAIL_FROM
receiver = [to_email]
html_message = '<h1>%s,欢迎</h1><br>请点击以下链接激活<br><a href="http://127.0.0.1:8000/user/active/%s">http://127.0.0.1:8000/user/active/%s</a>'%(username, token, token)
send_mail(subject, message, sender, receiver, html_message=html_message)
@app.task
def generate_static_index_html():
    '''Generate the static index page'''
types = GoodsType.objects.all()
    # Get the index-page carousel banners
goods_banners = IndexGoodsBanner.objects.all().order_by('index')
    # Get the index-page promotion banners
promotion_banners = IndexPromotionBanner.objects.all().order_by('index')
    # Get the per-category goods display info for the index page
#type_goods_banners = IndexTypeGoodsBanner.objects.all()
for type in types:
        # Image banners for this category on the index page
image_banners = IndexTypeGoodsBanner.objects.filter(type=type, display_type=1).order_by('index')
        # Text (title) banners for this category on the index page
title_banners = IndexTypeGoodsBanner.objects.filter(type=type, display_type=0).order_by('index')
        # Attach the query results to the type object dynamically
type.image_banners = image_banners
type.title_banners = title_banners
    # Get the goods in the user's cart
    # Build the template context
context = {'types': types,
'goods_banners': goods_banners,
'promotion_banners': promotion_banners}
    # Load the template file
temp = loader.get_template('static_index.html')
    # Define the template context
    # Render the template
statoc_index_html = temp.render(context)
save_path = os.path.join(settings.BASE_DIR, 'static/static_index/index.html')
with open(save_path,'w',encoding='utf-8') as f:
f.write(statoc_index_html)
|
flexible
|
{
"blob_id": "7f7d087b7001cd7df01d4f22e056809be5a35568",
"index": 9584,
"step-1": "<mask token>\n\n\[email protected]\ndef generate_static_index_html():\n \"\"\"产生首页静态页面\"\"\"\n types = GoodsType.objects.all()\n goods_banners = IndexGoodsBanner.objects.all().order_by('index')\n promotion_banners = IndexPromotionBanner.objects.all().order_by('index')\n for type in types:\n image_banners = IndexTypeGoodsBanner.objects.filter(type=type,\n display_type=1).order_by('index')\n title_banners = IndexTypeGoodsBanner.objects.filter(type=type,\n display_type=0).order_by('index')\n type.image_banners = image_banners\n type.title_banners = title_banners\n context = {'types': types, 'goods_banners': goods_banners,\n 'promotion_banners': promotion_banners}\n temp = loader.get_template('static_index.html')\n statoc_index_html = temp.render(context)\n save_path = os.path.join(settings.BASE_DIR,\n 'static/static_index/index.html')\n with open(save_path, 'w', encoding='utf-8') as f:\n f.write(statoc_index_html)\n",
"step-2": "<mask token>\n\n\[email protected]\ndef send_register_active_email(to_email, username, token):\n \"\"\"发送激活邮件\"\"\"\n subject = '天天生鲜欢迎信息'\n message = ''\n sender = settings.EMAIL_FROM\n receiver = [to_email]\n html_message = (\n '<h1>%s,欢迎</h1><br>请点击以下链接激活<br><a href=\"http://127.0.0.1:8000/user/active/%s\">http://127.0.0.1:8000/user/active/%s</a>'\n % (username, token, token))\n send_mail(subject, message, sender, receiver, html_message=html_message)\n\n\[email protected]\ndef generate_static_index_html():\n \"\"\"产生首页静态页面\"\"\"\n types = GoodsType.objects.all()\n goods_banners = IndexGoodsBanner.objects.all().order_by('index')\n promotion_banners = IndexPromotionBanner.objects.all().order_by('index')\n for type in types:\n image_banners = IndexTypeGoodsBanner.objects.filter(type=type,\n display_type=1).order_by('index')\n title_banners = IndexTypeGoodsBanner.objects.filter(type=type,\n display_type=0).order_by('index')\n type.image_banners = image_banners\n type.title_banners = title_banners\n context = {'types': types, 'goods_banners': goods_banners,\n 'promotion_banners': promotion_banners}\n temp = loader.get_template('static_index.html')\n statoc_index_html = temp.render(context)\n save_path = os.path.join(settings.BASE_DIR,\n 'static/static_index/index.html')\n with open(save_path, 'w', encoding='utf-8') as f:\n f.write(statoc_index_html)\n",
"step-3": "<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dailyfresh.settings')\ndjango.setup()\n<mask token>\napp = Celery('celery_tasks.tasks', broker='redis://127.0.0.1:6379/8')\n\n\[email protected]\ndef send_register_active_email(to_email, username, token):\n \"\"\"发送激活邮件\"\"\"\n subject = '天天生鲜欢迎信息'\n message = ''\n sender = settings.EMAIL_FROM\n receiver = [to_email]\n html_message = (\n '<h1>%s,欢迎</h1><br>请点击以下链接激活<br><a href=\"http://127.0.0.1:8000/user/active/%s\">http://127.0.0.1:8000/user/active/%s</a>'\n % (username, token, token))\n send_mail(subject, message, sender, receiver, html_message=html_message)\n\n\[email protected]\ndef generate_static_index_html():\n \"\"\"产生首页静态页面\"\"\"\n types = GoodsType.objects.all()\n goods_banners = IndexGoodsBanner.objects.all().order_by('index')\n promotion_banners = IndexPromotionBanner.objects.all().order_by('index')\n for type in types:\n image_banners = IndexTypeGoodsBanner.objects.filter(type=type,\n display_type=1).order_by('index')\n title_banners = IndexTypeGoodsBanner.objects.filter(type=type,\n display_type=0).order_by('index')\n type.image_banners = image_banners\n type.title_banners = title_banners\n context = {'types': types, 'goods_banners': goods_banners,\n 'promotion_banners': promotion_banners}\n temp = loader.get_template('static_index.html')\n statoc_index_html = temp.render(context)\n save_path = os.path.join(settings.BASE_DIR,\n 'static/static_index/index.html')\n with open(save_path, 'w', encoding='utf-8') as f:\n f.write(statoc_index_html)\n",
"step-4": "from django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.template import loader, RequestContext\nfrom celery import Celery\nimport time\nimport os\nimport django\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dailyfresh.settings')\ndjango.setup()\nfrom goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner\napp = Celery('celery_tasks.tasks', broker='redis://127.0.0.1:6379/8')\n\n\[email protected]\ndef send_register_active_email(to_email, username, token):\n \"\"\"发送激活邮件\"\"\"\n subject = '天天生鲜欢迎信息'\n message = ''\n sender = settings.EMAIL_FROM\n receiver = [to_email]\n html_message = (\n '<h1>%s,欢迎</h1><br>请点击以下链接激活<br><a href=\"http://127.0.0.1:8000/user/active/%s\">http://127.0.0.1:8000/user/active/%s</a>'\n % (username, token, token))\n send_mail(subject, message, sender, receiver, html_message=html_message)\n\n\[email protected]\ndef generate_static_index_html():\n \"\"\"产生首页静态页面\"\"\"\n types = GoodsType.objects.all()\n goods_banners = IndexGoodsBanner.objects.all().order_by('index')\n promotion_banners = IndexPromotionBanner.objects.all().order_by('index')\n for type in types:\n image_banners = IndexTypeGoodsBanner.objects.filter(type=type,\n display_type=1).order_by('index')\n title_banners = IndexTypeGoodsBanner.objects.filter(type=type,\n display_type=0).order_by('index')\n type.image_banners = image_banners\n type.title_banners = title_banners\n context = {'types': types, 'goods_banners': goods_banners,\n 'promotion_banners': promotion_banners}\n temp = loader.get_template('static_index.html')\n statoc_index_html = temp.render(context)\n save_path = os.path.join(settings.BASE_DIR,\n 'static/static_index/index.html')\n with open(save_path, 'w', encoding='utf-8') as f:\n f.write(statoc_index_html)\n",
"step-5": "# 使用celery\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.template import loader,RequestContext\nfrom celery import Celery\nimport time\n# 在任务处理者一\n#\n# 端加的代码\nimport os\nimport django\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"dailyfresh.settings\")\ndjango.setup()\n\nfrom goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner\n\n# 创建一个实例对象\napp = Celery('celery_tasks.tasks', broker='redis://127.0.0.1:6379/8')\n# 定义任务函数,发邮件函数\[email protected]\ndef send_register_active_email(to_email, username, token):\n '''发送激活邮件'''\n # 组织邮件信息\n subject = '天天生鲜欢迎信息'\n message = ''\n sender = settings.EMAIL_FROM\n receiver = [to_email]\n html_message = '<h1>%s,欢迎</h1><br>请点击以下链接激活<br><a href=\"http://127.0.0.1:8000/user/active/%s\">http://127.0.0.1:8000/user/active/%s</a>'%(username, token, token)\n send_mail(subject, message, sender, receiver, html_message=html_message)\n\[email protected]\ndef generate_static_index_html():\n '''产生首页静态页面'''\n types = GoodsType.objects.all()\n # 获取首页轮播图信息\n goods_banners = IndexGoodsBanner.objects.all().order_by('index')\n # 获取首页促销信息\n promotion_banners = IndexPromotionBanner.objects.all().order_by('index')\n # 获取首页分类商品展示信息\n #type_goods_banners = IndexTypeGoodsBanner.objects.all()\n for type in types:\n\n # 获取type种类首页分类商品图片信息\n image_banners = IndexTypeGoodsBanner.objects.filter(type=type, display_type=1).order_by('index')\n # 获取type种类首页分类商品的文字展示信息\n title_banners = IndexTypeGoodsBanner.objects.filter(type=type, display_type=0).order_by('index')\n # 将查出来的数据动态添加到type中\n type.image_banners = image_banners\n type.title_banners = title_banners\n # 获取用户购物车中商品信息\n # 组织模范上下文\n context = {'types': types,\n 'goods_banners': goods_banners,\n 'promotion_banners': promotion_banners}\n\n # 加载模板文件\n temp = loader.get_template('static_index.html')\n # 定义模板上下文\n # 模板渲染\n statoc_index_html = temp.render(context)\n\n save_path = os.path.join(settings.BASE_DIR, 'static/static_index/index.html')\n with open(save_path,'w',encoding='utf-8') as f:\n f.write(statoc_index_html)\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QuoteListPagination(PageNumberPagination):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QuoteListPagination(PageNumberPagination):
page_size = 30
<|reserved_special_token_1|>
from rest_framework.pagination import PageNumberPagination
class QuoteListPagination(PageNumberPagination):
page_size = 30
|
flexible
|
{
"blob_id": "4245da12eb7f9dd08c863e368efbd0bcf0b8fa04",
"index": 6816,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass QuoteListPagination(PageNumberPagination):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass QuoteListPagination(PageNumberPagination):\n page_size = 30\n",
"step-4": "from rest_framework.pagination import PageNumberPagination\n\n\nclass QuoteListPagination(PageNumberPagination):\n page_size = 30\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import itertools
def odds(upper_limit):
return [i for i in range(1,upper_limit,2)]
def evens(upper_limit):
return [i for i in range(0,upper_limit,2)]
nested = [i**j for i in range(1,10) for j in range(1,4)]
vowels = ['a', 'e', 'i', 'o', 'u']
consonants = [chr(i) for i in range(97,123) if chr(i) not in vowels]
ascii_table = {i:chr(i) for i in itertools.chain(range(65,91), range(97,123))}
ascii_lowercase = {i:chr(i) for i in ascii_table.keys() if chr(i) == chr(i).lower()}
if __name__ == "__main__":
print('odds', odds(12))
print('evens', evens(11))
print('nested', nested)
print('consonants', consonants)
print('ord of vowels', [ord(char) for char in vowels])
|
normal
|
{
"blob_id": "a2e4e4a0c49c319df2adb073b11107d3f520aa6e",
"index": 1883,
"step-1": "<mask token>\n\n\ndef evens(upper_limit):\n return [i for i in range(0, upper_limit, 2)]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef odds(upper_limit):\n return [i for i in range(1, upper_limit, 2)]\n\n\ndef evens(upper_limit):\n return [i for i in range(0, upper_limit, 2)]\n\n\n<mask token>\nif __name__ == '__main__':\n print('odds', odds(12))\n print('evens', evens(11))\n print('nested', nested)\n print('consonants', consonants)\n print('ord of vowels', [ord(char) for char in vowels])\n",
"step-3": "<mask token>\n\n\ndef odds(upper_limit):\n return [i for i in range(1, upper_limit, 2)]\n\n\ndef evens(upper_limit):\n return [i for i in range(0, upper_limit, 2)]\n\n\nnested = [(i ** j) for i in range(1, 10) for j in range(1, 4)]\nvowels = ['a', 'e', 'i', 'o', 'u']\nconsonants = [chr(i) for i in range(97, 123) if chr(i) not in vowels]\nascii_table = {i: chr(i) for i in itertools.chain(range(65, 91), range(97, \n 123))}\nascii_lowercase = {i: chr(i) for i in ascii_table.keys() if chr(i) == chr(i\n ).lower()}\nif __name__ == '__main__':\n print('odds', odds(12))\n print('evens', evens(11))\n print('nested', nested)\n print('consonants', consonants)\n print('ord of vowels', [ord(char) for char in vowels])\n",
"step-4": "import itertools\n\n\ndef odds(upper_limit):\n return [i for i in range(1, upper_limit, 2)]\n\n\ndef evens(upper_limit):\n return [i for i in range(0, upper_limit, 2)]\n\n\nnested = [(i ** j) for i in range(1, 10) for j in range(1, 4)]\nvowels = ['a', 'e', 'i', 'o', 'u']\nconsonants = [chr(i) for i in range(97, 123) if chr(i) not in vowels]\nascii_table = {i: chr(i) for i in itertools.chain(range(65, 91), range(97, \n 123))}\nascii_lowercase = {i: chr(i) for i in ascii_table.keys() if chr(i) == chr(i\n ).lower()}\nif __name__ == '__main__':\n print('odds', odds(12))\n print('evens', evens(11))\n print('nested', nested)\n print('consonants', consonants)\n print('ord of vowels', [ord(char) for char in vowels])\n",
"step-5": "import itertools\n\ndef odds(upper_limit):\n return [i for i in range(1,upper_limit,2)]\n\ndef evens(upper_limit):\n return [i for i in range(0,upper_limit,2)]\n\nnested = [i**j for i in range(1,10) for j in range(1,4)]\n\nvowels = ['a', 'e', 'i', 'o', 'u']\n\nconsonants = [chr(i) for i in range(97,123) if chr(i) not in vowels]\n\nascii_table = {i:chr(i) for i in itertools.chain(range(65,91), range(97,123))}\n\nascii_lowercase = {i:chr(i) for i in ascii_table.keys() if chr(i) == chr(i).lower()}\n\n\n\nif __name__ == \"__main__\":\n print('odds', odds(12))\n print('evens', evens(11))\n print('nested', nested) \n print('consonants', consonants)\n print('ord of vowels', [ord(char) for char in vowels]) \n \n\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
#!/usr/bin/env python3
"""
Main chat API module
"""
import json
import os
import signal
import traceback
import tornado.escape
import tornado.gen
import tornado.httpserver
import tornado.ioloop
import tornado.locks
import tornado.web
from jsonschema.exceptions import ValidationError
from db import DB, DatabaseError
from logging_utils import get_logger, init_logging
from messages import MessagesNewAPI
from messages import MessagesUpdatesAPI
from users import UsersAPI
from chats import ChatsAPI, ChatsUserAPI
from contacts import ContactsAPI
LOGGER = get_logger(__name__)
SERVER_VERSION = os.getenv('VERSION', 'unknown')
PUBLIC_API_PORT = 8888
DATABASE_LOCATION = os.getenv('DATABASE_LOCATION', '/tmp/cryptochat_db.json')
_SHUTDOWN_TIMEOUT = 3
class BaseHandler(tornado.web.RequestHandler):
"""Base handler setting CORS headers."""
messages_new_api = None
messages_updates_api = None
users_api = None
chats_api = None
chats_user_api = None
contacts_new_api = None
def data_received(self, chunk):
pass
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "Content-Type")
def options(self):
"""Answer OPTIONS request."""
self.finish()
def get_post_data(self):
"""extract input JSON from POST request"""
json_data = ''
# check if JSON is passed as a file or as a body of POST request
if self.request.files:
json_data = self.request.files['file'][0][
'body'] # pick up only first file (index 0)
elif self.request.body:
json_data = self.request.body
try:
data = json.loads(json_data)
except ValueError:
data = None
return data
async def handle_request(self, api_endpoint, api_version):
"""Takes care of validation of input and execution of POST and GET methods."""
code = 400
data = self.get_post_data()
request_method = self.request.method.lower()
if data:
try:
# will call process_get or process_post methods for the given API
res = await getattr(api_endpoint, 'process_' + request_method)(api_version, data)
code = 200
except ValidationError as validerr:
if validerr.absolute_path:
res = '%s : %s' % (
validerr.absolute_path.pop(), validerr.message)
else:
res = '%s' % validerr.message
LOGGER.error('ValidationError: %s', res)
raise tornado.web.HTTPError(reason=res)
except ValueError as valuerr:
res = str(valuerr)
LOGGER.error('ValueError: %s', res)
raise tornado.web.HTTPError(reason=res)
except DatabaseError as dberr:
err_id = dberr.__hash__()
res = str(dberr.reason)
LOGGER.error(res)
LOGGER.info("Input data for <%s>: %s", err_id, data)
raise dberr
except Exception as err: # pylint: disable=broad-except
err_id = err.__hash__()
                res = 'Internal server error <%s>: ' \
                      'please include this error id in the bug report.' % err_id
code = 500
LOGGER.exception(res)
LOGGER.info("Input data for <%s>: %s", err_id, data)
raise tornado.web.HTTPError(reason=res)
else:
res = 'Error: malformed input JSON.'
LOGGER.error(res)
raise tornado.web.HTTPError(reason=res)
# raise tornado.web.HTTPError(status_code=444, reason='error happened')
self.set_status(code)
self.write(res)
def write_error(self, status_code, **kwargs):
self.set_header('Content-Type', 'application/json')
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
lines = []
for line in traceback.format_exception(*kwargs["exc_info"]):
lines.append(line)
self.finish(json.dumps({
'error': {
'code': status_code,
'message': self._reason,
'traceback': lines,
}
}))
else:
self.finish(json.dumps({
'error': {
'code': status_code,
'message': self._reason,
}
}))
class MainHandler(BaseHandler):
"""Handler for the API root."""
def get(self):
"""Returns the root endpoint of the API."""
self.write(
'{"error": "cryptochat-server main page, '
'please refer to /api/message/new or /api/message/updates"}')
class MessageNewHandler(BaseHandler):
"""Post a new message to the chat room."""
async def post(self):
"""
Add a new message to the server.
"""
await self.handle_request(self.messages_new_api, 1)
class MessageUpdatesHandler(BaseHandler):
"""Long-polling request for new messages.
Waits until new messages are available before returning anything.
"""
async def post(self):
"""Checks for the new message updates, waits until
new messages are available."""
await self.handle_request(self.messages_updates_api, 1)
# def on_connection_close(self):
# self.wait_future.cancel()
class UsersHandler(BaseHandler):
"""Handler class providing /users POST requests."""
async def post(self):
"""Adds a new user to the database."""
await self.handle_request(self.users_api, 1)
async def get(self):
"""Returns details of particular user."""
await self.handle_request(self.users_api, 1)
class ChatsHandler(BaseHandler):
"""Handler providing /chats POST requests"""
async def post(self):
"""Adds a new chat to the database."""
await self.handle_request(self.chats_api, 1)
async def get(self):
"""Returns details of particular chat."""
await self.handle_request(self.chats_api, 1)
class ChatsUserHandler(BaseHandler):
"""Handler providing /chats/user GET requests"""
async def get(self):
"""Returns chats for the given user."""
await self.handle_request(self.chats_user_api, 1)
class ContactsNewHandler(BaseHandler):
"""Handler providing /contacts POST requests"""
async def post(self):
"""Adds a new contact to the database"""
await self.handle_request(self.contacts_new_api, 1)
async def get(self):
"""Returns details of particular contact."""
await self.handle_request(self.contacts_new_api, 1)
class Application(tornado.web.Application):
""" main cryptochat application class """
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/api/message/new", MessageNewHandler),
(r"/api/message/updates", MessageUpdatesHandler),
(r"/api/users", UsersHandler),
(r"/api/chats", ChatsHandler),
(r"/api/chats/user", ChatsUserHandler),
(r"/api/contacts", ContactsNewHandler),
]
tornado.web.Application.__init__(self, handlers, debug=True, serve_traceback=False)
def main():
""" The main function. It creates cryptochat application, run everything."""
async def shutdown():
server.stop()
await tornado.gen.sleep(_SHUTDOWN_TIMEOUT)
tornado.ioloop.IOLoop.current().stop()
LOGGER.info("Server was successfully shut down.")
def exit_handler(sig, frame): # pylint: disable=unused-argument
def get_sig_name(sig):
return dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))
if v.startswith('SIG') and not v.startswith('SIG_')).pop(sig)
LOGGER.warning("Registered %s, shutting down.", get_sig_name(sig))
tornado.ioloop.IOLoop.instance().add_callback_from_signal(shutdown)
signal.signal(signal.SIGTERM, exit_handler)
signal.signal(signal.SIGINT, exit_handler)
init_logging()
cryptochat_db = DB(DATABASE_LOCATION)
cryptochat_app = Application()
server = tornado.httpserver.HTTPServer(cryptochat_app)
server.bind(PUBLIC_API_PORT)
server.start()
LOGGER.info("Starting cryptochat (version %s).", SERVER_VERSION)
BaseHandler.messages_new_api = MessagesNewAPI(cryptochat_db)
BaseHandler.messages_updates_api = MessagesUpdatesAPI(cryptochat_db)
BaseHandler.users_api = UsersAPI(cryptochat_db)
BaseHandler.chats_api = ChatsAPI(cryptochat_db)
BaseHandler.chats_user_api = ChatsUserAPI(cryptochat_db)
BaseHandler.contacts_new_api = ContactsAPI(cryptochat_db)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "9f8d79d141d414c1256e39f58e59f97711acfee4",
"index": 4915,
"step-1": "<mask token>\n\n\nclass MainHandler(BaseHandler):\n <mask token>\n\n def get(self):\n \"\"\"Returns the root endpoint of the API.\"\"\"\n self.write(\n '{\"error\": \"cryptochat-server main page, please refer to /api/message/new or /api/message/updates\"}'\n )\n\n\nclass MessageNewHandler(BaseHandler):\n \"\"\"Post a new message to the chat room.\"\"\"\n\n async def post(self):\n \"\"\"\n Add a new message to the server.\n \"\"\"\n await self.handle_request(self.messages_new_api, 1)\n\n\nclass MessageUpdatesHandler(BaseHandler):\n \"\"\"Long-polling request for new messages.\n\n Waits until new messages are available before returning anything.\n \"\"\"\n\n async def post(self):\n \"\"\"Checks for the new message updates, waits until\n new messages are available.\"\"\"\n await self.handle_request(self.messages_updates_api, 1)\n\n\nclass UsersHandler(BaseHandler):\n \"\"\"Handler class providing /users POST requests.\"\"\"\n\n async def post(self):\n \"\"\"Adds a new user to the database.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular user.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n\nclass ChatsHandler(BaseHandler):\n \"\"\"Handler providing /chats POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new chat to the database.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular chat.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n\nclass ChatsUserHandler(BaseHandler):\n \"\"\"Handler providing /chats/user GET requests\"\"\"\n\n async def get(self):\n \"\"\"Returns chats for the given user.\"\"\"\n await self.handle_request(self.chats_user_api, 1)\n\n\nclass ContactsNewHandler(BaseHandler):\n \"\"\"Handler providing /contacts POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new contact to the database\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular contact.\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n\nclass Application(tornado.web.Application):\n \"\"\" main cryptochat application class \"\"\"\n\n def __init__(self):\n handlers = [('/', MainHandler), ('/api/message/new',\n MessageNewHandler), ('/api/message/updates',\n MessageUpdatesHandler), ('/api/users', UsersHandler), (\n '/api/chats', ChatsHandler), ('/api/chats/user',\n ChatsUserHandler), ('/api/contacts', ContactsNewHandler)]\n tornado.web.Application.__init__(self, handlers, debug=True,\n serve_traceback=False)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n async def handle_request(self, api_endpoint, api_version):\n \"\"\"Takes care of validation of input and execution of POST and GET methods.\"\"\"\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n res = await getattr(api_endpoint, 'process_' + request_method)(\n api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (validerr.absolute_path.pop(),\n validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise dberr\n except Exception as err:\n err_id = err.__hash__()\n res = (\n 'Internal server error <%s>:please include this error id in bug report.'\n % err_id)\n code = 500\n LOGGER.exception(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n self.set_status(code)\n self.write(res)\n <mask token>\n\n\nclass MainHandler(BaseHandler):\n \"\"\"Handler for the API root.\"\"\"\n\n def get(self):\n \"\"\"Returns the root endpoint of the API.\"\"\"\n self.write(\n '{\"error\": \"cryptochat-server main page, please refer to /api/message/new or /api/message/updates\"}'\n )\n\n\nclass MessageNewHandler(BaseHandler):\n \"\"\"Post a new message to the chat room.\"\"\"\n\n async def post(self):\n \"\"\"\n Add a new message to the server.\n \"\"\"\n await self.handle_request(self.messages_new_api, 1)\n\n\nclass MessageUpdatesHandler(BaseHandler):\n \"\"\"Long-polling request for new messages.\n\n Waits until new messages are available before returning anything.\n \"\"\"\n\n async def post(self):\n \"\"\"Checks for the new message updates, waits until\n new messages are available.\"\"\"\n await self.handle_request(self.messages_updates_api, 1)\n\n\nclass UsersHandler(BaseHandler):\n \"\"\"Handler class providing /users POST requests.\"\"\"\n\n async def post(self):\n \"\"\"Adds a new user to the database.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular user.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n\nclass ChatsHandler(BaseHandler):\n \"\"\"Handler providing /chats POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new chat to the database.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular chat.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n\nclass ChatsUserHandler(BaseHandler):\n \"\"\"Handler providing /chats/user GET requests\"\"\"\n\n async def get(self):\n \"\"\"Returns chats for the given user.\"\"\"\n await self.handle_request(self.chats_user_api, 1)\n\n\nclass ContactsNewHandler(BaseHandler):\n \"\"\"Handler providing /contacts POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new contact to the database\"\"\"\n await 
self.handle_request(self.contacts_new_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular contact.\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n\nclass Application(tornado.web.Application):\n \"\"\" main cryptochat application class \"\"\"\n\n def __init__(self):\n handlers = [('/', MainHandler), ('/api/message/new',\n MessageNewHandler), ('/api/message/updates',\n MessageUpdatesHandler), ('/api/users', UsersHandler), (\n '/api/chats', ChatsHandler), ('/api/chats/user',\n ChatsUserHandler), ('/api/contacts', ContactsNewHandler)]\n tornado.web.Application.__init__(self, handlers, debug=True,\n serve_traceback=False)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def data_received(self, chunk):\n pass\n\n def set_default_headers(self):\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers', 'Content-Type')\n <mask token>\n\n def get_post_data(self):\n \"\"\"extract input JSON from POST request\"\"\"\n json_data = ''\n if self.request.files:\n json_data = self.request.files['file'][0]['body']\n elif self.request.body:\n json_data = self.request.body\n try:\n data = json.loads(json_data)\n except ValueError:\n data = None\n return data\n\n async def handle_request(self, api_endpoint, api_version):\n \"\"\"Takes care of validation of input and execution of POST and GET methods.\"\"\"\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n res = await getattr(api_endpoint, 'process_' + request_method)(\n api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (validerr.absolute_path.pop(),\n validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise dberr\n except Exception as err:\n err_id = err.__hash__()\n res = (\n 'Internal server error <%s>:please include this error id in bug report.'\n % err_id)\n code = 500\n LOGGER.exception(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n self.set_status(code)\n self.write(res)\n <mask token>\n\n\nclass MainHandler(BaseHandler):\n \"\"\"Handler for the API root.\"\"\"\n\n def get(self):\n \"\"\"Returns the root endpoint of the API.\"\"\"\n self.write(\n '{\"error\": \"cryptochat-server main page, please refer to /api/message/new or /api/message/updates\"}'\n )\n\n\nclass MessageNewHandler(BaseHandler):\n \"\"\"Post a new message to the chat room.\"\"\"\n\n async def post(self):\n \"\"\"\n Add a new message to the server.\n \"\"\"\n await self.handle_request(self.messages_new_api, 1)\n\n\nclass MessageUpdatesHandler(BaseHandler):\n \"\"\"Long-polling request for new messages.\n\n Waits until new messages are available before returning anything.\n \"\"\"\n\n async def post(self):\n \"\"\"Checks for the new message updates, waits until\n new messages are available.\"\"\"\n await self.handle_request(self.messages_updates_api, 1)\n\n\nclass UsersHandler(BaseHandler):\n \"\"\"Handler class providing /users POST requests.\"\"\"\n\n async def post(self):\n \"\"\"Adds a new user to the database.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular user.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n\nclass ChatsHandler(BaseHandler):\n \"\"\"Handler providing /chats POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new chat to the database.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n async def get(self):\n \"\"\"Returns details of 
particular chat.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n\nclass ChatsUserHandler(BaseHandler):\n \"\"\"Handler providing /chats/user GET requests\"\"\"\n\n async def get(self):\n \"\"\"Returns chats for the given user.\"\"\"\n await self.handle_request(self.chats_user_api, 1)\n\n\nclass ContactsNewHandler(BaseHandler):\n \"\"\"Handler providing /contacts POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new contact to the database\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular contact.\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n\nclass Application(tornado.web.Application):\n \"\"\" main cryptochat application class \"\"\"\n\n def __init__(self):\n handlers = [('/', MainHandler), ('/api/message/new',\n MessageNewHandler), ('/api/message/updates',\n MessageUpdatesHandler), ('/api/users', UsersHandler), (\n '/api/chats', ChatsHandler), ('/api/chats/user',\n ChatsUserHandler), ('/api/contacts', ContactsNewHandler)]\n tornado.web.Application.__init__(self, handlers, debug=True,\n serve_traceback=False)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n <mask token>\n messages_new_api = None\n messages_updates_api = None\n users_api = None\n chats_api = None\n chats_user_api = None\n contacts_new_api = None\n\n def data_received(self, chunk):\n pass\n\n def set_default_headers(self):\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers', 'Content-Type')\n\n def options(self):\n \"\"\"Answer OPTIONS request.\"\"\"\n self.finish()\n\n def get_post_data(self):\n \"\"\"extract input JSON from POST request\"\"\"\n json_data = ''\n if self.request.files:\n json_data = self.request.files['file'][0]['body']\n elif self.request.body:\n json_data = self.request.body\n try:\n data = json.loads(json_data)\n except ValueError:\n data = None\n return data\n\n async def handle_request(self, api_endpoint, api_version):\n \"\"\"Takes care of validation of input and execution of POST and GET methods.\"\"\"\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n res = await getattr(api_endpoint, 'process_' + request_method)(\n api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (validerr.absolute_path.pop(),\n validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise dberr\n except Exception as err:\n err_id = err.__hash__()\n res = (\n 'Internal server error <%s>:please include this error id in bug report.'\n % err_id)\n code = 500\n LOGGER.exception(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n self.set_status(code)\n self.write(res)\n\n def write_error(self, status_code, **kwargs):\n self.set_header('Content-Type', 'application/json')\n if self.settings.get('serve_traceback') and 'exc_info' in kwargs:\n lines = []\n for line in traceback.format_exception(*kwargs['exc_info']):\n lines.append(line)\n self.finish(json.dumps({'error': {'code': status_code,\n 'message': self._reason, 'traceback': lines}}))\n else:\n self.finish(json.dumps({'error': {'code': status_code,\n 'message': self._reason}}))\n\n\nclass MainHandler(BaseHandler):\n \"\"\"Handler for the API root.\"\"\"\n\n def get(self):\n \"\"\"Returns the root endpoint of the API.\"\"\"\n self.write(\n '{\"error\": \"cryptochat-server main page, please refer to /api/message/new or /api/message/updates\"}'\n )\n\n\nclass MessageNewHandler(BaseHandler):\n \"\"\"Post a new message to the chat room.\"\"\"\n\n async def post(self):\n \"\"\"\n Add a new message to the server.\n \"\"\"\n await self.handle_request(self.messages_new_api, 1)\n\n\nclass MessageUpdatesHandler(BaseHandler):\n \"\"\"Long-polling request for new messages.\n\n Waits until new messages are available before returning anything.\n \"\"\"\n\n async def post(self):\n \"\"\"Checks for the new message updates, waits until\n new messages are available.\"\"\"\n await self.handle_request(self.messages_updates_api, 1)\n\n\nclass UsersHandler(BaseHandler):\n 
\"\"\"Handler class providing /users POST requests.\"\"\"\n\n async def post(self):\n \"\"\"Adds a new user to the database.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular user.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n\nclass ChatsHandler(BaseHandler):\n \"\"\"Handler providing /chats POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new chat to the database.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular chat.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n\nclass ChatsUserHandler(BaseHandler):\n \"\"\"Handler providing /chats/user GET requests\"\"\"\n\n async def get(self):\n \"\"\"Returns chats for the given user.\"\"\"\n await self.handle_request(self.chats_user_api, 1)\n\n\nclass ContactsNewHandler(BaseHandler):\n \"\"\"Handler providing /contacts POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new contact to the database\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular contact.\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n\nclass Application(tornado.web.Application):\n \"\"\" main cryptochat application class \"\"\"\n\n def __init__(self):\n handlers = [('/', MainHandler), ('/api/message/new',\n MessageNewHandler), ('/api/message/updates',\n MessageUpdatesHandler), ('/api/users', UsersHandler), (\n '/api/chats', ChatsHandler), ('/api/chats/user',\n ChatsUserHandler), ('/api/contacts', ContactsNewHandler)]\n tornado.web.Application.__init__(self, handlers, debug=True,\n serve_traceback=False)\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n\"\"\"\nMain chat API module\n\"\"\"\n\nimport json\nimport os\nimport signal\nimport traceback\n\nimport tornado.escape\nimport tornado.gen\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.locks\nimport tornado.web\nfrom jsonschema.exceptions import ValidationError\n\nfrom db import DB, DatabaseError\nfrom logging_utils import get_logger, init_logging\nfrom messages import MessagesNewAPI\nfrom messages import MessagesUpdatesAPI\nfrom users import UsersAPI\nfrom chats import ChatsAPI, ChatsUserAPI\nfrom contacts import ContactsAPI\n\nLOGGER = get_logger(__name__)\nSERVER_VERSION = os.getenv('VERSION', 'unknown')\nPUBLIC_API_PORT = 8888\nDATABASE_LOCATION = os.getenv('DATABASE_LOCATION', '/tmp/cryptochat_db.json')\n_SHUTDOWN_TIMEOUT = 3\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n \"\"\"Base handler setting CORS headers.\"\"\"\n\n messages_new_api = None\n messages_updates_api = None\n users_api = None\n chats_api = None\n chats_user_api = None\n contacts_new_api = None\n\n def data_received(self, chunk):\n pass\n\n def set_default_headers(self):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\n def options(self):\n \"\"\"Answer OPTIONS request.\"\"\"\n self.finish()\n\n def get_post_data(self):\n \"\"\"extract input JSON from POST request\"\"\"\n json_data = ''\n\n # check if JSON is passed as a file or as a body of POST request\n if self.request.files:\n json_data = self.request.files['file'][0][\n 'body'] # pick up only first file (index 0)\n elif self.request.body:\n json_data = self.request.body\n\n try:\n data = json.loads(json_data)\n except ValueError:\n data = None\n return data\n\n async def handle_request(self, api_endpoint, api_version):\n \"\"\"Takes care of validation of input and execution of POST and GET methods.\"\"\"\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n # will call process_get or process_post methods for the given API\n res = await getattr(api_endpoint, 'process_' + request_method)(api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (\n validerr.absolute_path.pop(), validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info(\"Input data for <%s>: %s\", err_id, data)\n raise dberr\n except Exception as err: # pylint: disable=broad-except\n err_id = err.__hash__()\n res = 'Internal server error <%s>:' \\\n 'please include this error id in bug report.' 
% err_id\n code = 500\n LOGGER.exception(res)\n LOGGER.info(\"Input data for <%s>: %s\", err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n\n # raise tornado.web.HTTPError(status_code=444, reason='error happened')\n self.set_status(code)\n self.write(res)\n\n def write_error(self, status_code, **kwargs):\n\n self.set_header('Content-Type', 'application/json')\n if self.settings.get(\"serve_traceback\") and \"exc_info\" in kwargs:\n # in debug mode, try to send a traceback\n lines = []\n for line in traceback.format_exception(*kwargs[\"exc_info\"]):\n lines.append(line)\n self.finish(json.dumps({\n 'error': {\n 'code': status_code,\n 'message': self._reason,\n 'traceback': lines,\n }\n }))\n else:\n self.finish(json.dumps({\n 'error': {\n 'code': status_code,\n 'message': self._reason,\n }\n }))\n\n\nclass MainHandler(BaseHandler):\n \"\"\"Handler for the API root.\"\"\"\n\n def get(self):\n \"\"\"Returns the root endpoint of the API.\"\"\"\n self.write(\n '{\"error\": \"cryptochat-server main page, '\n 'please refer to /api/message/new or /api/message/updates\"}')\n\n\nclass MessageNewHandler(BaseHandler):\n \"\"\"Post a new message to the chat room.\"\"\"\n\n async def post(self):\n \"\"\"\n Add a new message to the server.\n \"\"\"\n await self.handle_request(self.messages_new_api, 1)\n\n\nclass MessageUpdatesHandler(BaseHandler):\n \"\"\"Long-polling request for new messages.\n\n Waits until new messages are available before returning anything.\n \"\"\"\n\n async def post(self):\n \"\"\"Checks for the new message updates, waits until\n new messages are available.\"\"\"\n await self.handle_request(self.messages_updates_api, 1)\n\n # def on_connection_close(self):\n # self.wait_future.cancel()\n\n\nclass UsersHandler(BaseHandler):\n \"\"\"Handler class providing /users POST requests.\"\"\"\n\n async def post(self):\n \"\"\"Adds a new user to the database.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular user.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n\nclass ChatsHandler(BaseHandler):\n \"\"\"Handler providing /chats POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new chat to the database.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular chat.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n\nclass ChatsUserHandler(BaseHandler):\n \"\"\"Handler providing /chats/user GET requests\"\"\"\n\n async def get(self):\n \"\"\"Returns chats for the given user.\"\"\"\n await self.handle_request(self.chats_user_api, 1)\n\n\nclass ContactsNewHandler(BaseHandler):\n \"\"\"Handler providing /contacts POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new contact to the database\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular contact.\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n\nclass Application(tornado.web.Application):\n \"\"\" main cryptochat application class \"\"\"\n\n def __init__(self):\n handlers = [\n (r\"/\", MainHandler),\n (r\"/api/message/new\", MessageNewHandler),\n (r\"/api/message/updates\", MessageUpdatesHandler),\n (r\"/api/users\", UsersHandler),\n (r\"/api/chats\", ChatsHandler),\n (r\"/api/chats/user\", ChatsUserHandler),\n (r\"/api/contacts\", ContactsNewHandler),\n ]\n\n 
tornado.web.Application.__init__(self, handlers, debug=True, serve_traceback=False)\n\n\ndef main():\n \"\"\" The main function. It creates cryptochat application, run everything.\"\"\"\n\n async def shutdown():\n server.stop()\n await tornado.gen.sleep(_SHUTDOWN_TIMEOUT)\n tornado.ioloop.IOLoop.current().stop()\n LOGGER.info(\"Server was successfully shut down.\")\n\n def exit_handler(sig, frame): # pylint: disable=unused-argument\n def get_sig_name(sig):\n return dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))\n if v.startswith('SIG') and not v.startswith('SIG_')).pop(sig)\n\n LOGGER.warning(\"Registered %s, shutting down.\", get_sig_name(sig))\n tornado.ioloop.IOLoop.instance().add_callback_from_signal(shutdown)\n\n signal.signal(signal.SIGTERM, exit_handler)\n signal.signal(signal.SIGINT, exit_handler)\n\n init_logging()\n cryptochat_db = DB(DATABASE_LOCATION)\n\n cryptochat_app = Application()\n server = tornado.httpserver.HTTPServer(cryptochat_app)\n server.bind(PUBLIC_API_PORT)\n server.start()\n LOGGER.info(\"Starting cryptochat (version %s).\", SERVER_VERSION)\n\n BaseHandler.messages_new_api = MessagesNewAPI(cryptochat_db)\n BaseHandler.messages_updates_api = MessagesUpdatesAPI(cryptochat_db)\n BaseHandler.users_api = UsersAPI(cryptochat_db)\n BaseHandler.chats_api = ChatsAPI(cryptochat_db)\n BaseHandler.chats_user_api = ChatsUserAPI(cryptochat_db)\n BaseHandler.contacts_new_api = ContactsAPI(cryptochat_db)\n\n tornado.ioloop.IOLoop.current().start()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
17,
19,
22,
25,
31
]
}
|
[
17,
19,
22,
25,
31
] |
#!/usr/bin/python
import socket, os, datetime, time, re, sys
import numpy as np
import matplotlib.pyplot as plt
from baseband import vdif
import astropy.units as u
from scipy.signal import resample_poly
import matplotlib.patches as patches
def fbcmd(message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, int(port)))
sock.send(message.encode()) # convert message to bytestring
if DEBUG:
print('INFO: sent to '+ip+':'+port + ':' + message)
data = sock.recv(1024)
if DEBUG:
print('INFO: answer: ', data.decode())
sock.close()
return data.decode()
def get_singlefile_data(vbsname):
# TODO: Thread/IF selection in vmux step
disk2fileout = scriptdir+"/checkdata.vdif"
vmuxedfile = disk2fileout +".vmuxed"
ss = fbcmd("scan_set="+vbsname+":+2.0s:+"+extractiontime)
if " does not exist" in ss:
return [False, -1, 0, -1] # No single file data found
sc = fbcmd("scan_check?")
nbbcs = int(int(sc.split(":")[4])/2)
fbcmd("disk2file=" + disk2fileout + ":::w")
nwait = 0
time.sleep(0.25) # Wait for disk2file
while True:
stat = fbcmd("disk2file?")
if "inactive" in stat:
break
if nwait>5:
print("ERROR: Waited more than 5 sec for disk2file! Something is wrong, exiting...")
sys.exit(1)
time.sleep(1) # Wait for disk2file
nwait+=1
vmuxcmd = "vmux -v {0} 8224 15625 0,1,2,3,4,5,6,7 {1}".format(disk2fileout, vmuxedfile)
os.system(vmuxcmd)
time.sleep(5) # Wait for vmux
# Read file
fh = vdif.open(vmuxedfile, 'rs', sample_rate=sample_rate*u.MHz) # Need to specify sample rate, too short to autodetect.
start_time = fh.info()['start_time']
# Ensure file pointer is at beginning of file
fh.seek(0)
# Read all data until end
ifdata = fh.read()
# Close infile
fh.close()
return [True, nbbcs, ifdata, start_time]
def get_multifile_data(vbs, nif):
vbsname = vbs+"_"+str(nif)
disk2fileout = scriptdir+"/checkdata.vdif"
ss = fbcmd("scan_set="+vbsname+":+2.0s:+"+extractiontime)
if " does not exist" in ss:
return [-1, 0, -1]
sc = fbcmd("scan_check?")
nbbcs = int(int(sc.split(":")[4])/2)
fbcmd("disk2file=" + disk2fileout + ":::w")
nwait = 0
time.sleep(0.25) # Wait for disk2file
while True:
stat = fbcmd("disk2file?")
if "inactive" in stat:
break
if nwait>5:
print("ERROR: Waited more than 5 sec for disk2file! Something is wrong, exiting...")
sys.exit(1)
time.sleep(1) # Wait for disk2file
nwait+=1
# Read file
fh = vdif.open(disk2fileout, 'rs', sample_rate=sample_rate*u.MHz) # Need to specify sample rate, too short to autodetect.
start_time = fh.info()['start_time']
# Ensure file pointer is at beginning of file
fh.seek(0)
# Read all data until end
ifdata = fh.read()
# Close infile
fh.close()
return [nbbcs, ifdata, start_time]
def plot_bbc(bbcdata, bbc, nif):
row=(nrows-1)-nif
col=bbc-nif*bbcsperIF # Assume nbbcs always the same
nfft = bbcdata.size
states = np.unique(bbcdata, return_counts=True)
sampler_stats = states[1]/nfft
ps = np.abs(np.fft.fft(bbcdata))**2
time_step = 1.0/sample_rate
freqs = np.fft.fftfreq(nfft, time_step)
idx = np.argsort(freqs)
# Spectrum is conjugate from - to +, only plot half...
nplot = int(nfft/2)
ps2plot = ps[idx][nplot:]
# Decimate signal to 128 points
down = int(nplot/nspec)
ps2plot_dec = resample_poly(ps2plot, 1, down)
fr2plot = np.linspace(0,bbcw, nspec)
# Plot
if nif%2==0:
color = "black"
else:
color= "red"
ax = axs[row][col]
ax.plot(fr2plot, ps2plot_dec, color=color)
if col==0:
ax.set_ylabel("IF "+ str(iflabels[nif]) + "\n"+str(start_time)[:-5].replace("T","\n"), rotation=0, ha='right', va="center")
ax.text(0.5, 0.35, "BBC{0:03d}".format(bbc+1), transform=ax.transAxes, ha="center")
#print("BBC{0:03d} sampler stats: {1} %".format(bbc+1, np.round(100*sampler_stats,1)))
start=0
for i,stat in enumerate(sampler_stats):
#if i%2==0:
if i in [0,3]:
scol = "blue"
else:
scol = "green"
ax.add_patch(patches.Rectangle( (start,0), width=stat, height=0.25, edgecolor="black", facecolor = scol, fill=True, transform=ax.transAxes))
start +=stat
itot = 0
for i in [0.18,0.33,0.33]: # last 0.18 not necessary
itot+=i
ax.axvline(x=itot*bbcw)
ax.set_xlim([0,bbcw])
ip = sys.argv[1] #ip = "localhost"
port = sys.argv[2] #port = "2621" # jive5ab control port
bbcw = int(sys.argv[3]) #bbcw = 32 # MHz, width of BBC
nspec = int(sys.argv[4]) #nspec = 256 # number of points in final spectrum
bbcsperIF = int(sys.argv[5]) #bbcsperIF = 8
DEBUG=False# Print jive5ab return messages, which are parsed for results
ifs2plot = [0,1,2,3,4,5,6,7] # List IFs to plot, starting from 0.
#Plot design
nrows = 8
ncols = bbcsperIF
extractiontime = "0.01s" # At least 0.01s
iflabels = ["A", "B", "C", "D", "E", "F", "G", "H"]
plt.rcParams.update({'font.size': 8})
sample_rate = 2*bbcw # MHz
scriptdir=os.path.dirname(os.path.realpath(__file__))
scres = fbcmd("scan_check?")
if "does not exist" in scres:
vbsfile = scres.split(":")[1].split("'")[1].strip()
else:
vbsfile = scres.split(":")[2].strip() # ignore spaces around filename
if vbsfile[-2]=="_":
# Multi-file name, ignore the suffix for the initial pattern
vbsfile = vbsfile[:-2]
print("Processing VBS name " + vbsfile)
#vbsname = "testrec_freja_210526_161523"
# Prepare plot
f,axs = plt.subplots(nrows, ncols, sharex=True, figsize=(8,4), dpi=300)
for a in axs:
for b in a:
b.set_yscale("log")
b.yaxis.set_major_locator(plt.NullLocator())
b.yaxis.set_minor_locator(plt.NullLocator())
b.xaxis.set_major_locator(plt.NullLocator())
b.xaxis.set_minor_locator(plt.NullLocator())
# Remove top double line except from top row
if not b in axs[0]:
b.spines["top"].set_visible(False)
plt.subplots_adjust(left=0.125, right=0.975, top=0.925, bottom=0.05, hspace=0, wspace=0)
# Check if dealing with single-file. If so, vmux, then read all data sequentially and split
singlefile, nbbcs, data, start_time = get_singlefile_data(vbsfile)
if not singlefile:
recmode = "multifile"
# Failed single-file, try multi-file:
for nif in ifs2plot:
nbbcs, data, start_time = get_multifile_data(vbsfile, nif)
if nbbcs>0: #Check if data was found
for i in range(nbbcs):
bbc = nbbcs*nif + i
# Slice out bbc from all data
bbcdata = data[:, i].astype(int) # bbc, converted to 4 integer states (2-bit): -3, -1, +1, +3
plot_bbc(bbcdata, bbc, nif)
else:
# Singlefile, so step through all BBCs, assuming bbcperif BBCs for each IF
recmode = "vmuxed"
for bbc in range(nbbcs):
nif = int(bbc/bbcsperIF)
# Slice out bbc from all data
bbcdata = data[:, bbc].astype(int) # bbc, converted to 4 integer states (2-bit): -3, -1, +1, +3
plot_bbc(bbcdata, bbc, nif)
f.suptitle(vbsfile+": " + recmode + ", "+extractiontime + ". log10 spectra: {} points per {} MHz. Blue/green = sampler stats.".format(nspec,bbcw))
f.savefig(scriptdir+"/bandpass.pdf",dpi=300)
|
normal
|
{
"blob_id": "8eb08fa497ccf3ddc8f4d2b886c9e5a9bdb2e052",
"index": 8006,
"step-1": "<mask token>\n\n\ndef fbcmd(message):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, int(port)))\n sock.send(message.encode())\n if DEBUG:\n print('INFO: sent to ' + ip + ':' + port + ':' + message)\n data = sock.recv(1024)\n if DEBUG:\n print('INFO: answer: ', data.decode())\n sock.close()\n return data.decode()\n\n\ndef get_singlefile_data(vbsname):\n disk2fileout = scriptdir + '/checkdata.vdif'\n vmuxedfile = disk2fileout + '.vmuxed'\n ss = fbcmd('scan_set=' + vbsname + ':+2.0s:+' + extractiontime)\n if ' does not exist' in ss:\n return [False, -1, 0, -1]\n sc = fbcmd('scan_check?')\n nbbcs = int(int(sc.split(':')[4]) / 2)\n fbcmd('disk2file=' + disk2fileout + ':::w')\n nwait = 0\n time.sleep(0.25)\n while True:\n stat = fbcmd('disk2file?')\n if 'inactive' in stat:\n break\n if nwait > 5:\n print(\n 'ERROR: Waited more than 5 sec for disk2file! Something is wrong, exiting...'\n )\n sys.exit(1)\n time.sleep(1)\n nwait += 1\n vmuxcmd = 'vmux -v {0} 8224 15625 0,1,2,3,4,5,6,7 {1}'.format(disk2fileout,\n vmuxedfile)\n os.system(vmuxcmd)\n time.sleep(5)\n fh = vdif.open(vmuxedfile, 'rs', sample_rate=sample_rate * u.MHz)\n start_time = fh.info()['start_time']\n fh.seek(0)\n ifdata = fh.read()\n fh.close()\n return [True, nbbcs, ifdata, start_time]\n\n\ndef get_multifile_data(vbs, nif):\n vbsname = vbs + '_' + str(nif)\n disk2fileout = scriptdir + '/checkdata.vdif'\n ss = fbcmd('scan_set=' + vbsname + ':+2.0s:+' + extractiontime)\n if ' does not exist' in ss:\n return [-1, 0, -1]\n sc = fbcmd('scan_check?')\n nbbcs = int(int(sc.split(':')[4]) / 2)\n fbcmd('disk2file=' + disk2fileout + ':::w')\n nwait = 0\n time.sleep(0.25)\n while True:\n stat = fbcmd('disk2file?')\n if 'inactive' in stat:\n break\n if nwait > 5:\n print(\n 'ERROR: Waited more than 5 sec for disk2file! Something is wrong, exiting...'\n )\n sys.exit(1)\n time.sleep(1)\n nwait += 1\n fh = vdif.open(disk2fileout, 'rs', sample_rate=sample_rate * u.MHz)\n start_time = fh.info()['start_time']\n fh.seek(0)\n ifdata = fh.read()\n fh.close()\n return [nbbcs, ifdata, start_time]\n\n\ndef plot_bbc(bbcdata, bbc, nif):\n row = nrows - 1 - nif\n col = bbc - nif * bbcsperIF\n nfft = bbcdata.size\n states = np.unique(bbcdata, return_counts=True)\n sampler_stats = states[1] / nfft\n ps = np.abs(np.fft.fft(bbcdata)) ** 2\n time_step = 1.0 / sample_rate\n freqs = np.fft.fftfreq(nfft, time_step)\n idx = np.argsort(freqs)\n nplot = int(nfft / 2)\n ps2plot = ps[idx][nplot:]\n down = int(nplot / nspec)\n ps2plot_dec = resample_poly(ps2plot, 1, down)\n fr2plot = np.linspace(0, bbcw, nspec)\n if nif % 2 == 0:\n color = 'black'\n else:\n color = 'red'\n ax = axs[row][col]\n ax.plot(fr2plot, ps2plot_dec, color=color)\n if col == 0:\n ax.set_ylabel('IF ' + str(iflabels[nif]) + '\\n' + str(start_time)[:\n -5].replace('T', '\\n'), rotation=0, ha='right', va='center')\n ax.text(0.5, 0.35, 'BBC{0:03d}'.format(bbc + 1), transform=ax.transAxes,\n ha='center')\n start = 0\n for i, stat in enumerate(sampler_stats):\n if i in [0, 3]:\n scol = 'blue'\n else:\n scol = 'green'\n ax.add_patch(patches.Rectangle((start, 0), width=stat, height=0.25,\n edgecolor='black', facecolor=scol, fill=True, transform=ax.\n transAxes))\n start += stat\n itot = 0\n for i in [0.18, 0.33, 0.33]:\n itot += i\n ax.axvline(x=itot * bbcw)\n ax.set_xlim([0, bbcw])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fbcmd(message):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, int(port)))\n sock.send(message.encode())\n if DEBUG:\n print('INFO: sent to ' + ip + ':' + port + ':' + message)\n data = sock.recv(1024)\n if DEBUG:\n print('INFO: answer: ', data.decode())\n sock.close()\n return data.decode()\n\n\ndef get_singlefile_data(vbsname):\n disk2fileout = scriptdir + '/checkdata.vdif'\n vmuxedfile = disk2fileout + '.vmuxed'\n ss = fbcmd('scan_set=' + vbsname + ':+2.0s:+' + extractiontime)\n if ' does not exist' in ss:\n return [False, -1, 0, -1]\n sc = fbcmd('scan_check?')\n nbbcs = int(int(sc.split(':')[4]) / 2)\n fbcmd('disk2file=' + disk2fileout + ':::w')\n nwait = 0\n time.sleep(0.25)\n while True:\n stat = fbcmd('disk2file?')\n if 'inactive' in stat:\n break\n if nwait > 5:\n print(\n 'ERROR: Waited more than 5 sec for disk2file! Something is wrong, exiting...'\n )\n sys.exit(1)\n time.sleep(1)\n nwait += 1\n vmuxcmd = 'vmux -v {0} 8224 15625 0,1,2,3,4,5,6,7 {1}'.format(disk2fileout,\n vmuxedfile)\n os.system(vmuxcmd)\n time.sleep(5)\n fh = vdif.open(vmuxedfile, 'rs', sample_rate=sample_rate * u.MHz)\n start_time = fh.info()['start_time']\n fh.seek(0)\n ifdata = fh.read()\n fh.close()\n return [True, nbbcs, ifdata, start_time]\n\n\ndef get_multifile_data(vbs, nif):\n vbsname = vbs + '_' + str(nif)\n disk2fileout = scriptdir + '/checkdata.vdif'\n ss = fbcmd('scan_set=' + vbsname + ':+2.0s:+' + extractiontime)\n if ' does not exist' in ss:\n return [-1, 0, -1]\n sc = fbcmd('scan_check?')\n nbbcs = int(int(sc.split(':')[4]) / 2)\n fbcmd('disk2file=' + disk2fileout + ':::w')\n nwait = 0\n time.sleep(0.25)\n while True:\n stat = fbcmd('disk2file?')\n if 'inactive' in stat:\n break\n if nwait > 5:\n print(\n 'ERROR: Waited more than 5 sec for disk2file! 
Something is wrong, exiting...'\n )\n sys.exit(1)\n time.sleep(1)\n nwait += 1\n fh = vdif.open(disk2fileout, 'rs', sample_rate=sample_rate * u.MHz)\n start_time = fh.info()['start_time']\n fh.seek(0)\n ifdata = fh.read()\n fh.close()\n return [nbbcs, ifdata, start_time]\n\n\ndef plot_bbc(bbcdata, bbc, nif):\n row = nrows - 1 - nif\n col = bbc - nif * bbcsperIF\n nfft = bbcdata.size\n states = np.unique(bbcdata, return_counts=True)\n sampler_stats = states[1] / nfft\n ps = np.abs(np.fft.fft(bbcdata)) ** 2\n time_step = 1.0 / sample_rate\n freqs = np.fft.fftfreq(nfft, time_step)\n idx = np.argsort(freqs)\n nplot = int(nfft / 2)\n ps2plot = ps[idx][nplot:]\n down = int(nplot / nspec)\n ps2plot_dec = resample_poly(ps2plot, 1, down)\n fr2plot = np.linspace(0, bbcw, nspec)\n if nif % 2 == 0:\n color = 'black'\n else:\n color = 'red'\n ax = axs[row][col]\n ax.plot(fr2plot, ps2plot_dec, color=color)\n if col == 0:\n ax.set_ylabel('IF ' + str(iflabels[nif]) + '\\n' + str(start_time)[:\n -5].replace('T', '\\n'), rotation=0, ha='right', va='center')\n ax.text(0.5, 0.35, 'BBC{0:03d}'.format(bbc + 1), transform=ax.transAxes,\n ha='center')\n start = 0\n for i, stat in enumerate(sampler_stats):\n if i in [0, 3]:\n scol = 'blue'\n else:\n scol = 'green'\n ax.add_patch(patches.Rectangle((start, 0), width=stat, height=0.25,\n edgecolor='black', facecolor=scol, fill=True, transform=ax.\n transAxes))\n start += stat\n itot = 0\n for i in [0.18, 0.33, 0.33]:\n itot += i\n ax.axvline(x=itot * bbcw)\n ax.set_xlim([0, bbcw])\n\n\n<mask token>\nplt.rcParams.update({'font.size': 8})\n<mask token>\nif 'does not exist' in scres:\n vbsfile = scres.split(':')[1].split(\"'\")[1].strip()\nelse:\n vbsfile = scres.split(':')[2].strip()\nif vbsfile[-2] == '_':\n vbsfile = vbsfile[:-2]\nprint('Processing VBS name ' + vbsfile)\n<mask token>\nfor a in axs:\n for b in a:\n b.set_yscale('log')\n b.yaxis.set_major_locator(plt.NullLocator())\n b.yaxis.set_minor_locator(plt.NullLocator())\n b.xaxis.set_major_locator(plt.NullLocator())\n b.xaxis.set_minor_locator(plt.NullLocator())\n if not b in axs[0]:\n b.spines['top'].set_visible(False)\nplt.subplots_adjust(left=0.125, right=0.975, top=0.925, bottom=0.05, hspace\n =0, wspace=0)\n<mask token>\nif not singlefile:\n recmode = 'multifile'\n for nif in ifs2plot:\n nbbcs, data, start_time = get_multifile_data(vbsfile, nif)\n if nbbcs > 0:\n for i in range(nbbcs):\n bbc = nbbcs * nif + i\n bbcdata = data[:, i].astype(int)\n plot_bbc(bbcdata, bbc, nif)\nelse:\n recmode = 'vmuxed'\n for bbc in range(nbbcs):\n nif = int(bbc / bbcsperIF)\n bbcdata = data[:, bbc].astype(int)\n plot_bbc(bbcdata, bbc, nif)\nf.suptitle(vbsfile + ': ' + recmode + ', ' + extractiontime +\n '. log10 spectra: {} points per {} MHz. Blue/green = sampler stats.'.\n format(nspec, bbcw))\nf.savefig(scriptdir + '/bandpass.pdf', dpi=300)\n",
"step-3": "<mask token>\n\n\ndef fbcmd(message):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, int(port)))\n sock.send(message.encode())\n if DEBUG:\n print('INFO: sent to ' + ip + ':' + port + ':' + message)\n data = sock.recv(1024)\n if DEBUG:\n print('INFO: answer: ', data.decode())\n sock.close()\n return data.decode()\n\n\ndef get_singlefile_data(vbsname):\n disk2fileout = scriptdir + '/checkdata.vdif'\n vmuxedfile = disk2fileout + '.vmuxed'\n ss = fbcmd('scan_set=' + vbsname + ':+2.0s:+' + extractiontime)\n if ' does not exist' in ss:\n return [False, -1, 0, -1]\n sc = fbcmd('scan_check?')\n nbbcs = int(int(sc.split(':')[4]) / 2)\n fbcmd('disk2file=' + disk2fileout + ':::w')\n nwait = 0\n time.sleep(0.25)\n while True:\n stat = fbcmd('disk2file?')\n if 'inactive' in stat:\n break\n if nwait > 5:\n print(\n 'ERROR: Waited more than 5 sec for disk2file! Something is wrong, exiting...'\n )\n sys.exit(1)\n time.sleep(1)\n nwait += 1\n vmuxcmd = 'vmux -v {0} 8224 15625 0,1,2,3,4,5,6,7 {1}'.format(disk2fileout,\n vmuxedfile)\n os.system(vmuxcmd)\n time.sleep(5)\n fh = vdif.open(vmuxedfile, 'rs', sample_rate=sample_rate * u.MHz)\n start_time = fh.info()['start_time']\n fh.seek(0)\n ifdata = fh.read()\n fh.close()\n return [True, nbbcs, ifdata, start_time]\n\n\ndef get_multifile_data(vbs, nif):\n vbsname = vbs + '_' + str(nif)\n disk2fileout = scriptdir + '/checkdata.vdif'\n ss = fbcmd('scan_set=' + vbsname + ':+2.0s:+' + extractiontime)\n if ' does not exist' in ss:\n return [-1, 0, -1]\n sc = fbcmd('scan_check?')\n nbbcs = int(int(sc.split(':')[4]) / 2)\n fbcmd('disk2file=' + disk2fileout + ':::w')\n nwait = 0\n time.sleep(0.25)\n while True:\n stat = fbcmd('disk2file?')\n if 'inactive' in stat:\n break\n if nwait > 5:\n print(\n 'ERROR: Waited more than 5 sec for disk2file! 
Something is wrong, exiting...'\n )\n sys.exit(1)\n time.sleep(1)\n nwait += 1\n fh = vdif.open(disk2fileout, 'rs', sample_rate=sample_rate * u.MHz)\n start_time = fh.info()['start_time']\n fh.seek(0)\n ifdata = fh.read()\n fh.close()\n return [nbbcs, ifdata, start_time]\n\n\ndef plot_bbc(bbcdata, bbc, nif):\n row = nrows - 1 - nif\n col = bbc - nif * bbcsperIF\n nfft = bbcdata.size\n states = np.unique(bbcdata, return_counts=True)\n sampler_stats = states[1] / nfft\n ps = np.abs(np.fft.fft(bbcdata)) ** 2\n time_step = 1.0 / sample_rate\n freqs = np.fft.fftfreq(nfft, time_step)\n idx = np.argsort(freqs)\n nplot = int(nfft / 2)\n ps2plot = ps[idx][nplot:]\n down = int(nplot / nspec)\n ps2plot_dec = resample_poly(ps2plot, 1, down)\n fr2plot = np.linspace(0, bbcw, nspec)\n if nif % 2 == 0:\n color = 'black'\n else:\n color = 'red'\n ax = axs[row][col]\n ax.plot(fr2plot, ps2plot_dec, color=color)\n if col == 0:\n ax.set_ylabel('IF ' + str(iflabels[nif]) + '\\n' + str(start_time)[:\n -5].replace('T', '\\n'), rotation=0, ha='right', va='center')\n ax.text(0.5, 0.35, 'BBC{0:03d}'.format(bbc + 1), transform=ax.transAxes,\n ha='center')\n start = 0\n for i, stat in enumerate(sampler_stats):\n if i in [0, 3]:\n scol = 'blue'\n else:\n scol = 'green'\n ax.add_patch(patches.Rectangle((start, 0), width=stat, height=0.25,\n edgecolor='black', facecolor=scol, fill=True, transform=ax.\n transAxes))\n start += stat\n itot = 0\n for i in [0.18, 0.33, 0.33]:\n itot += i\n ax.axvline(x=itot * bbcw)\n ax.set_xlim([0, bbcw])\n\n\nip = sys.argv[1]\nport = sys.argv[2]\nbbcw = int(sys.argv[3])\nnspec = int(sys.argv[4])\nbbcsperIF = int(sys.argv[5])\nDEBUG = False\nifs2plot = [0, 1, 2, 3, 4, 5, 6, 7]\nnrows = 8\nncols = bbcsperIF\nextractiontime = '0.01s'\niflabels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\nplt.rcParams.update({'font.size': 8})\nsample_rate = 2 * bbcw\nscriptdir = os.path.dirname(os.path.realpath(__file__))\nscres = fbcmd('scan_check?')\nif 'does not exist' in scres:\n vbsfile = scres.split(':')[1].split(\"'\")[1].strip()\nelse:\n vbsfile = scres.split(':')[2].strip()\nif vbsfile[-2] == '_':\n vbsfile = vbsfile[:-2]\nprint('Processing VBS name ' + vbsfile)\nf, axs = plt.subplots(nrows, ncols, sharex=True, figsize=(8, 4), dpi=300)\nfor a in axs:\n for b in a:\n b.set_yscale('log')\n b.yaxis.set_major_locator(plt.NullLocator())\n b.yaxis.set_minor_locator(plt.NullLocator())\n b.xaxis.set_major_locator(plt.NullLocator())\n b.xaxis.set_minor_locator(plt.NullLocator())\n if not b in axs[0]:\n b.spines['top'].set_visible(False)\nplt.subplots_adjust(left=0.125, right=0.975, top=0.925, bottom=0.05, hspace\n =0, wspace=0)\nsinglefile, nbbcs, data, start_time = get_singlefile_data(vbsfile)\nif not singlefile:\n recmode = 'multifile'\n for nif in ifs2plot:\n nbbcs, data, start_time = get_multifile_data(vbsfile, nif)\n if nbbcs > 0:\n for i in range(nbbcs):\n bbc = nbbcs * nif + i\n bbcdata = data[:, i].astype(int)\n plot_bbc(bbcdata, bbc, nif)\nelse:\n recmode = 'vmuxed'\n for bbc in range(nbbcs):\n nif = int(bbc / bbcsperIF)\n bbcdata = data[:, bbc].astype(int)\n plot_bbc(bbcdata, bbc, nif)\nf.suptitle(vbsfile + ': ' + recmode + ', ' + extractiontime +\n '. log10 spectra: {} points per {} MHz. Blue/green = sampler stats.'.\n format(nspec, bbcw))\nf.savefig(scriptdir + '/bandpass.pdf', dpi=300)\n",
"step-4": "import socket, os, datetime, time, re, sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom baseband import vdif\nimport astropy.units as u\nfrom scipy.signal import resample_poly\nimport matplotlib.patches as patches\n\n\ndef fbcmd(message):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, int(port)))\n sock.send(message.encode())\n if DEBUG:\n print('INFO: sent to ' + ip + ':' + port + ':' + message)\n data = sock.recv(1024)\n if DEBUG:\n print('INFO: answer: ', data.decode())\n sock.close()\n return data.decode()\n\n\ndef get_singlefile_data(vbsname):\n disk2fileout = scriptdir + '/checkdata.vdif'\n vmuxedfile = disk2fileout + '.vmuxed'\n ss = fbcmd('scan_set=' + vbsname + ':+2.0s:+' + extractiontime)\n if ' does not exist' in ss:\n return [False, -1, 0, -1]\n sc = fbcmd('scan_check?')\n nbbcs = int(int(sc.split(':')[4]) / 2)\n fbcmd('disk2file=' + disk2fileout + ':::w')\n nwait = 0\n time.sleep(0.25)\n while True:\n stat = fbcmd('disk2file?')\n if 'inactive' in stat:\n break\n if nwait > 5:\n print(\n 'ERROR: Waited more than 5 sec for disk2file! Something is wrong, exiting...'\n )\n sys.exit(1)\n time.sleep(1)\n nwait += 1\n vmuxcmd = 'vmux -v {0} 8224 15625 0,1,2,3,4,5,6,7 {1}'.format(disk2fileout,\n vmuxedfile)\n os.system(vmuxcmd)\n time.sleep(5)\n fh = vdif.open(vmuxedfile, 'rs', sample_rate=sample_rate * u.MHz)\n start_time = fh.info()['start_time']\n fh.seek(0)\n ifdata = fh.read()\n fh.close()\n return [True, nbbcs, ifdata, start_time]\n\n\ndef get_multifile_data(vbs, nif):\n vbsname = vbs + '_' + str(nif)\n disk2fileout = scriptdir + '/checkdata.vdif'\n ss = fbcmd('scan_set=' + vbsname + ':+2.0s:+' + extractiontime)\n if ' does not exist' in ss:\n return [-1, 0, -1]\n sc = fbcmd('scan_check?')\n nbbcs = int(int(sc.split(':')[4]) / 2)\n fbcmd('disk2file=' + disk2fileout + ':::w')\n nwait = 0\n time.sleep(0.25)\n while True:\n stat = fbcmd('disk2file?')\n if 'inactive' in stat:\n break\n if nwait > 5:\n print(\n 'ERROR: Waited more than 5 sec for disk2file! 
Something is wrong, exiting...'\n )\n sys.exit(1)\n time.sleep(1)\n nwait += 1\n fh = vdif.open(disk2fileout, 'rs', sample_rate=sample_rate * u.MHz)\n start_time = fh.info()['start_time']\n fh.seek(0)\n ifdata = fh.read()\n fh.close()\n return [nbbcs, ifdata, start_time]\n\n\ndef plot_bbc(bbcdata, bbc, nif):\n row = nrows - 1 - nif\n col = bbc - nif * bbcsperIF\n nfft = bbcdata.size\n states = np.unique(bbcdata, return_counts=True)\n sampler_stats = states[1] / nfft\n ps = np.abs(np.fft.fft(bbcdata)) ** 2\n time_step = 1.0 / sample_rate\n freqs = np.fft.fftfreq(nfft, time_step)\n idx = np.argsort(freqs)\n nplot = int(nfft / 2)\n ps2plot = ps[idx][nplot:]\n down = int(nplot / nspec)\n ps2plot_dec = resample_poly(ps2plot, 1, down)\n fr2plot = np.linspace(0, bbcw, nspec)\n if nif % 2 == 0:\n color = 'black'\n else:\n color = 'red'\n ax = axs[row][col]\n ax.plot(fr2plot, ps2plot_dec, color=color)\n if col == 0:\n ax.set_ylabel('IF ' + str(iflabels[nif]) + '\\n' + str(start_time)[:\n -5].replace('T', '\\n'), rotation=0, ha='right', va='center')\n ax.text(0.5, 0.35, 'BBC{0:03d}'.format(bbc + 1), transform=ax.transAxes,\n ha='center')\n start = 0\n for i, stat in enumerate(sampler_stats):\n if i in [0, 3]:\n scol = 'blue'\n else:\n scol = 'green'\n ax.add_patch(patches.Rectangle((start, 0), width=stat, height=0.25,\n edgecolor='black', facecolor=scol, fill=True, transform=ax.\n transAxes))\n start += stat\n itot = 0\n for i in [0.18, 0.33, 0.33]:\n itot += i\n ax.axvline(x=itot * bbcw)\n ax.set_xlim([0, bbcw])\n\n\nip = sys.argv[1]\nport = sys.argv[2]\nbbcw = int(sys.argv[3])\nnspec = int(sys.argv[4])\nbbcsperIF = int(sys.argv[5])\nDEBUG = False\nifs2plot = [0, 1, 2, 3, 4, 5, 6, 7]\nnrows = 8\nncols = bbcsperIF\nextractiontime = '0.01s'\niflabels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\nplt.rcParams.update({'font.size': 8})\nsample_rate = 2 * bbcw\nscriptdir = os.path.dirname(os.path.realpath(__file__))\nscres = fbcmd('scan_check?')\nif 'does not exist' in scres:\n vbsfile = scres.split(':')[1].split(\"'\")[1].strip()\nelse:\n vbsfile = scres.split(':')[2].strip()\nif vbsfile[-2] == '_':\n vbsfile = vbsfile[:-2]\nprint('Processing VBS name ' + vbsfile)\nf, axs = plt.subplots(nrows, ncols, sharex=True, figsize=(8, 4), dpi=300)\nfor a in axs:\n for b in a:\n b.set_yscale('log')\n b.yaxis.set_major_locator(plt.NullLocator())\n b.yaxis.set_minor_locator(plt.NullLocator())\n b.xaxis.set_major_locator(plt.NullLocator())\n b.xaxis.set_minor_locator(plt.NullLocator())\n if not b in axs[0]:\n b.spines['top'].set_visible(False)\nplt.subplots_adjust(left=0.125, right=0.975, top=0.925, bottom=0.05, hspace\n =0, wspace=0)\nsinglefile, nbbcs, data, start_time = get_singlefile_data(vbsfile)\nif not singlefile:\n recmode = 'multifile'\n for nif in ifs2plot:\n nbbcs, data, start_time = get_multifile_data(vbsfile, nif)\n if nbbcs > 0:\n for i in range(nbbcs):\n bbc = nbbcs * nif + i\n bbcdata = data[:, i].astype(int)\n plot_bbc(bbcdata, bbc, nif)\nelse:\n recmode = 'vmuxed'\n for bbc in range(nbbcs):\n nif = int(bbc / bbcsperIF)\n bbcdata = data[:, bbc].astype(int)\n plot_bbc(bbcdata, bbc, nif)\nf.suptitle(vbsfile + ': ' + recmode + ', ' + extractiontime +\n '. log10 spectra: {} points per {} MHz. Blue/green = sampler stats.'.\n format(nspec, bbcw))\nf.savefig(scriptdir + '/bandpass.pdf', dpi=300)\n",
"step-5": "#!/usr/bin/python\nimport socket, os, datetime, time, re, sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom baseband import vdif\nimport astropy.units as u\nfrom scipy.signal import resample_poly\nimport matplotlib.patches as patches\n\ndef fbcmd(message):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, int(port)))\n sock.send(message.encode()) # convert message to bytestring\n if DEBUG:\n print('INFO: sent to '+ip+':'+port + ':' + message)\n data = sock.recv(1024)\n if DEBUG:\n print('INFO: answer: ', data.decode())\n sock.close()\n return data.decode()\n\ndef get_singlefile_data(vbsname):\n # TODO: Thread/IF selection in vmux step\n disk2fileout = scriptdir+\"/checkdata.vdif\"\n vmuxedfile = disk2fileout +\".vmuxed\"\n ss = fbcmd(\"scan_set=\"+vbsname+\":+2.0s:+\"+extractiontime)\n if \" does not exist\" in ss:\n return [False, -1, 0, -1] # No single file data found\n sc = fbcmd(\"scan_check?\")\n nbbcs = int(int(sc.split(\":\")[4])/2)\n fbcmd(\"disk2file=\" + disk2fileout + \":::w\")\n nwait = 0\n time.sleep(0.25) # Wait for disk2file\n while True:\n stat = fbcmd(\"disk2file?\")\n if \"inactive\" in stat:\n break\n if nwait>5:\n print(\"ERROR: Waited more than 5 sec for disk2file! Something is wrong, exiting...\")\n sys.exit(1)\n time.sleep(1) # Wait for disk2file\n nwait+=1\n vmuxcmd = \"vmux -v {0} 8224 15625 0,1,2,3,4,5,6,7 {1}\".format(disk2fileout, vmuxedfile)\n os.system(vmuxcmd)\n time.sleep(5) # Wait for vmux\n # Read file\n fh = vdif.open(vmuxedfile, 'rs', sample_rate=sample_rate*u.MHz) # Need to specify sample rate, too short to autodetect.\n start_time = fh.info()['start_time']\n # Ensure file pointer is at beginning of file\n fh.seek(0)\n # Read all data until end\n ifdata = fh.read()\n # Close infile\n fh.close()\n return [True, nbbcs, ifdata, start_time]\n\ndef get_multifile_data(vbs, nif):\n vbsname = vbs+\"_\"+str(nif)\n disk2fileout = scriptdir+\"/checkdata.vdif\"\n ss = fbcmd(\"scan_set=\"+vbsname+\":+2.0s:+\"+extractiontime)\n if \" does not exist\" in ss:\n return [-1, 0, -1]\n sc = fbcmd(\"scan_check?\")\n nbbcs = int(int(sc.split(\":\")[4])/2)\n fbcmd(\"disk2file=\" + disk2fileout + \":::w\")\n nwait = 0\n time.sleep(0.25) # Wait for disk2file\n while True:\n stat = fbcmd(\"disk2file?\")\n if \"inactive\" in stat:\n break\n if nwait>5:\n print(\"ERROR: Waited more than 5 sec for disk2file! 
Something is wrong, exiting...\")\n sys.exit(1)\n time.sleep(1) # Wait for disk2file\n nwait+=1\n # Read file\n fh = vdif.open(disk2fileout, 'rs', sample_rate=sample_rate*u.MHz) # Need to specify sample rate, too short to autodetect.\n start_time = fh.info()['start_time']\n # Ensure file pointer is at beginning of file\n fh.seek(0)\n # Read all data until end\n ifdata = fh.read()\n # Close infile\n fh.close()\n return [nbbcs, ifdata, start_time]\n\ndef plot_bbc(bbcdata, bbc, nif):\n row=(nrows-1)-nif\n col=bbc-nif*bbcsperIF # Assume nbbcs always the same\n nfft = bbcdata.size\n states = np.unique(bbcdata, return_counts=True)\n sampler_stats = states[1]/nfft\n \n ps = np.abs(np.fft.fft(bbcdata))**2\n time_step = 1.0/sample_rate\n freqs = np.fft.fftfreq(nfft, time_step)\n idx = np.argsort(freqs)\n \n # Spectrum is conjugate from - to +, only plot half...\n nplot = int(nfft/2) \n ps2plot = ps[idx][nplot:]\n \n # Decimate signal to 128 points\n down = int(nplot/nspec)\n ps2plot_dec = resample_poly(ps2plot, 1, down)\n fr2plot = np.linspace(0,bbcw, nspec)\n \n # Plot\n if nif%2==0:\n color = \"black\"\n else:\n color= \"red\"\n ax = axs[row][col]\n ax.plot(fr2plot, ps2plot_dec, color=color)\n if col==0:\n ax.set_ylabel(\"IF \"+ str(iflabels[nif]) + \"\\n\"+str(start_time)[:-5].replace(\"T\",\"\\n\"), rotation=0, ha='right', va=\"center\")\n ax.text(0.5, 0.35, \"BBC{0:03d}\".format(bbc+1), transform=ax.transAxes, ha=\"center\")\n #print(\"BBC{0:03d} sampler stats: {1} %\".format(bbc+1, np.round(100*sampler_stats,1)))\n start=0\n for i,stat in enumerate(sampler_stats):\n #if i%2==0:\n if i in [0,3]:\n scol = \"blue\"\n else:\n scol = \"green\"\n ax.add_patch(patches.Rectangle( (start,0), width=stat, height=0.25, edgecolor=\"black\", facecolor = scol, fill=True, transform=ax.transAxes))\n start +=stat\n itot = 0\n for i in [0.18,0.33,0.33]: # last 0.18 not necessary\n itot+=i\n ax.axvline(x=itot*bbcw)\n ax.set_xlim([0,bbcw])\n\nip = sys.argv[1] #ip = \"localhost\"\nport = sys.argv[2] #port = \"2621\" # jive5ab control port\nbbcw = int(sys.argv[3]) #bbcw = 32 # MHz, width of BBC\nnspec = int(sys.argv[4]) #nspec = 256 # number of points in final spectrum\nbbcsperIF = int(sys.argv[5]) #bbcsperIF = 8\n\nDEBUG=False# Print jive5ab return messages, which are parsed for results\n\nifs2plot = [0,1,2,3,4,5,6,7] # List IFs to plot, starting from 0. 
\n#Plot design\nnrows = 8\nncols = bbcsperIF\nextractiontime = \"0.01s\" # At least 0.01s\niflabels = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\"]\n\nplt.rcParams.update({'font.size': 8})\nsample_rate = 2*bbcw # MHz\nscriptdir=os.path.dirname(os.path.realpath(__file__))\n\nscres = fbcmd(\"scan_check?\")\nif \"does not exist\" in scres:\n vbsfile = scres.split(\":\")[1].split(\"'\")[1].strip()\nelse:\n vbsfile = scres.split(\":\")[2].strip() # ignore spaces around filename\nif vbsfile[-2]==\"_\":\n # Multi-file name, ignore the suffix for the initial pattern\n vbsfile = vbsfile[:-2]\nprint(\"Processing VBS name \" + vbsfile)\n\n#vbsname = \"testrec_freja_210526_161523\"\n# Prepare plot\nf,axs = plt.subplots(nrows, ncols, sharex=True, figsize=(8,4), dpi=300)\nfor a in axs:\n for b in a:\n b.set_yscale(\"log\")\n b.yaxis.set_major_locator(plt.NullLocator())\n b.yaxis.set_minor_locator(plt.NullLocator())\n b.xaxis.set_major_locator(plt.NullLocator())\n b.xaxis.set_minor_locator(plt.NullLocator())\n # Remove top double line except from top row\n if not b in axs[0]:\n b.spines[\"top\"].set_visible(False)\nplt.subplots_adjust(left=0.125, right=0.975, top=0.925, bottom=0.05, hspace=0, wspace=0)\n\n# Check if dealing with single-file. If so, vmux, then read all data sequentially and split\nsinglefile, nbbcs, data, start_time = get_singlefile_data(vbsfile)\nif not singlefile:\n recmode = \"multifile\"\n # Failed single-file, try multi-file:\n for nif in ifs2plot:\n nbbcs, data, start_time = get_multifile_data(vbsfile, nif)\n if nbbcs>0: #Check if data was found\n for i in range(nbbcs):\n bbc = nbbcs*nif + i\n # Slice out bbc from all data\n bbcdata = data[:, i].astype(int) # bbc, converted to 4 integer states (2-bit): -3, -1, +1, +3\n plot_bbc(bbcdata, bbc, nif)\nelse:\n # Singlefile, so step through all BBCs, assuming bbcperif BBCs for each IF\n recmode = \"vmuxed\"\n for bbc in range(nbbcs):\n nif = int(bbc/bbcsperIF)\n # Slice out bbc from all data\n bbcdata = data[:, bbc].astype(int) # bbc, converted to 4 integer states (2-bit): -3, -1, +1, +3\n plot_bbc(bbcdata, bbc, nif)\n\nf.suptitle(vbsfile+\": \" + recmode + \", \"+extractiontime + \". log10 spectra: {} points per {} MHz. Blue/green = sampler stats.\".format(nspec,bbcw))\nf.savefig(scriptdir+\"/bandpass.pdf\",dpi=300)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Generated by Django 3.0.8 on 2020-08-28 17:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0003_auto_20200828_1836'),
]
operations = [
migrations.AddField(
model_name='order',
name='total',
field=models.CharField(default=0, max_length=200),
preserve_default=False,
),
migrations.AlterField(
model_name='order',
name='items',
field=models.CharField(max_length=300),
),
]
|
normal
|
{
"blob_id": "1f7d770106ea8e7d1c0bb90e1fc576b7ee2f0220",
"index": 381,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop', '0003_auto_20200828_1836')]\n operations = [migrations.AddField(model_name='order', name='total',\n field=models.CharField(default=0, max_length=200), preserve_default\n =False), migrations.AlterField(model_name='order', name='items',\n field=models.CharField(max_length=300))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop', '0003_auto_20200828_1836')]\n operations = [migrations.AddField(model_name='order', name='total',\n field=models.CharField(default=0, max_length=200), preserve_default\n =False), migrations.AlterField(model_name='order', name='items',\n field=models.CharField(max_length=300))]\n",
"step-5": "# Generated by Django 3.0.8 on 2020-08-28 17:37\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0003_auto_20200828_1836'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='total',\n field=models.CharField(default=0, max_length=200),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='order',\n name='items',\n field=models.CharField(max_length=300),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math as m
def calcula_elongacao(A, ϕ, ω, t):
x = A * m.cos(ϕ + ϕ * t )
return x
|
normal
|
{
"blob_id": "225687729b64f455bcc841e83105c7444efdfad3",
"index": 5545,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef calcula_elongacao(A, φ, ω, t):\n x = A * m.cos(φ + φ * t)\n return x\n",
"step-3": "import math as m\n\n\ndef calcula_elongacao(A, φ, ω, t):\n x = A * m.cos(φ + φ * t)\n return x\n",
"step-4": "import math as m\n\ndef calcula_elongacao(A, ϕ, ω, t):\n x = A * m.cos(ϕ + ϕ * t )\n return x",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def load_files(training, testing):
tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')
tr_feat /= 255.0
tr_feat = np.insert(tr_feat, 0, 0, axis=1)
tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')
tr_exp = tr_exp[:, -1]
te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')
te_feat /= 255.0
te_feat = np.insert(te_feat, 0, 0, axis=1)
te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')
te_exp = te_exp[:, -1]
return tr_feat, tr_exp, te_feat, te_exp
def sigmoid(weight, case):
exponent = -np.dot(weight.T, case)
try:
prediction = 1.0 / (1.0 + math.exp(exponent))
except Exception as e:
return 1.0 / (1.0 + math.exp(500))
return prediction
def check_accuracy(w, x, y):
correct = 0
for i in range(x.shape[0]):
if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:
correct += 1
elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:
correct += 1
percentage_correct = correct / x.shape[0]
return percentage_correct
def gradient(training_data, training_expected, testing_data,
testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):
training_accuracies = []
testing_accuracies = []
if reg_strength is not None:
try:
reg_strength = float(reg_strength)
except:
reg_strength = None
w = np.zeros(training_data.shape[1])
for _ in range(iterations):
gradient_batch = np.zeros(training_data.shape[1])
for i in range(training_data.shape[0]):
predicted = sigmoid(w, training_data[i])
diff = np.subtract(predicted, training_expected[i])
diff = np.multiply(diff, training_data[i])
gradient_batch = np.add(gradient_batch, diff)
if reg_strength is not None:
normalized = np.linalg.norm(w)
gradient_batch = np.add(gradient_batch, np.multiply(normalized,
reg_strength))
gradient_batch = np.multiply(learning_rate, gradient_batch)
w = np.subtract(w, gradient_batch)
training_accuracies.append(check_accuracy(w, training_data,
training_expected))
testing_accuracies.append(check_accuracy(w, testing_data,
testing_expected))
return training_accuracies, testing_accuracies
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_files(training, testing):
tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')
tr_feat /= 255.0
tr_feat = np.insert(tr_feat, 0, 0, axis=1)
tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')
tr_exp = tr_exp[:, -1]
te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')
te_feat /= 255.0
te_feat = np.insert(te_feat, 0, 0, axis=1)
te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')
te_exp = te_exp[:, -1]
return tr_feat, tr_exp, te_feat, te_exp
def sigmoid(weight, case):
exponent = -np.dot(weight.T, case)
try:
prediction = 1.0 / (1.0 + math.exp(exponent))
except Exception as e:
return 1.0 / (1.0 + math.exp(500))
return prediction
def check_accuracy(w, x, y):
correct = 0
for i in range(x.shape[0]):
if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:
correct += 1
elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:
correct += 1
percentage_correct = correct / x.shape[0]
return percentage_correct
def gradient(training_data, training_expected, testing_data,
testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):
training_accuracies = []
testing_accuracies = []
if reg_strength is not None:
try:
reg_strength = float(reg_strength)
except:
reg_strength = None
w = np.zeros(training_data.shape[1])
for _ in range(iterations):
gradient_batch = np.zeros(training_data.shape[1])
for i in range(training_data.shape[0]):
predicted = sigmoid(w, training_data[i])
diff = np.subtract(predicted, training_expected[i])
diff = np.multiply(diff, training_data[i])
gradient_batch = np.add(gradient_batch, diff)
if reg_strength is not None:
normalized = np.linalg.norm(w)
gradient_batch = np.add(gradient_batch, np.multiply(normalized,
reg_strength))
gradient_batch = np.multiply(learning_rate, gradient_batch)
w = np.subtract(w, gradient_batch)
training_accuracies.append(check_accuracy(w, training_data,
training_expected))
testing_accuracies.append(check_accuracy(w, testing_data,
testing_expected))
return training_accuracies, testing_accuracies
<|reserved_special_token_0|>
if len(args) < 2:
print(
'You must include a training and testing dataset, as well as a learning rate'
, file=sys.stderr)
print('Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate'
)
exit(1)
<|reserved_special_token_0|>
for i in range(0, 100):
iterations.append(i + 1)
<|reserved_special_token_0|>
plt.ylabel('Accuracy')
plt.xlabel('Iteration')
plt.title(f'Accuracy as Function of Iteration Learing Rate = {args[2]}')
plt.plot(iterations, training_accuracies, 'b', label='training')
plt.plot(iterations, testing_accuracies, 'r', label='testing')
plt.legend()
plt.show()
plt.savefig(f'graph_results.png')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_files(training, testing):
tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')
tr_feat /= 255.0
tr_feat = np.insert(tr_feat, 0, 0, axis=1)
tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')
tr_exp = tr_exp[:, -1]
te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')
te_feat /= 255.0
te_feat = np.insert(te_feat, 0, 0, axis=1)
te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')
te_exp = te_exp[:, -1]
return tr_feat, tr_exp, te_feat, te_exp
def sigmoid(weight, case):
exponent = -np.dot(weight.T, case)
try:
prediction = 1.0 / (1.0 + math.exp(exponent))
except Exception as e:
return 1.0 / (1.0 + math.exp(500))
return prediction
def check_accuracy(w, x, y):
correct = 0
for i in range(x.shape[0]):
if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:
correct += 1
elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:
correct += 1
percentage_correct = correct / x.shape[0]
return percentage_correct
def gradient(training_data, training_expected, testing_data,
testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):
training_accuracies = []
testing_accuracies = []
if reg_strength is not None:
try:
reg_strength = float(reg_strength)
except:
reg_strength = None
w = np.zeros(training_data.shape[1])
for _ in range(iterations):
gradient_batch = np.zeros(training_data.shape[1])
for i in range(training_data.shape[0]):
predicted = sigmoid(w, training_data[i])
diff = np.subtract(predicted, training_expected[i])
diff = np.multiply(diff, training_data[i])
gradient_batch = np.add(gradient_batch, diff)
if reg_strength is not None:
normalized = np.linalg.norm(w)
gradient_batch = np.add(gradient_batch, np.multiply(normalized,
reg_strength))
gradient_batch = np.multiply(learning_rate, gradient_batch)
w = np.subtract(w, gradient_batch)
training_accuracies.append(check_accuracy(w, training_data,
training_expected))
testing_accuracies.append(check_accuracy(w, testing_data,
testing_expected))
return training_accuracies, testing_accuracies
args = sys.argv[1:]
if len(args) < 2:
print(
'You must include a training and testing dataset, as well as a learning rate'
, file=sys.stderr)
print('Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate'
)
exit(1)
iterations = []
for i in range(0, 100):
iterations.append(i + 1)
training_features, training_expected, test_features, test_expected = (
load_files(args[0], args[1]))
training_accuracies, testing_accuracies = gradient(training_features,
training_expected, test_features, test_expected, learning_rate=float(
args[2]))
plt.ylabel('Accuracy')
plt.xlabel('Iteration')
plt.title(f'Accuracy as Function of Iteration Learing Rate = {args[2]}')
plt.plot(iterations, training_accuracies, 'b', label='training')
plt.plot(iterations, testing_accuracies, 'r', label='testing')
plt.legend()
plt.show()
plt.savefig(f'graph_results.png')
<|reserved_special_token_1|>
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
import random
def load_files(training, testing):
tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')
tr_feat /= 255.0
tr_feat = np.insert(tr_feat, 0, 0, axis=1)
tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')
tr_exp = tr_exp[:, -1]
te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')
te_feat /= 255.0
te_feat = np.insert(te_feat, 0, 0, axis=1)
te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')
te_exp = te_exp[:, -1]
return tr_feat, tr_exp, te_feat, te_exp
def sigmoid(weight, case):
exponent = -np.dot(weight.T, case)
try:
prediction = 1.0 / (1.0 + math.exp(exponent))
except Exception as e:
return 1.0 / (1.0 + math.exp(500))
return prediction
def check_accuracy(w, x, y):
correct = 0
for i in range(x.shape[0]):
if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:
correct += 1
elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:
correct += 1
percentage_correct = correct / x.shape[0]
return percentage_correct
def gradient(training_data, training_expected, testing_data,
testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):
training_accuracies = []
testing_accuracies = []
if reg_strength is not None:
try:
reg_strength = float(reg_strength)
except:
reg_strength = None
w = np.zeros(training_data.shape[1])
for _ in range(iterations):
gradient_batch = np.zeros(training_data.shape[1])
for i in range(training_data.shape[0]):
predicted = sigmoid(w, training_data[i])
diff = np.subtract(predicted, training_expected[i])
diff = np.multiply(diff, training_data[i])
gradient_batch = np.add(gradient_batch, diff)
if reg_strength is not None:
normalized = np.linalg.norm(w)
gradient_batch = np.add(gradient_batch, np.multiply(normalized,
reg_strength))
gradient_batch = np.multiply(learning_rate, gradient_batch)
w = np.subtract(w, gradient_batch)
training_accuracies.append(check_accuracy(w, training_data,
training_expected))
testing_accuracies.append(check_accuracy(w, testing_data,
testing_expected))
return training_accuracies, testing_accuracies
args = sys.argv[1:]
if len(args) < 2:
print(
'You must include a training and testing dataset, as well as a learning rate'
, file=sys.stderr)
print('Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate'
)
exit(1)
iterations = []
for i in range(0, 100):
iterations.append(i + 1)
training_features, training_expected, test_features, test_expected = (
load_files(args[0], args[1]))
training_accuracies, testing_accuracies = gradient(training_features,
training_expected, test_features, test_expected, learning_rate=float(
args[2]))
plt.ylabel('Accuracy')
plt.xlabel('Iteration')
plt.title(f'Accuracy as Function of Iteration Learing Rate = {args[2]}')
plt.plot(iterations, training_accuracies, 'b', label='training')
plt.plot(iterations, testing_accuracies, 'r', label='testing')
plt.legend()
plt.show()
plt.savefig(f'graph_results.png')
<|reserved_special_token_1|>
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
import random
def load_files(training, testing):
tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=",")
tr_feat /= 255.0
tr_feat = np.insert(tr_feat, 0, 0, axis=1)
tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=",")
tr_exp = tr_exp[:, -1]
te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=",")
te_feat /= 255.0
te_feat = np.insert(te_feat, 0, 0, axis=1)
te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=",")
te_exp = te_exp[:, -1]
# for i in tr_feat:
# if i > 1 or i < 0:
# raise ValueError("WHY")
# for i in te_feat:
# if i > 1 or i < 0:
# raise ValueError("WHY")
return tr_feat, tr_exp, te_feat, te_exp
def sigmoid(weight, case):
# try:
exponent = -np.dot(weight.T, case)
try:
prediction = 1.0 / (1.0 + math.exp(exponent))
except Exception as e:
return 1.0 / (1.0 + math.exp(500))
# If you've gotten this far you've noticed that the last two accuracies are always 50%
# I couldn't tell you why, seeing as our weights look correct
# And
return prediction
def check_accuracy(w, x, y):
correct = 0
for i in range(x.shape[0]):
if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:
correct += 1
elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:
correct += 1
percentage_correct = correct / x.shape[0]
return percentage_correct
def gradient(training_data, training_expected, testing_data, testing_expected, reg_strength=None, iterations=100, learning_rate=0.00005):
training_accuracies = []
testing_accuracies = []
if reg_strength is not None:
try:
reg_strength = float(reg_strength)
except:
reg_strength = None
w = np.zeros(training_data.shape[1]) # Feature count
for _ in range(iterations):
gradient_batch = np.zeros(training_data.shape[1]) # Feature count
for i in range(training_data.shape[0]): # Example count
predicted = sigmoid(w, training_data[i])
diff = (np.subtract(
predicted, training_expected[i]))
diff = np.multiply(diff, training_data[i])
gradient_batch = np.add(gradient_batch, diff)
if reg_strength is not None:
normalized = np.linalg.norm(w)
gradient_batch = np.add(
gradient_batch, np.multiply(normalized, reg_strength))
gradient_batch = np.multiply(learning_rate, gradient_batch)
w = np.subtract(w, gradient_batch)
training_accuracies.append(check_accuracy(
w, training_data, training_expected))
testing_accuracies.append(check_accuracy(
w, testing_data, testing_expected))
return training_accuracies, testing_accuracies
args = sys.argv[1:]
if len(args) < 2:
print("You must include a training and testing dataset, as well as a learning rate", file=sys.stderr)
print("Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate")
exit(1)
iterations = []
for i in range(0, 100):
iterations.append(i+1)
training_features, training_expected, test_features, test_expected = load_files(
args[0], args[1])
training_accuracies, testing_accuracies = gradient(
training_features, training_expected, test_features, test_expected, learning_rate=float(args[2]))
plt.ylabel("Accuracy")
plt.xlabel("Iteration")
plt.title(f"Accuracy as Function of Iteration Learing Rate = {args[2]}")
plt.plot(iterations, training_accuracies, 'b', label='training')
plt.plot(iterations, testing_accuracies, 'r', label='testing')
plt.legend()
plt.show()
plt.savefig(f"graph_results.png")
|
flexible
|
{
"blob_id": "4af05a13264c249be69071447101d684ff97063e",
"index": 6725,
"step-1": "<mask token>\n\n\ndef load_files(training, testing):\n tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')\n tr_feat /= 255.0\n tr_feat = np.insert(tr_feat, 0, 0, axis=1)\n tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')\n tr_exp = tr_exp[:, -1]\n te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')\n te_feat /= 255.0\n te_feat = np.insert(te_feat, 0, 0, axis=1)\n te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')\n te_exp = te_exp[:, -1]\n return tr_feat, tr_exp, te_feat, te_exp\n\n\ndef sigmoid(weight, case):\n exponent = -np.dot(weight.T, case)\n try:\n prediction = 1.0 / (1.0 + math.exp(exponent))\n except Exception as e:\n return 1.0 / (1.0 + math.exp(500))\n return prediction\n\n\ndef check_accuracy(w, x, y):\n correct = 0\n for i in range(x.shape[0]):\n if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:\n correct += 1\n elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:\n correct += 1\n percentage_correct = correct / x.shape[0]\n return percentage_correct\n\n\ndef gradient(training_data, training_expected, testing_data,\n testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):\n training_accuracies = []\n testing_accuracies = []\n if reg_strength is not None:\n try:\n reg_strength = float(reg_strength)\n except:\n reg_strength = None\n w = np.zeros(training_data.shape[1])\n for _ in range(iterations):\n gradient_batch = np.zeros(training_data.shape[1])\n for i in range(training_data.shape[0]):\n predicted = sigmoid(w, training_data[i])\n diff = np.subtract(predicted, training_expected[i])\n diff = np.multiply(diff, training_data[i])\n gradient_batch = np.add(gradient_batch, diff)\n if reg_strength is not None:\n normalized = np.linalg.norm(w)\n gradient_batch = np.add(gradient_batch, np.multiply(normalized,\n reg_strength))\n gradient_batch = np.multiply(learning_rate, gradient_batch)\n w = np.subtract(w, gradient_batch)\n training_accuracies.append(check_accuracy(w, training_data,\n training_expected))\n testing_accuracies.append(check_accuracy(w, testing_data,\n testing_expected))\n return training_accuracies, testing_accuracies\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_files(training, testing):\n tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')\n tr_feat /= 255.0\n tr_feat = np.insert(tr_feat, 0, 0, axis=1)\n tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')\n tr_exp = tr_exp[:, -1]\n te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')\n te_feat /= 255.0\n te_feat = np.insert(te_feat, 0, 0, axis=1)\n te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')\n te_exp = te_exp[:, -1]\n return tr_feat, tr_exp, te_feat, te_exp\n\n\ndef sigmoid(weight, case):\n exponent = -np.dot(weight.T, case)\n try:\n prediction = 1.0 / (1.0 + math.exp(exponent))\n except Exception as e:\n return 1.0 / (1.0 + math.exp(500))\n return prediction\n\n\ndef check_accuracy(w, x, y):\n correct = 0\n for i in range(x.shape[0]):\n if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:\n correct += 1\n elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:\n correct += 1\n percentage_correct = correct / x.shape[0]\n return percentage_correct\n\n\ndef gradient(training_data, training_expected, testing_data,\n testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):\n training_accuracies = []\n testing_accuracies = []\n if reg_strength is not None:\n try:\n reg_strength = float(reg_strength)\n except:\n reg_strength = None\n w = np.zeros(training_data.shape[1])\n for _ in range(iterations):\n gradient_batch = np.zeros(training_data.shape[1])\n for i in range(training_data.shape[0]):\n predicted = sigmoid(w, training_data[i])\n diff = np.subtract(predicted, training_expected[i])\n diff = np.multiply(diff, training_data[i])\n gradient_batch = np.add(gradient_batch, diff)\n if reg_strength is not None:\n normalized = np.linalg.norm(w)\n gradient_batch = np.add(gradient_batch, np.multiply(normalized,\n reg_strength))\n gradient_batch = np.multiply(learning_rate, gradient_batch)\n w = np.subtract(w, gradient_batch)\n training_accuracies.append(check_accuracy(w, training_data,\n training_expected))\n testing_accuracies.append(check_accuracy(w, testing_data,\n testing_expected))\n return training_accuracies, testing_accuracies\n\n\n<mask token>\nif len(args) < 2:\n print(\n 'You must include a training and testing dataset, as well as a learning rate'\n , file=sys.stderr)\n print('Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate'\n )\n exit(1)\n<mask token>\nfor i in range(0, 100):\n iterations.append(i + 1)\n<mask token>\nplt.ylabel('Accuracy')\nplt.xlabel('Iteration')\nplt.title(f'Accuracy as Function of Iteration Learing Rate = {args[2]}')\nplt.plot(iterations, training_accuracies, 'b', label='training')\nplt.plot(iterations, testing_accuracies, 'r', label='testing')\nplt.legend()\nplt.show()\nplt.savefig(f'graph_results.png')\n",
"step-3": "<mask token>\n\n\ndef load_files(training, testing):\n tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')\n tr_feat /= 255.0\n tr_feat = np.insert(tr_feat, 0, 0, axis=1)\n tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')\n tr_exp = tr_exp[:, -1]\n te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')\n te_feat /= 255.0\n te_feat = np.insert(te_feat, 0, 0, axis=1)\n te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')\n te_exp = te_exp[:, -1]\n return tr_feat, tr_exp, te_feat, te_exp\n\n\ndef sigmoid(weight, case):\n exponent = -np.dot(weight.T, case)\n try:\n prediction = 1.0 / (1.0 + math.exp(exponent))\n except Exception as e:\n return 1.0 / (1.0 + math.exp(500))\n return prediction\n\n\ndef check_accuracy(w, x, y):\n correct = 0\n for i in range(x.shape[0]):\n if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:\n correct += 1\n elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:\n correct += 1\n percentage_correct = correct / x.shape[0]\n return percentage_correct\n\n\ndef gradient(training_data, training_expected, testing_data,\n testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):\n training_accuracies = []\n testing_accuracies = []\n if reg_strength is not None:\n try:\n reg_strength = float(reg_strength)\n except:\n reg_strength = None\n w = np.zeros(training_data.shape[1])\n for _ in range(iterations):\n gradient_batch = np.zeros(training_data.shape[1])\n for i in range(training_data.shape[0]):\n predicted = sigmoid(w, training_data[i])\n diff = np.subtract(predicted, training_expected[i])\n diff = np.multiply(diff, training_data[i])\n gradient_batch = np.add(gradient_batch, diff)\n if reg_strength is not None:\n normalized = np.linalg.norm(w)\n gradient_batch = np.add(gradient_batch, np.multiply(normalized,\n reg_strength))\n gradient_batch = np.multiply(learning_rate, gradient_batch)\n w = np.subtract(w, gradient_batch)\n training_accuracies.append(check_accuracy(w, training_data,\n training_expected))\n testing_accuracies.append(check_accuracy(w, testing_data,\n testing_expected))\n return training_accuracies, testing_accuracies\n\n\nargs = sys.argv[1:]\nif len(args) < 2:\n print(\n 'You must include a training and testing dataset, as well as a learning rate'\n , file=sys.stderr)\n print('Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate'\n )\n exit(1)\niterations = []\nfor i in range(0, 100):\n iterations.append(i + 1)\ntraining_features, training_expected, test_features, test_expected = (\n load_files(args[0], args[1]))\ntraining_accuracies, testing_accuracies = gradient(training_features,\n training_expected, test_features, test_expected, learning_rate=float(\n args[2]))\nplt.ylabel('Accuracy')\nplt.xlabel('Iteration')\nplt.title(f'Accuracy as Function of Iteration Learing Rate = {args[2]}')\nplt.plot(iterations, training_accuracies, 'b', label='training')\nplt.plot(iterations, testing_accuracies, 'r', label='testing')\nplt.legend()\nplt.show()\nplt.savefig(f'graph_results.png')\n",
"step-4": "import sys\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport random\n\n\ndef load_files(training, testing):\n tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')\n tr_feat /= 255.0\n tr_feat = np.insert(tr_feat, 0, 0, axis=1)\n tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')\n tr_exp = tr_exp[:, -1]\n te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')\n te_feat /= 255.0\n te_feat = np.insert(te_feat, 0, 0, axis=1)\n te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')\n te_exp = te_exp[:, -1]\n return tr_feat, tr_exp, te_feat, te_exp\n\n\ndef sigmoid(weight, case):\n exponent = -np.dot(weight.T, case)\n try:\n prediction = 1.0 / (1.0 + math.exp(exponent))\n except Exception as e:\n return 1.0 / (1.0 + math.exp(500))\n return prediction\n\n\ndef check_accuracy(w, x, y):\n correct = 0\n for i in range(x.shape[0]):\n if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:\n correct += 1\n elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:\n correct += 1\n percentage_correct = correct / x.shape[0]\n return percentage_correct\n\n\ndef gradient(training_data, training_expected, testing_data,\n testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):\n training_accuracies = []\n testing_accuracies = []\n if reg_strength is not None:\n try:\n reg_strength = float(reg_strength)\n except:\n reg_strength = None\n w = np.zeros(training_data.shape[1])\n for _ in range(iterations):\n gradient_batch = np.zeros(training_data.shape[1])\n for i in range(training_data.shape[0]):\n predicted = sigmoid(w, training_data[i])\n diff = np.subtract(predicted, training_expected[i])\n diff = np.multiply(diff, training_data[i])\n gradient_batch = np.add(gradient_batch, diff)\n if reg_strength is not None:\n normalized = np.linalg.norm(w)\n gradient_batch = np.add(gradient_batch, np.multiply(normalized,\n reg_strength))\n gradient_batch = np.multiply(learning_rate, gradient_batch)\n w = np.subtract(w, gradient_batch)\n training_accuracies.append(check_accuracy(w, training_data,\n training_expected))\n testing_accuracies.append(check_accuracy(w, testing_data,\n testing_expected))\n return training_accuracies, testing_accuracies\n\n\nargs = sys.argv[1:]\nif len(args) < 2:\n print(\n 'You must include a training and testing dataset, as well as a learning rate'\n , file=sys.stderr)\n print('Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate'\n )\n exit(1)\niterations = []\nfor i in range(0, 100):\n iterations.append(i + 1)\ntraining_features, training_expected, test_features, test_expected = (\n load_files(args[0], args[1]))\ntraining_accuracies, testing_accuracies = gradient(training_features,\n training_expected, test_features, test_expected, learning_rate=float(\n args[2]))\nplt.ylabel('Accuracy')\nplt.xlabel('Iteration')\nplt.title(f'Accuracy as Function of Iteration Learing Rate = {args[2]}')\nplt.plot(iterations, training_accuracies, 'b', label='training')\nplt.plot(iterations, testing_accuracies, 'r', label='testing')\nplt.legend()\nplt.show()\nplt.savefig(f'graph_results.png')\n",
"step-5": "import sys\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport random\n\n\ndef load_files(training, testing):\n tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=\",\")\n tr_feat /= 255.0\n tr_feat = np.insert(tr_feat, 0, 0, axis=1)\n tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=\",\")\n tr_exp = tr_exp[:, -1]\n\n te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=\",\")\n te_feat /= 255.0\n te_feat = np.insert(te_feat, 0, 0, axis=1)\n te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=\",\")\n te_exp = te_exp[:, -1]\n\n # for i in tr_feat:\n # if i > 1 or i < 0:\n # raise ValueError(\"WHY\")\n # for i in te_feat:\n # if i > 1 or i < 0:\n # raise ValueError(\"WHY\")\n\n return tr_feat, tr_exp, te_feat, te_exp\n\n\ndef sigmoid(weight, case):\n # try:\n exponent = -np.dot(weight.T, case)\n\n try:\n prediction = 1.0 / (1.0 + math.exp(exponent))\n except Exception as e:\n return 1.0 / (1.0 + math.exp(500))\n # If you've gotten this far you've noticed that the last two accuracies are always 50%\n # I couldn't tell you why, seeing as our weights look correct\n # And\n\n return prediction\n\n\ndef check_accuracy(w, x, y):\n correct = 0\n\n for i in range(x.shape[0]):\n if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:\n correct += 1\n elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:\n correct += 1\n\n percentage_correct = correct / x.shape[0]\n return percentage_correct\n\n\ndef gradient(training_data, training_expected, testing_data, testing_expected, reg_strength=None, iterations=100, learning_rate=0.00005):\n training_accuracies = []\n testing_accuracies = []\n\n if reg_strength is not None:\n try:\n reg_strength = float(reg_strength)\n except:\n reg_strength = None\n\n w = np.zeros(training_data.shape[1]) # Feature count\n\n for _ in range(iterations):\n gradient_batch = np.zeros(training_data.shape[1]) # Feature count\n for i in range(training_data.shape[0]): # Example count\n predicted = sigmoid(w, training_data[i])\n diff = (np.subtract(\n predicted, training_expected[i]))\n diff = np.multiply(diff, training_data[i])\n gradient_batch = np.add(gradient_batch, diff)\n\n if reg_strength is not None:\n normalized = np.linalg.norm(w)\n gradient_batch = np.add(\n gradient_batch, np.multiply(normalized, reg_strength))\n\n gradient_batch = np.multiply(learning_rate, gradient_batch)\n w = np.subtract(w, gradient_batch)\n\n training_accuracies.append(check_accuracy(\n w, training_data, training_expected))\n testing_accuracies.append(check_accuracy(\n w, testing_data, testing_expected))\n\n return training_accuracies, testing_accuracies\n\n\nargs = sys.argv[1:]\nif len(args) < 2:\n print(\"You must include a training and testing dataset, as well as a learning rate\", file=sys.stderr)\n print(\"Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate\")\n exit(1)\n\niterations = []\nfor i in range(0, 100):\n iterations.append(i+1)\n\ntraining_features, training_expected, test_features, test_expected = load_files(\n args[0], args[1])\ntraining_accuracies, testing_accuracies = gradient(\n training_features, training_expected, test_features, test_expected, learning_rate=float(args[2]))\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"Iteration\")\nplt.title(f\"Accuracy as Function of Iteration Learing Rate = {args[2]}\")\nplt.plot(iterations, training_accuracies, 'b', label='training')\nplt.plot(iterations, testing_accuracies, 'r', label='testing')\nplt.legend()\nplt.show()\nplt.savefig(f\"graph_results.png\")\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import sys
max = sys.maxsize
print(" sys.maxsize -> ", max)
|
normal
|
{
"blob_id": "c1c79e5adc620690e4e386f7f1cd9f781eeec0ce",
"index": 6843,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(' sys.maxsize -> ', max)\n",
"step-3": "<mask token>\nmax = sys.maxsize\nprint(' sys.maxsize -> ', max)\n",
"step-4": "import sys\nmax = sys.maxsize\nprint(' sys.maxsize -> ', max)\n",
"step-5": "import sys\n\nmax = sys.maxsize\nprint(\" sys.maxsize -> \", max)\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def return_major(Y):
label_count = {}
for i in Y:
label_count[i] = label_count.get(i, 0) + 1
sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),
reverse=True)
return sorted_class[0][0]
def splitDataSet(X, fea, value):
y = []
tem = copy.deepcopy(X)
for i in tem:
if i[fea] == value:
del i[fea]
y.append(i)
return y
<|reserved_special_token_0|>
def calcEnt(X):
labelCount = {}
for i in X:
i = i[-1]
labelCount[i] = labelCount.get(i, 0) + 1
tem = np.array(list(labelCount.values()))
tem = tem / len(X)
return np.sum(-np.log(tem) * tem)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def return_major(Y):
label_count = {}
for i in Y:
label_count[i] = label_count.get(i, 0) + 1
sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),
reverse=True)
return sorted_class[0][0]
def splitDataSet(X, fea, value):
y = []
tem = copy.deepcopy(X)
for i in tem:
if i[fea] == value:
del i[fea]
y.append(i)
return y
def bestdived(X):
baseEnt = calcEnt(X)
tem0 = 0
for i in range(len(X[0]) - 1):
feaValue = [x[i] for x in X]
uniqueValue = set(feaValue)
tem1 = 0
for j in uniqueValue:
subDataset = splitDataSet(X, i, j)
prob = len(subDataset) / len(X)
tem1 = tem1 + prob * calcEnt(subDataset)
infoGain = baseEnt - tem1
if infoGain > tem0:
tem0 = infoGain
bestFea = i
return bestFea
def calcEnt(X):
labelCount = {}
for i in X:
i = i[-1]
labelCount[i] = labelCount.get(i, 0) + 1
tem = np.array(list(labelCount.values()))
tem = tem / len(X)
return np.sum(-np.log(tem) * tem)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def construct_tree(X, label):
classList = [sample[-1] for sample in X]
if classList.count(classList[0]) == len(classList):
return classList[0]
if len(X[0]) == 1:
return return_major(classList)
bestFea = bestdived(X)
bestFeaName = label[bestFea]
feaValue = [x[bestFea] for x in X]
uniqueValue = set(feaValue)
myTree = {bestFeaName: {}}
del label[bestFea]
for i in uniqueValue:
myTree[bestFeaName][i] = construct_tree(splitDataSet(X, bestFea, i),
label)
return myTree
def return_major(Y):
label_count = {}
for i in Y:
label_count[i] = label_count.get(i, 0) + 1
sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),
reverse=True)
return sorted_class[0][0]
def splitDataSet(X, fea, value):
y = []
tem = copy.deepcopy(X)
for i in tem:
if i[fea] == value:
del i[fea]
y.append(i)
return y
def bestdived(X):
baseEnt = calcEnt(X)
tem0 = 0
for i in range(len(X[0]) - 1):
feaValue = [x[i] for x in X]
uniqueValue = set(feaValue)
tem1 = 0
for j in uniqueValue:
subDataset = splitDataSet(X, i, j)
prob = len(subDataset) / len(X)
tem1 = tem1 + prob * calcEnt(subDataset)
infoGain = baseEnt - tem1
if infoGain > tem0:
tem0 = infoGain
bestFea = i
return bestFea
def calcEnt(X):
labelCount = {}
for i in X:
i = i[-1]
labelCount[i] = labelCount.get(i, 0) + 1
tem = np.array(list(labelCount.values()))
tem = tem / len(X)
return np.sum(-np.log(tem) * tem)
<|reserved_special_token_1|>
import numpy as np
import copy
<|reserved_special_token_0|>
def construct_tree(X, label):
classList = [sample[-1] for sample in X]
if classList.count(classList[0]) == len(classList):
return classList[0]
if len(X[0]) == 1:
return return_major(classList)
bestFea = bestdived(X)
bestFeaName = label[bestFea]
feaValue = [x[bestFea] for x in X]
uniqueValue = set(feaValue)
myTree = {bestFeaName: {}}
del label[bestFea]
for i in uniqueValue:
myTree[bestFeaName][i] = construct_tree(splitDataSet(X, bestFea, i),
label)
return myTree
def return_major(Y):
label_count = {}
for i in Y:
label_count[i] = label_count.get(i, 0) + 1
sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),
reverse=True)
return sorted_class[0][0]
def splitDataSet(X, fea, value):
y = []
tem = copy.deepcopy(X)
for i in tem:
if i[fea] == value:
del i[fea]
y.append(i)
return y
def bestdived(X):
baseEnt = calcEnt(X)
tem0 = 0
for i in range(len(X[0]) - 1):
feaValue = [x[i] for x in X]
uniqueValue = set(feaValue)
tem1 = 0
for j in uniqueValue:
subDataset = splitDataSet(X, i, j)
prob = len(subDataset) / len(X)
tem1 = tem1 + prob * calcEnt(subDataset)
infoGain = baseEnt - tem1
if infoGain > tem0:
tem0 = infoGain
bestFea = i
return bestFea
def calcEnt(X):
labelCount = {}
for i in X:
i = i[-1]
labelCount[i] = labelCount.get(i, 0) + 1
tem = np.array(list(labelCount.values()))
tem = tem / len(X)
return np.sum(-np.log(tem) * tem)
<|reserved_special_token_1|>
import numpy as np
import copy
'''
This script implements the core pieces of a decision tree.
construct_tree: the main function that builds the decision tree.
Its input: a data set X of shape n*p, where n is the number of samples, the first p-1 columns are features and column p is the class label,
together with the attribute information label: a 1-D array of p-1 attribute names, giving the name of the attribute behind each current column of X.
The decision structure is represented as a dict, e.g. {attribution1:{0:{attribution2:{}},1:{attribution3:{}}}
'''
def construct_tree(X,label):
classList = [sample[-1] for sample in X]
    # If all samples at this node have the same class, return that class.
if classList.count(classList[0]) == len(classList):
return classList[0]
    # If all attributes have already been used up for splitting
if len(X[0])==1:
return return_major(classList)
    # A subset produced by a split could in principle be empty, but that clearly cannot
    # happen here: in the code below the number of attribute values used for splitting is
    # taken from the samples present at this node rather than fixed in advance; note the
    # difference from the algorithm in the watermelon book (Zhou Zhihua's Machine Learning).
    # Choose the best attribute to split on:
bestFea = bestdived(X)
bestFeaName = label[bestFea]
feaValue = [x[bestFea] for x in X]
uniqueValue = set(feaValue)
myTree = {bestFeaName:{}}
del(label[bestFea])
for i in uniqueValue:
myTree[bestFeaName][i]=construct_tree(splitDataSet(X,bestFea,i),label)
return myTree
# Use the code below to find the most frequent item in a group of data
def return_major(Y):
    # Given a group of class labels, return the most frequent class among them
label_count={}
for i in Y:
label_count[i] = label_count.get(i,0)+1
sorted_class = sorted(label_count.items(),key=operator.itemgetter(1),reverse=True)
return sorted_class[0][0]
def splitDataSet(X,fea,value):
    # Build the subset of the data whose given attribute takes the given value
y = []
tem = copy.deepcopy(X)
for i in tem:
if i[fea] == value:
del(i[fea])
y.append(i)
return y
def bestdived(X):
    # Split the data on each feature in turn and compute the entropy of the resulting
    # subsets, then compute the information gain of that feature
baseEnt = calcEnt(X)
    tem0 = 0  # records the largest information gain seen so far
for i in range(len(X[0])-1):
        # loop over the features (fea)
feaValue = [x[i] for x in X]
uniqueValue = set(feaValue)
        tem1 = 0  # records the total entropy of the subsets produced by splitting on this feature
for j in uniqueValue:
subDataset = splitDataSet(X,i,j)
prob = len(subDataset)/len(X)
tem1 = tem1 + prob*calcEnt(subDataset)
infoGain = baseEnt - tem1
if infoGain > tem0:
tem0 = infoGain
bestFea = i
return bestFea
def calcEnt(X):
    # Compute the entropy of data set X, taken with respect to the class labels.
labelCount = {}
for i in X:
i = i[-1]
labelCount[i] = labelCount.get(i,0)+1;
tem = np.array(list(labelCount.values()))
tem = tem/len(X)
return np.sum(-np.log(tem)*tem)
|
flexible
|
{
"blob_id": "ff66b33a133b627ba2329434d6c1649c94b6ec78",
"index": 8188,
"step-1": "<mask token>\n\n\ndef return_major(Y):\n label_count = {}\n for i in Y:\n label_count[i] = label_count.get(i, 0) + 1\n sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),\n reverse=True)\n return sorted_class[0][0]\n\n\ndef splitDataSet(X, fea, value):\n y = []\n tem = copy.deepcopy(X)\n for i in tem:\n if i[fea] == value:\n del i[fea]\n y.append(i)\n return y\n\n\n<mask token>\n\n\ndef calcEnt(X):\n labelCount = {}\n for i in X:\n i = i[-1]\n labelCount[i] = labelCount.get(i, 0) + 1\n tem = np.array(list(labelCount.values()))\n tem = tem / len(X)\n return np.sum(-np.log(tem) * tem)\n",
"step-2": "<mask token>\n\n\ndef return_major(Y):\n label_count = {}\n for i in Y:\n label_count[i] = label_count.get(i, 0) + 1\n sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),\n reverse=True)\n return sorted_class[0][0]\n\n\ndef splitDataSet(X, fea, value):\n y = []\n tem = copy.deepcopy(X)\n for i in tem:\n if i[fea] == value:\n del i[fea]\n y.append(i)\n return y\n\n\ndef bestdived(X):\n baseEnt = calcEnt(X)\n tem0 = 0\n for i in range(len(X[0]) - 1):\n feaValue = [x[i] for x in X]\n uniqueValue = set(feaValue)\n tem1 = 0\n for j in uniqueValue:\n subDataset = splitDataSet(X, i, j)\n prob = len(subDataset) / len(X)\n tem1 = tem1 + prob * calcEnt(subDataset)\n infoGain = baseEnt - tem1\n if infoGain > tem0:\n tem0 = infoGain\n bestFea = i\n return bestFea\n\n\ndef calcEnt(X):\n labelCount = {}\n for i in X:\n i = i[-1]\n labelCount[i] = labelCount.get(i, 0) + 1\n tem = np.array(list(labelCount.values()))\n tem = tem / len(X)\n return np.sum(-np.log(tem) * tem)\n",
"step-3": "<mask token>\n\n\ndef construct_tree(X, label):\n classList = [sample[-1] for sample in X]\n if classList.count(classList[0]) == len(classList):\n return classList[0]\n if len(X[0]) == 1:\n return return_major(classList)\n bestFea = bestdived(X)\n bestFeaName = label[bestFea]\n feaValue = [x[bestFea] for x in X]\n uniqueValue = set(feaValue)\n myTree = {bestFeaName: {}}\n del label[bestFea]\n for i in uniqueValue:\n myTree[bestFeaName][i] = construct_tree(splitDataSet(X, bestFea, i),\n label)\n return myTree\n\n\ndef return_major(Y):\n label_count = {}\n for i in Y:\n label_count[i] = label_count.get(i, 0) + 1\n sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),\n reverse=True)\n return sorted_class[0][0]\n\n\ndef splitDataSet(X, fea, value):\n y = []\n tem = copy.deepcopy(X)\n for i in tem:\n if i[fea] == value:\n del i[fea]\n y.append(i)\n return y\n\n\ndef bestdived(X):\n baseEnt = calcEnt(X)\n tem0 = 0\n for i in range(len(X[0]) - 1):\n feaValue = [x[i] for x in X]\n uniqueValue = set(feaValue)\n tem1 = 0\n for j in uniqueValue:\n subDataset = splitDataSet(X, i, j)\n prob = len(subDataset) / len(X)\n tem1 = tem1 + prob * calcEnt(subDataset)\n infoGain = baseEnt - tem1\n if infoGain > tem0:\n tem0 = infoGain\n bestFea = i\n return bestFea\n\n\ndef calcEnt(X):\n labelCount = {}\n for i in X:\n i = i[-1]\n labelCount[i] = labelCount.get(i, 0) + 1\n tem = np.array(list(labelCount.values()))\n tem = tem / len(X)\n return np.sum(-np.log(tem) * tem)\n",
"step-4": "import numpy as np\nimport copy\n<mask token>\n\n\ndef construct_tree(X, label):\n classList = [sample[-1] for sample in X]\n if classList.count(classList[0]) == len(classList):\n return classList[0]\n if len(X[0]) == 1:\n return return_major(classList)\n bestFea = bestdived(X)\n bestFeaName = label[bestFea]\n feaValue = [x[bestFea] for x in X]\n uniqueValue = set(feaValue)\n myTree = {bestFeaName: {}}\n del label[bestFea]\n for i in uniqueValue:\n myTree[bestFeaName][i] = construct_tree(splitDataSet(X, bestFea, i),\n label)\n return myTree\n\n\ndef return_major(Y):\n label_count = {}\n for i in Y:\n label_count[i] = label_count.get(i, 0) + 1\n sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),\n reverse=True)\n return sorted_class[0][0]\n\n\ndef splitDataSet(X, fea, value):\n y = []\n tem = copy.deepcopy(X)\n for i in tem:\n if i[fea] == value:\n del i[fea]\n y.append(i)\n return y\n\n\ndef bestdived(X):\n baseEnt = calcEnt(X)\n tem0 = 0\n for i in range(len(X[0]) - 1):\n feaValue = [x[i] for x in X]\n uniqueValue = set(feaValue)\n tem1 = 0\n for j in uniqueValue:\n subDataset = splitDataSet(X, i, j)\n prob = len(subDataset) / len(X)\n tem1 = tem1 + prob * calcEnt(subDataset)\n infoGain = baseEnt - tem1\n if infoGain > tem0:\n tem0 = infoGain\n bestFea = i\n return bestFea\n\n\ndef calcEnt(X):\n labelCount = {}\n for i in X:\n i = i[-1]\n labelCount[i] = labelCount.get(i, 0) + 1\n tem = np.array(list(labelCount.values()))\n tem = tem / len(X)\n return np.sum(-np.log(tem) * tem)\n",
"step-5": "import numpy as np\nimport copy\n'''\n本脚本主要用来实现决策树的相关内容。\nconstrcut_tree:该函数是构建决策树的主要函数\n其输入:数据集X:n*p n:样本数,p-1维特征,p为样本类别,\n以及属性信息label:属性名称,p-1一维数组,label表示的是此时X每一列对应的属性名称\n决策结构用字典来表示,例如{attribution1:{0:{attribution2:{}},1:{attribution3:{}}}\n'''\n\ndef construct_tree(X,label):\n \n classList = [sample[-1] for sample in X]\n #如果此时所有的样本的类别相同,返回该类别。\n if classList.count(classList[0]) == len(classList):\n return classList[0]\n #如果此时对应属性已经划分完毕\n if len(X[0])==1:\n return return_major(classList)\n #如果此时划分之后的子集为空,但是显然这是不可能的,对于这种情况来说,\n #因为我们后面的编程过程中,我的属性划分的个数是根据,此时样本的属性数\n #得到的,而不是一开始默认的,注意于西瓜书上算法的区别\n\n #选择最优划分属性:\n bestFea = bestdived(X)\n bestFeaName = label[bestFea]\n feaValue = [x[bestFea] for x in X]\n uniqueValue = set(feaValue)\n myTree = {bestFeaName:{}}\n del(label[bestFea])\n for i in uniqueValue:\n myTree[bestFeaName][i]=construct_tree(splitDataSet(X,bestFea,i),label)\n return myTree\n\n\n\n\n#统计一组数据中,出现次数最多的时候用以下代码\ndef return_major(Y):\n #给定一组类别,返回这组数据中,最大的类别\n label_count={}\n for i in Y:\n label_count[i] = label_count.get(i,0)+1\n sorted_class = sorted(label_count.items(),key=operator.itemgetter(1),reverse=True)\n return sorted_class[0][0]\n\ndef splitDataSet(X,fea,value):\n #根据属性的某个值得到相应的数据集\n y = []\n tem = copy.deepcopy(X)\n for i in tem:\n if i[fea] == value:\n del(i[fea])\n y.append(i)\n return y\n\ndef bestdived(X):\n #对任何一个特征进行划分,计算得到的数据集的熵。然后计算\n #这个特征对应的信息增益\n baseEnt = calcEnt(X)\n tem0 = 0#记录最大的信息增益\n for i in range(len(X[0])-1):\n #fea 循环\n feaValue = [x[i] for x in X]\n uniqueValue = set(feaValue)\n tem1 = 0#记录该特征划分的子集熵的总和\n for j in uniqueValue:\n subDataset = splitDataSet(X,i,j)\n prob = len(subDataset)/len(X)\n tem1 = tem1 + prob*calcEnt(subDataset)\n infoGain = baseEnt - tem1\n if infoGain > tem0:\n tem0 = infoGain\n bestFea = i\n return bestFea\n\ndef calcEnt(X):\n #计算数据即X的熵,此时的熵是当对于类别信息来的。\n labelCount = {}\n for i in X:\n i = i[-1]\n labelCount[i] = labelCount.get(i,0)+1;\n tem = np.array(list(labelCount.values()))\n tem = tem/len(X)\n return np.sum(-np.log(tem)*tem)\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import sys

def ler(t):
    # read whitespace-separated records from stdin into t
    for s in sys.stdin:
        l = s.split(" ")
        t.append(l)

def melhor(t):
    i = 1
    x = int(t[0][0].strip("\n"))
    n = len(t)
    while i < n:
        u = int(t[i][2].strip())
        if u < x:  # assumed intent: keep the best (smallest) value seen so far
            x = u
        i += 1

def vendedor():
    t = []
    ler(t)
    melhor(t)

vendedor()
|
normal
|
{
"blob_id": "76664114382bdeb0bffb996e4dd4448b6c87520d",
"index": 9719,
"step-1": "import sys \n\ndef ler (t):\n\ti =0\n\tfor s in sys.stdin:\n\t\tl=s.split(\" \")\n\t\tt.append(l)\n\ndef melhor (t):\n\ti=1\n\tx=int(t[0][0].strip(\"\\n\"))\n\tn=len(t)\n\twhile(i<n):\n\t\tu=int((t[i][2]).strip())\n\t\tif(u<x)\n\t\ti+=1\n\n\n\n\ndef vendedor():\n\tt=[]\n\tler(t)\n\tmelhor(t)\nvendedor()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class TestNonMiscView:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_term_of_user(self, rf, db):
mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(
pytz.UTC) + timedelta(days=1))
request = rf.get('/')
response = get_term_of_user(request)
assert response.status_code == 200
assert json.loads(response.content) == {'term': 'EULA Test'}
<|reserved_special_token_0|>
def test_get_featured_challenges(self, db):
challenges = {(active, discarted): mommy.make('Challenge', active=
active, discarted=discarted) for active, discarted in product((
False, True), repeat=2)}
response = get_featured_challenges()
assert isinstance(response, QuerySet)
assert response.count() == 1
assert response.first() == challenges[True, False]
def test_get_authors_empty(self, db):
response = get_authors('[email protected]')
assert isinstance(response, QuerySet)
assert response.count() == 0
def test_get_authors(self, db):
staff_options = False, True
email_options = '', '[email protected]', '[email protected]'
authors = {(staff, email): mommy.make('UserProfile', user__is_staff
=staff, user__email=email) for staff, email in product(
staff_options, email_options)}
response = get_authors('[email protected]')
assert isinstance(response, QuerySet)
assert response.count() == 1
assert response.first() == authors[False, '[email protected]']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNonMiscView:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_term_of_user(self, rf, db):
mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(
pytz.UTC) + timedelta(days=1))
request = rf.get('/')
response = get_term_of_user(request)
assert response.status_code == 200
assert json.loads(response.content) == {'term': 'EULA Test'}
def test_get_featured_challenges_empty(self, db):
response = get_featured_challenges()
assert isinstance(response, QuerySet)
assert response.count() == 0
def test_get_featured_challenges(self, db):
challenges = {(active, discarted): mommy.make('Challenge', active=
active, discarted=discarted) for active, discarted in product((
False, True), repeat=2)}
response = get_featured_challenges()
assert isinstance(response, QuerySet)
assert response.count() == 1
assert response.first() == challenges[True, False]
def test_get_authors_empty(self, db):
response = get_authors('[email protected]')
assert isinstance(response, QuerySet)
assert response.count() == 0
def test_get_authors(self, db):
staff_options = False, True
email_options = '', '[email protected]', '[email protected]'
authors = {(staff, email): mommy.make('UserProfile', user__is_staff
=staff, user__email=email) for staff, email in product(
staff_options, email_options)}
response = get_authors('[email protected]')
assert isinstance(response, QuerySet)
assert response.count() == 1
assert response.first() == authors[False, '[email protected]']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNonMiscView:
<|reserved_special_token_0|>
def test_get_term_of_user_empty(self, rf, db):
request = rf.get('/')
response = get_term_of_user(request)
assert response.status_code == 200
assert json.loads(response.content) == {'term': 'No Term of Use found'}
def test_get_term_of_user(self, rf, db):
mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(
pytz.UTC) + timedelta(days=1))
request = rf.get('/')
response = get_term_of_user(request)
assert response.status_code == 200
assert json.loads(response.content) == {'term': 'EULA Test'}
def test_get_featured_challenges_empty(self, db):
response = get_featured_challenges()
assert isinstance(response, QuerySet)
assert response.count() == 0
def test_get_featured_challenges(self, db):
challenges = {(active, discarted): mommy.make('Challenge', active=
active, discarted=discarted) for active, discarted in product((
False, True), repeat=2)}
response = get_featured_challenges()
assert isinstance(response, QuerySet)
assert response.count() == 1
assert response.first() == challenges[True, False]
def test_get_authors_empty(self, db):
response = get_authors('[email protected]')
assert isinstance(response, QuerySet)
assert response.count() == 0
def test_get_authors(self, db):
staff_options = False, True
email_options = '', '[email protected]', '[email protected]'
authors = {(staff, email): mommy.make('UserProfile', user__is_staff
=staff, user__email=email) for staff, email in product(
staff_options, email_options)}
response = get_authors('[email protected]')
assert isinstance(response, QuerySet)
assert response.count() == 1
assert response.first() == authors[False, '[email protected]']
<|reserved_special_token_1|>
import json
from datetime import datetime, timedelta
from itertools import product
from django.db.models import QuerySet
import pytz
from model_mommy import mommy
from ...views import get_authors, get_featured_challenges, get_term_of_user
class TestNonMiscView:
"""Test for non view functions in ideax.views (for refactor)"""
def test_get_term_of_user_empty(self, rf, db):
request = rf.get('/')
response = get_term_of_user(request)
assert response.status_code == 200
assert json.loads(response.content) == {'term': 'No Term of Use found'}
def test_get_term_of_user(self, rf, db):
mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(
pytz.UTC) + timedelta(days=1))
request = rf.get('/')
response = get_term_of_user(request)
assert response.status_code == 200
assert json.loads(response.content) == {'term': 'EULA Test'}
def test_get_featured_challenges_empty(self, db):
response = get_featured_challenges()
assert isinstance(response, QuerySet)
assert response.count() == 0
def test_get_featured_challenges(self, db):
challenges = {(active, discarted): mommy.make('Challenge', active=
active, discarted=discarted) for active, discarted in product((
False, True), repeat=2)}
response = get_featured_challenges()
assert isinstance(response, QuerySet)
assert response.count() == 1
assert response.first() == challenges[True, False]
def test_get_authors_empty(self, db):
response = get_authors('[email protected]')
assert isinstance(response, QuerySet)
assert response.count() == 0
def test_get_authors(self, db):
staff_options = False, True
email_options = '', '[email protected]', '[email protected]'
authors = {(staff, email): mommy.make('UserProfile', user__is_staff
=staff, user__email=email) for staff, email in product(
staff_options, email_options)}
response = get_authors('[email protected]')
assert isinstance(response, QuerySet)
assert response.count() == 1
assert response.first() == authors[False, '[email protected]']
<|reserved_special_token_1|>
import json
from datetime import datetime, timedelta
from itertools import product
from django.db.models import QuerySet
import pytz
from model_mommy import mommy
from ...views import get_authors, get_featured_challenges, get_term_of_user
class TestNonMiscView:
"""Test for non view functions in ideax.views (for refactor)"""
def test_get_term_of_user_empty(self, rf, db):
request = rf.get('/')
response = get_term_of_user(request)
assert response.status_code == 200
assert json.loads(response.content) == {'term': 'No Term of Use found'}
def test_get_term_of_user(self, rf, db):
mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(pytz.UTC) + timedelta(days=1))
request = rf.get('/')
response = get_term_of_user(request)
assert response.status_code == 200
assert json.loads(response.content) == {'term': 'EULA Test'}
def test_get_featured_challenges_empty(self, db):
response = get_featured_challenges()
assert isinstance(response, QuerySet)
assert response.count() == 0
def test_get_featured_challenges(self, db):
challenges = {
(active, discarted): mommy.make('Challenge', active=active, discarted=discarted)
for active, discarted in product((False, True), repeat=2)
}
response = get_featured_challenges()
assert isinstance(response, QuerySet)
assert response.count() == 1
assert response.first() == challenges[(True, False)]
def test_get_authors_empty(self, db):
response = get_authors('[email protected]')
assert isinstance(response, QuerySet)
assert response.count() == 0
def test_get_authors(self, db):
staff_options = (False, True)
# User e-mail cannot be null (refactor get_authors)
email_options = ('', '[email protected]', '[email protected]')
authors = {
(staff, email): mommy.make('UserProfile', user__is_staff=staff, user__email=email)
for staff, email in product(staff_options, email_options)
}
response = get_authors('[email protected]')
assert isinstance(response, QuerySet)
assert response.count() == 1
assert response.first() == authors[(False, '[email protected]')]
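
# --- Usage note (added sketch, not from the original file) ---
# These are pytest-style tests: `rf` is pytest-django's RequestFactory fixture,
# `db` enables database access, and model_mommy builds the model instances.
# A typical (assumed) invocation would look something like:
#   pytest path/to/this/test_file.py -k TestNonMiscView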
|
flexible
|
{
"blob_id": "8d6e4d06e390b4a45e576239189745c2e37217c5",
"index": 2699,
"step-1": "<mask token>\n\n\nclass TestNonMiscView:\n <mask token>\n <mask token>\n\n def test_get_term_of_user(self, rf, db):\n mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(\n pytz.UTC) + timedelta(days=1))\n request = rf.get('/')\n response = get_term_of_user(request)\n assert response.status_code == 200\n assert json.loads(response.content) == {'term': 'EULA Test'}\n <mask token>\n\n def test_get_featured_challenges(self, db):\n challenges = {(active, discarted): mommy.make('Challenge', active=\n active, discarted=discarted) for active, discarted in product((\n False, True), repeat=2)}\n response = get_featured_challenges()\n assert isinstance(response, QuerySet)\n assert response.count() == 1\n assert response.first() == challenges[True, False]\n\n def test_get_authors_empty(self, db):\n response = get_authors('[email protected]')\n assert isinstance(response, QuerySet)\n assert response.count() == 0\n\n def test_get_authors(self, db):\n staff_options = False, True\n email_options = '', '[email protected]', '[email protected]'\n authors = {(staff, email): mommy.make('UserProfile', user__is_staff\n =staff, user__email=email) for staff, email in product(\n staff_options, email_options)}\n response = get_authors('[email protected]')\n assert isinstance(response, QuerySet)\n assert response.count() == 1\n assert response.first() == authors[False, '[email protected]']\n",
"step-2": "<mask token>\n\n\nclass TestNonMiscView:\n <mask token>\n <mask token>\n\n def test_get_term_of_user(self, rf, db):\n mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(\n pytz.UTC) + timedelta(days=1))\n request = rf.get('/')\n response = get_term_of_user(request)\n assert response.status_code == 200\n assert json.loads(response.content) == {'term': 'EULA Test'}\n\n def test_get_featured_challenges_empty(self, db):\n response = get_featured_challenges()\n assert isinstance(response, QuerySet)\n assert response.count() == 0\n\n def test_get_featured_challenges(self, db):\n challenges = {(active, discarted): mommy.make('Challenge', active=\n active, discarted=discarted) for active, discarted in product((\n False, True), repeat=2)}\n response = get_featured_challenges()\n assert isinstance(response, QuerySet)\n assert response.count() == 1\n assert response.first() == challenges[True, False]\n\n def test_get_authors_empty(self, db):\n response = get_authors('[email protected]')\n assert isinstance(response, QuerySet)\n assert response.count() == 0\n\n def test_get_authors(self, db):\n staff_options = False, True\n email_options = '', '[email protected]', '[email protected]'\n authors = {(staff, email): mommy.make('UserProfile', user__is_staff\n =staff, user__email=email) for staff, email in product(\n staff_options, email_options)}\n response = get_authors('[email protected]')\n assert isinstance(response, QuerySet)\n assert response.count() == 1\n assert response.first() == authors[False, '[email protected]']\n",
"step-3": "<mask token>\n\n\nclass TestNonMiscView:\n <mask token>\n\n def test_get_term_of_user_empty(self, rf, db):\n request = rf.get('/')\n response = get_term_of_user(request)\n assert response.status_code == 200\n assert json.loads(response.content) == {'term': 'No Term of Use found'}\n\n def test_get_term_of_user(self, rf, db):\n mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(\n pytz.UTC) + timedelta(days=1))\n request = rf.get('/')\n response = get_term_of_user(request)\n assert response.status_code == 200\n assert json.loads(response.content) == {'term': 'EULA Test'}\n\n def test_get_featured_challenges_empty(self, db):\n response = get_featured_challenges()\n assert isinstance(response, QuerySet)\n assert response.count() == 0\n\n def test_get_featured_challenges(self, db):\n challenges = {(active, discarted): mommy.make('Challenge', active=\n active, discarted=discarted) for active, discarted in product((\n False, True), repeat=2)}\n response = get_featured_challenges()\n assert isinstance(response, QuerySet)\n assert response.count() == 1\n assert response.first() == challenges[True, False]\n\n def test_get_authors_empty(self, db):\n response = get_authors('[email protected]')\n assert isinstance(response, QuerySet)\n assert response.count() == 0\n\n def test_get_authors(self, db):\n staff_options = False, True\n email_options = '', '[email protected]', '[email protected]'\n authors = {(staff, email): mommy.make('UserProfile', user__is_staff\n =staff, user__email=email) for staff, email in product(\n staff_options, email_options)}\n response = get_authors('[email protected]')\n assert isinstance(response, QuerySet)\n assert response.count() == 1\n assert response.first() == authors[False, '[email protected]']\n",
"step-4": "import json\nfrom datetime import datetime, timedelta\nfrom itertools import product\nfrom django.db.models import QuerySet\nimport pytz\nfrom model_mommy import mommy\nfrom ...views import get_authors, get_featured_challenges, get_term_of_user\n\n\nclass TestNonMiscView:\n \"\"\"Test for non view functions in ideax.views (for refactor)\"\"\"\n\n def test_get_term_of_user_empty(self, rf, db):\n request = rf.get('/')\n response = get_term_of_user(request)\n assert response.status_code == 200\n assert json.loads(response.content) == {'term': 'No Term of Use found'}\n\n def test_get_term_of_user(self, rf, db):\n mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(\n pytz.UTC) + timedelta(days=1))\n request = rf.get('/')\n response = get_term_of_user(request)\n assert response.status_code == 200\n assert json.loads(response.content) == {'term': 'EULA Test'}\n\n def test_get_featured_challenges_empty(self, db):\n response = get_featured_challenges()\n assert isinstance(response, QuerySet)\n assert response.count() == 0\n\n def test_get_featured_challenges(self, db):\n challenges = {(active, discarted): mommy.make('Challenge', active=\n active, discarted=discarted) for active, discarted in product((\n False, True), repeat=2)}\n response = get_featured_challenges()\n assert isinstance(response, QuerySet)\n assert response.count() == 1\n assert response.first() == challenges[True, False]\n\n def test_get_authors_empty(self, db):\n response = get_authors('[email protected]')\n assert isinstance(response, QuerySet)\n assert response.count() == 0\n\n def test_get_authors(self, db):\n staff_options = False, True\n email_options = '', '[email protected]', '[email protected]'\n authors = {(staff, email): mommy.make('UserProfile', user__is_staff\n =staff, user__email=email) for staff, email in product(\n staff_options, email_options)}\n response = get_authors('[email protected]')\n assert isinstance(response, QuerySet)\n assert response.count() == 1\n assert response.first() == authors[False, '[email protected]']\n",
"step-5": "import json\n\nfrom datetime import datetime, timedelta\nfrom itertools import product\n\nfrom django.db.models import QuerySet\n\nimport pytz\n\nfrom model_mommy import mommy\n\nfrom ...views import get_authors, get_featured_challenges, get_term_of_user\n\n\nclass TestNonMiscView:\n \"\"\"Test for non view functions in ideax.views (for refactor)\"\"\"\n def test_get_term_of_user_empty(self, rf, db):\n request = rf.get('/')\n response = get_term_of_user(request)\n assert response.status_code == 200\n assert json.loads(response.content) == {'term': 'No Term of Use found'}\n\n def test_get_term_of_user(self, rf, db):\n mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(pytz.UTC) + timedelta(days=1))\n request = rf.get('/')\n response = get_term_of_user(request)\n assert response.status_code == 200\n assert json.loads(response.content) == {'term': 'EULA Test'}\n\n def test_get_featured_challenges_empty(self, db):\n response = get_featured_challenges()\n assert isinstance(response, QuerySet)\n assert response.count() == 0\n\n def test_get_featured_challenges(self, db):\n challenges = {\n (active, discarted): mommy.make('Challenge', active=active, discarted=discarted)\n for active, discarted in product((False, True), repeat=2)\n }\n response = get_featured_challenges()\n assert isinstance(response, QuerySet)\n assert response.count() == 1\n assert response.first() == challenges[(True, False)]\n\n def test_get_authors_empty(self, db):\n response = get_authors('[email protected]')\n assert isinstance(response, QuerySet)\n assert response.count() == 0\n\n def test_get_authors(self, db):\n staff_options = (False, True)\n # User e-mail cannot be null (refactor get_authors)\n email_options = ('', '[email protected]', '[email protected]')\n\n authors = {\n (staff, email): mommy.make('UserProfile', user__is_staff=staff, user__email=email)\n for staff, email in product(staff_options, email_options)\n }\n response = get_authors('[email protected]')\n assert isinstance(response, QuerySet)\n assert response.count() == 1\n assert response.first() == authors[(False, '[email protected]')]\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
from .tacotron_v2_synthesizer import Tacotron2Synthesizer
|
normal
|
{
"blob_id": "cf2fcd013c3e9992da36806ca93aacb4b5399396",
"index": 3172,
"step-1": "<mask token>\n",
"step-2": "from .tacotron_v2_synthesizer import Tacotron2Synthesizer\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class OperationLog(MethodView):
decorators = [login_required, admin_required]
def get(self, page):
per_page = 10
count = UserOperation.query.count()
query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(
page=page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page, total=
count, format_total=True, format_number=True)
return render_template('main/log.html', records=query.items, page=
page, per_page=per_page, pagination=foot_bar, Operation=Operation)
class KeywordBan(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = BanKeywordForm
def get(self, page):
per_page = 10
count = BanList.query.filter_by(deleted=False).count()
pagination = BanList.query.filter_by(deleted=False).paginate(page=
page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page, total=
count, format_total=True, format_number=True)
template_param = {'keywords': pagination.items, 'page': page,
'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}
return render_template('main/ban.html', **template_param)
def post(self, page):
data = request.get_json()
if data:
keyword = data['keyword']
result = BanList.query.filter_by(rule=keyword).first()
if result:
if result.status:
result.status.delete()
result.delete()
flash(u'成功删除关键词')
else:
flash(u'该关键词不存在')
return jsonify({'status': 302, 'location': url_for('main.ban')})
elif request.form:
form = self.form(request.form)
if form.validate():
exist = BanList.query.filter_by(rule=form.keyword.data).first()
if not exist:
ban = BanList(rule=form.keyword.data, time_limit=form.
time_limit.data)
ban.save()
status = RulePushCount(rule_id=ban.id, count=ban.time_limit
)
status.save()
flash(u'添加关键词成功')
elif exist.deleted is True:
exist.deleted = False
exist.time_limit = form.time_limit.data
exist.save()
status = RulePushCount(rule_id=exist.id, count=exist.
time_limit)
status.save()
else:
flash(u'重复添加关键词')
return redirect(url_for('main.ban'))
class WeiboAuthCallback(MethodView):
decorators = [login_required, admin_required]
def get(self):
self.auth_code = request.args.get('code')
result = self.fresh_access()
if result is True:
return render_template('main/success.html')
else:
return render_template('main/failed.html', e=result)
def fresh_access(self):
try:
pass
except BaseException as e:
return e
return True
class Cookie(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = CookieForm
def get(self):
return render_template('main/cookie.html', form=self.form(),
pushtime=10)
def post(self):
form = self.form(request.form)
if not form.validate():
flash(u'表单不合法')
cookie = form.cookie.data
env = Env()
env.set('COOKIE', cookie)
flash(u'设置 Cookie 成功')
return redirect(url_for('main.cookie'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserList(MethodView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class EditProfile(MethodView):
decorators = [login_required]
def __init__(self):
self.form = EditProfileForm
self.admin_form = AdminEditProfileForm
def get(self, username):
if not username:
form = self.form()
form.email.data = current_user.email
form.about_me.data = current_user.aboutme
elif current_user.can(Permission.ADMINISTER):
user_info = User.query.filter_by(username=username, deleted=False
).first()
if user_info:
form = self.admin_form()
form.email.data = user_info.email
form.about_me.data = user_info.aboutme
form.role.data = user_info.role.name
else:
flash(u'用户不存在')
return redirect(url_for('main.index'))
else:
abort(403)
return render_template('main/edit_profile.html', form=form, u=
current_user)
def post(self, username):
if not username:
form = self.form(request.form)
user = current_user
elif current_user.can(Permission.ADMINISTER):
form = self.form(request.form)
user = User.query.filter_by(username=username, deleted=False
).first()
if user:
if not current_user.verify_password(form.oripassword.data):
flash(u'管理员密码输入错误')
return redirect(url_for('main.editprofile', username=
username))
else:
flash(u'用户不存在')
return redirect(url_for('main.index'))
else:
abort(403)
self.change_profile(user, form, True if username else False)
return redirect(url_for('main.user', username=username))
@staticmethod
def change_profile(user, form, admin=False):
user.password = form.password.data
user.email = form.email.data
user.aboutme = form.about_me.data
if admin:
new_role = Role.query.filter_by(name=form.role.data)
if new_role:
user.role = new_role
user.save()
class OperationLog(MethodView):
decorators = [login_required, admin_required]
def get(self, page):
per_page = 10
count = UserOperation.query.count()
query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(
page=page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page, total=
count, format_total=True, format_number=True)
return render_template('main/log.html', records=query.items, page=
page, per_page=per_page, pagination=foot_bar, Operation=Operation)
class KeywordBan(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = BanKeywordForm
def get(self, page):
per_page = 10
count = BanList.query.filter_by(deleted=False).count()
pagination = BanList.query.filter_by(deleted=False).paginate(page=
page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page, total=
count, format_total=True, format_number=True)
template_param = {'keywords': pagination.items, 'page': page,
'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}
return render_template('main/ban.html', **template_param)
def post(self, page):
data = request.get_json()
if data:
keyword = data['keyword']
result = BanList.query.filter_by(rule=keyword).first()
if result:
if result.status:
result.status.delete()
result.delete()
flash(u'成功删除关键词')
else:
flash(u'该关键词不存在')
return jsonify({'status': 302, 'location': url_for('main.ban')})
elif request.form:
form = self.form(request.form)
if form.validate():
exist = BanList.query.filter_by(rule=form.keyword.data).first()
if not exist:
ban = BanList(rule=form.keyword.data, time_limit=form.
time_limit.data)
ban.save()
status = RulePushCount(rule_id=ban.id, count=ban.time_limit
)
status.save()
flash(u'添加关键词成功')
elif exist.deleted is True:
exist.deleted = False
exist.time_limit = form.time_limit.data
exist.save()
status = RulePushCount(rule_id=exist.id, count=exist.
time_limit)
status.save()
else:
flash(u'重复添加关键词')
return redirect(url_for('main.ban'))
class WeiboAuthCallback(MethodView):
decorators = [login_required, admin_required]
def get(self):
self.auth_code = request.args.get('code')
result = self.fresh_access()
if result is True:
return render_template('main/success.html')
else:
return render_template('main/failed.html', e=result)
def fresh_access(self):
try:
pass
except BaseException as e:
return e
return True
class Cookie(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = CookieForm
def get(self):
return render_template('main/cookie.html', form=self.form(),
pushtime=10)
def post(self):
form = self.form(request.form)
if not form.validate():
flash(u'表单不合法')
cookie = form.cookie.data
env = Env()
env.set('COOKIE', cookie)
flash(u'设置 Cookie 成功')
return redirect(url_for('main.cookie'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserList(MethodView):
<|reserved_special_token_0|>
def __init__(self):
self.form = AddUserForm
<|reserved_special_token_0|>
def post(self):
data = request.get_json()
if data:
if data['action'] == 'edit':
username = data['username']
else:
username = data['username']
try:
User.query.filter_by(username=username, deleted=False
).first().delete()
except:
flash(u'用户不存在')
return jsonify({'status': 302, 'location': url_for(
'main.editprofile', username=username)})
elif request.form:
self.add_user()
return redirect('userlist')
def add_user(self):
form = self.form(request.form)
if form.validate():
role = Role.query.filter_by(name=form.role.data).first()
if role:
if not User.query.filter_by(email=form.email.data).first():
user = User(email=form.email.data, username=form.
username.data, role=role, password=form.password.data)
user.save()
else:
flash(u'已经存在该用户')
else:
flash(u'不存在该用户组')
return redirect(url_for('main.userlist'))
class EditProfile(MethodView):
decorators = [login_required]
def __init__(self):
self.form = EditProfileForm
self.admin_form = AdminEditProfileForm
def get(self, username):
if not username:
form = self.form()
form.email.data = current_user.email
form.about_me.data = current_user.aboutme
elif current_user.can(Permission.ADMINISTER):
user_info = User.query.filter_by(username=username, deleted=False
).first()
if user_info:
form = self.admin_form()
form.email.data = user_info.email
form.about_me.data = user_info.aboutme
form.role.data = user_info.role.name
else:
flash(u'用户不存在')
return redirect(url_for('main.index'))
else:
abort(403)
return render_template('main/edit_profile.html', form=form, u=
current_user)
def post(self, username):
if not username:
form = self.form(request.form)
user = current_user
elif current_user.can(Permission.ADMINISTER):
form = self.form(request.form)
user = User.query.filter_by(username=username, deleted=False
).first()
if user:
if not current_user.verify_password(form.oripassword.data):
flash(u'管理员密码输入错误')
return redirect(url_for('main.editprofile', username=
username))
else:
flash(u'用户不存在')
return redirect(url_for('main.index'))
else:
abort(403)
self.change_profile(user, form, True if username else False)
return redirect(url_for('main.user', username=username))
@staticmethod
def change_profile(user, form, admin=False):
user.password = form.password.data
user.email = form.email.data
user.aboutme = form.about_me.data
if admin:
new_role = Role.query.filter_by(name=form.role.data)
if new_role:
user.role = new_role
user.save()
class OperationLog(MethodView):
decorators = [login_required, admin_required]
def get(self, page):
per_page = 10
count = UserOperation.query.count()
query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(
page=page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page, total=
count, format_total=True, format_number=True)
return render_template('main/log.html', records=query.items, page=
page, per_page=per_page, pagination=foot_bar, Operation=Operation)
class KeywordBan(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = BanKeywordForm
def get(self, page):
per_page = 10
count = BanList.query.filter_by(deleted=False).count()
pagination = BanList.query.filter_by(deleted=False).paginate(page=
page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page, total=
count, format_total=True, format_number=True)
template_param = {'keywords': pagination.items, 'page': page,
'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}
return render_template('main/ban.html', **template_param)
def post(self, page):
data = request.get_json()
if data:
keyword = data['keyword']
result = BanList.query.filter_by(rule=keyword).first()
if result:
if result.status:
result.status.delete()
result.delete()
flash(u'成功删除关键词')
else:
flash(u'该关键词不存在')
return jsonify({'status': 302, 'location': url_for('main.ban')})
elif request.form:
form = self.form(request.form)
if form.validate():
exist = BanList.query.filter_by(rule=form.keyword.data).first()
if not exist:
ban = BanList(rule=form.keyword.data, time_limit=form.
time_limit.data)
ban.save()
status = RulePushCount(rule_id=ban.id, count=ban.time_limit
)
status.save()
flash(u'添加关键词成功')
elif exist.deleted is True:
exist.deleted = False
exist.time_limit = form.time_limit.data
exist.save()
status = RulePushCount(rule_id=exist.id, count=exist.
time_limit)
status.save()
else:
flash(u'重复添加关键词')
return redirect(url_for('main.ban'))
class WeiboAuthCallback(MethodView):
decorators = [login_required, admin_required]
def get(self):
self.auth_code = request.args.get('code')
result = self.fresh_access()
if result is True:
return render_template('main/success.html')
else:
return render_template('main/failed.html', e=result)
def fresh_access(self):
try:
pass
except BaseException as e:
return e
return True
class Cookie(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = CookieForm
def get(self):
return render_template('main/cookie.html', form=self.form(),
pushtime=10)
def post(self):
form = self.form(request.form)
if not form.validate():
flash(u'表单不合法')
cookie = form.cookie.data
env = Env()
env.set('COOKIE', cookie)
flash(u'设置 Cookie 成功')
return redirect(url_for('main.cookie'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ManualUpdate(MethodView):
<|reserved_special_token_0|>
def __init__(self):
self.form = PushForm
def get(self):
return render_template('main/mupdate.html', form=self.form(),
pushtime=10)
def post(self):
if not current_user.can(Permission.MANUAL_PUSH):
flash(u'你没有权限')
form = self.form(request.form)
if not form.validate():
flash(u'条目格式有问题,请检查并重新填写')
title = form.pushtitle.data
result = self.check_push_validate(title.encode('utf-8'))
if not result:
flash(u'推送条目被ban,或者已经在24小时之内推送过,或者已经进入待推送列表')
try:
image = MoegirlImage(title)
except HTTPError as e:
flash(u'请求萌百错误,错误码如下{},请联系管理员'.format(e))
return redirect(url_for('main.mupdate'))
if not image.path:
flash(u'无法取得图片,请重试')
entry = WaitingQueue(title=title, image=image.path)
env = Env()
current_weight = env.get('CUTTING_WEIGHT_INIT')
entry.cutting_weight = current_weight + 1
entry.save()
env.set('CUTTING_WEIGHT_INIT', entry.cutting_weight)
UserOperation(user_id=current_user.id, title=title, operation=
Operation.PUSH).save()
if form.industry.data:
try:
from koushihime.crontab import push
push()
except Exception as e:
flash(u'推送失败: {}'.format(str(e)))
flash(u'操作成功,词条将立即推送')
return redirect(url_for('main.mupdate'))
@staticmethod
def check_push_validate(title):
moegirl_entry = MoegirlQuery(title)
namespace = moegirl_entry.get_namespace()
        if namespace == 0:
baned_from_moegirl = moegirl_entry.banned_moegirl_category()
baned_from_regex = moegirl_entry.ban_from_regex()
has_pushed = recent_have_pushed(title.decode('utf-8'))
has_catched = have_auto_catched(title.decode('utf-8'))
result = (baned_from_moegirl is False and has_pushed is False and
has_catched is False and baned_from_regex is False)
return result
else:
return False
class UserInfo(MethodView):
decorators = [login_required]
def get(self, username):
is_admin = current_user.can(Permission.ADMINISTER)
if current_user.username == username or is_admin is True:
user_info = User.query.filter_by(username=username, deleted=False
).first()
if not user_info:
abort(404)
return render_template('main/user.html', u=user_info, username=
user_info.username)
else:
abort(403)
class UserList(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = AddUserForm
def get(self):
userlist = User.query.filter_by(deleted=False).all()
return render_template('main/userlist.html', userlist=userlist,
form=self.form())
def post(self):
data = request.get_json()
if data:
if data['action'] == 'edit':
username = data['username']
else:
username = data['username']
try:
User.query.filter_by(username=username, deleted=False
).first().delete()
except:
flash(u'用户不存在')
return jsonify({'status': 302, 'location': url_for(
'main.editprofile', username=username)})
elif request.form:
self.add_user()
return redirect('userlist')
def add_user(self):
form = self.form(request.form)
if form.validate():
role = Role.query.filter_by(name=form.role.data).first()
if role:
if not User.query.filter_by(email=form.email.data).first():
user = User(email=form.email.data, username=form.
username.data, role=role, password=form.password.data)
user.save()
else:
flash(u'已经存在该用户')
else:
flash(u'不存在该用户组')
return redirect(url_for('main.userlist'))
class EditProfile(MethodView):
decorators = [login_required]
def __init__(self):
self.form = EditProfileForm
self.admin_form = AdminEditProfileForm
def get(self, username):
if not username:
form = self.form()
form.email.data = current_user.email
form.about_me.data = current_user.aboutme
elif current_user.can(Permission.ADMINISTER):
user_info = User.query.filter_by(username=username, deleted=False
).first()
if user_info:
form = self.admin_form()
form.email.data = user_info.email
form.about_me.data = user_info.aboutme
form.role.data = user_info.role.name
else:
flash(u'用户不存在')
return redirect(url_for('main.index'))
else:
abort(403)
return render_template('main/edit_profile.html', form=form, u=
current_user)
def post(self, username):
if not username:
form = self.form(request.form)
user = current_user
elif current_user.can(Permission.ADMINISTER):
form = self.form(request.form)
user = User.query.filter_by(username=username, deleted=False
).first()
if user:
if not current_user.verify_password(form.oripassword.data):
flash(u'管理员密码输入错误')
return redirect(url_for('main.editprofile', username=
username))
else:
flash(u'用户不存在')
return redirect(url_for('main.index'))
else:
abort(403)
self.change_profile(user, form, True if username else False)
return redirect(url_for('main.user', username=username))
@staticmethod
def change_profile(user, form, admin=False):
user.password = form.password.data
user.email = form.email.data
user.aboutme = form.about_me.data
if admin:
new_role = Role.query.filter_by(name=form.role.data)
if new_role:
user.role = new_role
user.save()
class OperationLog(MethodView):
decorators = [login_required, admin_required]
def get(self, page):
per_page = 10
count = UserOperation.query.count()
query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(
page=page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page, total=
count, format_total=True, format_number=True)
return render_template('main/log.html', records=query.items, page=
page, per_page=per_page, pagination=foot_bar, Operation=Operation)
class KeywordBan(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = BanKeywordForm
def get(self, page):
per_page = 10
count = BanList.query.filter_by(deleted=False).count()
pagination = BanList.query.filter_by(deleted=False).paginate(page=
page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page, total=
count, format_total=True, format_number=True)
template_param = {'keywords': pagination.items, 'page': page,
'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}
return render_template('main/ban.html', **template_param)
def post(self, page):
data = request.get_json()
if data:
keyword = data['keyword']
result = BanList.query.filter_by(rule=keyword).first()
if result:
if result.status:
result.status.delete()
result.delete()
flash(u'成功删除关键词')
else:
flash(u'该关键词不存在')
return jsonify({'status': 302, 'location': url_for('main.ban')})
elif request.form:
form = self.form(request.form)
if form.validate():
exist = BanList.query.filter_by(rule=form.keyword.data).first()
if not exist:
ban = BanList(rule=form.keyword.data, time_limit=form.
time_limit.data)
ban.save()
status = RulePushCount(rule_id=ban.id, count=ban.time_limit
)
status.save()
flash(u'添加关键词成功')
elif exist.deleted is True:
exist.deleted = False
exist.time_limit = form.time_limit.data
exist.save()
status = RulePushCount(rule_id=exist.id, count=exist.
time_limit)
status.save()
else:
flash(u'重复添加关键词')
return redirect(url_for('main.ban'))
class WeiboAuthCallback(MethodView):
decorators = [login_required, admin_required]
def get(self):
self.auth_code = request.args.get('code')
result = self.fresh_access()
if result is True:
return render_template('main/success.html')
else:
return render_template('main/failed.html', e=result)
def fresh_access(self):
try:
pass
except BaseException as e:
return e
return True
class Cookie(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = CookieForm
def get(self):
return render_template('main/cookie.html', form=self.form(),
pushtime=10)
def post(self):
form = self.form(request.form)
if not form.validate():
flash(u'表单不合法')
cookie = form.cookie.data
env = Env()
env.set('COOKIE', cookie)
flash(u'设置 Cookie 成功')
return redirect(url_for('main.cookie'))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import urllib
from urllib2 import HTTPError
from datetime import datetime
from flask.views import MethodView
from flask.ext.login import current_user, login_required
from flask.ext.paginate import Pagination as PaginationBar
from flask import render_template, redirect, url_for, request, jsonify, flash, current_app, abort
from koushihime.auth.models import UserOperation, User, Role
from koushihime.auth.constants import Permission, Operation
from koushihime.utils import Pagination, admin_required, Env
from koushihime.utils.moegirl import MoegirlQuery, MoegirlImage
from . import main
from utils import recent_have_pushed, have_auto_catched
from models import WaitingQueue, BanList, RulePushCount
from forms import PushForm, AddUserForm, EditProfileForm, AdminEditProfileForm, BanKeywordForm, CookieForm
@main.before_request
def before_request():
if current_user.is_anonymous:
return redirect(url_for('auth.login'))
elif current_user.is_blocked:
return render_template('main/auth/block.html')
else:
current_user.last_seen = datetime.utcnow()
current_user.save()
class Index(MethodView):
def get(self):
if not current_user:
return redirect(url_for("auth.login"))
config = current_app.config["WEIBO_AUTH_CONFIG"]
callback = urllib.quote(config["CALLBACK"])
app_key = config["APP_KEY"]
return render_template('main/index.html', callback=callback, app_key=app_key)
class Update(MethodView):
decorators = [login_required]
def get(self, page):
per_page = 10
unpushed_entry = WaitingQueue.query.order_by(WaitingQueue.cutting_weight.desc()).all()
pagination = Pagination(unpushed_entry, per_page)
current_page = pagination.page(page)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=True, page=page,
per_page=per_page, total=len(unpushed_entry),
format_total=True, format_number=True)
result = {
"titles": current_page,
"current_time": datetime.utcnow(),
"pushtime": 10,
"deltime": 999,
"page": page,
"per_page": per_page,
"pagination": foot_bar
}
return render_template('main/update.html', **result)
def post(self, page):
data = request.get_json()
if data['action'] == 'post':
title = data["title"]
env = Env()
current_weight = env.get("CUTTING_WEIGHT_INIT")
entry = WaitingQueue.query.filter_by(title=title).first()
if entry:
                entry.cutting_weight = current_weight + 1  # FIXME: the weight can still be increased even when this entry already has the highest weight
entry.save()
env.set("CUTTING_WEIGHT_INIT", entry.cutting_weight)
elif data['action'] == 'del':
title = data['title']
UserOperation(user_id=current_user.id, operation=Operation.DELETE, title=title).save()
query = WaitingQueue.query.filter_by(title=data['title']).first()
if query:
query.delete()
response = jsonify({'result': True})
return response
class ManualUpdate(MethodView):
decorators = [login_required]
def __init__(self):
self.form = PushForm
def get(self):
return render_template('main/mupdate.html', form=self.form(), pushtime=10)
def post(self):
if not current_user.can(Permission.MANUAL_PUSH):
flash(u"你没有权限")
form = self.form(request.form)
if not form.validate():
flash(u"条目格式有问题,请检查并重新填写")
title = form.pushtitle.data
result = self.check_push_validate(title.encode("utf-8"))
if not result:
flash(u"推送条目被ban,或者已经在24小时之内推送过,或者已经进入待推送列表")
try:
image = MoegirlImage(title)
except HTTPError as e:
flash(u"请求萌百错误,错误码如下{},请联系管理员".format(e))
return redirect(url_for('main.mupdate'))
if not image.path:
flash(u"无法取得图片,请重试")
entry = WaitingQueue(title=title, image=image.path)
env = Env()
current_weight = env.get("CUTTING_WEIGHT_INIT")
entry.cutting_weight = current_weight + 1
entry.save()
env.set("CUTTING_WEIGHT_INIT", entry.cutting_weight)
UserOperation(user_id=current_user.id, title=title, operation=Operation.PUSH).save()
if form.industry.data:
try:
from koushihime.crontab import push
push()
except Exception as e:
flash(u"推送失败: {}".format(str(e)))
flash(u"操作成功,词条将立即推送")
return redirect(url_for('main.mupdate'))
@staticmethod
def check_push_validate(title):
moegirl_entry = MoegirlQuery(title)
namespace = moegirl_entry.get_namespace()
        if namespace == 0:
baned_from_moegirl = moegirl_entry.banned_moegirl_category()
baned_from_regex = moegirl_entry.ban_from_regex()
            has_pushed = recent_have_pushed(title.decode("utf-8"))  # TODO: switch this to automatic bubbling
has_catched = have_auto_catched(title.decode("utf-8"))
result = baned_from_moegirl is False \
and has_pushed is False \
and has_catched is False \
and baned_from_regex is False
return result
else:
return False
class UserInfo(MethodView):
decorators = [login_required]
def get(self, username):
is_admin = current_user.can(Permission.ADMINISTER)
if current_user.username == username or is_admin is True:
user_info = User.query.filter_by(username=username, deleted=False).first()
if not user_info:
abort(404)
return render_template('main/user.html', u=user_info, username=user_info.username)
else:
abort(403)
class UserList(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = AddUserForm
def get(self):
userlist = User.query.filter_by(deleted=False).all()
return render_template('main/userlist.html', userlist=userlist, form=self.form())
def post(self):
data = request.get_json()
if data:
if data['action'] == 'edit':
username = data['username']
else:
username = data['username']
try:
User.query.filter_by(username=username, deleted=False).first().delete()
except:
flash(u'用户不存在')
return jsonify({"status": 302, "location": url_for('main.editprofile', username=username)})
elif request.form:
self.add_user()
return redirect('userlist')
def add_user(self):
form = self.form(request.form)
if form.validate():
role = Role.query.filter_by(name=form.role.data).first()
if role:
if not User.query.filter_by(email=form.email.data).first():
user = User(email=form.email.data, username=form.username.data,
role=role, password=form.password.data)
user.save()
else:
flash(u'已经存在该用户')
else:
flash(u'不存在该用户组')
return redirect(url_for('main.userlist'))
class EditProfile(MethodView):
decorators = [login_required]
def __init__(self):
self.form = EditProfileForm
self.admin_form = AdminEditProfileForm
def get(self, username):
        if not username:  # the user is editing their own profile
form = self.form()
form.email.data = current_user.email
form.about_me.data = current_user.aboutme
else:
if current_user.can(Permission.ADMINISTER):
user_info = User.query.filter_by(username=username, deleted=False).first()
if user_info:
form = self.admin_form()
form.email.data = user_info.email
form.about_me.data = user_info.aboutme
form.role.data = user_info.role.name
else:
flash(u'用户不存在')
return redirect(url_for('main.index'))
else:
abort(403)
return render_template('main/edit_profile.html', form=form, u=current_user)
def post(self, username):
if not username:
form = self.form(request.form)
user = current_user
else:
if current_user.can(Permission.ADMINISTER):
form = self.form(request.form)
user = User.query.filter_by(username=username, deleted=False).first()
if user:
if not current_user.verify_password(form.oripassword.data):
flash(u'管理员密码输入错误')
return redirect(url_for('main.editprofile', username=username))
else:
flash(u'用户不存在')
return redirect(url_for('main.index'))
else:
abort(403)
self.change_profile(user, form, True if username else False)
return redirect(url_for('main.user', username=username))
@staticmethod
def change_profile(user, form, admin=False):
user.password = form.password.data
user.email = form.email.data
user.aboutme = form.about_me.data
if admin:
new_role = Role.query.filter_by(name=form.role.data)
if new_role:
user.role = new_role
user.save()
class OperationLog(MethodView):
decorators = [login_required, admin_required]
def get(self, page):
per_page = 10
count = UserOperation.query.count()
query = UserOperation.query.order_by(UserOperation.id.desc())\
.paginate(page=page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page,
total=count, format_total=True, format_number=True)
return render_template('main/log.html', records=query.items,
page=page, per_page=per_page, pagination=foot_bar, Operation=Operation)
class KeywordBan(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = BanKeywordForm
def get(self, page):
per_page = 10
count = BanList.query.filter_by(deleted=False).count()
        # TODO: read the keywords into the config to reduce the number of queries
pagination = BanList.query.filter_by(deleted=False)\
.paginate(page=page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page,
total=count, format_total=True, format_number=True)
template_param = {
'keywords': pagination.items,
'page': page,
'per_page': per_page,
'pagination': foot_bar,
'form': self.form()
}
return render_template('main/ban.html', **template_param)
def post(self, page):
data = request.get_json()
if data:
keyword = data['keyword']
result = BanList.query.filter_by(rule=keyword).first()
if result:
if result.status:
result.status.delete()
result.delete()
flash(u'成功删除关键词')
else:
flash(u'该关键词不存在')
return jsonify({"status": 302, "location": url_for('main.ban')})
elif request.form:
form = self.form(request.form)
if form.validate():
exist = BanList.query.filter_by(rule=form.keyword.data).first()
if not exist:
ban = BanList(rule=form.keyword.data, time_limit=form.time_limit.data)
ban.save()
status = RulePushCount(rule_id=ban.id, count=ban.time_limit)
status.save()
flash(u'添加关键词成功')
else:
if exist.deleted is True:
exist.deleted = False
exist.time_limit = form.time_limit.data
exist.save()
status = RulePushCount(rule_id=exist.id, count=exist.time_limit)
status.save()
else:
flash(u'重复添加关键词')
return redirect(url_for('main.ban'))
# TODO: deprecated
class WeiboAuthCallback(MethodView):
decorators = [login_required, admin_required]
def get(self):
self.auth_code = request.args.get("code")
result = self.fresh_access()
if result is True:
return render_template('main/success.html')
else:
return render_template('main/failed.html', e=result)
def fresh_access(self):
# config = current_app.config["WEIBO_AUTH_CONFIG"]
# callback = config["CALLBACK"]
# app_key = config["APP_KEY"]
# app_secret_key = config["APP_SECRET"]
try:
pass
# client = APIClient(app_key=app_key, app_secret=app_secret_key, redirect_uri=callback)
# token_data = client.request_access_token(self.auth_code)
# access_token, expires_in = token_data.access_token, token_data.expires_in
except BaseException as e:
return e
# config["ACCESS_TOKEN"] = access_token
# config["EXPIRE_TIME"] = expires_in
# env = Env()
# env.set("ACCESS_TOKEN", access_token)
# env = Env()
# env.set("EXPIRE_TIME", expires_in)
return True
class Cookie(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = CookieForm
def get(self):
return render_template('main/cookie.html', form=self.form(), pushtime=10)
def post(self):
form = self.form(request.form)
if not form.validate():
flash(u"表单不合法")
cookie = form.cookie.data
env = Env()
env.set("COOKIE", cookie)
flash(u"设置 Cookie 成功")
return redirect(url_for('main.cookie'))
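
# --- Hedged wiring sketch (added for illustration, not part of the original file) ---
# The URL registration is assumed to live elsewhere (e.g. in the blueprint's
# __init__.py); a plausible mapping consistent with the url_for() endpoints
# used above would be:
#   main.add_url_rule('/', view_func=Index.as_view('index'))
#   main.add_url_rule('/update/<int:page>', view_func=Update.as_view('update'))
#   main.add_url_rule('/mupdate', view_func=ManualUpdate.as_view('mupdate'))
#   main.add_url_rule('/ban/<int:page>', view_func=KeywordBan.as_view('ban'))
#   main.add_url_rule('/cookie', view_func=Cookie.as_view('cookie'))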
|
flexible
|
{
"blob_id": "1a561ca0268d084c8fdde5de65ce0c7e68154eec",
"index": 4993,
"step-1": "<mask token>\n\n\nclass OperationLog(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self, page):\n per_page = 10\n count = UserOperation.query.count()\n query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(\n page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n return render_template('main/log.html', records=query.items, page=\n page, per_page=per_page, pagination=foot_bar, Operation=Operation)\n\n\nclass KeywordBan(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = BanKeywordForm\n\n def get(self, page):\n per_page = 10\n count = BanList.query.filter_by(deleted=False).count()\n pagination = BanList.query.filter_by(deleted=False).paginate(page=\n page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n template_param = {'keywords': pagination.items, 'page': page,\n 'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}\n return render_template('main/ban.html', **template_param)\n\n def post(self, page):\n data = request.get_json()\n if data:\n keyword = data['keyword']\n result = BanList.query.filter_by(rule=keyword).first()\n if result:\n if result.status:\n result.status.delete()\n result.delete()\n flash(u'成功删除关键词')\n else:\n flash(u'该关键词不存在')\n return jsonify({'status': 302, 'location': url_for('main.ban')})\n elif request.form:\n form = self.form(request.form)\n if form.validate():\n exist = BanList.query.filter_by(rule=form.keyword.data).first()\n if not exist:\n ban = BanList(rule=form.keyword.data, time_limit=form.\n time_limit.data)\n ban.save()\n status = RulePushCount(rule_id=ban.id, count=ban.time_limit\n )\n status.save()\n flash(u'添加关键词成功')\n elif exist.deleted is True:\n exist.deleted = False\n exist.time_limit = form.time_limit.data\n exist.save()\n status = RulePushCount(rule_id=exist.id, count=exist.\n time_limit)\n status.save()\n else:\n flash(u'重复添加关键词')\n return redirect(url_for('main.ban'))\n\n\nclass WeiboAuthCallback(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self):\n self.auth_code = request.args.get('code')\n result = self.fresh_access()\n if result is True:\n return render_template('main/success.html')\n else:\n return render_template('main/failed.html', e=result)\n\n def fresh_access(self):\n try:\n pass\n except BaseException as e:\n return e\n return True\n\n\nclass Cookie(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = CookieForm\n\n def get(self):\n return render_template('main/cookie.html', form=self.form(),\n pushtime=10)\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n flash(u'表单不合法')\n cookie = form.cookie.data\n env = Env()\n env.set('COOKIE', cookie)\n flash(u'设置 Cookie 成功')\n return redirect(url_for('main.cookie'))\n",
"step-2": "<mask token>\n\n\nclass UserList(MethodView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass EditProfile(MethodView):\n decorators = [login_required]\n\n def __init__(self):\n self.form = EditProfileForm\n self.admin_form = AdminEditProfileForm\n\n def get(self, username):\n if not username:\n form = self.form()\n form.email.data = current_user.email\n form.about_me.data = current_user.aboutme\n elif current_user.can(Permission.ADMINISTER):\n user_info = User.query.filter_by(username=username, deleted=False\n ).first()\n if user_info:\n form = self.admin_form()\n form.email.data = user_info.email\n form.about_me.data = user_info.aboutme\n form.role.data = user_info.role.name\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n return render_template('main/edit_profile.html', form=form, u=\n current_user)\n\n def post(self, username):\n if not username:\n form = self.form(request.form)\n user = current_user\n elif current_user.can(Permission.ADMINISTER):\n form = self.form(request.form)\n user = User.query.filter_by(username=username, deleted=False\n ).first()\n if user:\n if not current_user.verify_password(form.oripassword.data):\n flash(u'管理员密码输入错误')\n return redirect(url_for('main.editprofile', username=\n username))\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n self.change_profile(user, form, True if username else False)\n return redirect(url_for('main.user', username=username))\n\n @staticmethod\n def change_profile(user, form, admin=False):\n user.password = form.password.data\n user.email = form.email.data\n user.aboutme = form.about_me.data\n if admin:\n new_role = Role.query.filter_by(name=form.role.data)\n if new_role:\n user.role = new_role\n user.save()\n\n\nclass OperationLog(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self, page):\n per_page = 10\n count = UserOperation.query.count()\n query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(\n page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n return render_template('main/log.html', records=query.items, page=\n page, per_page=per_page, pagination=foot_bar, Operation=Operation)\n\n\nclass KeywordBan(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = BanKeywordForm\n\n def get(self, page):\n per_page = 10\n count = BanList.query.filter_by(deleted=False).count()\n pagination = BanList.query.filter_by(deleted=False).paginate(page=\n page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n template_param = {'keywords': pagination.items, 'page': page,\n 'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}\n return render_template('main/ban.html', **template_param)\n\n def post(self, page):\n data = request.get_json()\n if data:\n keyword = data['keyword']\n result = BanList.query.filter_by(rule=keyword).first()\n if result:\n if result.status:\n result.status.delete()\n result.delete()\n flash(u'成功删除关键词')\n else:\n flash(u'该关键词不存在')\n return jsonify({'status': 302, 'location': url_for('main.ban')})\n elif request.form:\n form = self.form(request.form)\n if 
form.validate():\n exist = BanList.query.filter_by(rule=form.keyword.data).first()\n if not exist:\n ban = BanList(rule=form.keyword.data, time_limit=form.\n time_limit.data)\n ban.save()\n status = RulePushCount(rule_id=ban.id, count=ban.time_limit\n )\n status.save()\n flash(u'添加关键词成功')\n elif exist.deleted is True:\n exist.deleted = False\n exist.time_limit = form.time_limit.data\n exist.save()\n status = RulePushCount(rule_id=exist.id, count=exist.\n time_limit)\n status.save()\n else:\n flash(u'重复添加关键词')\n return redirect(url_for('main.ban'))\n\n\nclass WeiboAuthCallback(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self):\n self.auth_code = request.args.get('code')\n result = self.fresh_access()\n if result is True:\n return render_template('main/success.html')\n else:\n return render_template('main/failed.html', e=result)\n\n def fresh_access(self):\n try:\n pass\n except BaseException as e:\n return e\n return True\n\n\nclass Cookie(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = CookieForm\n\n def get(self):\n return render_template('main/cookie.html', form=self.form(),\n pushtime=10)\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n flash(u'表单不合法')\n cookie = form.cookie.data\n env = Env()\n env.set('COOKIE', cookie)\n flash(u'设置 Cookie 成功')\n return redirect(url_for('main.cookie'))\n",
"step-3": "<mask token>\n\n\nclass UserList(MethodView):\n <mask token>\n\n def __init__(self):\n self.form = AddUserForm\n <mask token>\n\n def post(self):\n data = request.get_json()\n if data:\n if data['action'] == 'edit':\n username = data['username']\n else:\n username = data['username']\n try:\n User.query.filter_by(username=username, deleted=False\n ).first().delete()\n except:\n flash(u'用户不存在')\n return jsonify({'status': 302, 'location': url_for(\n 'main.editprofile', username=username)})\n elif request.form:\n self.add_user()\n return redirect('userlist')\n\n def add_user(self):\n form = self.form(request.form)\n if form.validate():\n role = Role.query.filter_by(name=form.role.data).first()\n if role:\n if not User.query.filter_by(email=form.email.data).first():\n user = User(email=form.email.data, username=form.\n username.data, role=role, password=form.password.data)\n user.save()\n else:\n flash(u'已经存在该用户')\n else:\n flash(u'不存在该用户组')\n return redirect(url_for('main.userlist'))\n\n\nclass EditProfile(MethodView):\n decorators = [login_required]\n\n def __init__(self):\n self.form = EditProfileForm\n self.admin_form = AdminEditProfileForm\n\n def get(self, username):\n if not username:\n form = self.form()\n form.email.data = current_user.email\n form.about_me.data = current_user.aboutme\n elif current_user.can(Permission.ADMINISTER):\n user_info = User.query.filter_by(username=username, deleted=False\n ).first()\n if user_info:\n form = self.admin_form()\n form.email.data = user_info.email\n form.about_me.data = user_info.aboutme\n form.role.data = user_info.role.name\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n return render_template('main/edit_profile.html', form=form, u=\n current_user)\n\n def post(self, username):\n if not username:\n form = self.form(request.form)\n user = current_user\n elif current_user.can(Permission.ADMINISTER):\n form = self.form(request.form)\n user = User.query.filter_by(username=username, deleted=False\n ).first()\n if user:\n if not current_user.verify_password(form.oripassword.data):\n flash(u'管理员密码输入错误')\n return redirect(url_for('main.editprofile', username=\n username))\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n self.change_profile(user, form, True if username else False)\n return redirect(url_for('main.user', username=username))\n\n @staticmethod\n def change_profile(user, form, admin=False):\n user.password = form.password.data\n user.email = form.email.data\n user.aboutme = form.about_me.data\n if admin:\n new_role = Role.query.filter_by(name=form.role.data)\n if new_role:\n user.role = new_role\n user.save()\n\n\nclass OperationLog(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self, page):\n per_page = 10\n count = UserOperation.query.count()\n query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(\n page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n return render_template('main/log.html', records=query.items, page=\n page, per_page=per_page, pagination=foot_bar, Operation=Operation)\n\n\nclass KeywordBan(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = BanKeywordForm\n\n def get(self, page):\n per_page = 10\n count = BanList.query.filter_by(deleted=False).count()\n pagination 
= BanList.query.filter_by(deleted=False).paginate(page=\n page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n template_param = {'keywords': pagination.items, 'page': page,\n 'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}\n return render_template('main/ban.html', **template_param)\n\n def post(self, page):\n data = request.get_json()\n if data:\n keyword = data['keyword']\n result = BanList.query.filter_by(rule=keyword).first()\n if result:\n if result.status:\n result.status.delete()\n result.delete()\n flash(u'成功删除关键词')\n else:\n flash(u'该关键词不存在')\n return jsonify({'status': 302, 'location': url_for('main.ban')})\n elif request.form:\n form = self.form(request.form)\n if form.validate():\n exist = BanList.query.filter_by(rule=form.keyword.data).first()\n if not exist:\n ban = BanList(rule=form.keyword.data, time_limit=form.\n time_limit.data)\n ban.save()\n status = RulePushCount(rule_id=ban.id, count=ban.time_limit\n )\n status.save()\n flash(u'添加关键词成功')\n elif exist.deleted is True:\n exist.deleted = False\n exist.time_limit = form.time_limit.data\n exist.save()\n status = RulePushCount(rule_id=exist.id, count=exist.\n time_limit)\n status.save()\n else:\n flash(u'重复添加关键词')\n return redirect(url_for('main.ban'))\n\n\nclass WeiboAuthCallback(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self):\n self.auth_code = request.args.get('code')\n result = self.fresh_access()\n if result is True:\n return render_template('main/success.html')\n else:\n return render_template('main/failed.html', e=result)\n\n def fresh_access(self):\n try:\n pass\n except BaseException as e:\n return e\n return True\n\n\nclass Cookie(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = CookieForm\n\n def get(self):\n return render_template('main/cookie.html', form=self.form(),\n pushtime=10)\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n flash(u'表单不合法')\n cookie = form.cookie.data\n env = Env()\n env.set('COOKIE', cookie)\n flash(u'设置 Cookie 成功')\n return redirect(url_for('main.cookie'))\n",
"step-4": "<mask token>\n\n\nclass ManualUpdate(MethodView):\n <mask token>\n\n def __init__(self):\n self.form = PushForm\n\n def get(self):\n return render_template('main/mupdate.html', form=self.form(),\n pushtime=10)\n\n def post(self):\n if not current_user.can(Permission.MANUAL_PUSH):\n flash(u'你没有权限')\n form = self.form(request.form)\n if not form.validate():\n flash(u'条目格式有问题,请检查并重新填写')\n title = form.pushtitle.data\n result = self.check_push_validate(title.encode('utf-8'))\n if not result:\n flash(u'推送条目被ban,或者已经在24小时之内推送过,或者已经进入待推送列表')\n try:\n image = MoegirlImage(title)\n except HTTPError as e:\n flash(u'请求萌百错误,错误码如下{},请联系管理员'.format(e))\n return redirect(url_for('main.mupdate'))\n if not image.path:\n flash(u'无法取得图片,请重试')\n entry = WaitingQueue(title=title, image=image.path)\n env = Env()\n current_weight = env.get('CUTTING_WEIGHT_INIT')\n entry.cutting_weight = current_weight + 1\n entry.save()\n env.set('CUTTING_WEIGHT_INIT', entry.cutting_weight)\n UserOperation(user_id=current_user.id, title=title, operation=\n Operation.PUSH).save()\n if form.industry.data:\n try:\n from koushihime.crontab import push\n push()\n except Exception as e:\n flash(u'推送失败: {}'.format(str(e)))\n flash(u'操作成功,词条将立即推送')\n return redirect(url_for('main.mupdate'))\n\n @staticmethod\n def check_push_validate(title):\n moegirl_entry = MoegirlQuery(title)\n namespace = moegirl_entry.get_namespace()\n if namespace is 0:\n baned_from_moegirl = moegirl_entry.banned_moegirl_category()\n baned_from_regex = moegirl_entry.ban_from_regex()\n has_pushed = recent_have_pushed(title.decode('utf-8'))\n has_catched = have_auto_catched(title.decode('utf-8'))\n result = (baned_from_moegirl is False and has_pushed is False and\n has_catched is False and baned_from_regex is False)\n return result\n else:\n return False\n\n\nclass UserInfo(MethodView):\n decorators = [login_required]\n\n def get(self, username):\n is_admin = current_user.can(Permission.ADMINISTER)\n if current_user.username == username or is_admin is True:\n user_info = User.query.filter_by(username=username, deleted=False\n ).first()\n if not user_info:\n abort(404)\n return render_template('main/user.html', u=user_info, username=\n user_info.username)\n else:\n abort(403)\n\n\nclass UserList(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = AddUserForm\n\n def get(self):\n userlist = User.query.filter_by(deleted=False).all()\n return render_template('main/userlist.html', userlist=userlist,\n form=self.form())\n\n def post(self):\n data = request.get_json()\n if data:\n if data['action'] == 'edit':\n username = data['username']\n else:\n username = data['username']\n try:\n User.query.filter_by(username=username, deleted=False\n ).first().delete()\n except:\n flash(u'用户不存在')\n return jsonify({'status': 302, 'location': url_for(\n 'main.editprofile', username=username)})\n elif request.form:\n self.add_user()\n return redirect('userlist')\n\n def add_user(self):\n form = self.form(request.form)\n if form.validate():\n role = Role.query.filter_by(name=form.role.data).first()\n if role:\n if not User.query.filter_by(email=form.email.data).first():\n user = User(email=form.email.data, username=form.\n username.data, role=role, password=form.password.data)\n user.save()\n else:\n flash(u'已经存在该用户')\n else:\n flash(u'不存在该用户组')\n return redirect(url_for('main.userlist'))\n\n\nclass EditProfile(MethodView):\n decorators = [login_required]\n\n def __init__(self):\n self.form = EditProfileForm\n self.admin_form = 
AdminEditProfileForm\n\n def get(self, username):\n if not username:\n form = self.form()\n form.email.data = current_user.email\n form.about_me.data = current_user.aboutme\n elif current_user.can(Permission.ADMINISTER):\n user_info = User.query.filter_by(username=username, deleted=False\n ).first()\n if user_info:\n form = self.admin_form()\n form.email.data = user_info.email\n form.about_me.data = user_info.aboutme\n form.role.data = user_info.role.name\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n return render_template('main/edit_profile.html', form=form, u=\n current_user)\n\n def post(self, username):\n if not username:\n form = self.form(request.form)\n user = current_user\n elif current_user.can(Permission.ADMINISTER):\n form = self.form(request.form)\n user = User.query.filter_by(username=username, deleted=False\n ).first()\n if user:\n if not current_user.verify_password(form.oripassword.data):\n flash(u'管理员密码输入错误')\n return redirect(url_for('main.editprofile', username=\n username))\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n self.change_profile(user, form, True if username else False)\n return redirect(url_for('main.user', username=username))\n\n @staticmethod\n def change_profile(user, form, admin=False):\n user.password = form.password.data\n user.email = form.email.data\n user.aboutme = form.about_me.data\n if admin:\n new_role = Role.query.filter_by(name=form.role.data)\n if new_role:\n user.role = new_role\n user.save()\n\n\nclass OperationLog(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self, page):\n per_page = 10\n count = UserOperation.query.count()\n query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(\n page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n return render_template('main/log.html', records=query.items, page=\n page, per_page=per_page, pagination=foot_bar, Operation=Operation)\n\n\nclass KeywordBan(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = BanKeywordForm\n\n def get(self, page):\n per_page = 10\n count = BanList.query.filter_by(deleted=False).count()\n pagination = BanList.query.filter_by(deleted=False).paginate(page=\n page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n template_param = {'keywords': pagination.items, 'page': page,\n 'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}\n return render_template('main/ban.html', **template_param)\n\n def post(self, page):\n data = request.get_json()\n if data:\n keyword = data['keyword']\n result = BanList.query.filter_by(rule=keyword).first()\n if result:\n if result.status:\n result.status.delete()\n result.delete()\n flash(u'成功删除关键词')\n else:\n flash(u'该关键词不存在')\n return jsonify({'status': 302, 'location': url_for('main.ban')})\n elif request.form:\n form = self.form(request.form)\n if form.validate():\n exist = BanList.query.filter_by(rule=form.keyword.data).first()\n if not exist:\n ban = BanList(rule=form.keyword.data, time_limit=form.\n time_limit.data)\n ban.save()\n status = RulePushCount(rule_id=ban.id, count=ban.time_limit\n )\n status.save()\n 
flash(u'添加关键词成功')\n elif exist.deleted is True:\n exist.deleted = False\n exist.time_limit = form.time_limit.data\n exist.save()\n status = RulePushCount(rule_id=exist.id, count=exist.\n time_limit)\n status.save()\n else:\n flash(u'重复添加关键词')\n return redirect(url_for('main.ban'))\n\n\nclass WeiboAuthCallback(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self):\n self.auth_code = request.args.get('code')\n result = self.fresh_access()\n if result is True:\n return render_template('main/success.html')\n else:\n return render_template('main/failed.html', e=result)\n\n def fresh_access(self):\n try:\n pass\n except BaseException as e:\n return e\n return True\n\n\nclass Cookie(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = CookieForm\n\n def get(self):\n return render_template('main/cookie.html', form=self.form(),\n pushtime=10)\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n flash(u'表单不合法')\n cookie = form.cookie.data\n env = Env()\n env.set('COOKIE', cookie)\n flash(u'设置 Cookie 成功')\n return redirect(url_for('main.cookie'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport urllib\nfrom urllib2 import HTTPError\nfrom datetime import datetime\nfrom flask.views import MethodView\nfrom flask.ext.login import current_user, login_required\nfrom flask.ext.paginate import Pagination as PaginationBar\nfrom flask import render_template, redirect, url_for, request, jsonify, flash, current_app, abort\nfrom koushihime.auth.models import UserOperation, User, Role\nfrom koushihime.auth.constants import Permission, Operation\nfrom koushihime.utils import Pagination, admin_required, Env\nfrom koushihime.utils.moegirl import MoegirlQuery, MoegirlImage\nfrom . import main\nfrom utils import recent_have_pushed, have_auto_catched\nfrom models import WaitingQueue, BanList, RulePushCount\nfrom forms import PushForm, AddUserForm, EditProfileForm, AdminEditProfileForm, BanKeywordForm, CookieForm\n\n\[email protected]_request\ndef before_request():\n if current_user.is_anonymous:\n return redirect(url_for('auth.login'))\n elif current_user.is_blocked:\n return render_template('main/auth/block.html')\n else:\n current_user.last_seen = datetime.utcnow()\n current_user.save()\n\n\nclass Index(MethodView):\n\n def get(self):\n if not current_user:\n return redirect(url_for(\"auth.login\"))\n config = current_app.config[\"WEIBO_AUTH_CONFIG\"]\n callback = urllib.quote(config[\"CALLBACK\"])\n app_key = config[\"APP_KEY\"]\n return render_template('main/index.html', callback=callback, app_key=app_key)\n\n\nclass Update(MethodView):\n decorators = [login_required]\n\n def get(self, page):\n per_page = 10\n unpushed_entry = WaitingQueue.query.order_by(WaitingQueue.cutting_weight.desc()).all()\n pagination = Pagination(unpushed_entry, per_page)\n current_page = pagination.page(page)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=True, page=page,\n per_page=per_page, total=len(unpushed_entry),\n format_total=True, format_number=True)\n result = {\n \"titles\": current_page,\n \"current_time\": datetime.utcnow(),\n \"pushtime\": 10,\n \"deltime\": 999,\n \"page\": page,\n \"per_page\": per_page,\n \"pagination\": foot_bar\n }\n return render_template('main/update.html', **result)\n\n def post(self, page):\n data = request.get_json()\n if data['action'] == 'post':\n title = data[\"title\"]\n env = Env()\n current_weight = env.get(\"CUTTING_WEIGHT_INIT\")\n entry = WaitingQueue.query.filter_by(title=title).first()\n if entry:\n entry.cutting_weight = current_weight + 1 # FIXME: 即使条目处于权重最高状态亦可增加权限\n entry.save()\n env.set(\"CUTTING_WEIGHT_INIT\", entry.cutting_weight)\n elif data['action'] == 'del':\n title = data['title']\n UserOperation(user_id=current_user.id, operation=Operation.DELETE, title=title).save()\n query = WaitingQueue.query.filter_by(title=data['title']).first()\n if query:\n query.delete()\n response = jsonify({'result': True})\n return response\n\n\nclass ManualUpdate(MethodView):\n decorators = [login_required]\n\n def __init__(self):\n self.form = PushForm\n\n def get(self):\n return render_template('main/mupdate.html', form=self.form(), pushtime=10)\n\n def post(self):\n if not current_user.can(Permission.MANUAL_PUSH):\n flash(u\"你没有权限\")\n\n form = self.form(request.form)\n if not form.validate():\n flash(u\"条目格式有问题,请检查并重新填写\")\n\n title = form.pushtitle.data\n result = self.check_push_validate(title.encode(\"utf-8\"))\n if not result:\n flash(u\"推送条目被ban,或者已经在24小时之内推送过,或者已经进入待推送列表\")\n\n try:\n image = MoegirlImage(title)\n except HTTPError as e:\n 
flash(u\"请求萌百错误,错误码如下{},请联系管理员\".format(e))\n return redirect(url_for('main.mupdate'))\n if not image.path:\n flash(u\"无法取得图片,请重试\")\n\n entry = WaitingQueue(title=title, image=image.path)\n env = Env()\n current_weight = env.get(\"CUTTING_WEIGHT_INIT\")\n entry.cutting_weight = current_weight + 1\n entry.save()\n env.set(\"CUTTING_WEIGHT_INIT\", entry.cutting_weight)\n UserOperation(user_id=current_user.id, title=title, operation=Operation.PUSH).save()\n if form.industry.data:\n try:\n from koushihime.crontab import push\n push()\n except Exception as e:\n flash(u\"推送失败: {}\".format(str(e)))\n flash(u\"操作成功,词条将立即推送\")\n return redirect(url_for('main.mupdate'))\n\n @staticmethod\n def check_push_validate(title):\n moegirl_entry = MoegirlQuery(title)\n namespace = moegirl_entry.get_namespace()\n if namespace is 0:\n baned_from_moegirl = moegirl_entry.banned_moegirl_category()\n baned_from_regex = moegirl_entry.ban_from_regex()\n has_pushed = recent_have_pushed(title.decode(\"utf-8\")) # TODO: 改成自动冒泡\n has_catched = have_auto_catched(title.decode(\"utf-8\"))\n result = baned_from_moegirl is False \\\n and has_pushed is False \\\n and has_catched is False \\\n and baned_from_regex is False\n return result\n else:\n return False\n\n\nclass UserInfo(MethodView):\n decorators = [login_required]\n\n def get(self, username):\n is_admin = current_user.can(Permission.ADMINISTER)\n if current_user.username == username or is_admin is True:\n user_info = User.query.filter_by(username=username, deleted=False).first()\n if not user_info:\n abort(404)\n return render_template('main/user.html', u=user_info, username=user_info.username)\n else:\n abort(403)\n\n\nclass UserList(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = AddUserForm\n\n def get(self):\n userlist = User.query.filter_by(deleted=False).all()\n return render_template('main/userlist.html', userlist=userlist, form=self.form())\n\n def post(self):\n data = request.get_json()\n if data:\n if data['action'] == 'edit':\n username = data['username']\n else:\n username = data['username']\n try:\n User.query.filter_by(username=username, deleted=False).first().delete()\n except:\n flash(u'用户不存在')\n return jsonify({\"status\": 302, \"location\": url_for('main.editprofile', username=username)})\n elif request.form:\n self.add_user()\n return redirect('userlist')\n\n def add_user(self):\n form = self.form(request.form)\n if form.validate():\n role = Role.query.filter_by(name=form.role.data).first()\n if role:\n if not User.query.filter_by(email=form.email.data).first():\n user = User(email=form.email.data, username=form.username.data,\n role=role, password=form.password.data)\n user.save()\n else:\n flash(u'已经存在该用户')\n else:\n flash(u'不存在该用户组')\n return redirect(url_for('main.userlist'))\n\n\nclass EditProfile(MethodView):\n decorators = [login_required]\n\n def __init__(self):\n self.form = EditProfileForm\n self.admin_form = AdminEditProfileForm\n\n def get(self, username):\n if not username: # 用户访问自己的个人信息编辑页\n form = self.form()\n form.email.data = current_user.email\n form.about_me.data = current_user.aboutme\n else:\n if current_user.can(Permission.ADMINISTER):\n user_info = User.query.filter_by(username=username, deleted=False).first()\n if user_info:\n form = self.admin_form()\n form.email.data = user_info.email\n form.about_me.data = user_info.aboutme\n form.role.data = user_info.role.name\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n return 
render_template('main/edit_profile.html', form=form, u=current_user)\n\n def post(self, username):\n if not username:\n form = self.form(request.form)\n user = current_user\n else:\n if current_user.can(Permission.ADMINISTER):\n form = self.form(request.form)\n user = User.query.filter_by(username=username, deleted=False).first()\n if user:\n if not current_user.verify_password(form.oripassword.data):\n flash(u'管理员密码输入错误')\n return redirect(url_for('main.editprofile', username=username))\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n\n self.change_profile(user, form, True if username else False)\n return redirect(url_for('main.user', username=username))\n\n @staticmethod\n def change_profile(user, form, admin=False):\n user.password = form.password.data\n user.email = form.email.data\n user.aboutme = form.about_me.data\n if admin:\n new_role = Role.query.filter_by(name=form.role.data)\n if new_role:\n user.role = new_role\n user.save()\n\n\nclass OperationLog(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self, page):\n per_page = 10\n count = UserOperation.query.count()\n query = UserOperation.query.order_by(UserOperation.id.desc())\\\n .paginate(page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page,\n total=count, format_total=True, format_number=True)\n return render_template('main/log.html', records=query.items,\n page=page, per_page=per_page, pagination=foot_bar, Operation=Operation)\n\n\nclass KeywordBan(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = BanKeywordForm\n\n def get(self, page):\n per_page = 10\n count = BanList.query.filter_by(deleted=False).count()\n # TODO: 把关键词读入配置减少查询次数\n pagination = BanList.query.filter_by(deleted=False)\\\n .paginate(page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page,\n total=count, format_total=True, format_number=True)\n template_param = {\n 'keywords': pagination.items,\n 'page': page,\n 'per_page': per_page,\n 'pagination': foot_bar,\n 'form': self.form()\n }\n return render_template('main/ban.html', **template_param)\n\n def post(self, page):\n data = request.get_json()\n if data:\n keyword = data['keyword']\n result = BanList.query.filter_by(rule=keyword).first()\n if result:\n if result.status:\n result.status.delete()\n result.delete()\n flash(u'成功删除关键词')\n else:\n flash(u'该关键词不存在')\n return jsonify({\"status\": 302, \"location\": url_for('main.ban')})\n elif request.form:\n form = self.form(request.form)\n if form.validate():\n exist = BanList.query.filter_by(rule=form.keyword.data).first()\n if not exist:\n ban = BanList(rule=form.keyword.data, time_limit=form.time_limit.data)\n ban.save()\n status = RulePushCount(rule_id=ban.id, count=ban.time_limit)\n status.save()\n flash(u'添加关键词成功')\n else:\n if exist.deleted is True:\n exist.deleted = False\n exist.time_limit = form.time_limit.data\n exist.save()\n status = RulePushCount(rule_id=exist.id, count=exist.time_limit)\n status.save()\n else:\n flash(u'重复添加关键词')\n return redirect(url_for('main.ban'))\n\n\n# TODO: deprecated\nclass WeiboAuthCallback(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self):\n self.auth_code = request.args.get(\"code\")\n result = self.fresh_access()\n if result is True:\n return 
render_template('main/success.html')\n else:\n return render_template('main/failed.html', e=result)\n\n def fresh_access(self):\n # config = current_app.config[\"WEIBO_AUTH_CONFIG\"]\n # callback = config[\"CALLBACK\"]\n # app_key = config[\"APP_KEY\"]\n # app_secret_key = config[\"APP_SECRET\"]\n try:\n pass\n # client = APIClient(app_key=app_key, app_secret=app_secret_key, redirect_uri=callback)\n # token_data = client.request_access_token(self.auth_code)\n # access_token, expires_in = token_data.access_token, token_data.expires_in\n except BaseException as e:\n return e\n # config[\"ACCESS_TOKEN\"] = access_token\n # config[\"EXPIRE_TIME\"] = expires_in\n # env = Env()\n # env.set(\"ACCESS_TOKEN\", access_token)\n # env = Env()\n # env.set(\"EXPIRE_TIME\", expires_in)\n return True\n\n\nclass Cookie(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = CookieForm\n\n def get(self):\n return render_template('main/cookie.html', form=self.form(), pushtime=10)\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n flash(u\"表单不合法\")\n cookie = form.cookie.data\n env = Env()\n env.set(\"COOKIE\", cookie)\n flash(u\"设置 Cookie 成功\")\n return redirect(url_for('main.cookie'))\n",
"step-ids": [
17,
24,
27,
37,
47
]
}
|
[
17,
24,
27,
37,
47
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cv2.namedWindow('Measure Angle with centerline')
<|reserved_special_token_0|>
while True:
ret, frame = vidCapture.read()
if ret == True:
out.write(frame)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 255 == ord('q'):
break
else:
break
vidCapture.release()
out.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
FRAME_WIDTH = 320
FRAME_HEIGHT = 240
cv2.namedWindow('Measure Angle with centerline')
vidCapture = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))
while True:
ret, frame = vidCapture.read()
if ret == True:
out.write(frame)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 255 == ord('q'):
break
else:
break
vidCapture.release()
out.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import numpy as np
import cv2
FRAME_WIDTH = 320
FRAME_HEIGHT = 240
cv2.namedWindow('Measure Angle with centerline')
vidCapture = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))
while True:
ret, frame = vidCapture.read()
if ret == True:
out.write(frame)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 255 == ord('q'):
break
else:
break
vidCapture.release()
out.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import numpy as np
import cv2
FRAME_WIDTH = 320
FRAME_HEIGHT = 240
cv2.namedWindow('Measure Angle with centerline')
# WebCam Initialize
vidCapture = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))
while True:
# key = cv2.waitKey(1) & 0xFF
# if key == 27:
# break
ret, frame = vidCapture.read()
if ret==True:
# frame = cv2.flip(frame,0)
# write the flipped frame
out.write(frame)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
# img = np.zeros((512, 512, 3), np.uint8)
# cv2.line(frame, (160, 0), (160, 240), (255, 0, 0), 2)
# cv2.line(frame, (0, 120), (320, 120), (255, 0, 0), 2)
# cv2.imshow('frame', frame)
vidCapture.release()
out.release()
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "500d6f473f07b35bf2d075d3061ac2e54eab702a",
"index": 4156,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.namedWindow('Measure Angle with centerline')\n<mask token>\nwhile True:\n ret, frame = vidCapture.read()\n if ret == True:\n out.write(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\nvidCapture.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nFRAME_WIDTH = 320\nFRAME_HEIGHT = 240\ncv2.namedWindow('Measure Angle with centerline')\nvidCapture = cv2.VideoCapture(1)\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))\nwhile True:\n ret, frame = vidCapture.read()\n if ret == True:\n out.write(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\nvidCapture.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-4": "import numpy as np\nimport cv2\nFRAME_WIDTH = 320\nFRAME_HEIGHT = 240\ncv2.namedWindow('Measure Angle with centerline')\nvidCapture = cv2.VideoCapture(1)\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))\nwhile True:\n ret, frame = vidCapture.read()\n if ret == True:\n out.write(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\nvidCapture.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-5": "import numpy as np\r\nimport cv2\r\n\r\nFRAME_WIDTH = 320\r\nFRAME_HEIGHT = 240\r\n\r\ncv2.namedWindow('Measure Angle with centerline')\r\n\r\n# WebCam Initialize\r\nvidCapture = cv2.VideoCapture(1)\r\n\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID') \r\nout = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480)) \r\n\r\nwhile True:\r\n\r\n\t# key = cv2.waitKey(1) & 0xFF\r\n\t# if key == 27:\r\n\t# \tbreak\r\n\r\n\tret, frame = vidCapture.read()\r\n\t\r\n\tif ret==True:\r\n\t\t# frame = cv2.flip(frame,0)\r\n\r\n # write the flipped frame\r\n\t\tout.write(frame)\r\n\r\n\t\tcv2.imshow('frame',frame)\r\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\r\n\t\t\tbreak\r\n\telse:\r\n\t\tbreak\r\n\t# img = np.zeros((512, 512, 3), np.uint8)\r\n\t# cv2.line(frame, (160, 0), (160, 240), (255, 0, 0), 2)\r\n\t# cv2.line(frame, (0, 120), (320, 120), (255, 0, 0), 2)\r\n\r\n\t# cv2.imshow('frame', frame)\r\n\r\nvidCapture.release()\r\nout.release()\r\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_prob_age(uids, prob_age) ->List[int]:
res = [0] * len(uids)
for i, uid in enumerate(uids):
res[i] = prob_age.setdefault(uid, 0)
return res
def get_grads_count(uids, grads_count) ->List[int]:
res = [0] * len(uids)
for i, uid in enumerate(uids):
res[i] = grads_count.setdefault(uid, 0)
return res
def get_groups_count(uids, usersGroups):
tmp = usersGroups.groupby('uid').count()
groups_count = [0] * len(uids)
for i, uid in enumerate(uids):
try:
groups_count[i] = tmp.at[uid, 'gid']
except:
continue
return groups_count
def get_mean_and_median_group(uids, gid2age, uid_groups):
mean_group = [0.0] * len(uids)
median_group = [0.0] * len(uids)
for i, uid in enumerate(uids):
try:
tmp = [gid2age[x] for x in uid_groups[uid]]
mean_group[i] = sum(tmp) / len(tmp)
median_group[i] = np.median(tmp)
except:
continue
return mean_group, median_group
def get_mean_and_median_friends(uids, uid2age, uid_friends):
mean_friends = [0.0] * len(uids)
median_friends = [0.0] * len(uids)
mean_friends2 = [0.0] * len(uids)
for i, uid in enumerate(uids):
try:
tmp = []
if uid in uid_friends and len(uid_friends[uid]) < 42:
for friend in uid_friends[uid]:
if friend in uid_friends:
for f2 in uid_friends[friend]:
if f2 != uid and f2 in uid2age:
tmp.append(uid2age[f2])
mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0
tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]
mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0
median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0
except:
continue
return mean_friends, median_friends, mean_friends2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_prob_age(uids, prob_age) ->List[int]:
res = [0] * len(uids)
for i, uid in enumerate(uids):
res[i] = prob_age.setdefault(uid, 0)
return res
def get_grads_count(uids, grads_count) ->List[int]:
res = [0] * len(uids)
for i, uid in enumerate(uids):
res[i] = grads_count.setdefault(uid, 0)
return res
def get_groups_count(uids, usersGroups):
tmp = usersGroups.groupby('uid').count()
groups_count = [0] * len(uids)
for i, uid in enumerate(uids):
try:
groups_count[i] = tmp.at[uid, 'gid']
except:
continue
return groups_count
def get_mean_and_median_group(uids, gid2age, uid_groups):
mean_group = [0.0] * len(uids)
median_group = [0.0] * len(uids)
for i, uid in enumerate(uids):
try:
tmp = [gid2age[x] for x in uid_groups[uid]]
mean_group[i] = sum(tmp) / len(tmp)
median_group[i] = np.median(tmp)
except:
continue
return mean_group, median_group
def get_mean_and_median_friends(uids, uid2age, uid_friends):
mean_friends = [0.0] * len(uids)
median_friends = [0.0] * len(uids)
mean_friends2 = [0.0] * len(uids)
for i, uid in enumerate(uids):
try:
tmp = []
if uid in uid_friends and len(uid_friends[uid]) < 42:
for friend in uid_friends[uid]:
if friend in uid_friends:
for f2 in uid_friends[friend]:
if f2 != uid and f2 in uid2age:
tmp.append(uid2age[f2])
mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0
tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]
mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0
median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0
except:
continue
return mean_friends, median_friends, mean_friends2
def main():
with open('gid2age.pkl', 'rb') as fin:
gid2age = pickle.load(fin)
with open('uid2age.pkl', 'rb') as fin:
uid2age = pickle.load(fin)
with open('uid_friends.pkl', 'rb') as fin:
uid_friends = pickle.load(fin)
with open('scaler.pkl', 'rb') as fin:
scaler = pickle.load(fin)
model = CatBoostRegressor()
model.load_model('model')
test = pd.read_csv('/tmp/data/test.csv')
testEducationFeatures = pd.read_csv('/tmp/data/testEducationFeatures.csv')
testGroups = pd.read_csv('/tmp/data/testGroups.csv')
test['cfriends'] = 0
for index in test.index:
uid = test.at[index, 'uid']
if uid in uid_friends:
test.at[index, 'cfriends'] = len(uid_friends[uid])
else:
test.at[index, 'cfriends'] = 0
prob_age, grads_count = calculate_probable_age(testEducationFeatures)
test['prob_age'] = get_prob_age(test.uid, prob_age)
test['grads_count'] = get_grads_count(test.uid, grads_count)
test['groups_count'] = get_groups_count(test.uid, testGroups)
uid_groups = {}
for index in testGroups.index:
uid = testGroups.at[index, 'uid']
uid_groups[uid] = uid_groups.setdefault(uid, []) + [testGroups.at[
index, 'gid']]
test['mean_group_age'], test['median_group_age'
] = get_mean_and_median_group(test.uid, gid2age, uid_groups)
test['mean_friends_age'], test['median_friends_age'], test[
'mean_friends2_age'] = get_mean_and_median_friends(test.uid,
uid2age, uid_friends)
test['is_prob_age'] = test.prob_age != 0
test['is_group_age'] = test.mean_group_age != 0
test['is_friends_age'] = test.mean_friends_age != 0
X_test = scaler.transform(test.drop(['uid'], axis=1))
y_pred = model.predict(X_test)
res = pd.DataFrame({'uid': test.uid, 'age': y_pred})
res.to_csv('/var/log/result', header=True, index=False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calculate_probable_age(usersEducationFeatures):
prob_age = {}
grads_count = {}
age_diff1 = 17
age_diff2 = 22
for index in usersEducationFeatures.index:
count = 0
skip = False
if not pd.isnull(usersEducationFeatures.at[index, 'school_education']):
prob_age[usersEducationFeatures.at[index, 'uid']
] = 2021 + age_diff1 - usersEducationFeatures.at[index,
'school_education']
skip = True
for i in range(1, 8):
if skip:
break
if not pd.isnull(usersEducationFeatures.at[index,
f'graduation_{i}']):
prob_age[usersEducationFeatures.at[index, 'uid']
] = 2021 + age_diff2 - usersEducationFeatures.at[index,
f'graduation_{i}']
skip = True
if not pd.isnull(usersEducationFeatures.at[index, 'school_education']):
count += 1
for i in range(1, 8):
if not pd.isnull(usersEducationFeatures.at[index,
f'graduation_{i}']):
count += 1
grads_count[usersEducationFeatures.at[index, 'uid']] = count
return prob_age, grads_count
def get_prob_age(uids, prob_age) ->List[int]:
res = [0] * len(uids)
for i, uid in enumerate(uids):
res[i] = prob_age.setdefault(uid, 0)
return res
def get_grads_count(uids, grads_count) ->List[int]:
res = [0] * len(uids)
for i, uid in enumerate(uids):
res[i] = grads_count.setdefault(uid, 0)
return res
def get_groups_count(uids, usersGroups):
tmp = usersGroups.groupby('uid').count()
groups_count = [0] * len(uids)
for i, uid in enumerate(uids):
try:
groups_count[i] = tmp.at[uid, 'gid']
except:
continue
return groups_count
def get_mean_and_median_group(uids, gid2age, uid_groups):
mean_group = [0.0] * len(uids)
median_group = [0.0] * len(uids)
for i, uid in enumerate(uids):
try:
tmp = [gid2age[x] for x in uid_groups[uid]]
mean_group[i] = sum(tmp) / len(tmp)
median_group[i] = np.median(tmp)
except:
continue
return mean_group, median_group
def get_mean_and_median_friends(uids, uid2age, uid_friends):
mean_friends = [0.0] * len(uids)
median_friends = [0.0] * len(uids)
mean_friends2 = [0.0] * len(uids)
for i, uid in enumerate(uids):
try:
tmp = []
if uid in uid_friends and len(uid_friends[uid]) < 42:
for friend in uid_friends[uid]:
if friend in uid_friends:
for f2 in uid_friends[friend]:
if f2 != uid and f2 in uid2age:
tmp.append(uid2age[f2])
mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0
tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]
mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0
median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0
except:
continue
return mean_friends, median_friends, mean_friends2
def main():
with open('gid2age.pkl', 'rb') as fin:
gid2age = pickle.load(fin)
with open('uid2age.pkl', 'rb') as fin:
uid2age = pickle.load(fin)
with open('uid_friends.pkl', 'rb') as fin:
uid_friends = pickle.load(fin)
with open('scaler.pkl', 'rb') as fin:
scaler = pickle.load(fin)
model = CatBoostRegressor()
model.load_model('model')
test = pd.read_csv('/tmp/data/test.csv')
testEducationFeatures = pd.read_csv('/tmp/data/testEducationFeatures.csv')
testGroups = pd.read_csv('/tmp/data/testGroups.csv')
test['cfriends'] = 0
for index in test.index:
uid = test.at[index, 'uid']
if uid in uid_friends:
test.at[index, 'cfriends'] = len(uid_friends[uid])
else:
test.at[index, 'cfriends'] = 0
prob_age, grads_count = calculate_probable_age(testEducationFeatures)
test['prob_age'] = get_prob_age(test.uid, prob_age)
test['grads_count'] = get_grads_count(test.uid, grads_count)
test['groups_count'] = get_groups_count(test.uid, testGroups)
uid_groups = {}
for index in testGroups.index:
uid = testGroups.at[index, 'uid']
uid_groups[uid] = uid_groups.setdefault(uid, []) + [testGroups.at[
index, 'gid']]
test['mean_group_age'], test['median_group_age'
] = get_mean_and_median_group(test.uid, gid2age, uid_groups)
test['mean_friends_age'], test['median_friends_age'], test[
'mean_friends2_age'] = get_mean_and_median_friends(test.uid,
uid2age, uid_friends)
test['is_prob_age'] = test.prob_age != 0
test['is_group_age'] = test.mean_group_age != 0
test['is_friends_age'] = test.mean_friends_age != 0
X_test = scaler.transform(test.drop(['uid'], axis=1))
y_pred = model.predict(X_test)
res = pd.DataFrame({'uid': test.uid, 'age': y_pred})
res.to_csv('/var/log/result', header=True, index=False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from typing import List
import pandas as pd
import numpy as np
import pickle
from catboost import CatBoostRegressor
from sklearn.preprocessing import MinMaxScaler
def calculate_probable_age(usersEducationFeatures):
prob_age = {}
grads_count = {}
age_diff1 = 17
age_diff2 = 22
for index in usersEducationFeatures.index:
count = 0
skip = False
if not pd.isnull(usersEducationFeatures.at[index, 'school_education']):
prob_age[usersEducationFeatures.at[index, 'uid']
] = 2021 + age_diff1 - usersEducationFeatures.at[index,
'school_education']
skip = True
for i in range(1, 8):
if skip:
break
if not pd.isnull(usersEducationFeatures.at[index,
f'graduation_{i}']):
prob_age[usersEducationFeatures.at[index, 'uid']
] = 2021 + age_diff2 - usersEducationFeatures.at[index,
f'graduation_{i}']
skip = True
if not pd.isnull(usersEducationFeatures.at[index, 'school_education']):
count += 1
for i in range(1, 8):
if not pd.isnull(usersEducationFeatures.at[index,
f'graduation_{i}']):
count += 1
grads_count[usersEducationFeatures.at[index, 'uid']] = count
return prob_age, grads_count
def get_prob_age(uids, prob_age) ->List[int]:
res = [0] * len(uids)
for i, uid in enumerate(uids):
res[i] = prob_age.setdefault(uid, 0)
return res
def get_grads_count(uids, grads_count) ->List[int]:
res = [0] * len(uids)
for i, uid in enumerate(uids):
res[i] = grads_count.setdefault(uid, 0)
return res
def get_groups_count(uids, usersGroups):
tmp = usersGroups.groupby('uid').count()
groups_count = [0] * len(uids)
for i, uid in enumerate(uids):
try:
groups_count[i] = tmp.at[uid, 'gid']
except:
continue
return groups_count
def get_mean_and_median_group(uids, gid2age, uid_groups):
mean_group = [0.0] * len(uids)
median_group = [0.0] * len(uids)
for i, uid in enumerate(uids):
try:
tmp = [gid2age[x] for x in uid_groups[uid]]
mean_group[i] = sum(tmp) / len(tmp)
median_group[i] = np.median(tmp)
except:
continue
return mean_group, median_group
def get_mean_and_median_friends(uids, uid2age, uid_friends):
mean_friends = [0.0] * len(uids)
median_friends = [0.0] * len(uids)
mean_friends2 = [0.0] * len(uids)
for i, uid in enumerate(uids):
try:
tmp = []
if uid in uid_friends and len(uid_friends[uid]) < 42:
for friend in uid_friends[uid]:
if friend in uid_friends:
for f2 in uid_friends[friend]:
if f2 != uid and f2 in uid2age:
tmp.append(uid2age[f2])
mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0
tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]
mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0
median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0
except:
continue
return mean_friends, median_friends, mean_friends2
def main():
with open('gid2age.pkl', 'rb') as fin:
gid2age = pickle.load(fin)
with open('uid2age.pkl', 'rb') as fin:
uid2age = pickle.load(fin)
with open('uid_friends.pkl', 'rb') as fin:
uid_friends = pickle.load(fin)
with open('scaler.pkl', 'rb') as fin:
scaler = pickle.load(fin)
model = CatBoostRegressor()
model.load_model('model')
test = pd.read_csv('/tmp/data/test.csv')
testEducationFeatures = pd.read_csv('/tmp/data/testEducationFeatures.csv')
testGroups = pd.read_csv('/tmp/data/testGroups.csv')
test['cfriends'] = 0
for index in test.index:
uid = test.at[index, 'uid']
if uid in uid_friends:
test.at[index, 'cfriends'] = len(uid_friends[uid])
else:
test.at[index, 'cfriends'] = 0
prob_age, grads_count = calculate_probable_age(testEducationFeatures)
test['prob_age'] = get_prob_age(test.uid, prob_age)
test['grads_count'] = get_grads_count(test.uid, grads_count)
test['groups_count'] = get_groups_count(test.uid, testGroups)
uid_groups = {}
for index in testGroups.index:
uid = testGroups.at[index, 'uid']
uid_groups[uid] = uid_groups.setdefault(uid, []) + [testGroups.at[
index, 'gid']]
test['mean_group_age'], test['median_group_age'
] = get_mean_and_median_group(test.uid, gid2age, uid_groups)
test['mean_friends_age'], test['median_friends_age'], test[
'mean_friends2_age'] = get_mean_and_median_friends(test.uid,
uid2age, uid_friends)
test['is_prob_age'] = test.prob_age != 0
test['is_group_age'] = test.mean_group_age != 0
test['is_friends_age'] = test.mean_friends_age != 0
X_test = scaler.transform(test.drop(['uid'], axis=1))
y_pred = model.predict(X_test)
res = pd.DataFrame({'uid': test.uid, 'age': y_pred})
res.to_csv('/var/log/result', header=True, index=False)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from typing import List
import pandas as pd
import numpy as np
import pickle
from catboost import CatBoostRegressor
from sklearn.preprocessing import MinMaxScaler
def calculate_probable_age(usersEducationFeatures):
prob_age = {}
grads_count = {}
age_diff1 = 17 # age difference for school
age_diff2 = 22 # age difference for university
for index in usersEducationFeatures.index:
count = 0
skip = False
if not pd.isnull(usersEducationFeatures.at[index, "school_education"]):
prob_age[usersEducationFeatures.at[index, "uid"]] = (
2021 + age_diff1 - usersEducationFeatures.at[index, "school_education"]
)
skip = True
for i in range(1, 8):
if skip:
break
if not pd.isnull(usersEducationFeatures.at[index, f"graduation_{i}"]):
prob_age[usersEducationFeatures.at[index, "uid"]] = (
2021 + age_diff2 - usersEducationFeatures.at[index, f"graduation_{i}"]
)
skip = True
if not pd.isnull(usersEducationFeatures.at[index, "school_education"]):
count += 1
for i in range(1, 8):
if not pd.isnull(usersEducationFeatures.at[index, f"graduation_{i}"]):
count += 1
grads_count[usersEducationFeatures.at[index, "uid"]] = count
return prob_age, grads_count
def get_prob_age(uids, prob_age) -> List[int]:
res = [0] * len(uids)
for i, uid in enumerate(uids):
res[i] = prob_age.setdefault(uid, 0)
return res
def get_grads_count(uids, grads_count) -> List[int]:
res = [0] * len(uids)
for i, uid in enumerate(uids):
res[i] = grads_count.setdefault(uid, 0)
return res
def get_groups_count(uids, usersGroups):
tmp = usersGroups.groupby("uid").count()
groups_count = [0] * len(uids)
for i, uid in enumerate(uids):
try:
groups_count[i] = tmp.at[uid, "gid"]
except:
continue
return groups_count
def get_mean_and_median_group(uids, gid2age, uid_groups):
mean_group = [0.0] * len(uids)
median_group = [0.0] * len(uids)
for i, uid in enumerate(uids):
try:
tmp = [gid2age[x] for x in uid_groups[uid]]
mean_group[i] = sum(tmp) / len(tmp)
median_group[i] = np.median(tmp)
except:
continue
return mean_group, median_group
def get_mean_and_median_friends(uids, uid2age, uid_friends):
mean_friends = [0.0] * len(uids)
median_friends = [0.0] * len(uids)
mean_friends2 = [0.0] * len(uids)
for i, uid in enumerate(uids):
try:
tmp = []
if uid in uid_friends and len(uid_friends[uid]) < 42:
for friend in uid_friends[uid]:
if friend in uid_friends:
for f2 in uid_friends[friend]:
if f2 != uid and f2 in uid2age:
tmp.append(uid2age[f2])
mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0
tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]
mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0
median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0
except:
continue
return mean_friends, median_friends, mean_friends2
def main():
with open("gid2age.pkl", "rb") as fin:
gid2age = pickle.load(fin)
with open("uid2age.pkl", "rb") as fin:
uid2age = pickle.load(fin)
with open("uid_friends.pkl", "rb") as fin:
uid_friends = pickle.load(fin)
with open("scaler.pkl", "rb") as fin:
scaler = pickle.load(fin)
model = CatBoostRegressor()
model.load_model("model")
test = pd.read_csv("/tmp/data/test.csv")
testEducationFeatures = pd.read_csv("/tmp/data/testEducationFeatures.csv")
testGroups = pd.read_csv("/tmp/data/testGroups.csv")
test["cfriends"] = 0
for index in test.index:
uid = test.at[index, "uid"]
if uid in uid_friends:
test.at[index, "cfriends"] = len(uid_friends[uid])
else:
test.at[index, "cfriends"] = 0
prob_age, grads_count = calculate_probable_age(testEducationFeatures)
test["prob_age"] = get_prob_age(test.uid, prob_age)
test["grads_count"] = get_grads_count(test.uid, grads_count)
test["groups_count"] = get_groups_count(test.uid, testGroups)
uid_groups = {}
for index in testGroups.index:
uid = testGroups.at[index, "uid"]
uid_groups[uid] = uid_groups.setdefault(uid, []) + [testGroups.at[index, "gid"]]
test["mean_group_age"], test["median_group_age"] = get_mean_and_median_group(test.uid, gid2age, uid_groups)
test["mean_friends_age"], test["median_friends_age"], test["mean_friends2_age"] = get_mean_and_median_friends(
test.uid, uid2age, uid_friends
)
test["is_prob_age"] = test.prob_age != 0
test["is_group_age"] = test.mean_group_age != 0
test["is_friends_age"] = test.mean_friends_age != 0
X_test = scaler.transform(test.drop(["uid"], axis=1))
y_pred = model.predict(X_test)
res = pd.DataFrame({"uid": test.uid, "age": y_pred})
res.to_csv("/var/log/result", header=True, index=False)
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "ee0ed255b6851696dc57c01100cd67f5f959cf01",
"index": 7437,
"step-1": "<mask token>\n\n\ndef get_prob_age(uids, prob_age) ->List[int]:\n res = [0] * len(uids)\n for i, uid in enumerate(uids):\n res[i] = prob_age.setdefault(uid, 0)\n return res\n\n\ndef get_grads_count(uids, grads_count) ->List[int]:\n res = [0] * len(uids)\n for i, uid in enumerate(uids):\n res[i] = grads_count.setdefault(uid, 0)\n return res\n\n\ndef get_groups_count(uids, usersGroups):\n tmp = usersGroups.groupby('uid').count()\n groups_count = [0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n groups_count[i] = tmp.at[uid, 'gid']\n except:\n continue\n return groups_count\n\n\ndef get_mean_and_median_group(uids, gid2age, uid_groups):\n mean_group = [0.0] * len(uids)\n median_group = [0.0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n tmp = [gid2age[x] for x in uid_groups[uid]]\n mean_group[i] = sum(tmp) / len(tmp)\n median_group[i] = np.median(tmp)\n except:\n continue\n return mean_group, median_group\n\n\ndef get_mean_and_median_friends(uids, uid2age, uid_friends):\n mean_friends = [0.0] * len(uids)\n median_friends = [0.0] * len(uids)\n mean_friends2 = [0.0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n tmp = []\n if uid in uid_friends and len(uid_friends[uid]) < 42:\n for friend in uid_friends[uid]:\n if friend in uid_friends:\n for f2 in uid_friends[friend]:\n if f2 != uid and f2 in uid2age:\n tmp.append(uid2age[f2])\n mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0\n tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]\n mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0\n median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0\n except:\n continue\n return mean_friends, median_friends, mean_friends2\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_prob_age(uids, prob_age) ->List[int]:\n res = [0] * len(uids)\n for i, uid in enumerate(uids):\n res[i] = prob_age.setdefault(uid, 0)\n return res\n\n\ndef get_grads_count(uids, grads_count) ->List[int]:\n res = [0] * len(uids)\n for i, uid in enumerate(uids):\n res[i] = grads_count.setdefault(uid, 0)\n return res\n\n\ndef get_groups_count(uids, usersGroups):\n tmp = usersGroups.groupby('uid').count()\n groups_count = [0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n groups_count[i] = tmp.at[uid, 'gid']\n except:\n continue\n return groups_count\n\n\ndef get_mean_and_median_group(uids, gid2age, uid_groups):\n mean_group = [0.0] * len(uids)\n median_group = [0.0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n tmp = [gid2age[x] for x in uid_groups[uid]]\n mean_group[i] = sum(tmp) / len(tmp)\n median_group[i] = np.median(tmp)\n except:\n continue\n return mean_group, median_group\n\n\ndef get_mean_and_median_friends(uids, uid2age, uid_friends):\n mean_friends = [0.0] * len(uids)\n median_friends = [0.0] * len(uids)\n mean_friends2 = [0.0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n tmp = []\n if uid in uid_friends and len(uid_friends[uid]) < 42:\n for friend in uid_friends[uid]:\n if friend in uid_friends:\n for f2 in uid_friends[friend]:\n if f2 != uid and f2 in uid2age:\n tmp.append(uid2age[f2])\n mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0\n tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]\n mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0\n median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0\n except:\n continue\n return mean_friends, median_friends, mean_friends2\n\n\ndef main():\n with open('gid2age.pkl', 'rb') as fin:\n gid2age = pickle.load(fin)\n with open('uid2age.pkl', 'rb') as fin:\n uid2age = pickle.load(fin)\n with open('uid_friends.pkl', 'rb') as fin:\n uid_friends = pickle.load(fin)\n with open('scaler.pkl', 'rb') as fin:\n scaler = pickle.load(fin)\n model = CatBoostRegressor()\n model.load_model('model')\n test = pd.read_csv('/tmp/data/test.csv')\n testEducationFeatures = pd.read_csv('/tmp/data/testEducationFeatures.csv')\n testGroups = pd.read_csv('/tmp/data/testGroups.csv')\n test['cfriends'] = 0\n for index in test.index:\n uid = test.at[index, 'uid']\n if uid in uid_friends:\n test.at[index, 'cfriends'] = len(uid_friends[uid])\n else:\n test.at[index, 'cfriends'] = 0\n prob_age, grads_count = calculate_probable_age(testEducationFeatures)\n test['prob_age'] = get_prob_age(test.uid, prob_age)\n test['grads_count'] = get_grads_count(test.uid, grads_count)\n test['groups_count'] = get_groups_count(test.uid, testGroups)\n uid_groups = {}\n for index in testGroups.index:\n uid = testGroups.at[index, 'uid']\n uid_groups[uid] = uid_groups.setdefault(uid, []) + [testGroups.at[\n index, 'gid']]\n test['mean_group_age'], test['median_group_age'\n ] = get_mean_and_median_group(test.uid, gid2age, uid_groups)\n test['mean_friends_age'], test['median_friends_age'], test[\n 'mean_friends2_age'] = get_mean_and_median_friends(test.uid,\n uid2age, uid_friends)\n test['is_prob_age'] = test.prob_age != 0\n test['is_group_age'] = test.mean_group_age != 0\n test['is_friends_age'] = test.mean_friends_age != 0\n X_test = scaler.transform(test.drop(['uid'], axis=1))\n y_pred = model.predict(X_test)\n res = pd.DataFrame({'uid': test.uid, 'age': y_pred})\n res.to_csv('/var/log/result', header=True, index=False)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calculate_probable_age(usersEducationFeatures):\n prob_age = {}\n grads_count = {}\n age_diff1 = 17\n age_diff2 = 22\n for index in usersEducationFeatures.index:\n count = 0\n skip = False\n if not pd.isnull(usersEducationFeatures.at[index, 'school_education']):\n prob_age[usersEducationFeatures.at[index, 'uid']\n ] = 2021 + age_diff1 - usersEducationFeatures.at[index,\n 'school_education']\n skip = True\n for i in range(1, 8):\n if skip:\n break\n if not pd.isnull(usersEducationFeatures.at[index,\n f'graduation_{i}']):\n prob_age[usersEducationFeatures.at[index, 'uid']\n ] = 2021 + age_diff2 - usersEducationFeatures.at[index,\n f'graduation_{i}']\n skip = True\n if not pd.isnull(usersEducationFeatures.at[index, 'school_education']):\n count += 1\n for i in range(1, 8):\n if not pd.isnull(usersEducationFeatures.at[index,\n f'graduation_{i}']):\n count += 1\n grads_count[usersEducationFeatures.at[index, 'uid']] = count\n return prob_age, grads_count\n\n\ndef get_prob_age(uids, prob_age) ->List[int]:\n res = [0] * len(uids)\n for i, uid in enumerate(uids):\n res[i] = prob_age.setdefault(uid, 0)\n return res\n\n\ndef get_grads_count(uids, grads_count) ->List[int]:\n res = [0] * len(uids)\n for i, uid in enumerate(uids):\n res[i] = grads_count.setdefault(uid, 0)\n return res\n\n\ndef get_groups_count(uids, usersGroups):\n tmp = usersGroups.groupby('uid').count()\n groups_count = [0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n groups_count[i] = tmp.at[uid, 'gid']\n except:\n continue\n return groups_count\n\n\ndef get_mean_and_median_group(uids, gid2age, uid_groups):\n mean_group = [0.0] * len(uids)\n median_group = [0.0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n tmp = [gid2age[x] for x in uid_groups[uid]]\n mean_group[i] = sum(tmp) / len(tmp)\n median_group[i] = np.median(tmp)\n except:\n continue\n return mean_group, median_group\n\n\ndef get_mean_and_median_friends(uids, uid2age, uid_friends):\n mean_friends = [0.0] * len(uids)\n median_friends = [0.0] * len(uids)\n mean_friends2 = [0.0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n tmp = []\n if uid in uid_friends and len(uid_friends[uid]) < 42:\n for friend in uid_friends[uid]:\n if friend in uid_friends:\n for f2 in uid_friends[friend]:\n if f2 != uid and f2 in uid2age:\n tmp.append(uid2age[f2])\n mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0\n tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]\n mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0\n median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0\n except:\n continue\n return mean_friends, median_friends, mean_friends2\n\n\ndef main():\n with open('gid2age.pkl', 'rb') as fin:\n gid2age = pickle.load(fin)\n with open('uid2age.pkl', 'rb') as fin:\n uid2age = pickle.load(fin)\n with open('uid_friends.pkl', 'rb') as fin:\n uid_friends = pickle.load(fin)\n with open('scaler.pkl', 'rb') as fin:\n scaler = pickle.load(fin)\n model = CatBoostRegressor()\n model.load_model('model')\n test = pd.read_csv('/tmp/data/test.csv')\n testEducationFeatures = pd.read_csv('/tmp/data/testEducationFeatures.csv')\n testGroups = pd.read_csv('/tmp/data/testGroups.csv')\n test['cfriends'] = 0\n for index in test.index:\n uid = test.at[index, 'uid']\n if uid in uid_friends:\n test.at[index, 'cfriends'] = len(uid_friends[uid])\n else:\n test.at[index, 'cfriends'] = 0\n prob_age, grads_count = calculate_probable_age(testEducationFeatures)\n test['prob_age'] = get_prob_age(test.uid, prob_age)\n 
test['grads_count'] = get_grads_count(test.uid, grads_count)\n test['groups_count'] = get_groups_count(test.uid, testGroups)\n uid_groups = {}\n for index in testGroups.index:\n uid = testGroups.at[index, 'uid']\n uid_groups[uid] = uid_groups.setdefault(uid, []) + [testGroups.at[\n index, 'gid']]\n test['mean_group_age'], test['median_group_age'\n ] = get_mean_and_median_group(test.uid, gid2age, uid_groups)\n test['mean_friends_age'], test['median_friends_age'], test[\n 'mean_friends2_age'] = get_mean_and_median_friends(test.uid,\n uid2age, uid_friends)\n test['is_prob_age'] = test.prob_age != 0\n test['is_group_age'] = test.mean_group_age != 0\n test['is_friends_age'] = test.mean_friends_age != 0\n X_test = scaler.transform(test.drop(['uid'], axis=1))\n y_pred = model.predict(X_test)\n res = pd.DataFrame({'uid': test.uid, 'age': y_pred})\n res.to_csv('/var/log/result', header=True, index=False)\n\n\n<mask token>\n",
"step-4": "from typing import List\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom catboost import CatBoostRegressor\nfrom sklearn.preprocessing import MinMaxScaler\n\n\ndef calculate_probable_age(usersEducationFeatures):\n prob_age = {}\n grads_count = {}\n age_diff1 = 17\n age_diff2 = 22\n for index in usersEducationFeatures.index:\n count = 0\n skip = False\n if not pd.isnull(usersEducationFeatures.at[index, 'school_education']):\n prob_age[usersEducationFeatures.at[index, 'uid']\n ] = 2021 + age_diff1 - usersEducationFeatures.at[index,\n 'school_education']\n skip = True\n for i in range(1, 8):\n if skip:\n break\n if not pd.isnull(usersEducationFeatures.at[index,\n f'graduation_{i}']):\n prob_age[usersEducationFeatures.at[index, 'uid']\n ] = 2021 + age_diff2 - usersEducationFeatures.at[index,\n f'graduation_{i}']\n skip = True\n if not pd.isnull(usersEducationFeatures.at[index, 'school_education']):\n count += 1\n for i in range(1, 8):\n if not pd.isnull(usersEducationFeatures.at[index,\n f'graduation_{i}']):\n count += 1\n grads_count[usersEducationFeatures.at[index, 'uid']] = count\n return prob_age, grads_count\n\n\ndef get_prob_age(uids, prob_age) ->List[int]:\n res = [0] * len(uids)\n for i, uid in enumerate(uids):\n res[i] = prob_age.setdefault(uid, 0)\n return res\n\n\ndef get_grads_count(uids, grads_count) ->List[int]:\n res = [0] * len(uids)\n for i, uid in enumerate(uids):\n res[i] = grads_count.setdefault(uid, 0)\n return res\n\n\ndef get_groups_count(uids, usersGroups):\n tmp = usersGroups.groupby('uid').count()\n groups_count = [0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n groups_count[i] = tmp.at[uid, 'gid']\n except:\n continue\n return groups_count\n\n\ndef get_mean_and_median_group(uids, gid2age, uid_groups):\n mean_group = [0.0] * len(uids)\n median_group = [0.0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n tmp = [gid2age[x] for x in uid_groups[uid]]\n mean_group[i] = sum(tmp) / len(tmp)\n median_group[i] = np.median(tmp)\n except:\n continue\n return mean_group, median_group\n\n\ndef get_mean_and_median_friends(uids, uid2age, uid_friends):\n mean_friends = [0.0] * len(uids)\n median_friends = [0.0] * len(uids)\n mean_friends2 = [0.0] * len(uids)\n for i, uid in enumerate(uids):\n try:\n tmp = []\n if uid in uid_friends and len(uid_friends[uid]) < 42:\n for friend in uid_friends[uid]:\n if friend in uid_friends:\n for f2 in uid_friends[friend]:\n if f2 != uid and f2 in uid2age:\n tmp.append(uid2age[f2])\n mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0\n tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]\n mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0\n median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0\n except:\n continue\n return mean_friends, median_friends, mean_friends2\n\n\ndef main():\n with open('gid2age.pkl', 'rb') as fin:\n gid2age = pickle.load(fin)\n with open('uid2age.pkl', 'rb') as fin:\n uid2age = pickle.load(fin)\n with open('uid_friends.pkl', 'rb') as fin:\n uid_friends = pickle.load(fin)\n with open('scaler.pkl', 'rb') as fin:\n scaler = pickle.load(fin)\n model = CatBoostRegressor()\n model.load_model('model')\n test = pd.read_csv('/tmp/data/test.csv')\n testEducationFeatures = pd.read_csv('/tmp/data/testEducationFeatures.csv')\n testGroups = pd.read_csv('/tmp/data/testGroups.csv')\n test['cfriends'] = 0\n for index in test.index:\n uid = test.at[index, 'uid']\n if uid in uid_friends:\n test.at[index, 'cfriends'] = len(uid_friends[uid])\n else:\n test.at[index, 
'cfriends'] = 0\n prob_age, grads_count = calculate_probable_age(testEducationFeatures)\n test['prob_age'] = get_prob_age(test.uid, prob_age)\n test['grads_count'] = get_grads_count(test.uid, grads_count)\n test['groups_count'] = get_groups_count(test.uid, testGroups)\n uid_groups = {}\n for index in testGroups.index:\n uid = testGroups.at[index, 'uid']\n uid_groups[uid] = uid_groups.setdefault(uid, []) + [testGroups.at[\n index, 'gid']]\n test['mean_group_age'], test['median_group_age'\n ] = get_mean_and_median_group(test.uid, gid2age, uid_groups)\n test['mean_friends_age'], test['median_friends_age'], test[\n 'mean_friends2_age'] = get_mean_and_median_friends(test.uid,\n uid2age, uid_friends)\n test['is_prob_age'] = test.prob_age != 0\n test['is_group_age'] = test.mean_group_age != 0\n test['is_friends_age'] = test.mean_friends_age != 0\n X_test = scaler.transform(test.drop(['uid'], axis=1))\n y_pred = model.predict(X_test)\n res = pd.DataFrame({'uid': test.uid, 'age': y_pred})\n res.to_csv('/var/log/result', header=True, index=False)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from typing import List\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pickle\r\nfrom catboost import CatBoostRegressor\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\ndef calculate_probable_age(usersEducationFeatures):\r\n prob_age = {}\r\n grads_count = {}\r\n age_diff1 = 17 # age difference for school\r\n age_diff2 = 22 # age difference for university\r\n for index in usersEducationFeatures.index:\r\n count = 0\r\n skip = False\r\n\r\n if not pd.isnull(usersEducationFeatures.at[index, \"school_education\"]):\r\n prob_age[usersEducationFeatures.at[index, \"uid\"]] = (\r\n 2021 + age_diff1 - usersEducationFeatures.at[index, \"school_education\"]\r\n )\r\n skip = True\r\n for i in range(1, 8):\r\n if skip:\r\n break\r\n if not pd.isnull(usersEducationFeatures.at[index, f\"graduation_{i}\"]):\r\n prob_age[usersEducationFeatures.at[index, \"uid\"]] = (\r\n 2021 + age_diff2 - usersEducationFeatures.at[index, f\"graduation_{i}\"]\r\n )\r\n skip = True\r\n\r\n if not pd.isnull(usersEducationFeatures.at[index, \"school_education\"]):\r\n count += 1\r\n for i in range(1, 8):\r\n if not pd.isnull(usersEducationFeatures.at[index, f\"graduation_{i}\"]):\r\n count += 1\r\n\r\n grads_count[usersEducationFeatures.at[index, \"uid\"]] = count\r\n return prob_age, grads_count\r\n\r\n\r\ndef get_prob_age(uids, prob_age) -> List[int]:\r\n res = [0] * len(uids)\r\n for i, uid in enumerate(uids):\r\n res[i] = prob_age.setdefault(uid, 0)\r\n return res\r\n\r\n\r\ndef get_grads_count(uids, grads_count) -> List[int]:\r\n res = [0] * len(uids)\r\n for i, uid in enumerate(uids):\r\n res[i] = grads_count.setdefault(uid, 0)\r\n return res\r\n\r\n\r\ndef get_groups_count(uids, usersGroups):\r\n tmp = usersGroups.groupby(\"uid\").count()\r\n groups_count = [0] * len(uids)\r\n for i, uid in enumerate(uids):\r\n try:\r\n groups_count[i] = tmp.at[uid, \"gid\"]\r\n except:\r\n continue\r\n return groups_count\r\n\r\n\r\ndef get_mean_and_median_group(uids, gid2age, uid_groups):\r\n mean_group = [0.0] * len(uids)\r\n median_group = [0.0] * len(uids)\r\n for i, uid in enumerate(uids):\r\n try:\r\n tmp = [gid2age[x] for x in uid_groups[uid]]\r\n mean_group[i] = sum(tmp) / len(tmp)\r\n median_group[i] = np.median(tmp)\r\n except:\r\n continue\r\n return mean_group, median_group\r\n\r\n\r\ndef get_mean_and_median_friends(uids, uid2age, uid_friends):\r\n mean_friends = [0.0] * len(uids)\r\n median_friends = [0.0] * len(uids)\r\n mean_friends2 = [0.0] * len(uids)\r\n for i, uid in enumerate(uids):\r\n try:\r\n tmp = []\r\n if uid in uid_friends and len(uid_friends[uid]) < 42:\r\n for friend in uid_friends[uid]:\r\n if friend in uid_friends:\r\n for f2 in uid_friends[friend]:\r\n if f2 != uid and f2 in uid2age:\r\n tmp.append(uid2age[f2])\r\n mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0\r\n tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]\r\n mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0\r\n median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0\r\n except:\r\n continue\r\n return mean_friends, median_friends, mean_friends2\r\n\r\n\r\ndef main():\r\n with open(\"gid2age.pkl\", \"rb\") as fin:\r\n gid2age = pickle.load(fin)\r\n with open(\"uid2age.pkl\", \"rb\") as fin:\r\n uid2age = pickle.load(fin)\r\n with open(\"uid_friends.pkl\", \"rb\") as fin:\r\n uid_friends = pickle.load(fin)\r\n with open(\"scaler.pkl\", \"rb\") as fin:\r\n scaler = pickle.load(fin)\r\n model = CatBoostRegressor()\r\n model.load_model(\"model\")\r\n\r\n test = 
pd.read_csv(\"/tmp/data/test.csv\")\r\n testEducationFeatures = pd.read_csv(\"/tmp/data/testEducationFeatures.csv\")\r\n testGroups = pd.read_csv(\"/tmp/data/testGroups.csv\")\r\n\r\n test[\"cfriends\"] = 0\r\n for index in test.index:\r\n uid = test.at[index, \"uid\"]\r\n if uid in uid_friends:\r\n test.at[index, \"cfriends\"] = len(uid_friends[uid])\r\n else:\r\n test.at[index, \"cfriends\"] = 0\r\n\r\n prob_age, grads_count = calculate_probable_age(testEducationFeatures)\r\n test[\"prob_age\"] = get_prob_age(test.uid, prob_age)\r\n test[\"grads_count\"] = get_grads_count(test.uid, grads_count)\r\n\r\n test[\"groups_count\"] = get_groups_count(test.uid, testGroups)\r\n\r\n uid_groups = {}\r\n for index in testGroups.index:\r\n uid = testGroups.at[index, \"uid\"]\r\n uid_groups[uid] = uid_groups.setdefault(uid, []) + [testGroups.at[index, \"gid\"]]\r\n\r\n test[\"mean_group_age\"], test[\"median_group_age\"] = get_mean_and_median_group(test.uid, gid2age, uid_groups)\r\n\r\n test[\"mean_friends_age\"], test[\"median_friends_age\"], test[\"mean_friends2_age\"] = get_mean_and_median_friends(\r\n test.uid, uid2age, uid_friends\r\n )\r\n\r\n test[\"is_prob_age\"] = test.prob_age != 0\r\n test[\"is_group_age\"] = test.mean_group_age != 0\r\n test[\"is_friends_age\"] = test.mean_friends_age != 0\r\n\r\n X_test = scaler.transform(test.drop([\"uid\"], axis=1))\r\n\r\n y_pred = model.predict(X_test)\r\n\r\n res = pd.DataFrame({\"uid\": test.uid, \"age\": y_pred})\r\n\r\n res.to_csv(\"/var/log/result\", header=True, index=False)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
# i change it for change1
# change 1.py in master
i = 1
# fix bug for boss
|
normal
|
{
"blob_id": "92f4f1c8a4e04b07ed7c05d5bb733c0b9c28bd05",
"index": 5325,
"step-1": "<mask token>\n",
"step-2": "i = 1\n",
"step-3": "# i change it for change1\n# change 1.py in master\ni = 1\n# fix bug for boss\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#anand python problem 2:29
#Write a function array to create a 2-dimensional array. The function should take both dimensions as arguments. Value of each element can be initialized to None:
#
def array_imp(row,col):
res=[[None]*col for i in range(row) ]
return res
if __name__=='__main__':
outs=array_imp(2,3)
	print(outs)
|
normal
|
{
"blob_id": "b5835b676eb8ac814086f7482f172f48e2ad5a0a",
"index": 8189,
"step-1": "#anand python problem 2:29\n#Write a function array to create an 2-dimensional array. The function should take both dimensions as arguments. Value of each element can be initialized to None:\n#\n\ndef array_imp(row,col):\n\tres=[[None]*col for i in range(row) ]\n\treturn res\n\n\n\n\nif __name__=='__main__':\n\touts=array_imp(2,3)\n\tprint outs\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pandas as pd
import math
import json
import html
import bs4
import re
import dateparser
from bs4 import BeautifulSoup
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, List, Dict, ClassVar, Union
from urllib.parse import urlparse
from .markdown import MarkdownData, MarkdownDocument
Url = str
@dataclass
class Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
      - cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions as for dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def listify(self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
# make sure source is either a url or a html link tag <a>
for source in self.sources:
            assert (
                BeautifulSoup(source, "html.parser").a is not None
                or urlparse(source).netloc != ""
            ), f"'{source}' is invalid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) -> str:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
)
elif field == "sources":
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
    This class is a collection of actions. It is used for the four primary
    use cases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) -> "Actions":
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(fr'<div id="{cls.action_id}">+[\s\S]+<\/div>', md_doc)
assert len(md_data) == 1, f"multiple divs with id={cls.action_id} were found"
md_data = md_data[0]
soup = BeautifulSoup(md_data, "html.parser")
tables = soup.div.find_all("table")
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) -> "Actions":
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions
|
normal
|
{
"blob_id": "4d0f612c74dc175766f489580fc4a492e1bfd085",
"index": 4345,
"step-1": "<mask token>\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. \"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass Action:\n <mask token>\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n _meta_fields: ClassVar = ['author']\n _valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',\n 'working_conditions', 'discrimination', 'unfair_labor_practices',\n 'job_security']\n _valid_actions: ClassVar = ['strike', 'protest', 'open_letter',\n 'legal_action', 'union_drive', 'union_representation']\n\n @staticmethod\n def is_none(field: Any) ->bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == 'none':\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def to_md(self, field: str, td: bs4.element.Tag) ->str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert field in self.__dataclass_fields__, f'Cannot serialize {field}. Not a valid field in Action.'\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n td.string = str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags']:\n td.string = str(value).strip('[').strip(']').replace(\"'\", ''\n ).replace('\"', '')\n elif field == 'sources':\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(', '.join(ret)),\n 'html.parser'))\n else:\n td.string = value\n return td\n <mask token>\n <mask token>\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. 
\"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-3": "<mask token>\n\n\n@dataclass\nclass Action:\n <mask token>\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n _meta_fields: ClassVar = ['author']\n _valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',\n 'working_conditions', 'discrimination', 'unfair_labor_practices',\n 'job_security']\n _valid_actions: ClassVar = ['strike', 'protest', 'open_letter',\n 'legal_action', 'union_drive', 'union_representation']\n\n @staticmethod\n def is_none(field: Any) ->bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == 'none':\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n <mask token>\n\n def __post_init__(self):\n \"\"\" Used to validate fields. \"\"\"\n self.date = dateparser.parse(self.date).date()\n self.sources = self.listify(self.sources)\n self.struggles = self.listify(self.struggles)\n self.action = self.action.strip().lower()\n self.companies = self.listify(self.companies)\n self.tags = self.listify(self.tags)\n self.locations = self.listify(self.locations)\n self.workers = None if self.is_none(self.workers) else int(self.workers\n )\n assert self.action in self._valid_actions, f\"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}\"\n for struggle in self.struggles:\n assert struggle in self._valid_struggles, f\"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}\"\n for source in self.sources:\n assert BeautifulSoup(source, 'html.parser'\n ).a is not None or urlparse(source\n ).netloc is not '', f\"'{source}' is in valid. source must be a valid url or an html link tag element\"\n self.sources = [(BeautifulSoup(source, 'html.parser').a['href'] if \n 'href' in source else source) for source in self.sources]\n\n def __lt__(self, other):\n \"\"\" Used to make Actions sortable. \"\"\"\n return self.date < other.date\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Action):\n return self.__dict__.items() == other.__dict__.items()\n return False\n\n def to_df(self) ->Dict[str, Any]:\n \"\"\" Return dict of all fields serialized to string \"\"\"\n return {key: self.render_df(key) for key, value in self.__dict__.\n items()}\n\n def render_df(self, field: str) ->str:\n \"\"\" Return the value of the field rendered for df. \"\"\"\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n return str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags', 'sources'\n ]:\n return str(value).strip('[').strip(']').replace(\"'\", '').replace(\n '\"', '')\n else:\n return value\n\n def to_md(self, field: str, td: bs4.element.Tag) ->str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert field in self.__dataclass_fields__, f'Cannot serialize {field}. 
Not a valid field in Action.'\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n td.string = str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags']:\n td.string = str(value).strip('[').strip(']').replace(\"'\", ''\n ).replace('\"', '')\n elif field == 'sources':\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(', '.join(ret)),\n 'html.parser'))\n else:\n td.string = value\n return td\n <mask token>\n\n @classmethod\n def create_from_row(cls, row: pd.Series) ->'Action':\n \"\"\" Create an Action instance from a dataframe row. \"\"\"\n fields = [key for key, value in cls.__dataclass_fields__.items() if\n value.type != ClassVar]\n d = {key: value for key, value in row.to_dict().items() if key in\n fields}\n return cls(**d)\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. \"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. 
\"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-4": "<mask token>\nUrl = str\n\n\n@dataclass\nclass Action:\n \"\"\" The class for an action we want to track.\n\n This class is used to manage the data of an individual Action. It is used\n to perform the following:\n - set mandatory/optional fields\n - set meta fields\n - cast an validate data so that it knows how to read datafields from\n markdown and dataframes\n - output actions as for dataframes and markdown\n - create and populate action instances from markdown and dataframes\n \"\"\"\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n _meta_fields: ClassVar = ['author']\n _valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',\n 'working_conditions', 'discrimination', 'unfair_labor_practices',\n 'job_security']\n _valid_actions: ClassVar = ['strike', 'protest', 'open_letter',\n 'legal_action', 'union_drive', 'union_representation']\n\n @staticmethod\n def is_none(field: Any) ->bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == 'none':\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n\n def listify(self, field: Union[List[Any], Any]) ->List[Any]:\n if self.is_none(field):\n return None\n elif isinstance(field, (list,)):\n return field\n else:\n return [s.strip().lower() for s in field.split(',')]\n\n def __post_init__(self):\n \"\"\" Used to validate fields. \"\"\"\n self.date = dateparser.parse(self.date).date()\n self.sources = self.listify(self.sources)\n self.struggles = self.listify(self.struggles)\n self.action = self.action.strip().lower()\n self.companies = self.listify(self.companies)\n self.tags = self.listify(self.tags)\n self.locations = self.listify(self.locations)\n self.workers = None if self.is_none(self.workers) else int(self.workers\n )\n assert self.action in self._valid_actions, f\"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}\"\n for struggle in self.struggles:\n assert struggle in self._valid_struggles, f\"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}\"\n for source in self.sources:\n assert BeautifulSoup(source, 'html.parser'\n ).a is not None or urlparse(source\n ).netloc is not '', f\"'{source}' is in valid. source must be a valid url or an html link tag element\"\n self.sources = [(BeautifulSoup(source, 'html.parser').a['href'] if \n 'href' in source else source) for source in self.sources]\n\n def __lt__(self, other):\n \"\"\" Used to make Actions sortable. \"\"\"\n return self.date < other.date\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Action):\n return self.__dict__.items() == other.__dict__.items()\n return False\n\n def to_df(self) ->Dict[str, Any]:\n \"\"\" Return dict of all fields serialized to string \"\"\"\n return {key: self.render_df(key) for key, value in self.__dict__.\n items()}\n\n def render_df(self, field: str) ->str:\n \"\"\" Return the value of the field rendered for df. 
\"\"\"\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n return str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags', 'sources'\n ]:\n return str(value).strip('[').strip(']').replace(\"'\", '').replace(\n '\"', '')\n else:\n return value\n\n def to_md(self, field: str, td: bs4.element.Tag) ->str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert field in self.__dataclass_fields__, f'Cannot serialize {field}. Not a valid field in Action.'\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n td.string = str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags']:\n td.string = str(value).strip('[').strip(']').replace(\"'\", ''\n ).replace('\"', '')\n elif field == 'sources':\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(', '.join(ret)),\n 'html.parser'))\n else:\n td.string = value\n return td\n\n @classmethod\n def create_from_md(cls, table: bs4.element.Tag) ->'Action':\n \"\"\" Create an Action instance from a md table. \"\"\"\n a = {}\n trs = table.find_all('tr')\n for key, val in table.attrs.items():\n if key != 'class':\n a[key] = val\n for i, tr in enumerate(trs):\n td_key = tr.find('td', class_='field-key')\n td_val = tr.find('td', class_='field-value')\n val = ''.join(str(e) for e in td_val.contents).strip()\n key = ''.join(str(e) for e in td_key.contents).strip()\n a[key] = val\n return cls(**a)\n\n @classmethod\n def create_from_row(cls, row: pd.Series) ->'Action':\n \"\"\" Create an Action instance from a dataframe row. \"\"\"\n fields = [key for key, value in cls.__dataclass_fields__.items() if\n value.type != ClassVar]\n d = {key: value for key, value in row.to_dict().items() if key in\n fields}\n return cls(**d)\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. 
\"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-5": "import pandas as pd\nimport math\nimport json\nimport html\nimport bs4\nimport re\nimport dateparser\nfrom bs4 import BeautifulSoup\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom typing import Any, List, Dict, ClassVar, Union\nfrom urllib.parse import urlparse\nfrom .markdown import MarkdownData, MarkdownDocument\n\nUrl = str\n\n\n@dataclass\nclass Action:\n \"\"\" The class for an action we want to track.\n\n This class is used to manage the data of an individual Action. It is used\n to perform the following:\n - set mandatory/optional fields\n - set meta fields\n - cast an validate data so that it knows how to read datafields from\n markdown and dataframes\n - output actions as for dataframes and markdown\n - create and populate action instances from markdown and dataframes\n \"\"\"\n\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n\n _meta_fields: ClassVar = [\"author\"]\n\n _valid_struggles: ClassVar = [\n \"ethics\",\n \"pay_and_benefits\",\n \"working_conditions\",\n \"discrimination\",\n \"unfair_labor_practices\",\n \"job_security\",\n ]\n\n _valid_actions: ClassVar = [\n \"strike\",\n \"protest\",\n \"open_letter\",\n \"legal_action\",\n \"union_drive\",\n \"union_representation\",\n ]\n\n @staticmethod\n def is_none(field: Any) -> bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == \"none\":\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n\n def listify(self, field: Union[List[Any], Any]) -> List[Any]:\n if self.is_none(field):\n return None\n else:\n if isinstance(field, (list,)):\n return field\n else:\n return [s.strip().lower() for s in field.split(\",\")]\n\n def __post_init__(self):\n \"\"\" Used to validate fields. \"\"\"\n # self.date = datetime.strptime(self.date, \"%Y-%m-%d\").date()\n self.date = dateparser.parse(self.date).date()\n self.sources = self.listify(self.sources)\n self.struggles = self.listify(self.struggles)\n self.action = self.action.strip().lower()\n\n self.companies = self.listify(self.companies)\n self.tags = self.listify(self.tags)\n self.locations = self.listify(self.locations)\n\n self.workers = None if self.is_none(self.workers) else int(self.workers)\n\n # make sure action is a valid action\n assert (\n self.action in self._valid_actions\n ), f\"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}\"\n\n # make sure all struggles are valid struggles\n for struggle in self.struggles:\n assert (\n struggle in self._valid_struggles\n ), f\"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}\"\n\n # make sure source is either a url or a html link tag <a>\n for source in self.sources:\n assert (\n BeautifulSoup(source, \"html.parser\").a is not None\n or urlparse(source).netloc is not \"\"\n ), f\"'{source}' is in valid. source must be a valid url or an html link tag element\"\n\n # if html, extract only href from sources\n self.sources = [\n BeautifulSoup(source, \"html.parser\").a[\"href\"]\n if \"href\" in source\n else source\n for source in self.sources\n ]\n\n def __lt__(self, other):\n \"\"\" Used to make Actions sortable. 
\"\"\"\n return self.date < other.date\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Action):\n return self.__dict__.items() == other.__dict__.items()\n return False\n\n def to_df(self) -> Dict[str, Any]:\n \"\"\" Return dict of all fields serialized to string \"\"\"\n return {key: self.render_df(key) for key, value in self.__dict__.items()}\n\n def render_df(self, field: str) -> str:\n \"\"\" Return the value of the field rendered for df. \"\"\"\n value = self.__getattribute__(field)\n if field in [\"date\", \"workers\"]:\n return str(value)\n elif field in [\"locations\", \"struggles\", \"companies\", \"tags\", \"sources\"]:\n return str(value).strip(\"[\").strip(\"]\").replace(\"'\", \"\").replace('\"', \"\")\n else:\n return value\n\n def to_md(self, field: str, td: bs4.element.Tag) -> str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert (\n field in self.__dataclass_fields__\n ), f\"Cannot serialize {field}. Not a valid field in Action.\"\n\n value = self.__getattribute__(field)\n\n if field in [\"date\", \"workers\"]:\n td.string = str(value)\n elif field in [\"locations\", \"struggles\", \"companies\", \"tags\"]:\n td.string = (\n str(value).strip(\"[\").strip(\"]\").replace(\"'\", \"\").replace('\"', \"\")\n )\n elif field == \"sources\":\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(\", \".join(ret)), \"html.parser\"))\n else:\n td.string = value\n\n return td\n\n @classmethod\n def create_from_md(cls, table: bs4.element.Tag) -> \"Action\":\n \"\"\" Create an Action instance from a md table. \"\"\"\n a = {}\n trs = table.find_all(\"tr\")\n for key, val in table.attrs.items():\n if key != \"class\":\n a[key] = val\n for i, tr in enumerate(trs):\n td_key = tr.find(\"td\", class_=\"field-key\")\n td_val = tr.find(\"td\", class_=\"field-value\")\n val = \"\".join(str(e) for e in td_val.contents).strip()\n key = \"\".join(str(e) for e in td_key.contents).strip()\n a[key] = val\n return cls(**a)\n\n @classmethod\n def create_from_row(cls, row: pd.Series) -> \"Action\":\n \"\"\" Create an Action instance from a dataframe row. \"\"\"\n fields = [\n key\n for key, value in cls.__dataclass_fields__.items()\n if value.type != ClassVar\n ]\n d = {key: value for key, value in row.to_dict().items() if key in fields}\n return cls(**d)\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n\n action_id: ClassVar = \"actions\"\n actions: List[Action] = field(default_factory=lambda: [])\n fields: List[str] = field(\n default_factory=lambda: [\n key\n for key, value in Action.__dataclass_fields__.items()\n if value.type != ClassVar\n ]\n )\n\n def __len__(self) -> int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. 
\"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) -> \"Actions\":\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) -> pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient=\"list\")\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. \"\"\"\n soup = BeautifulSoup(f\"<div id={self.action_id}></div>\", \"html.parser\")\n for action in self.actions:\n table = soup.new_tag(\"table\")\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag(\"tr\")\n td_key = soup.new_tag(\"td\", attrs={\"class\": \"field-key\"})\n td_val = soup.new_tag(\"td\", attrs={\"class\": \"field-value\"})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) -> \"Actions\":\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(fr'<div id=\"{cls.action_id}\">+[\\s\\S]+<\\/div>', md_doc)\n assert len(md_data) == 1, f\"multiple divs with id={cls.action_id} were found\"\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, \"html.parser\")\n tables = soup.div.find_all(\"table\")\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) -> \"Actions\":\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-ids": [
10,
13,
19,
23,
25
]
}
|
[
10,
13,
19,
23,
25
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('sms_consumer', '0006_auto_20210923_0733')]
operations = [migrations.RemoveField(model_name='smslogmodel', name=
'hello')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('sms_consumer', '0006_auto_20210923_0733')]
operations = [migrations.RemoveField(model_name='smslogmodel', name=
'hello')]
<|reserved_special_token_1|>
# Generated by Django 3.2.7 on 2021-09-23 07:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sms_consumer', '0006_auto_20210923_0733'),
]
operations = [
migrations.RemoveField(
model_name='smslogmodel',
name='hello',
),
]
|
flexible
|
{
"blob_id": "fc9742ceb3c38a5f8c1ad1f030d76103ba0a7a81",
"index": 3857,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('sms_consumer', '0006_auto_20210923_0733')]\n operations = [migrations.RemoveField(model_name='smslogmodel', name=\n 'hello')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('sms_consumer', '0006_auto_20210923_0733')]\n operations = [migrations.RemoveField(model_name='smslogmodel', name=\n 'hello')]\n",
"step-5": "# Generated by Django 3.2.7 on 2021-09-23 07:33\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sms_consumer', '0006_auto_20210923_0733'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='smslogmodel',\n name='hello',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ItemEffect(AbstractItemEffect):
<|reserved_special_token_0|>
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ItemEffect(AbstractItemEffect):
def item_handle_event(self, event: Event, game_state: GameState):
if isinstance(event, PlayerDamagedEnemy):
game_state.player_state.gain_buff_effect(get_buff_effect(
BUFF_TYPE), BUFF_DURATION)
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND
HEALTH_REGEN_BONUS = 1
BUFF_DURATION = Millis(5000)
class ItemEffect(AbstractItemEffect):
def item_handle_event(self, event: Event, game_state: GameState):
if isinstance(event, PlayerDamagedEnemy):
game_state.player_state.gain_buff_effect(get_buff_effect(
BUFF_TYPE), BUFF_DURATION)
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}
)
def register_healing_wand_item():
item_type = ItemType.HEALING_WAND
register_custom_effect_item(item_type=item_type, item_level=4,
ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.
ITEM_HEALING_WAND, image_file_path=
'resources/graphics/item_healing_wand.png', item_equipment_category
=ItemEquipmentCategory.MAIN_HAND, name='Healing wand',
custom_description=['When you damage an enemy, gain +' + str(
HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(
BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],
custom_effect=ItemEffect())
register_buff_effect(BUFF_TYPE, BuffedByHealingWand)
register_buff_text(BUFF_TYPE, 'Healing wand')
<|reserved_special_token_1|>
from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect
from pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat
from pythongame.core.game_data import UiIconSprite, register_buff_text
from pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState
from pythongame.core.item_effects import AbstractItemEffect
from pythongame.core.item_inventory import ItemEquipmentCategory
from pythongame.game_data.items.register_items_util import register_custom_effect_item
BUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND
HEALTH_REGEN_BONUS = 1
BUFF_DURATION = Millis(5000)
class ItemEffect(AbstractItemEffect):
def item_handle_event(self, event: Event, game_state: GameState):
if isinstance(event, PlayerDamagedEnemy):
game_state.player_state.gain_buff_effect(get_buff_effect(
BUFF_TYPE), BUFF_DURATION)
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}
)
def register_healing_wand_item():
item_type = ItemType.HEALING_WAND
register_custom_effect_item(item_type=item_type, item_level=4,
ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.
ITEM_HEALING_WAND, image_file_path=
'resources/graphics/item_healing_wand.png', item_equipment_category
=ItemEquipmentCategory.MAIN_HAND, name='Healing wand',
custom_description=['When you damage an enemy, gain +' + str(
HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(
BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],
custom_effect=ItemEffect())
register_buff_effect(BUFF_TYPE, BuffedByHealingWand)
register_buff_text(BUFF_TYPE, 'Healing wand')
<|reserved_special_token_1|>
from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect
from pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat
from pythongame.core.game_data import UiIconSprite, register_buff_text
from pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState
from pythongame.core.item_effects import AbstractItemEffect
from pythongame.core.item_inventory import ItemEquipmentCategory
from pythongame.game_data.items.register_items_util import register_custom_effect_item
BUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND
HEALTH_REGEN_BONUS = 1
BUFF_DURATION = Millis(5000)
class ItemEffect(AbstractItemEffect):
def item_handle_event(self, event: Event, game_state: GameState):
if isinstance(event, PlayerDamagedEnemy):
game_state.player_state.gain_buff_effect(get_buff_effect(BUFF_TYPE), BUFF_DURATION)
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS})
def register_healing_wand_item():
item_type = ItemType.HEALING_WAND
register_custom_effect_item(
item_type=item_type,
item_level=4,
ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND,
sprite=Sprite.ITEM_HEALING_WAND,
image_file_path="resources/graphics/item_healing_wand.png",
item_equipment_category=ItemEquipmentCategory.MAIN_HAND,
name="Healing wand",
custom_description=["When you damage an enemy, gain +" + str(HEALTH_REGEN_BONUS) + " health regen for " +
"{:.0f}".format(BUFF_DURATION / 1000) + "s"],
stat_modifier_intervals=[],
custom_effect=ItemEffect()
)
register_buff_effect(BUFF_TYPE, BuffedByHealingWand)
register_buff_text(BUFF_TYPE, "Healing wand")
|
flexible
|
{
"blob_id": "61454a3d6b5b17bff871ededc6ddfe8384043884",
"index": 59,
"step-1": "<mask token>\n\n\nclass ItemEffect(AbstractItemEffect):\n <mask token>\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(\n BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\nBUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND\nHEALTH_REGEN_BONUS = 1\nBUFF_DURATION = Millis(5000)\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(\n BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\ndef register_healing_wand_item():\n item_type = ItemType.HEALING_WAND\n register_custom_effect_item(item_type=item_type, item_level=4,\n ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.\n ITEM_HEALING_WAND, image_file_path=\n 'resources/graphics/item_healing_wand.png', item_equipment_category\n =ItemEquipmentCategory.MAIN_HAND, name='Healing wand',\n custom_description=['When you damage an enemy, gain +' + str(\n HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(\n BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],\n custom_effect=ItemEffect())\n register_buff_effect(BUFF_TYPE, BuffedByHealingWand)\n register_buff_text(BUFF_TYPE, 'Healing wand')\n",
"step-4": "from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect\nfrom pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat\nfrom pythongame.core.game_data import UiIconSprite, register_buff_text\nfrom pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState\nfrom pythongame.core.item_effects import AbstractItemEffect\nfrom pythongame.core.item_inventory import ItemEquipmentCategory\nfrom pythongame.game_data.items.register_items_util import register_custom_effect_item\nBUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND\nHEALTH_REGEN_BONUS = 1\nBUFF_DURATION = Millis(5000)\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(\n BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\ndef register_healing_wand_item():\n item_type = ItemType.HEALING_WAND\n register_custom_effect_item(item_type=item_type, item_level=4,\n ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.\n ITEM_HEALING_WAND, image_file_path=\n 'resources/graphics/item_healing_wand.png', item_equipment_category\n =ItemEquipmentCategory.MAIN_HAND, name='Healing wand',\n custom_description=['When you damage an enemy, gain +' + str(\n HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(\n BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],\n custom_effect=ItemEffect())\n register_buff_effect(BUFF_TYPE, BuffedByHealingWand)\n register_buff_text(BUFF_TYPE, 'Healing wand')\n",
"step-5": "from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect\nfrom pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat\nfrom pythongame.core.game_data import UiIconSprite, register_buff_text\nfrom pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState\nfrom pythongame.core.item_effects import AbstractItemEffect\nfrom pythongame.core.item_inventory import ItemEquipmentCategory\nfrom pythongame.game_data.items.register_items_util import register_custom_effect_item\n\nBUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND\nHEALTH_REGEN_BONUS = 1\nBUFF_DURATION = Millis(5000)\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS})\n\n\ndef register_healing_wand_item():\n item_type = ItemType.HEALING_WAND\n register_custom_effect_item(\n item_type=item_type,\n item_level=4,\n ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND,\n sprite=Sprite.ITEM_HEALING_WAND,\n image_file_path=\"resources/graphics/item_healing_wand.png\",\n item_equipment_category=ItemEquipmentCategory.MAIN_HAND,\n name=\"Healing wand\",\n custom_description=[\"When you damage an enemy, gain +\" + str(HEALTH_REGEN_BONUS) + \" health regen for \" +\n \"{:.0f}\".format(BUFF_DURATION / 1000) + \"s\"],\n stat_modifier_intervals=[],\n custom_effect=ItemEffect()\n )\n\n register_buff_effect(BUFF_TYPE, BuffedByHealingWand)\n register_buff_text(BUFF_TYPE, \"Healing wand\")\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Category, MPTTModelAdmin)
admin.site.register(Item)
admin.site.register(Product)
<|reserved_special_token_1|>
from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from product.models import Item, Product, Category
admin.site.register(Category, MPTTModelAdmin)
admin.site.register(Item)
admin.site.register(Product)
<|reserved_special_token_1|>
from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from product.models import Item,Product,Category
# Register your models here.
admin.site.register(Category,MPTTModelAdmin)
admin.site.register(Item)
admin.site.register(Product)
|
flexible
|
{
"blob_id": "fcd3e4c0d42649833e6c5ff6414c993654691d16",
"index": 188,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Category, MPTTModelAdmin)\nadmin.site.register(Item)\nadmin.site.register(Product)\n",
"step-3": "from django.contrib import admin\nfrom mptt.admin import MPTTModelAdmin\nfrom product.models import Item, Product, Category\nadmin.site.register(Category, MPTTModelAdmin)\nadmin.site.register(Item)\nadmin.site.register(Product)\n",
"step-4": "from django.contrib import admin\nfrom mptt.admin import MPTTModelAdmin\nfrom product.models import Item,Product,Category\n# Register your models here.\nadmin.site.register(Category,MPTTModelAdmin)\nadmin.site.register(Item)\nadmin.site.register(Product)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
path.append('D:/Github/astrophy-research/mylib')
path.append('D:/Github/astrophy-research/multi_shear_detect')
path.append('%s/work/mylib' % my_home)
<|reserved_special_token_0|>
if rank == 0:
nbytes = 2 * signal_num * itemsize
else:
nbytes = 0
<|reserved_special_token_0|>
print(rank, signal_est)
comm.Barrier()
if rank == 0:
print(signals)
print(result)
mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))
mc[0] = mc[0] - 1
print(mc)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
my_home = os.popen('echo $MYWORK_DIR').readlines()[0][:-1]
<|reserved_special_token_0|>
path.append('D:/Github/astrophy-research/mylib')
path.append('D:/Github/astrophy-research/multi_shear_detect')
path.append('%s/work/mylib' % my_home)
<|reserved_special_token_0|>
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numprocs = comm.Get_size()
source_num = int(argv[1]) * 10000
sigma_1 = float(argv[2])
sigma_2 = float(argv[3])
signal_num = numprocs
signals = numpy.linspace(-0.05, 0.05, signal_num)
itemsize = MPI.DOUBLE.Get_size()
if rank == 0:
nbytes = 2 * signal_num * itemsize
else:
nbytes = 0
win1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)
buf1, itemsize = win1.Shared_query(0)
result = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num))
fq = Fourier_Quad(12, 123)
n = numpy.ones((source_num,))
source = numpy.random.normal(signals[rank], sigma_1, source_num
) + numpy.random.normal(-signals[rank] / 100, sigma_2, source_num)
signal_est = fq.find_shear(source, n, 8, scale=100, left=-0.08, right=0.08)[:2]
result[:, rank] = signal_est
print(rank, signal_est)
comm.Barrier()
if rank == 0:
print(signals)
print(result)
mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))
mc[0] = mc[0] - 1
print(mc)
<|reserved_special_token_1|>
import os
my_home = os.popen('echo $MYWORK_DIR').readlines()[0][:-1]
import numpy
from sys import path, argv
path.append('D:/Github/astrophy-research/mylib')
path.append('D:/Github/astrophy-research/multi_shear_detect')
path.append('%s/work/mylib' % my_home)
from Fourier_Quad import Fourier_Quad
import tool_box
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numprocs = comm.Get_size()
source_num = int(argv[1]) * 10000
sigma_1 = float(argv[2])
sigma_2 = float(argv[3])
signal_num = numprocs
signals = numpy.linspace(-0.05, 0.05, signal_num)
itemsize = MPI.DOUBLE.Get_size()
if rank == 0:
nbytes = 2 * signal_num * itemsize
else:
nbytes = 0
win1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)
buf1, itemsize = win1.Shared_query(0)
result = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num))
fq = Fourier_Quad(12, 123)
n = numpy.ones((source_num,))
source = numpy.random.normal(signals[rank], sigma_1, source_num
) + numpy.random.normal(-signals[rank] / 100, sigma_2, source_num)
signal_est = fq.find_shear(source, n, 8, scale=100, left=-0.08, right=0.08)[:2]
result[:, rank] = signal_est
print(rank, signal_est)
comm.Barrier()
if rank == 0:
print(signals)
print(result)
mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))
mc[0] = mc[0] - 1
print(mc)
<|reserved_special_token_1|>
import os
my_home = os.popen("echo $MYWORK_DIR").readlines()[0][:-1]
import numpy
from sys import path, argv
path.append("D:/Github/astrophy-research/mylib")
path.append("D:/Github/astrophy-research/multi_shear_detect")
path.append('%s/work/mylib' % my_home)
from Fourier_Quad import Fourier_Quad
# import h5py
# from plot_tool import Image_Plot
import tool_box
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numprocs = comm.Get_size()
source_num = int(argv[1])*10000
sigma_1 = float(argv[2])
sigma_2 = float(argv[3])
signal_num = numprocs
signals = numpy.linspace(-0.05, 0.05, signal_num)
itemsize = MPI.DOUBLE.Get_size()
if rank == 0:
# bytes for 10 double elements
nbytes = 2*signal_num*itemsize
else:
nbytes = 0
# on rank 0 of comm, create the contiguous shared block
win1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)
buf1, itemsize = win1.Shared_query(0)
result = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num)) # array filled with zero
fq = Fourier_Quad(12,123)
n = numpy.ones((source_num, ))
# for i in range(signal_num):
source = numpy.random.normal(signals[rank], sigma_1, source_num) + numpy.random.normal(-signals[rank]/100, sigma_2, source_num)
signal_est = fq.find_shear(source, n, 8,scale=100, left=-0.08, right=0.08)[:2]
result[:, rank] = signal_est
print(rank, signal_est)
comm.Barrier()
if rank == 0:
# result[2] = signals
print(signals)
print(result)
mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))
mc[0] = mc[0] - 1
print(mc)
# img = Image_Plot()
# img.subplots(1,1)
# img.axs[0][0].errorbar(signals, result[0], result[1])
# img.axs[0][0].plot([-0.06,0.06],[-0.06, 0.06])
# img.show_img()
|
flexible
|
{
"blob_id": "1ffdc2845bc503c0a30407de444a152f8cc68d57",
"index": 1370,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npath.append('D:/Github/astrophy-research/mylib')\npath.append('D:/Github/astrophy-research/multi_shear_detect')\npath.append('%s/work/mylib' % my_home)\n<mask token>\nif rank == 0:\n nbytes = 2 * signal_num * itemsize\nelse:\n nbytes = 0\n<mask token>\nprint(rank, signal_est)\ncomm.Barrier()\nif rank == 0:\n print(signals)\n print(result)\n mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))\n mc[0] = mc[0] - 1\n print(mc)\n",
"step-3": "<mask token>\nmy_home = os.popen('echo $MYWORK_DIR').readlines()[0][:-1]\n<mask token>\npath.append('D:/Github/astrophy-research/mylib')\npath.append('D:/Github/astrophy-research/multi_shear_detect')\npath.append('%s/work/mylib' % my_home)\n<mask token>\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nnumprocs = comm.Get_size()\nsource_num = int(argv[1]) * 10000\nsigma_1 = float(argv[2])\nsigma_2 = float(argv[3])\nsignal_num = numprocs\nsignals = numpy.linspace(-0.05, 0.05, signal_num)\nitemsize = MPI.DOUBLE.Get_size()\nif rank == 0:\n nbytes = 2 * signal_num * itemsize\nelse:\n nbytes = 0\nwin1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)\nbuf1, itemsize = win1.Shared_query(0)\nresult = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num))\nfq = Fourier_Quad(12, 123)\nn = numpy.ones((source_num,))\nsource = numpy.random.normal(signals[rank], sigma_1, source_num\n ) + numpy.random.normal(-signals[rank] / 100, sigma_2, source_num)\nsignal_est = fq.find_shear(source, n, 8, scale=100, left=-0.08, right=0.08)[:2]\nresult[:, rank] = signal_est\nprint(rank, signal_est)\ncomm.Barrier()\nif rank == 0:\n print(signals)\n print(result)\n mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))\n mc[0] = mc[0] - 1\n print(mc)\n",
"step-4": "import os\nmy_home = os.popen('echo $MYWORK_DIR').readlines()[0][:-1]\nimport numpy\nfrom sys import path, argv\npath.append('D:/Github/astrophy-research/mylib')\npath.append('D:/Github/astrophy-research/multi_shear_detect')\npath.append('%s/work/mylib' % my_home)\nfrom Fourier_Quad import Fourier_Quad\nimport tool_box\nfrom mpi4py import MPI\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nnumprocs = comm.Get_size()\nsource_num = int(argv[1]) * 10000\nsigma_1 = float(argv[2])\nsigma_2 = float(argv[3])\nsignal_num = numprocs\nsignals = numpy.linspace(-0.05, 0.05, signal_num)\nitemsize = MPI.DOUBLE.Get_size()\nif rank == 0:\n nbytes = 2 * signal_num * itemsize\nelse:\n nbytes = 0\nwin1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)\nbuf1, itemsize = win1.Shared_query(0)\nresult = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num))\nfq = Fourier_Quad(12, 123)\nn = numpy.ones((source_num,))\nsource = numpy.random.normal(signals[rank], sigma_1, source_num\n ) + numpy.random.normal(-signals[rank] / 100, sigma_2, source_num)\nsignal_est = fq.find_shear(source, n, 8, scale=100, left=-0.08, right=0.08)[:2]\nresult[:, rank] = signal_est\nprint(rank, signal_est)\ncomm.Barrier()\nif rank == 0:\n print(signals)\n print(result)\n mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))\n mc[0] = mc[0] - 1\n print(mc)\n",
"step-5": "import os\nmy_home = os.popen(\"echo $MYWORK_DIR\").readlines()[0][:-1]\nimport numpy\nfrom sys import path, argv\npath.append(\"D:/Github/astrophy-research/mylib\")\npath.append(\"D:/Github/astrophy-research/multi_shear_detect\")\npath.append('%s/work/mylib' % my_home)\nfrom Fourier_Quad import Fourier_Quad\n# import h5py\n# from plot_tool import Image_Plot\nimport tool_box\nfrom mpi4py import MPI\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nnumprocs = comm.Get_size()\n\nsource_num = int(argv[1])*10000\nsigma_1 = float(argv[2])\nsigma_2 = float(argv[3])\nsignal_num = numprocs\nsignals = numpy.linspace(-0.05, 0.05, signal_num)\n\nitemsize = MPI.DOUBLE.Get_size()\nif rank == 0:\n # bytes for 10 double elements\n nbytes = 2*signal_num*itemsize\nelse:\n nbytes = 0\n\n# on rank 0 of comm, create the contiguous shared block\nwin1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)\nbuf1, itemsize = win1.Shared_query(0)\nresult = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num)) # array filled with zero\n\nfq = Fourier_Quad(12,123)\nn = numpy.ones((source_num, ))\n# for i in range(signal_num):\nsource = numpy.random.normal(signals[rank], sigma_1, source_num) + numpy.random.normal(-signals[rank]/100, sigma_2, source_num)\nsignal_est = fq.find_shear(source, n, 8,scale=100, left=-0.08, right=0.08)[:2]\nresult[:, rank] = signal_est\nprint(rank, signal_est)\ncomm.Barrier()\nif rank == 0:\n # result[2] = signals\n print(signals)\n print(result)\n mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))\n mc[0] = mc[0] - 1\n print(mc)\n# img = Image_Plot()\n# img.subplots(1,1)\n# img.axs[0][0].errorbar(signals, result[0], result[1])\n# img.axs[0][0].plot([-0.06,0.06],[-0.06, 0.06])\n# img.show_img()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from datetime import datetime
import httplib2
from apiclient.discovery import build
from flask_login import UserMixin
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from oauth2client.client import OAuth2Credentials
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.types import ARRAY
from app import app
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.Text)
history_id = db.Column(db.Integer)
customer_label_id = db.Column(db.Text)
credentials_json = db.Column(JSONB)
threads = db.relationship('Thread', backref='user', lazy='dynamic')
def __repr__(self):
return '<User {}>'.format(self.email)
@property
def credentials(self):
if self.credentials_json:
return OAuth2Credentials.from_json(self.credentials_json)
else:
return None
@credentials.setter
def credentials(self, cred):
if type(cred) is OAuth2Credentials:
self.credentials_json = cred.to_json()
else:
self.credentials_json = cred
@property
def gmail(self):
http = self.credentials.authorize(httplib2.Http())
return build('gmail', 'v1', http=http)
def sync_inbox(self):
labels = self.gmail.users().labels().list(userId='me').execute()['labels']
if len([label for label in labels if label['name'] == 'Growth']) == 0:
raise Exception('No Growth label found')
for label in labels:
if label['name'] == 'Growth':
self.customer_label_id = label['id']
db.session.add(self)
db.session.commit()
next_page_token = None
while True:
thread_result = self.gmail.users().threads().list(userId='me', labelIds=self.customer_label_id, pageToken=next_page_token).execute()
for thread in thread_result['threads']:
for message in self.gmail.users().threads().get(userId='me', id=thread['id']).execute()['messages']:
data = self.gmail.users().messages().get(userId='me', id=message['id'], format='metadata').execute()
msg = Message(
gmail_id=data['id'],
internal_date=datetime.fromtimestamp(int(data['internalDate']) / 1e3),
snippet=data['snippet'],
subject=[x for x in data['payload']['headers'] if x['name'] == 'Subject'][0]['value'],
sender=[x for x in data['payload']['headers'] if x['name'] == 'From'][0]['value'],
recipient=[x for x in data['payload']['headers'] if x['name'] == 'To'][0]['value'],
)
thread = Thread.query.filter_by(gmail_id=data['threadId']).first()
if not thread:
thread = Thread(gmail_id=data['threadId'], user_id=self.id,)
msg.thread = thread
db.session.add(msg)
db.session.add(thread)
if thread_result.get('nextPageToken'):
next_page_token = thread_result['nextPageToken']
else:
db.session.commit()
break
# pull history_id
# save latest
# setup notifications
class Message(db.Model):
id = db.Column(db.Integer, primary_key=True)
gmail_id = db.Column(db.Text)
internal_date = db.Column(db.DateTime, nullable=False)
snippet = db.Column(db.Text)
sender = db.Column(db.Text)
recipient = db.Column(db.Text)
cc = db.Column(db.Text)
bcc = db.Column(db.Text)
subject = db.Column(db.Text)
thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'), nullable=False)
class Thread(db.Model):
id = db.Column(db.Integer, primary_key=True)
gmail_id = db.Column(db.Text)
snippet = db.Column(db.Text)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
messages = db.relationship('Message', backref='thread', lazy='dynamic')
|
normal
|
{
"blob_id": "866ec11f6fe13fb2283709128376080afc7493bf",
"index": 5040,
"step-1": "<mask token>\n\n\nclass User(db.Model, UserMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<User {}>'.format(self.email)\n\n @property\n def credentials(self):\n if self.credentials_json:\n return OAuth2Credentials.from_json(self.credentials_json)\n else:\n return None\n <mask token>\n\n @property\n def gmail(self):\n http = self.credentials.authorize(httplib2.Http())\n return build('gmail', 'v1', http=http)\n <mask token>\n\n\nclass Message(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n gmail_id = db.Column(db.Text)\n internal_date = db.Column(db.DateTime, nullable=False)\n snippet = db.Column(db.Text)\n sender = db.Column(db.Text)\n recipient = db.Column(db.Text)\n cc = db.Column(db.Text)\n bcc = db.Column(db.Text)\n subject = db.Column(db.Text)\n thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'), nullable=\n False)\n\n\nclass Thread(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n gmail_id = db.Column(db.Text)\n snippet = db.Column(db.Text)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n messages = db.relationship('Message', backref='thread', lazy='dynamic')\n",
"step-2": "<mask token>\n\n\nclass User(db.Model, UserMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<User {}>'.format(self.email)\n\n @property\n def credentials(self):\n if self.credentials_json:\n return OAuth2Credentials.from_json(self.credentials_json)\n else:\n return None\n\n @credentials.setter\n def credentials(self, cred):\n if type(cred) is OAuth2Credentials:\n self.credentials_json = cred.to_json()\n else:\n self.credentials_json = cred\n\n @property\n def gmail(self):\n http = self.credentials.authorize(httplib2.Http())\n return build('gmail', 'v1', http=http)\n\n def sync_inbox(self):\n labels = self.gmail.users().labels().list(userId='me').execute()[\n 'labels']\n if len([label for label in labels if label['name'] == 'Growth']) == 0:\n raise Exception('No Growth label found')\n for label in labels:\n if label['name'] == 'Growth':\n self.customer_label_id = label['id']\n db.session.add(self)\n db.session.commit()\n next_page_token = None\n while True:\n thread_result = self.gmail.users().threads().list(userId='me',\n labelIds=self.customer_label_id, pageToken=next_page_token\n ).execute()\n for thread in thread_result['threads']:\n for message in self.gmail.users().threads().get(userId='me',\n id=thread['id']).execute()['messages']:\n data = self.gmail.users().messages().get(userId='me',\n id=message['id'], format='metadata').execute()\n msg = Message(gmail_id=data['id'], internal_date=\n datetime.fromtimestamp(int(data['internalDate']) / \n 1000.0), snippet=data['snippet'], subject=[x for x in\n data['payload']['headers'] if x['name'] ==\n 'Subject'][0]['value'], sender=[x for x in data[\n 'payload']['headers'] if x['name'] == 'From'][0][\n 'value'], recipient=[x for x in data['payload'][\n 'headers'] if x['name'] == 'To'][0]['value'])\n thread = Thread.query.filter_by(gmail_id=data['threadId']\n ).first()\n if not thread:\n thread = Thread(gmail_id=data['threadId'], user_id=\n self.id)\n msg.thread = thread\n db.session.add(msg)\n db.session.add(thread)\n if thread_result.get('nextPageToken'):\n next_page_token = thread_result['nextPageToken']\n else:\n db.session.commit()\n break\n\n\nclass Message(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n gmail_id = db.Column(db.Text)\n internal_date = db.Column(db.DateTime, nullable=False)\n snippet = db.Column(db.Text)\n sender = db.Column(db.Text)\n recipient = db.Column(db.Text)\n cc = db.Column(db.Text)\n bcc = db.Column(db.Text)\n subject = db.Column(db.Text)\n thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'), nullable=\n False)\n\n\nclass Thread(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n gmail_id = db.Column(db.Text)\n snippet = db.Column(db.Text)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n messages = db.relationship('Message', backref='thread', lazy='dynamic')\n",
"step-3": "<mask token>\n\n\nclass User(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.Text)\n history_id = db.Column(db.Integer)\n customer_label_id = db.Column(db.Text)\n credentials_json = db.Column(JSONB)\n threads = db.relationship('Thread', backref='user', lazy='dynamic')\n\n def __repr__(self):\n return '<User {}>'.format(self.email)\n\n @property\n def credentials(self):\n if self.credentials_json:\n return OAuth2Credentials.from_json(self.credentials_json)\n else:\n return None\n\n @credentials.setter\n def credentials(self, cred):\n if type(cred) is OAuth2Credentials:\n self.credentials_json = cred.to_json()\n else:\n self.credentials_json = cred\n\n @property\n def gmail(self):\n http = self.credentials.authorize(httplib2.Http())\n return build('gmail', 'v1', http=http)\n\n def sync_inbox(self):\n labels = self.gmail.users().labels().list(userId='me').execute()[\n 'labels']\n if len([label for label in labels if label['name'] == 'Growth']) == 0:\n raise Exception('No Growth label found')\n for label in labels:\n if label['name'] == 'Growth':\n self.customer_label_id = label['id']\n db.session.add(self)\n db.session.commit()\n next_page_token = None\n while True:\n thread_result = self.gmail.users().threads().list(userId='me',\n labelIds=self.customer_label_id, pageToken=next_page_token\n ).execute()\n for thread in thread_result['threads']:\n for message in self.gmail.users().threads().get(userId='me',\n id=thread['id']).execute()['messages']:\n data = self.gmail.users().messages().get(userId='me',\n id=message['id'], format='metadata').execute()\n msg = Message(gmail_id=data['id'], internal_date=\n datetime.fromtimestamp(int(data['internalDate']) / \n 1000.0), snippet=data['snippet'], subject=[x for x in\n data['payload']['headers'] if x['name'] ==\n 'Subject'][0]['value'], sender=[x for x in data[\n 'payload']['headers'] if x['name'] == 'From'][0][\n 'value'], recipient=[x for x in data['payload'][\n 'headers'] if x['name'] == 'To'][0]['value'])\n thread = Thread.query.filter_by(gmail_id=data['threadId']\n ).first()\n if not thread:\n thread = Thread(gmail_id=data['threadId'], user_id=\n self.id)\n msg.thread = thread\n db.session.add(msg)\n db.session.add(thread)\n if thread_result.get('nextPageToken'):\n next_page_token = thread_result['nextPageToken']\n else:\n db.session.commit()\n break\n\n\nclass Message(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n gmail_id = db.Column(db.Text)\n internal_date = db.Column(db.DateTime, nullable=False)\n snippet = db.Column(db.Text)\n sender = db.Column(db.Text)\n recipient = db.Column(db.Text)\n cc = db.Column(db.Text)\n bcc = db.Column(db.Text)\n subject = db.Column(db.Text)\n thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'), nullable=\n False)\n\n\nclass Thread(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n gmail_id = db.Column(db.Text)\n snippet = db.Column(db.Text)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n messages = db.relationship('Message', backref='thread', lazy='dynamic')\n",
"step-4": "from datetime import datetime\nimport httplib2\nfrom apiclient.discovery import build\nfrom flask_login import UserMixin\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom oauth2client.client import OAuth2Credentials\nfrom sqlalchemy.dialects.postgresql import JSONB\nfrom sqlalchemy.types import ARRAY\nfrom app import app\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\n\nclass User(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.Text)\n history_id = db.Column(db.Integer)\n customer_label_id = db.Column(db.Text)\n credentials_json = db.Column(JSONB)\n threads = db.relationship('Thread', backref='user', lazy='dynamic')\n\n def __repr__(self):\n return '<User {}>'.format(self.email)\n\n @property\n def credentials(self):\n if self.credentials_json:\n return OAuth2Credentials.from_json(self.credentials_json)\n else:\n return None\n\n @credentials.setter\n def credentials(self, cred):\n if type(cred) is OAuth2Credentials:\n self.credentials_json = cred.to_json()\n else:\n self.credentials_json = cred\n\n @property\n def gmail(self):\n http = self.credentials.authorize(httplib2.Http())\n return build('gmail', 'v1', http=http)\n\n def sync_inbox(self):\n labels = self.gmail.users().labels().list(userId='me').execute()[\n 'labels']\n if len([label for label in labels if label['name'] == 'Growth']) == 0:\n raise Exception('No Growth label found')\n for label in labels:\n if label['name'] == 'Growth':\n self.customer_label_id = label['id']\n db.session.add(self)\n db.session.commit()\n next_page_token = None\n while True:\n thread_result = self.gmail.users().threads().list(userId='me',\n labelIds=self.customer_label_id, pageToken=next_page_token\n ).execute()\n for thread in thread_result['threads']:\n for message in self.gmail.users().threads().get(userId='me',\n id=thread['id']).execute()['messages']:\n data = self.gmail.users().messages().get(userId='me',\n id=message['id'], format='metadata').execute()\n msg = Message(gmail_id=data['id'], internal_date=\n datetime.fromtimestamp(int(data['internalDate']) / \n 1000.0), snippet=data['snippet'], subject=[x for x in\n data['payload']['headers'] if x['name'] ==\n 'Subject'][0]['value'], sender=[x for x in data[\n 'payload']['headers'] if x['name'] == 'From'][0][\n 'value'], recipient=[x for x in data['payload'][\n 'headers'] if x['name'] == 'To'][0]['value'])\n thread = Thread.query.filter_by(gmail_id=data['threadId']\n ).first()\n if not thread:\n thread = Thread(gmail_id=data['threadId'], user_id=\n self.id)\n msg.thread = thread\n db.session.add(msg)\n db.session.add(thread)\n if thread_result.get('nextPageToken'):\n next_page_token = thread_result['nextPageToken']\n else:\n db.session.commit()\n break\n\n\nclass Message(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n gmail_id = db.Column(db.Text)\n internal_date = db.Column(db.DateTime, nullable=False)\n snippet = db.Column(db.Text)\n sender = db.Column(db.Text)\n recipient = db.Column(db.Text)\n cc = db.Column(db.Text)\n bcc = db.Column(db.Text)\n subject = db.Column(db.Text)\n thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'), nullable=\n False)\n\n\nclass Thread(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n gmail_id = db.Column(db.Text)\n snippet = db.Column(db.Text)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n messages = db.relationship('Message', backref='thread', lazy='dynamic')\n",
"step-5": "from datetime import datetime\nimport httplib2\n\nfrom apiclient.discovery import build\nfrom flask_login import UserMixin\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom oauth2client.client import OAuth2Credentials\nfrom sqlalchemy.dialects.postgresql import JSONB\nfrom sqlalchemy.types import ARRAY\n\nfrom app import app\n\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\nclass User(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.Text)\n history_id = db.Column(db.Integer)\n customer_label_id = db.Column(db.Text)\n credentials_json = db.Column(JSONB)\n\n threads = db.relationship('Thread', backref='user', lazy='dynamic')\n\n def __repr__(self):\n return '<User {}>'.format(self.email)\n\n @property\n def credentials(self):\n if self.credentials_json:\n return OAuth2Credentials.from_json(self.credentials_json)\n else:\n return None\n\n @credentials.setter\n def credentials(self, cred):\n if type(cred) is OAuth2Credentials:\n self.credentials_json = cred.to_json()\n else:\n self.credentials_json = cred\n\n @property\n def gmail(self):\n http = self.credentials.authorize(httplib2.Http())\n return build('gmail', 'v1', http=http)\n\n def sync_inbox(self):\n labels = self.gmail.users().labels().list(userId='me').execute()['labels']\n if len([label for label in labels if label['name'] == 'Growth']) == 0:\n raise Exception('No Growth label found')\n\n for label in labels:\n if label['name'] == 'Growth':\n self.customer_label_id = label['id']\n\n db.session.add(self)\n db.session.commit()\n\n next_page_token = None\n while True:\n thread_result = self.gmail.users().threads().list(userId='me', labelIds=self.customer_label_id, pageToken=next_page_token).execute()\n for thread in thread_result['threads']:\n\n for message in self.gmail.users().threads().get(userId='me', id=thread['id']).execute()['messages']:\n data = self.gmail.users().messages().get(userId='me', id=message['id'], format='metadata').execute()\n\n msg = Message(\n gmail_id=data['id'],\n internal_date=datetime.fromtimestamp(int(data['internalDate']) / 1e3),\n snippet=data['snippet'],\n subject=[x for x in data['payload']['headers'] if x['name'] == 'Subject'][0]['value'],\n sender=[x for x in data['payload']['headers'] if x['name'] == 'From'][0]['value'],\n recipient=[x for x in data['payload']['headers'] if x['name'] == 'To'][0]['value'],\n )\n thread = Thread.query.filter_by(gmail_id=data['threadId']).first()\n if not thread:\n thread = Thread(gmail_id=data['threadId'], user_id=self.id,)\n msg.thread = thread\n db.session.add(msg)\n db.session.add(thread)\n\n if thread_result.get('nextPageToken'):\n next_page_token = thread_result['nextPageToken']\n else:\n db.session.commit()\n break\n\n # pull history_id\n # save latest\n # setup notifications\n\n\nclass Message(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n gmail_id = db.Column(db.Text)\n internal_date = db.Column(db.DateTime, nullable=False)\n snippet = db.Column(db.Text)\n\n sender = db.Column(db.Text)\n recipient = db.Column(db.Text)\n cc = db.Column(db.Text)\n bcc = db.Column(db.Text)\n subject = db.Column(db.Text)\n\n thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'), nullable=False)\n\n\nclass Thread(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n gmail_id = db.Column(db.Text)\n snippet = db.Column(db.Text)\n\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n\n messages = db.relationship('Message', backref='thread', 
lazy='dynamic')\n",
"step-ids": [
8,
10,
11,
13,
14
]
}
|
[
8,
10,
11,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(model.summary())
<|reserved_special_token_0|>
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('MSE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_error'])
plt.plot(history.history['val_mean_absolute_error'])
plt.title('MAE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_percentage_error'])
plt.plot(history.history['val_mean_absolute_percentage_error'])
plt.title('MAPE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
X, y = dataset_maker(window=5, forecast_day=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
shuffle=False)
model = model_3((5, 8, 20, 6))
print(model.summary())
history = model.fit(X_train, y_train, validation_data=(X_test, y_test),
batch_size=5, epochs=30, verbose=2, shuffle=False)
y_pred = model.predict(X_test)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('MSE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_error'])
plt.plot(history.history['val_mean_absolute_error'])
plt.title('MAE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_percentage_error'])
plt.plot(history.history['val_mean_absolute_percentage_error'])
plt.title('MAPE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from WeatherDL.data_maker import dataset_maker
from WeatherDL.model_maker import model_3
X, y = dataset_maker(window=5, forecast_day=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
shuffle=False)
model = model_3((5, 8, 20, 6))
print(model.summary())
history = model.fit(X_train, y_train, validation_data=(X_test, y_test),
batch_size=5, epochs=30, verbose=2, shuffle=False)
y_pred = model.predict(X_test)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('MSE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_error'])
plt.plot(history.history['val_mean_absolute_error'])
plt.title('MAE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_percentage_error'])
plt.plot(history.history['val_mean_absolute_percentage_error'])
plt.title('MAPE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from WeatherDL.data_maker import dataset_maker
from WeatherDL.model_maker import model_3
# Extract data from data_maker
X, y = dataset_maker(window=5, forecast_day=1)
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.2, shuffle=False)
# Open model from model_maker
model = model_3((5, 8, 20, 6))
print(model.summary())
# Fit model, and extract training & validation metrics
history = model.fit(X_train, y_train,
validation_data=(X_test, y_test),
batch_size=5,
epochs=30,
verbose=2,
shuffle=False)
# Prediction
y_pred = model.predict(X_test)
# Data Visualization
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('MSE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_error'])
plt.plot(history.history['val_mean_absolute_error'])
plt.title('MAE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_percentage_error'])
plt.plot(history.history['val_mean_absolute_percentage_error'])
plt.title('MAPE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
|
flexible
|
{
"blob_id": "011dd579bb076ec094e9e3085aa321883c484f1c",
"index": 5296,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(model.summary())\n<mask token>\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MSE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_error'])\nplt.plot(history.history['val_mean_absolute_error'])\nplt.title('MAE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_percentage_error'])\nplt.plot(history.history['val_mean_absolute_percentage_error'])\nplt.title('MAPE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\n",
"step-3": "<mask token>\nX, y = dataset_maker(window=5, forecast_day=1)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n shuffle=False)\nmodel = model_3((5, 8, 20, 6))\nprint(model.summary())\nhistory = model.fit(X_train, y_train, validation_data=(X_test, y_test),\n batch_size=5, epochs=30, verbose=2, shuffle=False)\ny_pred = model.predict(X_test)\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MSE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_error'])\nplt.plot(history.history['val_mean_absolute_error'])\nplt.title('MAE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_percentage_error'])\nplt.plot(history.history['val_mean_absolute_percentage_error'])\nplt.title('MAPE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom WeatherDL.data_maker import dataset_maker\nfrom WeatherDL.model_maker import model_3\nX, y = dataset_maker(window=5, forecast_day=1)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n shuffle=False)\nmodel = model_3((5, 8, 20, 6))\nprint(model.summary())\nhistory = model.fit(X_train, y_train, validation_data=(X_test, y_test),\n batch_size=5, epochs=30, verbose=2, shuffle=False)\ny_pred = model.predict(X_test)\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MSE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_error'])\nplt.plot(history.history['val_mean_absolute_error'])\nplt.title('MAE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_percentage_error'])\nplt.plot(history.history['val_mean_absolute_percentage_error'])\nplt.title('MAPE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\nfrom WeatherDL.data_maker import dataset_maker\nfrom WeatherDL.model_maker import model_3\n\n# Extract data from data_maker\nX, y = dataset_maker(window=5, forecast_day=1)\n(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.2, shuffle=False)\n\n# Open model from model_maker\nmodel = model_3((5, 8, 20, 6))\nprint(model.summary())\n\n# Fit model, and extract training & validation metrics\nhistory = model.fit(X_train, y_train,\n validation_data=(X_test, y_test),\n batch_size=5,\n epochs=30,\n verbose=2,\n shuffle=False)\n\n# Prediction\ny_pred = model.predict(X_test)\n\n# Data Visualization\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MSE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_error'])\nplt.plot(history.history['val_mean_absolute_error'])\nplt.title('MAE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_percentage_error'])\nplt.plot(history.history['val_mean_absolute_percentage_error'])\nplt.title('MAPE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import os
import uuid
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from nautobot.dcim.models import Site
from nautobot.extras.choices import JobResultStatusChoices
from nautobot.extras.jobs import get_job, run_job
from nautobot.extras.models import FileAttachment, FileProxy, JobResult
from nautobot.utilities.testing import TestCase
class JobTest(TestCase):
"""
Test basic jobs to ensure importing works.
"""
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.job_content_type = ContentType.objects.get(app_label="extras", model="job")
def test_job_pass(self):
"""
Job test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_pass"
name = "TestPass"
job_class = get_job(f"local/{module}/{name}")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
def test_job_fail(self):
"""
Job test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_fail"
name = "TestFail"
job_class = get_job(f"local/{module}/{name}")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)
def test_field_order(self):
"""
Job test with field order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_field_order"
name = "TestFieldOrder"
job_class = get_job(f"local/{module}/{name}")
form = job_class().as_form()
self.assertHTMLEqual(
form.as_table(),
"""<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>""",
)
def test_no_field_order(self):
"""
Job test without field_order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_no_field_order"
name = "TestNoFieldOrder"
job_class = get_job(f"local/{module}/{name}")
form = job_class().as_form()
self.assertHTMLEqual(
form.as_table(),
"""<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>""",
)
def test_ready_only_job_pass(self):
"""
Job read only test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_read_only_pass"
name = "TestReadOnlyPass"
job_class = get_job(f"local/{module}/{name}")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted
def test_read_only_job_fail(self):
"""
Job read only test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_read_only_fail"
name = "TestReadOnlyFail"
job_class = get_job(f"local/{module}/{name}")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)
self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted
# Also ensure the standard log message about aborting the transaction is *not* present
self.assertNotEqual(
job_result.data["run"]["log"][-1][-1], "Database changes have been reverted due to error."
)
def test_read_only_no_commit_field(self):
"""
Job read only test commit field is not shown.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_read_only_no_commit_field"
name = "TestReadOnlyNoCommitField"
job_class = get_job(f"local/{module}/{name}")
form = job_class().as_form()
self.assertHTMLEqual(
form.as_table(),
"""<tr><th><label for="id_var">Var:</label></th><td>
<input class="form-control form-control" id="id_var" name="var" placeholder="None" required type="text">
<br><span class="helptext">Hello</span><input id="id__commit" name="_commit" type="hidden" value="False"></td></tr>""",
)
def test_ip_address_vars(self):
"""
Test that IPAddress variable fields behave as expected.
This test case exercises the following types for both IPv4 and IPv6:
- IPAddressVar
- IPAddressWithMaskVar
- IPNetworkVar
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_ipaddress_vars"
name = "TestIPAddresses"
job_class = get_job(f"local/{module}/{name}")
# Fill out the form
form_data = dict(
ipv4_address="1.2.3.4",
ipv4_with_mask="1.2.3.4/32",
ipv4_network="1.2.3.0/24",
ipv6_address="2001:db8::1",
ipv6_with_mask="2001:db8::1/64",
ipv6_network="2001:db8::/64",
)
form = job_class().as_form(form_data)
self.assertTrue(form.is_valid())
# Prepare the job data
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
data = job_class.serialize_data(form.cleaned_data)
# Run the job and extract the job payload data
run_job(data=data, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
job_payload = job_result.data["run"]["log"][0][2] # Indexing makes me sad.
job_result_data = json.loads(job_payload)
# Assert stuff
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
self.assertEqual(form_data, job_result_data)
class JobFileUploadTest(TestCase):
"""Test a job that uploads/deletes files."""
@classmethod
def setUpTestData(cls):
cls.file_contents = b"I am content.\n"
cls.dummy_file = SimpleUploadedFile(name="dummy.txt", content=cls.file_contents)
cls.job_content_type = ContentType.objects.get(app_label="extras", model="job")
def setUp(self):
self.dummy_file.seek(0) # Reset cursor so we can read it again.
def test_run_job_pass(self):
"""Test that file upload succeeds; job SUCCEEDS; and files are deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
job_name = "local/test_file_upload_pass/TestFileUploadPass"
job_class = get_job(job_name)
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
# Serialize the file to FileProxy
data = {"file": self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
# Assert that the file was serialized to a FileProxy
self.assertTrue(isinstance(serialized_data["file"], uuid.UUID))
self.assertEqual(serialized_data["file"], FileProxy.objects.latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
# Run the job
run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
# Assert that file contents were correctly read
self.assertEqual(
job_result.data["run"]["log"][0][2], f"File contents: {self.file_contents}" # "File contents: ..."
)
# Assert that FileProxy was cleaned up
self.assertEqual(FileProxy.objects.count(), 0)
def test_run_job_fail(self):
"""Test that file upload succeeds; job FAILS; files deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
job_name = "local/test_file_upload_fail/TestFileUploadFail"
job_class = get_job(job_name)
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
# Serialize the file to FileProxy
data = {"file": self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
# Assert that the file was serialized to a FileProxy
self.assertTrue(isinstance(serialized_data["file"], uuid.UUID))
self.assertEqual(serialized_data["file"], FileProxy.objects.latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
# Run the job
run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
# Assert that file contents were correctly read
self.assertEqual(
job_result.data["run"]["log"][0][2], f"File contents: {self.file_contents}" # "File contents: ..."
)
# Also ensure the standard log message about aborting the transaction is present
self.assertEqual(job_result.data["run"]["log"][-1][-1], "Database changes have been reverted due to error.")
# Assert that FileProxy was cleaned up
self.assertEqual(FileProxy.objects.count(), 0)
|
normal
|
{
"blob_id": "d2298ad1e4737b983ba6d1f2fff59750137510b5",
"index": 904,
"step-1": "<mask token>\n\n\nclass JobTest(TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n <mask token>\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n <mask token>\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n <mask token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = 
job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"step-2": "<mask token>\n\n\nclass JobTest(TestCase):\n <mask token>\n <mask token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_pass'\n name = 'TestPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n <mask token>\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n 
job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_ipaddress_vars'\n name = 'TestIPAddresses'\n job_class = get_job(f'local/{module}/{name}')\n form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n ipv6_network='2001:db8::/64')\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = job_class.serialize_data(form.cleaned_data)\n run_job(data=data, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data['run']['log'][0][2]\n job_result_data = json.loads(job_payload)\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 
'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"step-3": "<mask token>\n\n\nclass JobTest(TestCase):\n <mask token>\n <mask token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_pass'\n name = 'TestPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database 
(uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_ipaddress_vars'\n name = 'TestIPAddresses'\n job_class = get_job(f'local/{module}/{name}')\n form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n ipv6_network='2001:db8::/64')\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = job_class.serialize_data(form.cleaned_data)\n run_job(data=data, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data['run']['log'][0][2]\n job_result_data = json.loads(job_payload)\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes 
files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"step-4": "<mask token>\n\n\nclass JobTest(TestCase):\n <mask token>\n maxDiff = None\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_pass'\n name = 'TestPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database 
(uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_ipaddress_vars'\n name = 'TestIPAddresses'\n job_class = get_job(f'local/{module}/{name}')\n form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n ipv6_network='2001:db8::/64')\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = job_class.serialize_data(form.cleaned_data)\n run_job(data=data, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data['run']['log'][0][2]\n job_result_data = json.loads(job_payload)\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes 
files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"step-5": "import json\nimport os\nimport uuid\n\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom nautobot.dcim.models import Site\nfrom nautobot.extras.choices import JobResultStatusChoices\nfrom nautobot.extras.jobs import get_job, run_job\nfrom nautobot.extras.models import FileAttachment, FileProxy, JobResult\nfrom nautobot.utilities.testing import TestCase\n\n\nclass JobTest(TestCase):\n \"\"\"\n Test basic jobs to ensure importing works.\n \"\"\"\n\n maxDiff = None\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label=\"extras\", model=\"job\")\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_pass\"\n name = \"TestPass\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_fail\"\n name = \"TestFail\"\n job_class = get_job(f\"local/{module}/{name}\")\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_field_order\"\n name = \"TestFieldOrder\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n form = job_class().as_form()\n\n self.assertHTMLEqual(\n form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\",\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_no_field_order\"\n name = \"TestNoFieldOrder\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n form = job_class().as_form()\n\n self.assertHTMLEqual(\n form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" 
name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\",\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_read_only_pass\"\n name = \"TestReadOnlyPass\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_read_only_fail\"\n name = \"TestReadOnlyFail\"\n job_class = get_job(f\"local/{module}/{name}\")\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted\n # Also ensure the standard log message about aborting the transaction is *not* present\n self.assertNotEqual(\n job_result.data[\"run\"][\"log\"][-1][-1], \"Database changes have been reverted due to error.\"\n )\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_read_only_no_commit_field\"\n name = \"TestReadOnlyNoCommitField\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n form = job_class().as_form()\n\n self.assertHTMLEqual(\n form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\",\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_ipaddress_vars\"\n name = \"TestIPAddresses\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n # Fill out the form\n form_data = dict(\n ipv4_address=\"1.2.3.4\",\n 
ipv4_with_mask=\"1.2.3.4/32\",\n ipv4_network=\"1.2.3.0/24\",\n ipv6_address=\"2001:db8::1\",\n ipv6_with_mask=\"2001:db8::1/64\",\n ipv6_network=\"2001:db8::/64\",\n )\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n\n # Prepare the job data\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n data = job_class.serialize_data(form.cleaned_data)\n\n # Run the job and extract the job payload data\n run_job(data=data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data[\"run\"][\"log\"][0][2] # Indexing makes me sad.\n job_result_data = json.loads(job_payload)\n\n # Assert stuff\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b\"I am content.\\n\"\n cls.dummy_file = SimpleUploadedFile(name=\"dummy.txt\", content=cls.file_contents)\n cls.job_content_type = ContentType.objects.get(app_label=\"extras\", model=\"job\")\n\n def setUp(self):\n self.dummy_file.seek(0) # Reset cursor so we can read it again.\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n job_name = \"local/test_file_upload_pass/TestFileUploadPass\"\n job_class = get_job(job_name)\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n # Serialize the file to FileProxy\n data = {\"file\": self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n\n # Assert that the file was serialized to a FileProxy\n self.assertTrue(isinstance(serialized_data[\"file\"], uuid.UUID))\n self.assertEqual(serialized_data[\"file\"], FileProxy.objects.latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n\n # Run the job\n run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n\n # Assert that file contents were correctly read\n self.assertEqual(\n job_result.data[\"run\"][\"log\"][0][2], f\"File contents: {self.file_contents}\" # \"File contents: ...\"\n )\n\n # Assert that FileProxy was cleaned up\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n job_name = \"local/test_file_upload_fail/TestFileUploadFail\"\n job_class = get_job(job_name)\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n # Serialize the file to FileProxy\n data = {\"file\": self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n\n # Assert that the file was serialized to a FileProxy\n self.assertTrue(isinstance(serialized_data[\"file\"], uuid.UUID))\n self.assertEqual(serialized_data[\"file\"], FileProxy.objects.latest().pk)\n 
self.assertEqual(FileProxy.objects.count(), 1)\n\n # Run the job\n run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n\n # Assert that file contents were correctly read\n self.assertEqual(\n job_result.data[\"run\"][\"log\"][0][2], f\"File contents: {self.file_contents}\" # \"File contents: ...\"\n )\n # Also ensure the standard log message about aborting the transaction is present\n self.assertEqual(job_result.data[\"run\"][\"log\"][-1][-1], \"Database changes have been reverted due to error.\")\n\n # Assert that FileProxy was cleaned up\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"step-ids": [
10,
15,
16,
17,
20
]
}
|
[
10,
15,
16,
17,
20
] |
# © MNELAB developers
#
# License: BSD (3-clause)
from .dependencies import have
from .syntax import PythonHighlighter
from .utils import count_locations, image_path, interface_style, natural_sort
|
normal
|
{
"blob_id": "837534ebc953dae966154921709398ab2b2e0b33",
"index": 578,
"step-1": "<mask token>\n",
"step-2": "from .dependencies import have\nfrom .syntax import PythonHighlighter\nfrom .utils import count_locations, image_path, interface_style, natural_sort\n",
"step-3": "# © MNELAB developers\n#\n# License: BSD (3-clause)\n\nfrom .dependencies import have\nfrom .syntax import PythonHighlighter\nfrom .utils import count_locations, image_path, interface_style, natural_sort\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
tej="votary"
for i in range(5):
print(tej[i])
|
normal
|
{
"blob_id": "1f385fda1bdc0008ff91b935998c95c8ffcbd297",
"index": 2797,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(5):\n print(tej[i])\n",
"step-3": "tej = 'votary'\nfor i in range(5):\n print(tej[i])\n",
"step-4": "tej=\"votary\"\nfor i in range(5):\n\tprint(tej[i])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django import forms
class ListingForm(forms.Form):
text = forms.CharField(
max_length=50,
widget=forms.TextInput(
attrs={"class": "form-control", "placeholder": "Things to Buy"}
),
)
|
normal
|
{
"blob_id": "3f23a50f44ba17c9b0241a4e3b0e939afeb1f5f0",
"index": 3092,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ListingForm(forms.Form):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ListingForm(forms.Form):\n text = forms.CharField(max_length=50, widget=forms.TextInput(attrs={\n 'class': 'form-control', 'placeholder': 'Things to Buy'}))\n",
"step-4": "from django import forms\n\n\nclass ListingForm(forms.Form):\n text = forms.CharField(max_length=50, widget=forms.TextInput(attrs={\n 'class': 'form-control', 'placeholder': 'Things to Buy'}))\n",
"step-5": "from django import forms\n\n\nclass ListingForm(forms.Form):\n text = forms.CharField(\n max_length=50,\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\", \"placeholder\": \"Things to Buy\"}\n ),\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for x in data:
if x < min:
min = x
print(min)
<|reserved_special_token_1|>
data = [5, 6, 2, 8, 9, 1]
min = 10
for x in data:
if x < min:
min = x
print(min)
<|reserved_special_token_1|>
# Representing data with a dictionary
# sales = {'hong':0,'lee':0,'park':0}
# d = {'z':10, 'b':20,'c':30}
# print(d)
# d.pop('b')
# print(d)
# d['f']=40
# print(d)
# d.pop('z')
# d['z'] = 40
# print(d.keys())
# Loop (while)
# Runs while the condition is true
#while True:
# print('python!!!')
# a = 0
# while a < 10:
# a += 1
# print(a)
# a = 0
# while True:
# a +=1
# print(a)
# if a>=10:
# break
# Print the number once the sum of values increasing by 1 from 1 exceeds 1000
# a = 0  # number that increases by 1
# s = 0  # variable that stores the sum
# while True:
# a +=1
# s +=a
# if s>=1000: break
#
# print('Accumulated value:', s)
# print('Last number:', a)
# Exercise) Read a number from the user and print it
# Exit the program when the user enters 0
#1)
# while True:
#     num=int(input('Number?'))
#     if num ==0: break
#     print('Entered number:', num)
#
# #2)
# #num = 1
#
# # Exit the loop when the user enters q
# s=0
# while True:
#     num = input('Number (q to quit)?')
# if num=='q': break
# s +=int(num)
#
# print('Running total', s)
# Exercise 4
# Build a calculator program that takes two numbers and an operator as input.
# The calculator exits when the user enters q
# while True:
#     num = input('Enter the first number (q to quit)')
#     if num=='num': break
#     num1 = input('Enter the second number (q to quit)')
#     sign = input('Operator?')
#     if sign =='+':
#         print('Addition',num+num1)
#     elif sign =='-':
#         print('Subtraction:',num-num1)
#     elif sign == '*':
#         print('Multiplication:', num * num1)
#     elif sign == '/':
#         print('Division:', num / num1)
#     else:
#         print('Invalid operator')
#  while True:
#     a = input('first:')
#     b = input('second:')
#     sign = input('sign:')
#     if sign == '+':
#         print('Addition:', a+b)
#
#     if sign == '-':
#         print('Subtraction:', a-b)
#2)
# while True:
#     cal = input('Expression?').split()
#     #print(cal)
#     if cal[0]=='q': break
#     a,sign,b = cal # unpacking
#     a=int(a); b =int(b)
#     if sign == '+':
#         print('Addition', a + b)
#     elif sign == '-':
#         print('Subtraction:', a - b)
#     elif sign == '*':
#         print('Multiplication:', a * b)
#     elif sign == '/':
#         print('Division:', a / b)
#     else:
#         print('Invalid operator')
# Exercise) Find the largest number
# data=[5,6,2,8,9,1]
# max = 0
# for x in data:
# if x > max:
# max=x
#
# print(max)
# Exercise) Find the smallest number
data=[5,6,2,8,9,1]
min=10
for x in data:
if x < min:
min=x
print(min)
|
flexible
|
{
"blob_id": "38bd18e9c1d17f25c10321ab561372eed58e8abc",
"index": 4243,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in data:\n if x < min:\n min = x\nprint(min)\n",
"step-3": "data = [5, 6, 2, 8, 9, 1]\nmin = 10\nfor x in data:\n if x < min:\n min = x\nprint(min)\n",
"step-4": "#딕셔너리로 데이터 표현\n# sales = {'hong':0,'lee':0,'park':0}\n# d = {'z':10, 'b':20,'c':30}\n# print(d)\n# d.pop('b')\n# print(d)\n# d['f']=40\n# print(d)\n# d.pop('z')\n# d['z'] = 40\n# print(d.keys())\n#반복문(while)\n#조건이 참일동안 수행\n#while True:\n# print('python!!!')\n\n\n# a = 0\n# while a < 10:\n# a += 1\n# print(a)\n\n# a = 0\n# while True:\n# a +=1\n# print(a)\n# if a>=10:\n# break\n\n#1부터 1씩증가하는 숫자의 합이 1000초과시 숫자 출력\n# a = 0 #1씩 증가하는 수\n# s = 0 #합계를 저장할 변수\n# while True:\n# a +=1\n# s +=a\n# if s>=1000: break\n#\n# print('누적값:' , s)\n# print('마지막숫자:', a)\n\n#실습)사용자에게 숫자를 입력을 받아서 출력\n\n#사용자가 0을 입력하면 프로그램 종료\n#1)\n# while True:\n# num=int(input('숫자는?'))\n# if num ==0: break\n# print('입력숫자:', num)\n#\n# #2)\n# #num = 1\n#\n# #사용자가 q를 입력하면 반복문 종료\n# s=0\n# while True:\n# num = input('숫자는(q:종료)?')\n# if num=='q': break\n# s +=int(num)\n#\n# print('누적합계', s)\n\n#실습4\n#숫자 두 개와 기호를 입력 받아 계산기 프로그램을 만들어 봅시다.\n#단, 사용자가 q를 입력하면 계산기 종료\n# while True:\n# num = input('첫 번째 숫자 입력(q:종료)')\n# if num=='num': break\n# num1 = input('두 번째 숫자 입력(q:종료)')\n# sign = input('기호는?')\n# if sign =='+':\n# print('더하기',num+num1)\n# elif sign =='-':\n# print('빼기:',num-num1)\n# elif sign == '*':\n# print('곱하기:', num * num1)\n# elif sign == '/':\n# print('나누기:', num / num1)\n# else:\n# print('잘못된 기호')\n\n\n # while True:\n# a = input('first:')\n# b = input('second:')\n# sign = input('sign:')\n# if sign == '+':\n# print('더하기:', a+b)\n#\n# if sign == '-':\n# print('빼기:', a-b)\n\n#2)\n# while True:\n# cal = input('계산식은?').split()\n# #print(cal)\n# if cal[0]=='q': break\n# a,sign,b = cal #언패킹\n# a=int(a); b =int(b)\n# if sign == '+':\n# print('더하기', a + b)\n# elif sign == '-':\n# print('빼기:', a - b)\n# elif sign == '*':\n# print('곱하기:', a * b)\n# elif sign == '/':\n# print('나누기:', a / b)\n# else:\n# print('잘못된 기호')\n\n#실습) 가장 큰수 찾기\n# data=[5,6,2,8,9,1]\n# max = 0\n# for x in data:\n# if x > max:\n# max=x\n#\n# print(max)\n\n#실습) 가장 작은수 찾기\ndata=[5,6,2,8,9,1]\nmin=10\nfor x in data:\n if x < min:\n min=x\nprint(min)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_cave_details(details):
aliquotQuadrantID = Literal('NE') | Literal('SE') | Literal('SW'
) | Literal('NW')
aliquotQuadrantString = aliquotQuadrantID + Suppress('1/4')
aliquotHalfString = oneOf('N E S W') + Suppress('1/2')
aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString)
).setResultsName('aliquot').setParseAction(lambda kwd: ' '.join(kwd[0])
)
sectionToken = Suppress(oneOf('S s') + Literal('ec') + Optional('.'))
sectionNumber = Word(nums)
section = Group(sectionToken + sectionNumber + ZeroOrMore(Suppress('or'
) + sectionNumber)).setResultsName('section')
afterEndOfCaveName = (aliquotHalfString | aliquotQuadrantString |
sectionToken)
caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables))
).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))
townshipDirection = oneOf('N S').setResultsName('direction')
townshipNumber = Word(nums).setResultsName('number')
township = Suppress('T.') + Group(townshipNumber + townshipDirection
).setResultsName('township') + Suppress('.')
rangeDirection = oneOf('E W').setResultsName('direction')
rangeNumber = Word(nums).setResultsName('number')
range_info = Suppress('R.') + Group(rangeNumber + rangeDirection
).setResultsName('range') + Suppress('.')
countyKeyword = Literal('County')
countyName = Group(OneOrMore(~countyKeyword + Word(alphas + "-'."))
).setResultsName('county').setParseAction(lambda c: ' '.join(c[0]))
county = countyName + Suppress('County')
notShownOnQuad = (Literal('Not') + Suppress('s')).setParseAction(lambda
x: False)
shownOnQuad = Literal('S').setParseAction(lambda x: True)
onKeyword = Literal('on')
mapAlias = Group(OneOrMore(~onKeyword + Word(printables))).setParseAction(
lambda alias: ' '.join(alias[0])).setResultsName('alias')
quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(
'is_on_map') + Suppress('hown') + Optional(Suppress('as') + mapAlias
) + Suppress(onKeyword)
quadrangleKeyword = Literal('Quadrangle') + Literal('map')
quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas + "-'."))
).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))
quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName('quad'
) + Suppress(quadrangleKeyword)
description = Group(ZeroOrMore(Word(alphanums + printables))
).setResultsName('description').setParseAction(lambda desc: ' '.
join(desc[0]))
location = caveName + aliquotPart + section + Suppress(','
) + township + Suppress(',') + range_info + Suppress(','
) + county + quadrangle + description
return location.parseString(details)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_cave_details(details):
aliquotQuadrantID = Literal('NE') | Literal('SE') | Literal('SW'
) | Literal('NW')
aliquotQuadrantString = aliquotQuadrantID + Suppress('1/4')
aliquotHalfString = oneOf('N E S W') + Suppress('1/2')
aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString)
).setResultsName('aliquot').setParseAction(lambda kwd: ' '.join(kwd[0])
)
sectionToken = Suppress(oneOf('S s') + Literal('ec') + Optional('.'))
sectionNumber = Word(nums)
section = Group(sectionToken + sectionNumber + ZeroOrMore(Suppress('or'
) + sectionNumber)).setResultsName('section')
afterEndOfCaveName = (aliquotHalfString | aliquotQuadrantString |
sectionToken)
caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables))
).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))
townshipDirection = oneOf('N S').setResultsName('direction')
townshipNumber = Word(nums).setResultsName('number')
township = Suppress('T.') + Group(townshipNumber + townshipDirection
).setResultsName('township') + Suppress('.')
rangeDirection = oneOf('E W').setResultsName('direction')
rangeNumber = Word(nums).setResultsName('number')
range_info = Suppress('R.') + Group(rangeNumber + rangeDirection
).setResultsName('range') + Suppress('.')
countyKeyword = Literal('County')
countyName = Group(OneOrMore(~countyKeyword + Word(alphas + "-'."))
).setResultsName('county').setParseAction(lambda c: ' '.join(c[0]))
county = countyName + Suppress('County')
notShownOnQuad = (Literal('Not') + Suppress('s')).setParseAction(lambda
x: False)
shownOnQuad = Literal('S').setParseAction(lambda x: True)
onKeyword = Literal('on')
mapAlias = Group(OneOrMore(~onKeyword + Word(printables))).setParseAction(
lambda alias: ' '.join(alias[0])).setResultsName('alias')
quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(
'is_on_map') + Suppress('hown') + Optional(Suppress('as') + mapAlias
) + Suppress(onKeyword)
quadrangleKeyword = Literal('Quadrangle') + Literal('map')
quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas + "-'."))
).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))
quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName('quad'
) + Suppress(quadrangleKeyword)
description = Group(ZeroOrMore(Word(alphanums + printables))
).setResultsName('description').setParseAction(lambda desc: ' '.
join(desc[0]))
location = caveName + aliquotPart + section + Suppress(','
) + township + Suppress(',') + range_info + Suppress(','
) + county + quadrangle + description
return location.parseString(details)
if __name__ == '__main__':
if len(sys.argv) < 2:
print('ERROR: pass in the filename as the second argument.')
print(' $ python {0} /path/to/file.txt'.format(sys.argv[0]))
exit()
filepath = sys.argv[1]
with open(filepath) as f:
raw_text = f.read()
raw_caves = raw_text.split('\n')
caves = []
for raw_cave_text in raw_caves:
raw_cave_text = raw_cave_text.strip()
if raw_cave_text:
try:
cave = parse_cave_details(raw_cave_text)
caves.append({'Cave name': cave.name, 'Alias': cave.quad.
alias, 'On map': cave.quad.is_on_map, 'Quad': cave.quad
.name, 'County': cave.county, 'State': 'MO',
'Principal Meridian Code': 5, 'Township Number': cave.
township.number, 'Township Fraction': 0,
'Township Direction': cave.township.direction,
'Range Number': cave.range.number, 'Range Fraction': 0,
'Range Direction': cave.range.direction, 'Section':
cave.section[0], 'Section Division': ''.join(cave.
aliquot), 'Township Duplicate': 0, 'Description':
raw_cave_text})
except:
print('=' * 80)
print('ERROR: unexpected format for {0}'.format(cave.name))
print(raw_cave_text)
import traceback
print(traceback.format_exc())
print('\t' + '\n\t'.join([str(x) for x in sys.exc_info()]))
print('Skipping this cave for the next one')
else:
sections = ' or '.join(cave.section)
output_path = os.path.basename(filepath).split('.')[0] + '.csv'
print('#' * 80)
print("{0} caves processed! Saving to '{1}'.".format(len(caves),
output_path))
with open(output_path, 'wb') as f:
cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())
try:
cave_csv.writeheader()
except:
header = {}
for k in caves[0].keys():
header[k] = k
cave_csv.writerow(header)
cave_csv.writerows(caves)
<|reserved_special_token_1|>
import sys
import os
from pyparsing import *
import csv
def parse_cave_details(details):
aliquotQuadrantID = Literal('NE') | Literal('SE') | Literal('SW'
) | Literal('NW')
aliquotQuadrantString = aliquotQuadrantID + Suppress('1/4')
aliquotHalfString = oneOf('N E S W') + Suppress('1/2')
aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString)
).setResultsName('aliquot').setParseAction(lambda kwd: ' '.join(kwd[0])
)
sectionToken = Suppress(oneOf('S s') + Literal('ec') + Optional('.'))
sectionNumber = Word(nums)
section = Group(sectionToken + sectionNumber + ZeroOrMore(Suppress('or'
) + sectionNumber)).setResultsName('section')
afterEndOfCaveName = (aliquotHalfString | aliquotQuadrantString |
sectionToken)
caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables))
).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))
townshipDirection = oneOf('N S').setResultsName('direction')
townshipNumber = Word(nums).setResultsName('number')
township = Suppress('T.') + Group(townshipNumber + townshipDirection
).setResultsName('township') + Suppress('.')
rangeDirection = oneOf('E W').setResultsName('direction')
rangeNumber = Word(nums).setResultsName('number')
range_info = Suppress('R.') + Group(rangeNumber + rangeDirection
).setResultsName('range') + Suppress('.')
countyKeyword = Literal('County')
countyName = Group(OneOrMore(~countyKeyword + Word(alphas + "-'."))
).setResultsName('county').setParseAction(lambda c: ' '.join(c[0]))
county = countyName + Suppress('County')
notShownOnQuad = (Literal('Not') + Suppress('s')).setParseAction(lambda
x: False)
shownOnQuad = Literal('S').setParseAction(lambda x: True)
onKeyword = Literal('on')
mapAlias = Group(OneOrMore(~onKeyword + Word(printables))).setParseAction(
lambda alias: ' '.join(alias[0])).setResultsName('alias')
quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(
'is_on_map') + Suppress('hown') + Optional(Suppress('as') + mapAlias
) + Suppress(onKeyword)
quadrangleKeyword = Literal('Quadrangle') + Literal('map')
quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas + "-'."))
).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))
quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName('quad'
) + Suppress(quadrangleKeyword)
description = Group(ZeroOrMore(Word(alphanums + printables))
).setResultsName('description').setParseAction(lambda desc: ' '.
join(desc[0]))
location = caveName + aliquotPart + section + Suppress(','
) + township + Suppress(',') + range_info + Suppress(','
) + county + quadrangle + description
return location.parseString(details)
if __name__ == '__main__':
if len(sys.argv) < 2:
print('ERROR: pass in the filename as the second argument.')
print(' $ python {0} /path/to/file.txt'.format(sys.argv[0]))
exit()
filepath = sys.argv[1]
with open(filepath) as f:
raw_text = f.read()
raw_caves = raw_text.split('\n')
caves = []
for raw_cave_text in raw_caves:
raw_cave_text = raw_cave_text.strip()
if raw_cave_text:
try:
cave = parse_cave_details(raw_cave_text)
caves.append({'Cave name': cave.name, 'Alias': cave.quad.
alias, 'On map': cave.quad.is_on_map, 'Quad': cave.quad
.name, 'County': cave.county, 'State': 'MO',
'Principal Meridian Code': 5, 'Township Number': cave.
township.number, 'Township Fraction': 0,
'Township Direction': cave.township.direction,
'Range Number': cave.range.number, 'Range Fraction': 0,
'Range Direction': cave.range.direction, 'Section':
cave.section[0], 'Section Division': ''.join(cave.
aliquot), 'Township Duplicate': 0, 'Description':
raw_cave_text})
except:
print('=' * 80)
print('ERROR: unexpected format for {0}'.format(cave.name))
print(raw_cave_text)
import traceback
print(traceback.format_exc())
print('\t' + '\n\t'.join([str(x) for x in sys.exc_info()]))
print('Skipping this cave for the next one')
else:
sections = ' or '.join(cave.section)
output_path = os.path.basename(filepath).split('.')[0] + '.csv'
print('#' * 80)
print("{0} caves processed! Saving to '{1}'.".format(len(caves),
output_path))
with open(output_path, 'wb') as f:
cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())
try:
cave_csv.writeheader()
except:
header = {}
for k in caves[0].keys():
header[k] = k
cave_csv.writerow(header)
cave_csv.writerows(caves)
<|reserved_special_token_1|>
import sys
import os
from pyparsing import *
import csv
def parse_cave_details(details):
##########################################################################
# Define the Bretz Grammar.
# Sample cave description:
# Boring Caverns SE1/4 NW1/4 sec. 16, T. 37 N., R. 10 W., Pulaski County Not shown on Waynesville Quadrangle map The mouth of this cave ...\n
# Another Cave S1/2 sec. 15, T. 36 N., R. 12 W., Pulaski County Not shown on Waynesville Quadrangle map There are two large caves...\n
# Something Bridge Sec. 15 or 22, T. 36 N., R. 13 W., Pulaski County Not shown on Richland Quadrangle map This cave is near Ozark...\n
#
# CAVE ::= CAVE_NAME [ALIQUOT_PART] SECTION, TOWNSHIP, RANGE, COUNTY QUAD_MAP DESCRIPTION
# ALIQUOT_PART ::= (((NE|SE|SW|NW)1/4)|((N|E|S|W)1/2))*
# SECTION ::= (S|s)ec. num+
# TOWNSHIP ::= T. num+ TOWNSHIP_DIR.
# TOWNSHIP_DIR ::= N|S
# RANGE ::= R. num+ RANGE_DIR.
# RANGE_DIR ::= E|W
# COUNTY = WORD+ County
# QUAD_MAP = (Not s|S)hown on QUAD Quadrangle map
# QUAD = WORD+
# DESCRIPTION = WORD+
aliquotQuadrantID = Literal("NE") |\
Literal("SE") |\
Literal("SW") |\
Literal("NW")
aliquotQuadrantString = aliquotQuadrantID + Suppress("1/4")
aliquotHalfString = oneOf("N E S W") + Suppress("1/2")
aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString))\
.setResultsName("aliquot")\
.setParseAction(lambda kwd: " ".join(kwd[0]))
sectionToken = Suppress(oneOf("S s") + Literal("ec") + Optional("."))
sectionNumber = Word(nums)
section = Group(
sectionToken \
+ sectionNumber \
+ ZeroOrMore(Suppress("or") + sectionNumber)
).setResultsName("section")
afterEndOfCaveName = aliquotHalfString | aliquotQuadrantString | sectionToken
caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables)))\
.setResultsName('name')\
.setParseAction(lambda name: " ".join(name[0]))
townshipDirection = oneOf("N S").setResultsName("direction")
townshipNumber = Word(nums).setResultsName("number")
township = Suppress("T.") \
+ Group(townshipNumber + townshipDirection).setResultsName("township")\
+ Suppress('.')
rangeDirection = oneOf("E W").setResultsName("direction")
rangeNumber = Word(nums).setResultsName("number")
range_info = Suppress("R.") \
+ Group(rangeNumber + rangeDirection).setResultsName("range")\
+ Suppress('.')
countyKeyword = Literal("County")
countyName = Group(OneOrMore(~countyKeyword + Word(alphas+"-'.")))\
.setResultsName("county")\
.setParseAction(lambda c: " ".join(c[0]))
county = countyName + Suppress("County")
notShownOnQuad = (Literal("Not") + Suppress("s"))\
.setParseAction(lambda x: False)
shownOnQuad = Literal("S").setParseAction(lambda x: True)
onKeyword = Literal("on")
mapAlias = Group(OneOrMore(~onKeyword + Word(printables)))\
.setParseAction(lambda alias: " ".join(alias[0]))\
.setResultsName("alias")
quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName("is_on_map")\
+ Suppress("hown") \
+ Optional(Suppress('as') + mapAlias)\
+ Suppress(onKeyword)
quadrangleKeyword = Literal("Quadrangle") + Literal("map")
quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas+"-'.")))\
.setResultsName("name")\
.setParseAction(lambda name: " ".join(name[0]))
quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName("quad") \
+ Suppress(quadrangleKeyword)
description = Group(ZeroOrMore(Word(alphanums + printables)))\
.setResultsName("description")\
.setParseAction(lambda desc: " ".join(desc[0]))
location = caveName \
+ aliquotPart \
+ section + Suppress(',') \
+ township + Suppress(',') \
+ range_info + Suppress(',')\
+ county \
+ quadrangle \
+ description
return location.parseString(details)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("ERROR: pass in the filename as the second argument.")
print(" $ python {0} /path/to/file.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
with open(filepath) as f:
raw_text = f.read()
raw_caves = raw_text.split("\n")
caves = []
for raw_cave_text in raw_caves:
raw_cave_text = raw_cave_text.strip()
if raw_cave_text:
try:
cave = parse_cave_details(raw_cave_text)
caves.append({
'Cave name': cave.name,
'Alias': cave.quad.alias,
'On map': cave.quad.is_on_map,
'Quad': cave.quad.name,
'County': cave.county,
'State': 'MO',
'Principal Meridian Code': 5,
'Township Number': cave.township.number,
'Township Fraction': 0,
'Township Direction': cave.township.direction,
'Range Number': cave.range.number,
'Range Fraction': 0,
'Range Direction': cave.range.direction,
'Section': cave.section[0],
'Section Division': "".join(cave.aliquot),
'Township Duplicate': 0,
'Description': raw_cave_text,
})
except:
print("="*80)
print("ERROR: unexpected format for {0}".format(cave.name))
print(raw_cave_text)
import traceback
print(traceback.format_exc())
print("\t" + "\n\t".join([str(x) for x in sys.exc_info()]))
print("Skipping this cave for the next one")
else:
sections = " or ".join(cave.section)
#print("="*80)
#print("{1} := {0.aliquot} Sect. {2}, T. {0.township.number} {0.township.direction}., R. {0.range.number} {0.range.direction}., in {0.county} County on the {0.quad.name} quad map.".format(cave, cave.name, sections))
#print(" Marked on map as {0}".format(cave.quad.alias if cave.quad.alias else cave.name) if cave.quad.is_on_map else " Not on map")
output_path = os.path.basename(filepath).split(".")[0] + ".csv"
print("#"*80)
print("{0} caves processed! Saving to '{1}'.".format(len(caves), output_path))
with open(output_path, 'wb') as f:
cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())
try:
cave_csv.writeheader()
except: # Versions before 2.7 of Python do not have csv with writeheader().
header = {}
for k in caves[0].keys():
header[k] = k
cave_csv.writerow(header)
cave_csv.writerows(caves)
|
flexible
|
{
"blob_id": "1fc1d2e1a7d18b1ef8ee6396210afe47a63ab09f",
"index": 3267,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_cave_details(details):\n aliquotQuadrantID = Literal('NE') | Literal('SE') | Literal('SW'\n ) | Literal('NW')\n aliquotQuadrantString = aliquotQuadrantID + Suppress('1/4')\n aliquotHalfString = oneOf('N E S W') + Suppress('1/2')\n aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString)\n ).setResultsName('aliquot').setParseAction(lambda kwd: ' '.join(kwd[0])\n )\n sectionToken = Suppress(oneOf('S s') + Literal('ec') + Optional('.'))\n sectionNumber = Word(nums)\n section = Group(sectionToken + sectionNumber + ZeroOrMore(Suppress('or'\n ) + sectionNumber)).setResultsName('section')\n afterEndOfCaveName = (aliquotHalfString | aliquotQuadrantString |\n sectionToken)\n caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n townshipDirection = oneOf('N S').setResultsName('direction')\n townshipNumber = Word(nums).setResultsName('number')\n township = Suppress('T.') + Group(townshipNumber + townshipDirection\n ).setResultsName('township') + Suppress('.')\n rangeDirection = oneOf('E W').setResultsName('direction')\n rangeNumber = Word(nums).setResultsName('number')\n range_info = Suppress('R.') + Group(rangeNumber + rangeDirection\n ).setResultsName('range') + Suppress('.')\n countyKeyword = Literal('County')\n countyName = Group(OneOrMore(~countyKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('county').setParseAction(lambda c: ' '.join(c[0]))\n county = countyName + Suppress('County')\n notShownOnQuad = (Literal('Not') + Suppress('s')).setParseAction(lambda\n x: False)\n shownOnQuad = Literal('S').setParseAction(lambda x: True)\n onKeyword = Literal('on')\n mapAlias = Group(OneOrMore(~onKeyword + Word(printables))).setParseAction(\n lambda alias: ' '.join(alias[0])).setResultsName('alias')\n quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(\n 'is_on_map') + Suppress('hown') + Optional(Suppress('as') + mapAlias\n ) + Suppress(onKeyword)\n quadrangleKeyword = Literal('Quadrangle') + Literal('map')\n quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName('quad'\n ) + Suppress(quadrangleKeyword)\n description = Group(ZeroOrMore(Word(alphanums + printables))\n ).setResultsName('description').setParseAction(lambda desc: ' '.\n join(desc[0]))\n location = caveName + aliquotPart + section + Suppress(','\n ) + township + Suppress(',') + range_info + Suppress(','\n ) + county + quadrangle + description\n return location.parseString(details)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_cave_details(details):\n aliquotQuadrantID = Literal('NE') | Literal('SE') | Literal('SW'\n ) | Literal('NW')\n aliquotQuadrantString = aliquotQuadrantID + Suppress('1/4')\n aliquotHalfString = oneOf('N E S W') + Suppress('1/2')\n aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString)\n ).setResultsName('aliquot').setParseAction(lambda kwd: ' '.join(kwd[0])\n )\n sectionToken = Suppress(oneOf('S s') + Literal('ec') + Optional('.'))\n sectionNumber = Word(nums)\n section = Group(sectionToken + sectionNumber + ZeroOrMore(Suppress('or'\n ) + sectionNumber)).setResultsName('section')\n afterEndOfCaveName = (aliquotHalfString | aliquotQuadrantString |\n sectionToken)\n caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n townshipDirection = oneOf('N S').setResultsName('direction')\n townshipNumber = Word(nums).setResultsName('number')\n township = Suppress('T.') + Group(townshipNumber + townshipDirection\n ).setResultsName('township') + Suppress('.')\n rangeDirection = oneOf('E W').setResultsName('direction')\n rangeNumber = Word(nums).setResultsName('number')\n range_info = Suppress('R.') + Group(rangeNumber + rangeDirection\n ).setResultsName('range') + Suppress('.')\n countyKeyword = Literal('County')\n countyName = Group(OneOrMore(~countyKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('county').setParseAction(lambda c: ' '.join(c[0]))\n county = countyName + Suppress('County')\n notShownOnQuad = (Literal('Not') + Suppress('s')).setParseAction(lambda\n x: False)\n shownOnQuad = Literal('S').setParseAction(lambda x: True)\n onKeyword = Literal('on')\n mapAlias = Group(OneOrMore(~onKeyword + Word(printables))).setParseAction(\n lambda alias: ' '.join(alias[0])).setResultsName('alias')\n quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(\n 'is_on_map') + Suppress('hown') + Optional(Suppress('as') + mapAlias\n ) + Suppress(onKeyword)\n quadrangleKeyword = Literal('Quadrangle') + Literal('map')\n quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName('quad'\n ) + Suppress(quadrangleKeyword)\n description = Group(ZeroOrMore(Word(alphanums + printables))\n ).setResultsName('description').setParseAction(lambda desc: ' '.\n join(desc[0]))\n location = caveName + aliquotPart + section + Suppress(','\n ) + township + Suppress(',') + range_info + Suppress(','\n ) + county + quadrangle + description\n return location.parseString(details)\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('ERROR: pass in the filename as the second argument.')\n print(' $ python {0} /path/to/file.txt'.format(sys.argv[0]))\n exit()\n filepath = sys.argv[1]\n with open(filepath) as f:\n raw_text = f.read()\n raw_caves = raw_text.split('\\n')\n caves = []\n for raw_cave_text in raw_caves:\n raw_cave_text = raw_cave_text.strip()\n if raw_cave_text:\n try:\n cave = parse_cave_details(raw_cave_text)\n caves.append({'Cave name': cave.name, 'Alias': cave.quad.\n alias, 'On map': cave.quad.is_on_map, 'Quad': cave.quad\n .name, 'County': cave.county, 'State': 'MO',\n 'Principal Meridian Code': 5, 'Township Number': cave.\n township.number, 'Township Fraction': 0,\n 'Township Direction': cave.township.direction,\n 'Range Number': cave.range.number, 'Range Fraction': 0,\n 'Range Direction': 
cave.range.direction, 'Section':\n cave.section[0], 'Section Division': ''.join(cave.\n aliquot), 'Township Duplicate': 0, 'Description':\n raw_cave_text})\n except:\n print('=' * 80)\n print('ERROR: unexpected format for {0}'.format(cave.name))\n print(raw_cave_text)\n import traceback\n print(traceback.format_exc())\n print('\\t' + '\\n\\t'.join([str(x) for x in sys.exc_info()]))\n print('Skipping this cave for the next one')\n else:\n sections = ' or '.join(cave.section)\n output_path = os.path.basename(filepath).split('.')[0] + '.csv'\n print('#' * 80)\n print(\"{0} caves processed! Saving to '{1}'.\".format(len(caves),\n output_path))\n with open(output_path, 'wb') as f:\n cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())\n try:\n cave_csv.writeheader()\n except:\n header = {}\n for k in caves[0].keys():\n header[k] = k\n cave_csv.writerow(header)\n cave_csv.writerows(caves)\n",
"step-4": "import sys\nimport os\nfrom pyparsing import *\nimport csv\n\n\ndef parse_cave_details(details):\n aliquotQuadrantID = Literal('NE') | Literal('SE') | Literal('SW'\n ) | Literal('NW')\n aliquotQuadrantString = aliquotQuadrantID + Suppress('1/4')\n aliquotHalfString = oneOf('N E S W') + Suppress('1/2')\n aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString)\n ).setResultsName('aliquot').setParseAction(lambda kwd: ' '.join(kwd[0])\n )\n sectionToken = Suppress(oneOf('S s') + Literal('ec') + Optional('.'))\n sectionNumber = Word(nums)\n section = Group(sectionToken + sectionNumber + ZeroOrMore(Suppress('or'\n ) + sectionNumber)).setResultsName('section')\n afterEndOfCaveName = (aliquotHalfString | aliquotQuadrantString |\n sectionToken)\n caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n townshipDirection = oneOf('N S').setResultsName('direction')\n townshipNumber = Word(nums).setResultsName('number')\n township = Suppress('T.') + Group(townshipNumber + townshipDirection\n ).setResultsName('township') + Suppress('.')\n rangeDirection = oneOf('E W').setResultsName('direction')\n rangeNumber = Word(nums).setResultsName('number')\n range_info = Suppress('R.') + Group(rangeNumber + rangeDirection\n ).setResultsName('range') + Suppress('.')\n countyKeyword = Literal('County')\n countyName = Group(OneOrMore(~countyKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('county').setParseAction(lambda c: ' '.join(c[0]))\n county = countyName + Suppress('County')\n notShownOnQuad = (Literal('Not') + Suppress('s')).setParseAction(lambda\n x: False)\n shownOnQuad = Literal('S').setParseAction(lambda x: True)\n onKeyword = Literal('on')\n mapAlias = Group(OneOrMore(~onKeyword + Word(printables))).setParseAction(\n lambda alias: ' '.join(alias[0])).setResultsName('alias')\n quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(\n 'is_on_map') + Suppress('hown') + Optional(Suppress('as') + mapAlias\n ) + Suppress(onKeyword)\n quadrangleKeyword = Literal('Quadrangle') + Literal('map')\n quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName('quad'\n ) + Suppress(quadrangleKeyword)\n description = Group(ZeroOrMore(Word(alphanums + printables))\n ).setResultsName('description').setParseAction(lambda desc: ' '.\n join(desc[0]))\n location = caveName + aliquotPart + section + Suppress(','\n ) + township + Suppress(',') + range_info + Suppress(','\n ) + county + quadrangle + description\n return location.parseString(details)\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('ERROR: pass in the filename as the second argument.')\n print(' $ python {0} /path/to/file.txt'.format(sys.argv[0]))\n exit()\n filepath = sys.argv[1]\n with open(filepath) as f:\n raw_text = f.read()\n raw_caves = raw_text.split('\\n')\n caves = []\n for raw_cave_text in raw_caves:\n raw_cave_text = raw_cave_text.strip()\n if raw_cave_text:\n try:\n cave = parse_cave_details(raw_cave_text)\n caves.append({'Cave name': cave.name, 'Alias': cave.quad.\n alias, 'On map': cave.quad.is_on_map, 'Quad': cave.quad\n .name, 'County': cave.county, 'State': 'MO',\n 'Principal Meridian Code': 5, 'Township Number': cave.\n township.number, 'Township Fraction': 0,\n 'Township Direction': cave.township.direction,\n 'Range Number': 
cave.range.number, 'Range Fraction': 0,\n 'Range Direction': cave.range.direction, 'Section':\n cave.section[0], 'Section Division': ''.join(cave.\n aliquot), 'Township Duplicate': 0, 'Description':\n raw_cave_text})\n except:\n print('=' * 80)\n print('ERROR: unexpected format for {0}'.format(cave.name))\n print(raw_cave_text)\n import traceback\n print(traceback.format_exc())\n print('\\t' + '\\n\\t'.join([str(x) for x in sys.exc_info()]))\n print('Skipping this cave for the next one')\n else:\n sections = ' or '.join(cave.section)\n output_path = os.path.basename(filepath).split('.')[0] + '.csv'\n print('#' * 80)\n print(\"{0} caves processed! Saving to '{1}'.\".format(len(caves),\n output_path))\n with open(output_path, 'wb') as f:\n cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())\n try:\n cave_csv.writeheader()\n except:\n header = {}\n for k in caves[0].keys():\n header[k] = k\n cave_csv.writerow(header)\n cave_csv.writerows(caves)\n",
"step-5": "import sys\r\nimport os\r\nfrom pyparsing import *\r\nimport csv\r\n\r\n\r\ndef parse_cave_details(details):\r\n ##########################################################################\r\n # Define the Bretz Grammar.\r\n # Sample cave description:\r\n # Boring Caverns SE1/4 NW1/4 sec. 16, T. 37 N., R. 10 W., Pulaski County Not shown on Waynesville Quadrangle map The mouth of this cave ...\\n\r\n # Another Cave S1/2 sec. 15, T. 36 N., R. 12 W., Pulaski County Not shown on Waynesville Quadrangle map There are two large caves...\\n\r\n # Something Bridge Sec. 15 or 22, T. 36 N., R. 13 W., Pulaski County Not shown on Richland Quadrangle map This cave is near Ozark...\\n\r\n #\r\n # CAVE ::= CAVE_NAME [ALIQUOT_PART] SECTION, TOWNSHIP, RANGE, COUNTY QUAD_MAP DESCRIPTION\r\n # ALIQUOT_PART ::= (((NE|SE|SW|NW)1/4)|((N|E|S|W)1/2))*\r\n # SECTION ::= (S|s)ec. num+\r\n # TOWNSHIP ::= T. num+ TOWNSHIP_DIR.\r\n # TOWNSHIP_DIR ::= N|S\r\n # RANGE ::= R. num+ RANGE_DIR.\r\n # RANGE_DIR ::= E|W\r\n # COUNTY = WORD+ County\r\n # QUAD_MAP = (Not s|S)hown on QUAD Quadrangle map\r\n # QUAD = WORD+\r\n # DESCRIPTION = WORD+\r\n aliquotQuadrantID = Literal(\"NE\") |\\\r\n Literal(\"SE\") |\\\r\n Literal(\"SW\") |\\\r\n Literal(\"NW\")\r\n aliquotQuadrantString = aliquotQuadrantID + Suppress(\"1/4\")\r\n aliquotHalfString = oneOf(\"N E S W\") + Suppress(\"1/2\")\r\n aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString))\\\r\n .setResultsName(\"aliquot\")\\\r\n .setParseAction(lambda kwd: \" \".join(kwd[0]))\r\n\r\n sectionToken = Suppress(oneOf(\"S s\") + Literal(\"ec\") + Optional(\".\"))\r\n sectionNumber = Word(nums)\r\n section = Group(\r\n sectionToken \\\r\n + sectionNumber \\\r\n + ZeroOrMore(Suppress(\"or\") + sectionNumber)\r\n ).setResultsName(\"section\")\r\n\r\n afterEndOfCaveName = aliquotHalfString | aliquotQuadrantString | sectionToken\r\n caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables)))\\\r\n .setResultsName('name')\\\r\n .setParseAction(lambda name: \" \".join(name[0]))\r\n\r\n townshipDirection = oneOf(\"N S\").setResultsName(\"direction\")\r\n townshipNumber = Word(nums).setResultsName(\"number\")\r\n township = Suppress(\"T.\") \\\r\n + Group(townshipNumber + townshipDirection).setResultsName(\"township\")\\\r\n + Suppress('.')\r\n\r\n rangeDirection = oneOf(\"E W\").setResultsName(\"direction\")\r\n rangeNumber = Word(nums).setResultsName(\"number\")\r\n range_info = Suppress(\"R.\") \\\r\n + Group(rangeNumber + rangeDirection).setResultsName(\"range\")\\\r\n + Suppress('.')\r\n\r\n countyKeyword = Literal(\"County\")\r\n countyName = Group(OneOrMore(~countyKeyword + Word(alphas+\"-'.\")))\\\r\n .setResultsName(\"county\")\\\r\n .setParseAction(lambda c: \" \".join(c[0]))\r\n county = countyName + Suppress(\"County\")\r\n\r\n notShownOnQuad = (Literal(\"Not\") + Suppress(\"s\"))\\\r\n .setParseAction(lambda x: False)\r\n shownOnQuad = Literal(\"S\").setParseAction(lambda x: True)\r\n onKeyword = Literal(\"on\")\r\n mapAlias = Group(OneOrMore(~onKeyword + Word(printables)))\\\r\n .setParseAction(lambda alias: \" \".join(alias[0]))\\\r\n .setResultsName(\"alias\")\r\n quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(\"is_on_map\")\\\r\n + Suppress(\"hown\") \\\r\n + Optional(Suppress('as') + mapAlias)\\\r\n + Suppress(onKeyword)\r\n quadrangleKeyword = Literal(\"Quadrangle\") + Literal(\"map\")\r\n quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas+\"-'.\")))\\\r\n .setResultsName(\"name\")\\\r\n 
.setParseAction(lambda name: \" \".join(name[0]))\r\n quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName(\"quad\") \\\r\n + Suppress(quadrangleKeyword)\r\n\r\n description = Group(ZeroOrMore(Word(alphanums + printables)))\\\r\n .setResultsName(\"description\")\\\r\n .setParseAction(lambda desc: \" \".join(desc[0]))\r\n\r\n location = caveName \\\r\n + aliquotPart \\\r\n + section + Suppress(',') \\\r\n + township + Suppress(',') \\\r\n + range_info + Suppress(',')\\\r\n + county \\\r\n + quadrangle \\\r\n + description\r\n\r\n return location.parseString(details)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) < 2:\r\n print(\"ERROR: pass in the filename as the second argument.\")\r\n print(\" $ python {0} /path/to/file.txt\".format(sys.argv[0]))\r\n exit()\r\n\r\n filepath = sys.argv[1]\r\n with open(filepath) as f:\r\n raw_text = f.read()\r\n\r\n raw_caves = raw_text.split(\"\\n\")\r\n caves = []\r\n for raw_cave_text in raw_caves:\r\n raw_cave_text = raw_cave_text.strip()\r\n if raw_cave_text:\r\n try:\r\n cave = parse_cave_details(raw_cave_text)\r\n caves.append({\r\n 'Cave name': cave.name,\r\n 'Alias': cave.quad.alias,\r\n 'On map': cave.quad.is_on_map,\r\n 'Quad': cave.quad.name,\r\n 'County': cave.county,\r\n 'State': 'MO',\r\n 'Principal Meridian Code': 5,\r\n 'Township Number': cave.township.number,\r\n 'Township Fraction': 0,\r\n 'Township Direction': cave.township.direction,\r\n 'Range Number': cave.range.number,\r\n 'Range Fraction': 0,\r\n 'Range Direction': cave.range.direction,\r\n 'Section': cave.section[0],\r\n 'Section Division': \"\".join(cave.aliquot),\r\n 'Township Duplicate': 0,\r\n 'Description': raw_cave_text,\r\n })\r\n\r\n except:\r\n print(\"=\"*80)\r\n print(\"ERROR: unexpected format for {0}\".format(cave.name))\r\n print(raw_cave_text)\r\n import traceback\r\n print(traceback.format_exc())\r\n print(\"\\t\" + \"\\n\\t\".join([str(x) for x in sys.exc_info()]))\r\n print(\"Skipping this cave for the next one\")\r\n else:\r\n sections = \" or \".join(cave.section)\r\n #print(\"=\"*80)\r\n #print(\"{1} := {0.aliquot} Sect. {2}, T. {0.township.number} {0.township.direction}., R. {0.range.number} {0.range.direction}., in {0.county} County on the {0.quad.name} quad map.\".format(cave, cave.name, sections))\r\n #print(\" Marked on map as {0}\".format(cave.quad.alias if cave.quad.alias else cave.name) if cave.quad.is_on_map else \" Not on map\")\r\n\r\n output_path = os.path.basename(filepath).split(\".\")[0] + \".csv\"\r\n print(\"#\"*80)\r\n print(\"{0} caves processed! Saving to '{1}'.\".format(len(caves), output_path))\r\n with open(output_path, 'wb') as f:\r\n cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())\r\n try:\r\n cave_csv.writeheader()\r\n \r\n except: # Versions before 2.7 of Python do not have csv with writeheader().\r\n header = {}\r\n for k in caves[0].keys():\r\n header[k] = k\r\n \r\n cave_csv.writerow(header)\r\n\r\n cave_csv.writerows(caves)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LRU_Cache(object):
def __init__(self, capacity):
self.size = capacity
self.jar = OrderedDict()
pass
def get(self, key):
if key not in self.jar:
return -1
else:
rtn = self.jar.get(key)
self.jar.move_to_end(key)
return rtn
def set(self, key, value):
if key is None:
return
if len(self.jar) == self.size:
self.jar.popitem(last=False)
self.jar[key] = value
else:
self.jar[key] = value
return
def __str__(self):
return f'{self.jar}'
<|reserved_special_token_0|>
def test_2():
"""testing to see if the least used object gets removed"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
our_cache.set(5, 5)
our_cache.get(1)
our_cache.set(6, 6)
print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LRU_Cache(object):
def __init__(self, capacity):
self.size = capacity
self.jar = OrderedDict()
pass
def get(self, key):
if key not in self.jar:
return -1
else:
rtn = self.jar.get(key)
self.jar.move_to_end(key)
return rtn
def set(self, key, value):
if key is None:
return
if len(self.jar) == self.size:
self.jar.popitem(last=False)
self.jar[key] = value
else:
self.jar[key] = value
return
def __str__(self):
return f'{self.jar}'
<|reserved_special_token_0|>
def test_2():
"""testing to see if the least used object gets removed"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
our_cache.set(5, 5)
our_cache.get(1)
our_cache.set(6, 6)
print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')
def test_3():
"""entering null key to be set, should not work"""
our_cache = LRU_Cache(5)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
def test_4():
"""0 capacity test case"""
our_cache = LRU_Cache(0)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LRU_Cache(object):
def __init__(self, capacity):
self.size = capacity
self.jar = OrderedDict()
pass
def get(self, key):
if key not in self.jar:
return -1
else:
rtn = self.jar.get(key)
self.jar.move_to_end(key)
return rtn
def set(self, key, value):
if key is None:
return
if len(self.jar) == self.size:
self.jar.popitem(last=False)
self.jar[key] = value
else:
self.jar[key] = value
return
def __str__(self):
return f'{self.jar}'
def test_1():
"""Basically testing to see if the cache can store and recall info"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')
def test_2():
"""testing to see if the least used object gets removed"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
our_cache.set(5, 5)
our_cache.get(1)
our_cache.set(6, 6)
print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')
def test_3():
"""entering null key to be set, should not work"""
our_cache = LRU_Cache(5)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
def test_4():
"""0 capacity test case"""
our_cache = LRU_Cache(0)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
if __name__ == '__main__':
test_1()
test_2()
test_3()
test_4()
<|reserved_special_token_1|>
from collections import OrderedDict
class LRU_Cache(object):
def __init__(self, capacity):
self.size = capacity
self.jar = OrderedDict()
pass
def get(self, key):
if key not in self.jar:
return -1
else:
rtn = self.jar.get(key)
self.jar.move_to_end(key)
return rtn
def set(self, key, value):
if key is None:
return
if len(self.jar) == self.size:
self.jar.popitem(last=False)
self.jar[key] = value
else:
self.jar[key] = value
return
def __str__(self):
return f'{self.jar}'
def test_1():
"""Basically testing to see if the cache can store and recall info"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')
def test_2():
"""testing to see if the least used object gets removed"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
our_cache.set(5, 5)
our_cache.get(1)
our_cache.set(6, 6)
print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')
def test_3():
"""entering null key to be set, should not work"""
our_cache = LRU_Cache(5)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
def test_4():
"""0 capacity test case"""
our_cache = LRU_Cache(0)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
if __name__ == '__main__':
test_1()
test_2()
test_3()
test_4()
<|reserved_special_token_1|>
from collections import OrderedDict
class LRU_Cache(object):
def __init__(self, capacity):
# Initialize class variables
self.size = capacity
self.jar = OrderedDict()
pass
def get(self, key):
# Retrieve item from provided key. Return -1 if nonexistent.
if key not in self.jar:
return -1
else:
rtn = self.jar.get(key)
self.jar.move_to_end(key)
return rtn
def set(self, key, value):
# Set the value if the key is not present in the cache. If the cache is at capacity remove the oldest item.
if key is None:
return
if len(self.jar) == self.size:
self.jar.popitem(last=False)
self.jar[key] = value
else:
self.jar[key] = value
return
def __str__(self):
return f'{self.jar}'
def test_1():
'''Basically testing to see if the cache can store and recall info'''
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')
def test_2():
'''testing to see if the least used object gets removed'''
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
our_cache.set(5, 5)
our_cache.get(1)
our_cache.set(6, 6)
print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')
def test_3():
'''entering null key to be set, should not work'''
our_cache = LRU_Cache(5)
[our_cache.set(None, 1) for _ in range(5)]
print(f'Current Cache state: {our_cache} expected result is for it to be empty')
def test_4():
'''0 capacity test case'''
our_cache = LRU_Cache(0)
[our_cache.set(None, 1) for _ in range(5)]
print(f'Current Cache state: {our_cache} expected result is for it to be empty')
if __name__ == "__main__":
test_1()
test_2()
test_3()
test_4()
|
flexible
|
{
"blob_id": "3c88e13e8796c5f39180a9a514f0528a074460a6",
"index": 2198,
"step-1": "<mask token>\n\n\nclass LRU_Cache(object):\n\n def __init__(self, capacity):\n self.size = capacity\n self.jar = OrderedDict()\n pass\n\n def get(self, key):\n if key not in self.jar:\n return -1\n else:\n rtn = self.jar.get(key)\n self.jar.move_to_end(key)\n return rtn\n\n def set(self, key, value):\n if key is None:\n return\n if len(self.jar) == self.size:\n self.jar.popitem(last=False)\n self.jar[key] = value\n else:\n self.jar[key] = value\n return\n\n def __str__(self):\n return f'{self.jar}'\n\n\n<mask token>\n\n\ndef test_2():\n \"\"\"testing to see if the least used object gets removed\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n our_cache.set(5, 5)\n our_cache.get(1)\n our_cache.set(6, 6)\n print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LRU_Cache(object):\n\n def __init__(self, capacity):\n self.size = capacity\n self.jar = OrderedDict()\n pass\n\n def get(self, key):\n if key not in self.jar:\n return -1\n else:\n rtn = self.jar.get(key)\n self.jar.move_to_end(key)\n return rtn\n\n def set(self, key, value):\n if key is None:\n return\n if len(self.jar) == self.size:\n self.jar.popitem(last=False)\n self.jar[key] = value\n else:\n self.jar[key] = value\n return\n\n def __str__(self):\n return f'{self.jar}'\n\n\n<mask token>\n\n\ndef test_2():\n \"\"\"testing to see if the least used object gets removed\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n our_cache.set(5, 5)\n our_cache.get(1)\n our_cache.set(6, 6)\n print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')\n\n\ndef test_3():\n \"\"\"entering null key to be set, should not work\"\"\"\n our_cache = LRU_Cache(5)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\ndef test_4():\n \"\"\"0 capacity test case\"\"\"\n our_cache = LRU_Cache(0)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LRU_Cache(object):\n\n def __init__(self, capacity):\n self.size = capacity\n self.jar = OrderedDict()\n pass\n\n def get(self, key):\n if key not in self.jar:\n return -1\n else:\n rtn = self.jar.get(key)\n self.jar.move_to_end(key)\n return rtn\n\n def set(self, key, value):\n if key is None:\n return\n if len(self.jar) == self.size:\n self.jar.popitem(last=False)\n self.jar[key] = value\n else:\n self.jar[key] = value\n return\n\n def __str__(self):\n return f'{self.jar}'\n\n\ndef test_1():\n \"\"\"Basically testing to see if the cache can store and recall info\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')\n\n\ndef test_2():\n \"\"\"testing to see if the least used object gets removed\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n our_cache.set(5, 5)\n our_cache.get(1)\n our_cache.set(6, 6)\n print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')\n\n\ndef test_3():\n \"\"\"entering null key to be set, should not work\"\"\"\n our_cache = LRU_Cache(5)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\ndef test_4():\n \"\"\"0 capacity test case\"\"\"\n our_cache = LRU_Cache(0)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\nif __name__ == '__main__':\n test_1()\n test_2()\n test_3()\n test_4()\n",
"step-4": "from collections import OrderedDict\n\n\nclass LRU_Cache(object):\n\n def __init__(self, capacity):\n self.size = capacity\n self.jar = OrderedDict()\n pass\n\n def get(self, key):\n if key not in self.jar:\n return -1\n else:\n rtn = self.jar.get(key)\n self.jar.move_to_end(key)\n return rtn\n\n def set(self, key, value):\n if key is None:\n return\n if len(self.jar) == self.size:\n self.jar.popitem(last=False)\n self.jar[key] = value\n else:\n self.jar[key] = value\n return\n\n def __str__(self):\n return f'{self.jar}'\n\n\ndef test_1():\n \"\"\"Basically testing to see if the cache can store and recall info\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')\n\n\ndef test_2():\n \"\"\"testing to see if the least used object gets removed\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n our_cache.set(5, 5)\n our_cache.get(1)\n our_cache.set(6, 6)\n print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')\n\n\ndef test_3():\n \"\"\"entering null key to be set, should not work\"\"\"\n our_cache = LRU_Cache(5)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\ndef test_4():\n \"\"\"0 capacity test case\"\"\"\n our_cache = LRU_Cache(0)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\nif __name__ == '__main__':\n test_1()\n test_2()\n test_3()\n test_4()\n",
"step-5": "from collections import OrderedDict\nclass LRU_Cache(object):\n def __init__(self, capacity):\n # Initialize class variables\n self.size = capacity\n self.jar = OrderedDict()\n pass\n\n def get(self, key):\n # Retrieve item from provided key. Return -1 if nonexistent.\n if key not in self.jar:\n return -1\n else:\n rtn = self.jar.get(key)\n self.jar.move_to_end(key)\n return rtn\n\n def set(self, key, value):\n # Set the value if the key is not present in the cache. If the cache is at capacity remove the oldest item.\n if key is None:\n return\n if len(self.jar) == self.size:\n self.jar.popitem(last=False)\n self.jar[key] = value\n else:\n self.jar[key] = value\n return\n \n def __str__(self):\n return f'{self.jar}'\n\n\ndef test_1():\n '''Basically testing to see if the cache can store and recall info'''\n our_cache = LRU_Cache(5)\n\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n\n print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')\n\n\ndef test_2():\n '''testing to see if the least used object gets removed'''\n our_cache = LRU_Cache(5)\n\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n our_cache.set(5, 5) \n\n our_cache.get(1)\n\n our_cache.set(6, 6)\n\n\n\n print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')\n\ndef test_3():\n '''entering null key to be set, should not work'''\n our_cache = LRU_Cache(5)\n\n [our_cache.set(None, 1) for _ in range(5)]\n\n print(f'Current Cache state: {our_cache} expected result is for it to be empty')\n\ndef test_4():\n '''0 capacity test case'''\n our_cache = LRU_Cache(0)\n\n [our_cache.set(None, 1) for _ in range(5)]\n\n print(f'Current Cache state: {our_cache} expected result is for it to be empty')\n\n \n\nif __name__ == \"__main__\":\n test_1()\n test_2()\n test_3()\n test_4()\n",
"step-ids": [
6,
8,
10,
11,
12
]
}
|
[
6,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
class ListContact(ListView):
model = Contact
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AddContact(CreateView):
model = Contact
success_url = reverse_lazy('home')
class ListContact(ListView):
model = Contact
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Home(TemplateView):
<|reserved_special_token_0|>
class AddContact(CreateView):
model = Contact
success_url = reverse_lazy('home')
class ListContact(ListView):
model = Contact
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Home(TemplateView):
def get(self, request, *args, **kwargs):
return render_to_response('home.html')
class AddContact(CreateView):
model = Contact
success_url = reverse_lazy('home')
class ListContact(ListView):
model = Contact
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.views.generic import TemplateView
from django.core.context_processors import csrf
from django.template import RequestContext
from django.views.generic import DetailView, ListView , CreateView , UpdateView , DeleteView , FormView , View
from .models import Contact
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from django.shortcuts import render_to_response
# Create your views here.
#def home(request):
# posts = Post.objects.all()
# contexto = {'posts' : ''}
# return render_to_response("home.html" , contexto)
class Home(TemplateView):
def get(self, request , *args , **kwargs):
return render_to_response('home.html')
class AddContact(CreateView):
model = Contact
success_url = reverse_lazy('home')
# return render_to_response("home.html" , contexto)
class ListContact(ListView):
model = Contact
|
flexible
|
{
"blob_id": "8a3694f96203ae8d1e306e1c9a5a47bfe26abeb1",
"index": 5178,
"step-1": "<mask token>\n\n\nclass ListContact(ListView):\n model = Contact\n",
"step-2": "<mask token>\n\n\nclass AddContact(CreateView):\n model = Contact\n success_url = reverse_lazy('home')\n\n\nclass ListContact(ListView):\n model = Contact\n",
"step-3": "<mask token>\n\n\nclass Home(TemplateView):\n <mask token>\n\n\nclass AddContact(CreateView):\n model = Contact\n success_url = reverse_lazy('home')\n\n\nclass ListContact(ListView):\n model = Contact\n",
"step-4": "<mask token>\n\n\nclass Home(TemplateView):\n\n def get(self, request, *args, **kwargs):\n return render_to_response('home.html')\n\n\nclass AddContact(CreateView):\n model = Contact\n success_url = reverse_lazy('home')\n\n\nclass ListContact(ListView):\n model = Contact\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django.shortcuts import render_to_response\nfrom django.views.generic import TemplateView\nfrom django.core.context_processors import csrf\nfrom django.template import RequestContext\nfrom django.views.generic import DetailView, ListView , CreateView , UpdateView , DeleteView , FormView , View\nfrom .models import Contact\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\n\n# Create your views here.\n\n#def home(request):\n # posts = Post.objects.all()\n# contexto = {'posts' : ''}\n# return render_to_response(\"home.html\" , contexto)\n\n\n\nclass Home(TemplateView):\n def get(self, request , *args , **kwargs):\n return render_to_response('home.html')\n\n\nclass AddContact(CreateView):\n model = Contact\n success_url = reverse_lazy('home')\n # return render_to_response(\"home.html\" , contexto)\n\nclass ListContact(ListView):\n model = Contact\n\n",
"step-ids": [
2,
4,
5,
6,
8
]
}
|
[
2,
4,
5,
6,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .start_node import StartNode
from .character_appearance import CharacterAppearance
from .character_disappearance import CharacterDisappearance
from .replica import Replica
from .end_node import EndNode
from .choice import Choice
from .set_landscape import SetLandscape
from .add_item import AddItem
from .switch_by_item import SwitchByItem
|
flexible
|
{
"blob_id": "cd6e15daa2360ead47f0bac95843b1c030164996",
"index": 6879,
"step-1": "<mask token>\n",
"step-2": "from .start_node import StartNode\nfrom .character_appearance import CharacterAppearance\nfrom .character_disappearance import CharacterDisappearance\nfrom .replica import Replica\nfrom .end_node import EndNode\nfrom .choice import Choice\nfrom .set_landscape import SetLandscape\nfrom .add_item import AddItem\nfrom .switch_by_item import SwitchByItem\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
driver.get('http://192.168.1.248:9079/#/')
<|reserved_special_token_0|>
print(type(lanuage))
print(lanuage.text)
try:
driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'
print('符合要求')
except EOFError:
print('不是中文')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
driver = webdriver.Chrome()
driver.get('http://192.168.1.248:9079/#/')
lanuage = driver.find_element_by_class_name('el-dropdown-trigger-text')
print(type(lanuage))
print(lanuage.text)
try:
driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'
print('符合要求')
except EOFError:
print('不是中文')
<|reserved_special_token_1|>
from selenium import webdriver
driver = webdriver.Chrome()
driver.get('http://192.168.1.248:9079/#/')
lanuage = driver.find_element_by_class_name('el-dropdown-trigger-text')
print(type(lanuage))
print(lanuage.text)
try:
driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'
print('符合要求')
except EOFError:
print('不是中文')
<|reserved_special_token_1|>
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("http://192.168.1.248:9079/#/")
lanuage = driver.find_element_by_class_name("el-dropdown-trigger-text")
print(type(lanuage))
print(lanuage.text)
try:
driver.find_element_by_class_name("el-dropdown-trigger-text").text =="中文"
print("符合要求")
except EOFError:
print("不是中文")
# driver.find_element_by_link_text("简体中文")
|
flexible
|
{
"blob_id": "6a1f58af26bbc4d584ffd699c512ef433ffb80d8",
"index": 7206,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('http://192.168.1.248:9079/#/')\n<mask token>\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'\n print('符合要求')\nexcept EOFError:\n print('不是中文')\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome()\ndriver.get('http://192.168.1.248:9079/#/')\nlanuage = driver.find_element_by_class_name('el-dropdown-trigger-text')\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'\n print('符合要求')\nexcept EOFError:\n print('不是中文')\n",
"step-4": "from selenium import webdriver\ndriver = webdriver.Chrome()\ndriver.get('http://192.168.1.248:9079/#/')\nlanuage = driver.find_element_by_class_name('el-dropdown-trigger-text')\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'\n print('符合要求')\nexcept EOFError:\n print('不是中文')\n",
"step-5": "from selenium import webdriver\n\n\ndriver = webdriver.Chrome()\ndriver.get(\"http://192.168.1.248:9079/#/\")\n\n\nlanuage = driver.find_element_by_class_name(\"el-dropdown-trigger-text\")\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name(\"el-dropdown-trigger-text\").text ==\"中文\"\n print(\"符合要求\")\nexcept EOFError:\n print(\"不是中文\") \n# driver.find_element_by_link_text(\"简体中文\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
print(10-10)
print(1000-80)
print(10/5)
print(10/6)
print(10//6) # remoção das casas decimais
print(10*800)
print(55*5)
|
normal
|
{
"blob_id": "e488761c15ee8cddbb7577d5340ee9001193c1a4",
"index": 4767,
"step-1": "<mask token>\n",
"step-2": "print(10 - 10)\nprint(1000 - 80)\nprint(10 / 5)\nprint(10 / 6)\nprint(10 // 6)\nprint(10 * 800)\nprint(55 * 5)\n",
"step-3": "print(10-10)\r\nprint(1000-80)\r\nprint(10/5)\r\nprint(10/6)\r\nprint(10//6) # remoção das casas decimais\r\n\r\nprint(10*800)\r\nprint(55*5)\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#Copyright (c) 2020 Ocado. All Rights Reserved.
import vptree, itertools
import numpy as np
class _ExtendedVPTree(vptree.VPTree):
"""
VPTree class extended to include the list of points within the tree
"""
def __init__(self, points, dist_fn):
"""
:param points: List of points to add to the vp-tree
:param dist_fn: Metric distance function
"""
super().__init__(points, dist_fn)
self.points = points
self.size = len(points)
def get_n_nearest_neighbors(self, query, n_neighbors):
"""
Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance
"""
if not isinstance(n_neighbors, int) or n_neighbors < 1:
raise ValueError('n_neighbors must be strictly positive integer')
neighbors = vptree._AutoSortingList(max_size=n_neighbors)
nodes_to_visit = [(self, 0)]
furthest_d = np.inf
while len(nodes_to_visit) > 0:
node, d0 = nodes_to_visit.pop(0)
if node is None or d0 > furthest_d:
continue
d = self.dist_fn(query, node.vp)
if d <= furthest_d: #Replaced < with <=
neighbors.append((d, node.vp))
furthest_d, _ = neighbors[-1]
if node._is_leaf():
continue
if node.left_min <= d <= node.left_max:
nodes_to_visit.insert(0, (node.left, 0))
elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:
nodes_to_visit.append((node.left,
node.left_min - d if d < node.left_min
else d - node.left_max))
if node.right_min <= d <= node.right_max:
nodes_to_visit.insert(0, (node.right, 0))
elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:
nodes_to_visit.append((node.right,
node.right_min - d if d < node.right_min
else d - node.right_max))
if len(neighbors) == 0:
neighbors = [(np.nan, point) for point in self.points[:n_neighbors]] #Return any point(s) if query contains np.nan
return list(neighbors)
class DynamicVPTree:
"""
Dynamic vp-tree implemented using index folding
"""
def __init__(self, dist_fn, min_tree_size=4):
"""
:param dist_fn: Metric distance function used for vp-trees
:param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)
"""
self.dist_fn = dist_fn
self.trees = []
self.pool = []
self.min_tree_size = min_tree_size
def insert(self, item):
"""
Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached
        Then merge trees of equal sizes so that there are at most log(n) trees, with the largest tree having roughly n/2 nodes
"""
self.pool.append(item)
if len(self.pool) == self.min_tree_size:
self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))
self.pool = []
while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:
a = self.trees.pop()
b = self.trees.pop()
self.trees.append(_ExtendedVPTree(a.points + b.points, self.dist_fn))
def nearest(self, query):
"""
Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)
"""
nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees))
distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool))
best = None
best_cost = np.inf
for cost, near in nearest_trees + distances_pool:
if cost <= best_cost:
best = near
best_cost = cost
return best
def neighbourhood(self, query, radius):
"""
Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)
"""
tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.get_all_in_range(query, radius)))
neighbourhood_trees = list(itertools.chain.from_iterable(map(tree_neighbourhood, self.trees)))
return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x, query) < radius, self.pool))
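
# --- Illustrative usage sketch (not part of the original module): the distance
# --- helper and the example 2-D points below are assumptions for demonstration.
if __name__ == '__main__':
    import math

    def euclidean(a, b):
        return math.dist(a, b)  # plain Euclidean distance between two points

    demo_tree = DynamicVPTree(euclidean, min_tree_size=4)
    for p in [(0.0, 0.0), (1.0, 1.0), (3.0, 4.0), (5.0, 5.0), (2.0, 2.0)]:
        demo_tree.insert(p)
    print(demo_tree.nearest((1.2, 0.9)))             # closest stored point to the query
    print(demo_tree.neighbourhood((1.2, 0.9), 2.0))  # all stored points within distance 2.0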
|
normal
|
{
"blob_id": "22e6616fb98ecfb256587c3767c7c289decc6bf6",
"index": 3049,
"step-1": "<mask token>\n\n\nclass DynamicVPTree:\n <mask token>\n\n def __init__(self, dist_fn, min_tree_size=4):\n \"\"\"\n :param dist_fn: Metric distance function used for vp-trees\n :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)\n \"\"\"\n self.dist_fn = dist_fn\n self.trees = []\n self.pool = []\n self.min_tree_size = min_tree_size\n\n def insert(self, item):\n \"\"\"\n Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached\n Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes\n \"\"\"\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2\n ].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.\n dist_fn))\n\n def nearest(self, query):\n \"\"\"\n Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)\n \"\"\"\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),\n self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),\n self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in (nearest_trees + distances_pool):\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DynamicVPTree:\n \"\"\"\n Dynamic vp-tree implemented using index folding\n \"\"\"\n\n def __init__(self, dist_fn, min_tree_size=4):\n \"\"\"\n :param dist_fn: Metric distance function used for vp-trees\n :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)\n \"\"\"\n self.dist_fn = dist_fn\n self.trees = []\n self.pool = []\n self.min_tree_size = min_tree_size\n\n def insert(self, item):\n \"\"\"\n Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached\n Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes\n \"\"\"\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2\n ].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.\n dist_fn))\n\n def nearest(self, query):\n \"\"\"\n Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)\n \"\"\"\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),\n self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),\n self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in (nearest_trees + distances_pool):\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best\n\n def neighbourhood(self, query, radius):\n \"\"\"\n Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)\n \"\"\"\n tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.\n get_all_in_range(query, radius)))\n neighbourhood_trees = list(itertools.chain.from_iterable(map(\n tree_neighbourhood, self.trees)))\n return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x,\n query) < radius, self.pool))\n",
"step-3": "<mask token>\n\n\nclass _ExtendedVPTree(vptree.VPTree):\n <mask token>\n\n def __init__(self, points, dist_fn):\n \"\"\"\n :param points: List of points to add to the vp-tree\n :param dist_fn: Metric distance function\n \"\"\"\n super().__init__(points, dist_fn)\n self.points = points\n self.size = len(points)\n\n def get_n_nearest_neighbors(self, query, n_neighbors):\n \"\"\"\n Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance\n \"\"\"\n if not isinstance(n_neighbors, int) or n_neighbors < 1:\n raise ValueError('n_neighbors must be strictly positive integer')\n neighbors = vptree._AutoSortingList(max_size=n_neighbors)\n nodes_to_visit = [(self, 0)]\n furthest_d = np.inf\n while len(nodes_to_visit) > 0:\n node, d0 = nodes_to_visit.pop(0)\n if node is None or d0 > furthest_d:\n continue\n d = self.dist_fn(query, node.vp)\n if d <= furthest_d:\n neighbors.append((d, node.vp))\n furthest_d, _ = neighbors[-1]\n if node._is_leaf():\n continue\n if node.left_min <= d <= node.left_max:\n nodes_to_visit.insert(0, (node.left, 0))\n elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:\n nodes_to_visit.append((node.left, node.left_min - d if d <\n node.left_min else d - node.left_max))\n if node.right_min <= d <= node.right_max:\n nodes_to_visit.insert(0, (node.right, 0))\n elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:\n nodes_to_visit.append((node.right, node.right_min - d if d <\n node.right_min else d - node.right_max))\n if len(neighbors) == 0:\n neighbors = [(np.nan, point) for point in self.points[:n_neighbors]\n ]\n return list(neighbors)\n\n\nclass DynamicVPTree:\n \"\"\"\n Dynamic vp-tree implemented using index folding\n \"\"\"\n\n def __init__(self, dist_fn, min_tree_size=4):\n \"\"\"\n :param dist_fn: Metric distance function used for vp-trees\n :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)\n \"\"\"\n self.dist_fn = dist_fn\n self.trees = []\n self.pool = []\n self.min_tree_size = min_tree_size\n\n def insert(self, item):\n \"\"\"\n Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached\n Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes\n \"\"\"\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2\n ].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.\n dist_fn))\n\n def nearest(self, query):\n \"\"\"\n Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)\n \"\"\"\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),\n self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),\n self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in (nearest_trees + distances_pool):\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best\n\n def neighbourhood(self, query, radius):\n \"\"\"\n Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)\n \"\"\"\n tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.\n 
get_all_in_range(query, radius)))\n neighbourhood_trees = list(itertools.chain.from_iterable(map(\n tree_neighbourhood, self.trees)))\n return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x,\n query) < radius, self.pool))\n",
"step-4": "import vptree, itertools\nimport numpy as np\n\n\nclass _ExtendedVPTree(vptree.VPTree):\n \"\"\"\n VPTree class extended to include the list of points within the tree\n \"\"\"\n\n def __init__(self, points, dist_fn):\n \"\"\"\n :param points: List of points to add to the vp-tree\n :param dist_fn: Metric distance function\n \"\"\"\n super().__init__(points, dist_fn)\n self.points = points\n self.size = len(points)\n\n def get_n_nearest_neighbors(self, query, n_neighbors):\n \"\"\"\n Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance\n \"\"\"\n if not isinstance(n_neighbors, int) or n_neighbors < 1:\n raise ValueError('n_neighbors must be strictly positive integer')\n neighbors = vptree._AutoSortingList(max_size=n_neighbors)\n nodes_to_visit = [(self, 0)]\n furthest_d = np.inf\n while len(nodes_to_visit) > 0:\n node, d0 = nodes_to_visit.pop(0)\n if node is None or d0 > furthest_d:\n continue\n d = self.dist_fn(query, node.vp)\n if d <= furthest_d:\n neighbors.append((d, node.vp))\n furthest_d, _ = neighbors[-1]\n if node._is_leaf():\n continue\n if node.left_min <= d <= node.left_max:\n nodes_to_visit.insert(0, (node.left, 0))\n elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:\n nodes_to_visit.append((node.left, node.left_min - d if d <\n node.left_min else d - node.left_max))\n if node.right_min <= d <= node.right_max:\n nodes_to_visit.insert(0, (node.right, 0))\n elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:\n nodes_to_visit.append((node.right, node.right_min - d if d <\n node.right_min else d - node.right_max))\n if len(neighbors) == 0:\n neighbors = [(np.nan, point) for point in self.points[:n_neighbors]\n ]\n return list(neighbors)\n\n\nclass DynamicVPTree:\n \"\"\"\n Dynamic vp-tree implemented using index folding\n \"\"\"\n\n def __init__(self, dist_fn, min_tree_size=4):\n \"\"\"\n :param dist_fn: Metric distance function used for vp-trees\n :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)\n \"\"\"\n self.dist_fn = dist_fn\n self.trees = []\n self.pool = []\n self.min_tree_size = min_tree_size\n\n def insert(self, item):\n \"\"\"\n Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached\n Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes\n \"\"\"\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2\n ].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.\n dist_fn))\n\n def nearest(self, query):\n \"\"\"\n Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)\n \"\"\"\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),\n self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),\n self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in (nearest_trees + distances_pool):\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best\n\n def neighbourhood(self, query, radius):\n \"\"\"\n Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal 
tree (and pool)\n \"\"\"\n tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.\n get_all_in_range(query, radius)))\n neighbourhood_trees = list(itertools.chain.from_iterable(map(\n tree_neighbourhood, self.trees)))\n return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x,\n query) < radius, self.pool))\n",
"step-5": "#Copyright (c) 2020 Ocado. All Rights Reserved.\n\nimport vptree, itertools\nimport numpy as np\n\n\nclass _ExtendedVPTree(vptree.VPTree):\n \"\"\"\n VPTree class extended to include the list of points within the tree\n \"\"\"\n def __init__(self, points, dist_fn):\n \"\"\"\n :param points: List of points to add to the vp-tree\n :param dist_fn: Metric distance function\n \"\"\"\n super().__init__(points, dist_fn)\n self.points = points\n self.size = len(points)\n\n def get_n_nearest_neighbors(self, query, n_neighbors):\n \"\"\"\n Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance\n \"\"\"\n if not isinstance(n_neighbors, int) or n_neighbors < 1:\n raise ValueError('n_neighbors must be strictly positive integer')\n neighbors = vptree._AutoSortingList(max_size=n_neighbors)\n nodes_to_visit = [(self, 0)]\n furthest_d = np.inf\n while len(nodes_to_visit) > 0:\n node, d0 = nodes_to_visit.pop(0)\n if node is None or d0 > furthest_d:\n continue\n d = self.dist_fn(query, node.vp)\n if d <= furthest_d: #Replaced < with <=\n neighbors.append((d, node.vp))\n furthest_d, _ = neighbors[-1]\n if node._is_leaf():\n continue\n if node.left_min <= d <= node.left_max:\n nodes_to_visit.insert(0, (node.left, 0))\n elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:\n nodes_to_visit.append((node.left,\n node.left_min - d if d < node.left_min\n else d - node.left_max))\n if node.right_min <= d <= node.right_max:\n nodes_to_visit.insert(0, (node.right, 0))\n elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:\n nodes_to_visit.append((node.right,\n node.right_min - d if d < node.right_min\n else d - node.right_max))\n if len(neighbors) == 0:\n neighbors = [(np.nan, point) for point in self.points[:n_neighbors]] #Return any point(s) if query contains np.nan\n return list(neighbors)\n\n\nclass DynamicVPTree:\n \"\"\"\n Dynamic vp-tree implemented using index folding\n \"\"\"\n def __init__(self, dist_fn, min_tree_size=4):\n \"\"\"\n :param dist_fn: Metric distance function used for vp-trees\n :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)\n \"\"\"\n self.dist_fn = dist_fn\n self.trees = []\n self.pool = []\n self.min_tree_size = min_tree_size\n\n def insert(self, item):\n \"\"\"\n Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached\n Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes\n \"\"\"\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.dist_fn))\n\n def nearest(self, query):\n \"\"\"\n Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)\n \"\"\"\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in nearest_trees + distances_pool:\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best\n\n def neighbourhood(self, query, radius):\n \"\"\"\n Return 
all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)\n \"\"\"\n tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.get_all_in_range(query, radius)))\n neighbourhood_trees = list(itertools.chain.from_iterable(map(tree_neighbourhood, self.trees)))\n return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x, query) < radius, self.pool))\n",
"step-ids": [
4,
6,
9,
11,
12
]
}
|
[
4,
6,
9,
11,
12
] |
<|reserved_special_token_0|>
class VideoClassSerializer(serializers.ModelSerializer):
<|reserved_special_token_0|>
class Meta:
model = VideoClass
fields = 'title', 'video_set'
def get_video_set(self, instance):
videos = instance.video_set.all()
return VideoSerializer(videos, many=True).data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VideoClassSerializer(serializers.ModelSerializer):
video_set = serializers.SerializerMethodField()
class Meta:
model = VideoClass
fields = 'title', 'video_set'
def get_video_set(self, instance):
videos = instance.video_set.all()
return VideoSerializer(videos, many=True).data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VideoSerializer(serializers.ModelSerializer):
class Meta:
model = Video
fields = ['videoURL', 'subTitle', 'numOfLike', 'numOfPlay']
class VideoClassSerializer(serializers.ModelSerializer):
video_set = serializers.SerializerMethodField()
class Meta:
model = VideoClass
fields = 'title', 'video_set'
def get_video_set(self, instance):
videos = instance.video_set.all()
return VideoSerializer(videos, many=True).data
<|reserved_special_token_1|>
from .models import Video, VideoClass
from rest_framework import serializers
class VideoSerializer(serializers.ModelSerializer):
class Meta:
model = Video
fields = ['videoURL', 'subTitle', 'numOfLike', 'numOfPlay']
class VideoClassSerializer(serializers.ModelSerializer):
video_set = serializers.SerializerMethodField()
class Meta:
model = VideoClass
fields = 'title', 'video_set'
def get_video_set(self, instance):
videos = instance.video_set.all()
return VideoSerializer(videos, many=True).data
<|reserved_special_token_1|>
from .models import Video, VideoClass
from rest_framework import serializers
# Video info
class VideoSerializer(serializers.ModelSerializer):
class Meta:
model = Video
fields = ['videoURL','subTitle', 'numOfLike', 'numOfPlay']
# Video classification
class VideoClassSerializer(serializers.ModelSerializer):
video_set = serializers.SerializerMethodField()
class Meta:
model = VideoClass
fields = ('title', 'video_set')
def get_video_set(self, instance):
videos = instance.video_set.all()
return VideoSerializer(videos, many=True).data
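
# --- Illustrative usage sketch (not part of the original module); assumes a
# --- VideoClass row with related Video rows already exists in the database.
# data = VideoClassSerializer(VideoClass.objects.first()).data
# # -> {'title': ..., 'video_set': [{'videoURL': ..., 'subTitle': ...,
# #                                  'numOfLike': ..., 'numOfPlay': ...}, ...]}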
|
flexible
|
{
"blob_id": "b20a8160ba455a39e990b8b37c5017645530ced3",
"index": 1545,
"step-1": "<mask token>\n\n\nclass VideoClassSerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = VideoClass\n fields = 'title', 'video_set'\n\n def get_video_set(self, instance):\n videos = instance.video_set.all()\n return VideoSerializer(videos, many=True).data\n",
"step-2": "<mask token>\n\n\nclass VideoClassSerializer(serializers.ModelSerializer):\n video_set = serializers.SerializerMethodField()\n\n\n class Meta:\n model = VideoClass\n fields = 'title', 'video_set'\n\n def get_video_set(self, instance):\n videos = instance.video_set.all()\n return VideoSerializer(videos, many=True).data\n",
"step-3": "<mask token>\n\n\nclass VideoSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Video\n fields = ['videoURL', 'subTitle', 'numOfLike', 'numOfPlay']\n\n\nclass VideoClassSerializer(serializers.ModelSerializer):\n video_set = serializers.SerializerMethodField()\n\n\n class Meta:\n model = VideoClass\n fields = 'title', 'video_set'\n\n def get_video_set(self, instance):\n videos = instance.video_set.all()\n return VideoSerializer(videos, many=True).data\n",
"step-4": "from .models import Video, VideoClass\nfrom rest_framework import serializers\n\n\nclass VideoSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Video\n fields = ['videoURL', 'subTitle', 'numOfLike', 'numOfPlay']\n\n\nclass VideoClassSerializer(serializers.ModelSerializer):\n video_set = serializers.SerializerMethodField()\n\n\n class Meta:\n model = VideoClass\n fields = 'title', 'video_set'\n\n def get_video_set(self, instance):\n videos = instance.video_set.all()\n return VideoSerializer(videos, many=True).data\n",
"step-5": "from .models import Video, VideoClass\nfrom rest_framework import serializers\n\n\n# Video 정보\nclass VideoSerializer(serializers.ModelSerializer): \n class Meta:\n model = Video\n fields = ['videoURL','subTitle', 'numOfLike', 'numOfPlay']\n\n# Video 분류\nclass VideoClassSerializer(serializers.ModelSerializer):\n video_set = serializers.SerializerMethodField()\n\n class Meta:\n model = VideoClass\n fields = ('title', 'video_set')\n\n def get_video_set(self, instance):\n videos = instance.video_set.all()\n return VideoSerializer(videos, many=True).data\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
@app.route('/search_general', methods=['POST'])
def query():
message = None
searchQuery = request.json['searchQuery']
result = qp.generateQuery(searchQuery)
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/search_faceted', methods=['POST'])
def facQuery():
message = None
facQuery = request.json['facQuery']
result = qp.advancedQuery(facQuery)
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CORS(app)
<|reserved_special_token_0|>
@app.route('/search_general', methods=['POST'])
def query():
message = None
searchQuery = request.json['searchQuery']
result = qp.generateQuery(searchQuery)
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/search_faceted', methods=['POST'])
def facQuery():
message = None
facQuery = request.json['facQuery']
result = qp.advancedQuery(facQuery)
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
CORS(app)
qp = QueryProcessor()
@app.route('/search_general', methods=['POST'])
def query():
message = None
searchQuery = request.json['searchQuery']
result = qp.generateQuery(searchQuery)
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/search_faceted', methods=['POST'])
def facQuery():
message = None
facQuery = request.json['facQuery']
result = qp.advancedQuery(facQuery)
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask, request
from flask import jsonify
from preprocessing import QueryProcessor
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
qp = QueryProcessor()
@app.route('/search_general', methods=['POST'])
def query():
message = None
searchQuery = request.json['searchQuery']
result = qp.generateQuery(searchQuery)
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/search_faceted', methods=['POST'])
def facQuery():
message = None
facQuery = request.json['facQuery']
result = qp.advancedQuery(facQuery)
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask, request
from flask import jsonify
from preprocessing import QueryProcessor
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
qp = QueryProcessor()
@app.route('/search_general', methods=['POST'])
def query():
message = None
searchQuery = request.json['searchQuery']
result = qp.generateQuery(searchQuery)
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/search_faceted', methods=['POST'])
def facQuery():
message = None
facQuery = request.json['facQuery']
result = qp.advancedQuery(facQuery)
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
if __name__ == "__main__":
app.run(debug=True)
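
# --- Illustrative client sketch (not part of the original app); the host/port
# --- and the example search term are assumptions, while the routes and JSON
# --- keys come from the handlers above.
# import requests
# resp = requests.post('http://127.0.0.1:5000/search_general',
#                      json={'searchQuery': 'budget'})
# print(resp.json())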
|
flexible
|
{
"blob_id": "e582787a912f479830ed99575b2c6adb8088b4e5",
"index": 257,
"step-1": "<mask token>\n\n\[email protected]('/search_general', methods=['POST'])\ndef query():\n message = None\n searchQuery = request.json['searchQuery']\n result = qp.generateQuery(searchQuery)\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\[email protected]('/search_faceted', methods=['POST'])\ndef facQuery():\n message = None\n facQuery = request.json['facQuery']\n result = qp.advancedQuery(facQuery)\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\n<mask token>\n",
"step-2": "<mask token>\nCORS(app)\n<mask token>\n\n\[email protected]('/search_general', methods=['POST'])\ndef query():\n message = None\n searchQuery = request.json['searchQuery']\n result = qp.generateQuery(searchQuery)\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\[email protected]('/search_faceted', methods=['POST'])\ndef facQuery():\n message = None\n facQuery = request.json['facQuery']\n result = qp.advancedQuery(facQuery)\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\nCORS(app)\nqp = QueryProcessor()\n\n\[email protected]('/search_general', methods=['POST'])\ndef query():\n message = None\n searchQuery = request.json['searchQuery']\n result = qp.generateQuery(searchQuery)\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\[email protected]('/search_faceted', methods=['POST'])\ndef facQuery():\n message = None\n facQuery = request.json['facQuery']\n result = qp.advancedQuery(facQuery)\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, request\nfrom flask import jsonify\nfrom preprocessing import QueryProcessor\nfrom flask_cors import CORS\napp = Flask(__name__)\nCORS(app)\nqp = QueryProcessor()\n\n\[email protected]('/search_general', methods=['POST'])\ndef query():\n message = None\n searchQuery = request.json['searchQuery']\n result = qp.generateQuery(searchQuery)\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\[email protected]('/search_faceted', methods=['POST'])\ndef facQuery():\n message = None\n facQuery = request.json['facQuery']\n result = qp.advancedQuery(facQuery)\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, request\nfrom flask import jsonify\nfrom preprocessing import QueryProcessor\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\nqp = QueryProcessor()\n\n\[email protected]('/search_general', methods=['POST'])\ndef query():\n message = None\n searchQuery = request.json['searchQuery']\n result = qp.generateQuery(searchQuery)\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\[email protected]('/search_faceted', methods=['POST'])\ndef facQuery():\n message = None\n facQuery = request.json['facQuery']\n result = qp.advancedQuery(facQuery)\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from .dataset_readers import *
from .models import *
|
normal
|
{
"blob_id": "bc8bf06f1adedeb7b364308591bff09ac42d6c29",
"index": 3702,
"step-1": "<mask token>\n",
"step-2": "from .dataset_readers import *\nfrom .models import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def main():
if len(sys.argv) < 2:
print(
'Usage: pyspark q2.py <file>\n e.g. pyspark q2.py file:///home/cloudera/test_file'
)
exit(-1)
sc = SparkContext(appName='HW4_Q2_LC')
try:
n = sc.textFile(sys.argv[1]).filter(lambda x: len(
NON_WORDS_DELIMITER.split(x)) > 10).count()
print('=' * 20)
print(' R E S U L T S ')
print('Lines with more than 10 words:', n)
print('=' * 20)
finally:
sc.stop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
if len(sys.argv) < 2:
print(
'Usage: pyspark q2.py <file>\n e.g. pyspark q2.py file:///home/cloudera/test_file'
)
exit(-1)
sc = SparkContext(appName='HW4_Q2_LC')
try:
n = sc.textFile(sys.argv[1]).filter(lambda x: len(
NON_WORDS_DELIMITER.split(x)) > 10).count()
print('=' * 20)
print(' R E S U L T S ')
print('Lines with more than 10 words:', n)
print('=' * 20)
finally:
sc.stop()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
NON_WORDS_DELIMITER = re.compile('[^\\w\\d]+')
def main():
if len(sys.argv) < 2:
print(
'Usage: pyspark q2.py <file>\n e.g. pyspark q2.py file:///home/cloudera/test_file'
)
exit(-1)
sc = SparkContext(appName='HW4_Q2_LC')
try:
n = sc.textFile(sys.argv[1]).filter(lambda x: len(
NON_WORDS_DELIMITER.split(x)) > 10).count()
print('=' * 20)
print(' R E S U L T S ')
print('Lines with more than 10 words:', n)
print('=' * 20)
finally:
sc.stop()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from __future__ import print_function
import re
import sys
from pyspark import SparkContext
NON_WORDS_DELIMITER = re.compile('[^\\w\\d]+')
def main():
if len(sys.argv) < 2:
print(
'Usage: pyspark q2.py <file>\n e.g. pyspark q2.py file:///home/cloudera/test_file'
)
exit(-1)
sc = SparkContext(appName='HW4_Q2_LC')
try:
n = sc.textFile(sys.argv[1]).filter(lambda x: len(
NON_WORDS_DELIMITER.split(x)) > 10).count()
print('=' * 20)
print(' R E S U L T S ')
print('Lines with more than 10 words:', n)
print('=' * 20)
finally:
sc.stop()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from __future__ import print_function
import re
import sys
from pyspark import SparkContext
# define a regular expression for delimiters
NON_WORDS_DELIMITER = re.compile(r'[^\w\d]+')
def main():
if len(sys.argv) < 2:
print('''Usage: pyspark q2.py <file>
e.g. pyspark q2.py file:///home/cloudera/test_file''')
exit(-1)
sc = SparkContext(appName="HW4_Q2_LC")
try:
n = sc.textFile(sys.argv[1]) \
.filter(lambda x: len(NON_WORDS_DELIMITER.split(x)) > 10).count()
print("=" * 20)
print(" R E S U L T S ")
print("Lines with more than 10 words:", n)
print("=" * 20)
finally:
sc.stop()
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "deff4eb3ae933a99036f39213ceaf2144b682904",
"index": 5025,
"step-1": "<mask token>\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\n 'Usage: pyspark q2.py <file>\\n e.g. pyspark q2.py file:///home/cloudera/test_file'\n )\n exit(-1)\n sc = SparkContext(appName='HW4_Q2_LC')\n try:\n n = sc.textFile(sys.argv[1]).filter(lambda x: len(\n NON_WORDS_DELIMITER.split(x)) > 10).count()\n print('=' * 20)\n print(' R E S U L T S ')\n print('Lines with more than 10 words:', n)\n print('=' * 20)\n finally:\n sc.stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\n 'Usage: pyspark q2.py <file>\\n e.g. pyspark q2.py file:///home/cloudera/test_file'\n )\n exit(-1)\n sc = SparkContext(appName='HW4_Q2_LC')\n try:\n n = sc.textFile(sys.argv[1]).filter(lambda x: len(\n NON_WORDS_DELIMITER.split(x)) > 10).count()\n print('=' * 20)\n print(' R E S U L T S ')\n print('Lines with more than 10 words:', n)\n print('=' * 20)\n finally:\n sc.stop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nNON_WORDS_DELIMITER = re.compile('[^\\\\w\\\\d]+')\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\n 'Usage: pyspark q2.py <file>\\n e.g. pyspark q2.py file:///home/cloudera/test_file'\n )\n exit(-1)\n sc = SparkContext(appName='HW4_Q2_LC')\n try:\n n = sc.textFile(sys.argv[1]).filter(lambda x: len(\n NON_WORDS_DELIMITER.split(x)) > 10).count()\n print('=' * 20)\n print(' R E S U L T S ')\n print('Lines with more than 10 words:', n)\n print('=' * 20)\n finally:\n sc.stop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import print_function\nimport re\nimport sys\nfrom pyspark import SparkContext\nNON_WORDS_DELIMITER = re.compile('[^\\\\w\\\\d]+')\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\n 'Usage: pyspark q2.py <file>\\n e.g. pyspark q2.py file:///home/cloudera/test_file'\n )\n exit(-1)\n sc = SparkContext(appName='HW4_Q2_LC')\n try:\n n = sc.textFile(sys.argv[1]).filter(lambda x: len(\n NON_WORDS_DELIMITER.split(x)) > 10).count()\n print('=' * 20)\n print(' R E S U L T S ')\n print('Lines with more than 10 words:', n)\n print('=' * 20)\n finally:\n sc.stop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from __future__ import print_function\n\nimport re\nimport sys\nfrom pyspark import SparkContext\n\n\n# define a regular expression for delimiters\nNON_WORDS_DELIMITER = re.compile(r'[^\\w\\d]+')\n\n\ndef main():\n if len(sys.argv) < 2:\n print('''Usage: pyspark q2.py <file>\n e.g. pyspark q2.py file:///home/cloudera/test_file''')\n exit(-1)\n\n sc = SparkContext(appName=\"HW4_Q2_LC\")\n try:\n n = sc.textFile(sys.argv[1]) \\\n .filter(lambda x: len(NON_WORDS_DELIMITER.split(x)) > 10).count()\n print(\"=\" * 20)\n print(\" R E S U L T S \")\n print(\"Lines with more than 10 words:\", n)\n print(\"=\" * 20)\n finally:\n sc.stop()\n \n\nif __name__ == '__main__':\n main()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from mcpi.minecraft import Minecraft
import random, time

mc = Minecraft.create()  # connect to the running Minecraft game

while True:
    x, y, z = mc.player.getTilePos()
    color = random.randrange(0, 9)
    mc.setBlock(x, y, z - 1, 38, color)  # place block id 38 with a random data value, offset one block on the z axis
    time.sleep(0.01)
|
normal
|
{
"blob_id": "a2e00af84f743e949b53840ae6d5509e08935486",
"index": 7978,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n x, y, z = mc.player.getTilePos()\n color = random.randrange(0, 9)\n mc.setBlock(x, y, z - 1, 38, color)\n time.sleep(0.01)\n",
"step-3": "from mcpi.minecraft import Minecraft\nimport random, time\nwhile True:\n x, y, z = mc.player.getTilePos()\n color = random.randrange(0, 9)\n mc.setBlock(x, y, z - 1, 38, color)\n time.sleep(0.01)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
import requests
import json
import boto3
from lxml.html import parse
CardTitlePrefix = "Greeting"
def build_speechlet_response(title, output, reprompt_text, should_end_session):
"""
Build a speechlet JSON representation of the title, output text,
reprompt text & end of session
"""
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': CardTitlePrefix + " - " + title,
'content': output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
"""
Build the full response JSON from the speechlet response
"""
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
def get_welcome_response():
welcome_response= "Welcome to the L.A. Board of Supervisors Skill. You can say, give me recent motions or give me the latest agenda."
print(welcome_response);
session_attributes = {}
card_title = "Hello"
speech_output = welcome_response;
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "I'm sorry - I didn't understand. You should say give me latest motions."
should_end_session = True
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def replace_with_longform_name(name):
if name == "LASD":
longformName = "Los Angeles County Sheriff's Department"
elif name == "DMH":
longformName = "Department of Mental Health"
else:
longformName = name;
return longformName;
def get_next_motions_response(session):
print("Initial session attributes are "+str(session['attributes']));
if "result_number" not in session['attributes']:
print("Second session attributes are "+str(session['attributes']));
session['attributes']['result_number'] = 1;
print("Value is "+str(session['attributes']['result_number']));
print("Final session attributes are "+str(session['attributes']))
result_number = session['attributes']['result_number'];
host = "http://api.lacounty.gov";
url = host + "/searchAPIWeb/searchapi?type=bcsearch&database=OMD&" \
"SearchTerm=1&title=1&content=1&PStart=" + str(result_number) +"&PEnd=" + str(result_number) +"&_=1509121047612"
response = requests.get(url);
#print(response.text);
data = json.loads(response.text)
alexaResponse = "";
if(result_number == 1):
alexaResponse = "Here is the latest correspondence before the L.A. board (both upcoming and past): "
alexaResponse += str(result_number)+": From the "+replace_with_longform_name(data["results"][0]["department"])+ ", "
alexaResponse += "on "+data["results"][0]["date"]+", "
alexaResponse += data["results"][0]["title"]+"... "
alexaResponse += "You can say text me link or next item"
session['attributes']['result_number'] = result_number + 1;
session['attributes']['result_url'] = data["results"][0]["url"];
#text_url_to_number(session);
reprompt_text = "I'm sorry - I didn't understand. You should say text me link or next item"
card_title = "LA Board Latest Motions Message";
greeting_string = alexaResponse;
return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt_text, False))
def get_next_agenda_response(session):
print("Initial session attributes are "+str(session['attributes']));
host = "http://bos.lacounty.gov/Board-Meeting/Board-Agendas";
url = host;
page = parse(url)
nodes = page.xpath("//div[a[text()='View Agenda']]");
latest_agenda_node = nodes[0];
headline = latest_agenda_node.find("ul").xpath("string()").strip();
print(headline);
agenda_url = latest_agenda_node.find("a[@href]").attrib['href'];
print("http://bos.lacounty.gov"+agenda_url)
agenda_heading = headline;
#session['attributes']['result_url']
session['attributes']['result_url'] = "http://bos.lacounty.gov"+agenda_url;
card_title = "Agenda";
greeting_string = "I have a link for the "+agenda_heading+". Say text me and I'll send it to you.";
reprompt = "Say text me to receive a link to the agenda."
return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt, False))
def text_url_to_number(session, intent):
if "phone_number" not in session['attributes'] and "value" not in intent['slots']['phoneNumber']:
greeting_string = "Say your nine digit phone number, including the area code";
card_title = "What's your phone number?";
reprompt_text = "I didn't understand. Please say your nine digit mobile phone number."
return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt_text, False))
else:
number = intent['slots']['phoneNumber']['value'];
if "result_url" not in session['attributes']:
session['attributes']['result_url'] = 'http://portal.lacounty.gov/wps/portal/omd';
url = session['attributes']['result_url'];
session['attributes']['phone_number'] = number;
sns_client = boto3.client('sns')
response = sns_client.publish(
PhoneNumber='1'+str(number),
Message="Thank you for using the LA Board of Supervisors Skill. Here's your URL: "+url
)
greeting_string = "Sent text message to "+ " ".join(number);
card_title = "Sent motion URL via text message";
reprompt_text = "I didn't understand. Please say your nine digit mobile phone number."
return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt_text, True))
def on_session_started(session_started_request, session):
""" Called when the session starts """
#session.attributes['result_number'] = 1
session['attributes'] = {}
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def handle_session_end_request():
card_title = "County of LA Board of Supervisors Skill- Thanks"
speech_output = "Thank you for using the County of LA Board of Supervisors Skill. See you next time!"
should_end_session = True
return build_response({}, build_speechlet_response(card_title, speech_output, None, should_end_session));
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they want """
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "GetLatestAgendaIntent":
return get_next_agenda_response(session)
elif intent_name == "GetLatestMotionsIntent":
return get_next_motions_response(session)
elif intent_name == "GetNextMotionIntent":
return get_next_motions_response(session)
elif intent_name == "SetPhoneNumberIntent":
return text_url_to_number(session, intent);
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def lambda_handler(event, context):
print("Test!")
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return handle_session_end_request()
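
# --- Illustrative local test sketch (not part of the original handler); the
# --- applicationId/sessionId/requestId values are made-up placeholders.
# if __name__ == '__main__':
#     test_event = {
#         'session': {'new': True, 'sessionId': 'SessionId.test',
#                     'application': {'applicationId': 'amzn1.ask.skill.test'},
#                     'attributes': {}},
#         'request': {'type': 'LaunchRequest', 'requestId': 'EdwRequestId.test'}
#     }
#     print(lambda_handler(test_event, None))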
|
normal
|
{
"blob_id": "237277e132c8223c6048be9b754516635ab720e2",
"index": 8964,
"step-1": "<mask token>\n\n\ndef build_response(session_attributes, speechlet_response):\n \"\"\"\n Build the full response JSON from the speechlet response\n \"\"\"\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n welcome_response = (\n 'Welcome to the L.A. Board of Supervisors Skill. You can say, give me recent motions or give me the latest agenda.'\n )\n print(welcome_response)\n session_attributes = {}\n card_title = 'Hello'\n speech_output = welcome_response\n reprompt_text = (\n \"I'm sorry - I didn't understand. You should say give me latest motions.\"\n )\n should_end_session = True\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\n<mask token>\n\n\ndef get_next_motions_response(session):\n print('Initial session attributes are ' + str(session['attributes']))\n if 'result_number' not in session['attributes']:\n print('Second session attributes are ' + str(session['attributes']))\n session['attributes']['result_number'] = 1\n print('Value is ' + str(session['attributes']['result_number']))\n print('Final session attributes are ' + str(session['attributes']))\n result_number = session['attributes']['result_number']\n host = 'http://api.lacounty.gov'\n url = (host +\n '/searchAPIWeb/searchapi?type=bcsearch&database=OMD&SearchTerm=1&title=1&content=1&PStart='\n + str(result_number) + '&PEnd=' + str(result_number) +\n '&_=1509121047612')\n response = requests.get(url)\n data = json.loads(response.text)\n alexaResponse = ''\n if result_number == 1:\n alexaResponse = (\n 'Here is the latest correspondence before the L.A. board (both upcoming and past): '\n )\n alexaResponse += str(result_number\n ) + ': From the ' + replace_with_longform_name(data['results'][0][\n 'department']) + ', '\n alexaResponse += 'on ' + data['results'][0]['date'] + ', '\n alexaResponse += data['results'][0]['title'] + '... '\n alexaResponse += 'You can say text me link or next item'\n session['attributes']['result_number'] = result_number + 1\n session['attributes']['result_url'] = data['results'][0]['url']\n reprompt_text = (\n \"I'm sorry - I didn't understand. You should say text me link or next item\"\n )\n card_title = 'LA Board Latest Motions Message'\n greeting_string = alexaResponse\n return build_response(session['attributes'], build_speechlet_response(\n card_title, greeting_string, reprompt_text, False))\n\n\n<mask token>\n\n\ndef text_url_to_number(session, intent):\n if 'phone_number' not in session['attributes'] and 'value' not in intent[\n 'slots']['phoneNumber']:\n greeting_string = (\n 'Say your nine digit phone number, including the area code')\n card_title = \"What's your phone number?\"\n reprompt_text = (\n \"I didn't understand. Please say your nine digit mobile phone number.\"\n )\n return build_response(session['attributes'],\n build_speechlet_response(card_title, greeting_string,\n reprompt_text, False))\n else:\n number = intent['slots']['phoneNumber']['value']\n if 'result_url' not in session['attributes']:\n session['attributes']['result_url'\n ] = 'http://portal.lacounty.gov/wps/portal/omd'\n url = session['attributes']['result_url']\n session['attributes']['phone_number'] = number\n sns_client = boto3.client('sns')\n response = sns_client.publish(PhoneNumber='1' + str(number),\n Message=\n \"Thank you for using the LA Board of Supervisors Skill. 
Here's your URL: \"\n + url)\n greeting_string = 'Sent text message to ' + ' '.join(number)\n card_title = 'Sent motion URL via text message'\n reprompt_text = (\n \"I didn't understand. Please say your nine digit mobile phone number.\"\n )\n return build_response(session['attributes'],\n build_speechlet_response(card_title, greeting_string,\n reprompt_text, True))\n\n\n<mask token>\n\n\ndef handle_session_end_request():\n card_title = 'County of LA Board of Supervisors Skill- Thanks'\n speech_output = (\n 'Thank you for using the County of LA Board of Supervisors Skill. See you next time!'\n )\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they want \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if intent_name == 'GetLatestAgendaIntent':\n return get_next_agenda_response(session)\n elif intent_name == 'GetLatestMotionsIntent':\n return get_next_motions_response(session)\n elif intent_name == 'GetNextMotionIntent':\n return get_next_motions_response(session)\n elif intent_name == 'SetPhoneNumberIntent':\n return text_url_to_number(session, intent)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef lambda_handler(event, context):\n print('Test!')\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return handle_session_end_request()\n",
"step-2": "<mask token>\n\n\ndef build_response(session_attributes, speechlet_response):\n \"\"\"\n Build the full response JSON from the speechlet response\n \"\"\"\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n welcome_response = (\n 'Welcome to the L.A. Board of Supervisors Skill. You can say, give me recent motions or give me the latest agenda.'\n )\n print(welcome_response)\n session_attributes = {}\n card_title = 'Hello'\n speech_output = welcome_response\n reprompt_text = (\n \"I'm sorry - I didn't understand. You should say give me latest motions.\"\n )\n should_end_session = True\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef replace_with_longform_name(name):\n if name == 'LASD':\n longformName = \"Los Angeles County Sheriff's Department\"\n elif name == 'DMH':\n longformName = 'Department of Mental Health'\n else:\n longformName = name\n return longformName\n\n\ndef get_next_motions_response(session):\n print('Initial session attributes are ' + str(session['attributes']))\n if 'result_number' not in session['attributes']:\n print('Second session attributes are ' + str(session['attributes']))\n session['attributes']['result_number'] = 1\n print('Value is ' + str(session['attributes']['result_number']))\n print('Final session attributes are ' + str(session['attributes']))\n result_number = session['attributes']['result_number']\n host = 'http://api.lacounty.gov'\n url = (host +\n '/searchAPIWeb/searchapi?type=bcsearch&database=OMD&SearchTerm=1&title=1&content=1&PStart='\n + str(result_number) + '&PEnd=' + str(result_number) +\n '&_=1509121047612')\n response = requests.get(url)\n data = json.loads(response.text)\n alexaResponse = ''\n if result_number == 1:\n alexaResponse = (\n 'Here is the latest correspondence before the L.A. board (both upcoming and past): '\n )\n alexaResponse += str(result_number\n ) + ': From the ' + replace_with_longform_name(data['results'][0][\n 'department']) + ', '\n alexaResponse += 'on ' + data['results'][0]['date'] + ', '\n alexaResponse += data['results'][0]['title'] + '... '\n alexaResponse += 'You can say text me link or next item'\n session['attributes']['result_number'] = result_number + 1\n session['attributes']['result_url'] = data['results'][0]['url']\n reprompt_text = (\n \"I'm sorry - I didn't understand. You should say text me link or next item\"\n )\n card_title = 'LA Board Latest Motions Message'\n greeting_string = alexaResponse\n return build_response(session['attributes'], build_speechlet_response(\n card_title, greeting_string, reprompt_text, False))\n\n\ndef get_next_agenda_response(session):\n print('Initial session attributes are ' + str(session['attributes']))\n host = 'http://bos.lacounty.gov/Board-Meeting/Board-Agendas'\n url = host\n page = parse(url)\n nodes = page.xpath(\"//div[a[text()='View Agenda']]\")\n latest_agenda_node = nodes[0]\n headline = latest_agenda_node.find('ul').xpath('string()').strip()\n print(headline)\n agenda_url = latest_agenda_node.find('a[@href]').attrib['href']\n print('http://bos.lacounty.gov' + agenda_url)\n agenda_heading = headline\n session['attributes']['result_url'\n ] = 'http://bos.lacounty.gov' + agenda_url\n card_title = 'Agenda'\n greeting_string = ('I have a link for the ' + agenda_heading +\n \". 
Say text me and I'll send it to you.\")\n reprompt = 'Say text me to receive a link to the agenda.'\n return build_response(session['attributes'], build_speechlet_response(\n card_title, greeting_string, reprompt, False))\n\n\ndef text_url_to_number(session, intent):\n if 'phone_number' not in session['attributes'] and 'value' not in intent[\n 'slots']['phoneNumber']:\n greeting_string = (\n 'Say your nine digit phone number, including the area code')\n card_title = \"What's your phone number?\"\n reprompt_text = (\n \"I didn't understand. Please say your nine digit mobile phone number.\"\n )\n return build_response(session['attributes'],\n build_speechlet_response(card_title, greeting_string,\n reprompt_text, False))\n else:\n number = intent['slots']['phoneNumber']['value']\n if 'result_url' not in session['attributes']:\n session['attributes']['result_url'\n ] = 'http://portal.lacounty.gov/wps/portal/omd'\n url = session['attributes']['result_url']\n session['attributes']['phone_number'] = number\n sns_client = boto3.client('sns')\n response = sns_client.publish(PhoneNumber='1' + str(number),\n Message=\n \"Thank you for using the LA Board of Supervisors Skill. Here's your URL: \"\n + url)\n greeting_string = 'Sent text message to ' + ' '.join(number)\n card_title = 'Sent motion URL via text message'\n reprompt_text = (\n \"I didn't understand. Please say your nine digit mobile phone number.\"\n )\n return build_response(session['attributes'],\n build_speechlet_response(card_title, greeting_string,\n reprompt_text, True))\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n session['attributes'] = {}\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef handle_session_end_request():\n card_title = 'County of LA Board of Supervisors Skill- Thanks'\n speech_output = (\n 'Thank you for using the County of LA Board of Supervisors Skill. 
See you next time!'\n )\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they want \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if intent_name == 'GetLatestAgendaIntent':\n return get_next_agenda_response(session)\n elif intent_name == 'GetLatestMotionsIntent':\n return get_next_motions_response(session)\n elif intent_name == 'GetNextMotionIntent':\n return get_next_motions_response(session)\n elif intent_name == 'SetPhoneNumberIntent':\n return text_url_to_number(session, intent)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef lambda_handler(event, context):\n print('Test!')\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return handle_session_end_request()\n",
"step-3": "<mask token>\nCardTitlePrefix = 'Greeting'\n\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n \"\"\"\n Build a speechlet JSON representation of the title, output text, \n reprompt text & end of session\n \"\"\"\n return {'outputSpeech': {'type': 'PlainText', 'text': output}, 'card':\n {'type': 'Simple', 'title': CardTitlePrefix + ' - ' + title,\n 'content': output}, 'reprompt': {'outputSpeech': {'type':\n 'PlainText', 'text': reprompt_text}}, 'shouldEndSession':\n should_end_session}\n\n\ndef build_response(session_attributes, speechlet_response):\n \"\"\"\n Build the full response JSON from the speechlet response\n \"\"\"\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n welcome_response = (\n 'Welcome to the L.A. Board of Supervisors Skill. You can say, give me recent motions or give me the latest agenda.'\n )\n print(welcome_response)\n session_attributes = {}\n card_title = 'Hello'\n speech_output = welcome_response\n reprompt_text = (\n \"I'm sorry - I didn't understand. You should say give me latest motions.\"\n )\n should_end_session = True\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef replace_with_longform_name(name):\n if name == 'LASD':\n longformName = \"Los Angeles County Sheriff's Department\"\n elif name == 'DMH':\n longformName = 'Department of Mental Health'\n else:\n longformName = name\n return longformName\n\n\ndef get_next_motions_response(session):\n print('Initial session attributes are ' + str(session['attributes']))\n if 'result_number' not in session['attributes']:\n print('Second session attributes are ' + str(session['attributes']))\n session['attributes']['result_number'] = 1\n print('Value is ' + str(session['attributes']['result_number']))\n print('Final session attributes are ' + str(session['attributes']))\n result_number = session['attributes']['result_number']\n host = 'http://api.lacounty.gov'\n url = (host +\n '/searchAPIWeb/searchapi?type=bcsearch&database=OMD&SearchTerm=1&title=1&content=1&PStart='\n + str(result_number) + '&PEnd=' + str(result_number) +\n '&_=1509121047612')\n response = requests.get(url)\n data = json.loads(response.text)\n alexaResponse = ''\n if result_number == 1:\n alexaResponse = (\n 'Here is the latest correspondence before the L.A. board (both upcoming and past): '\n )\n alexaResponse += str(result_number\n ) + ': From the ' + replace_with_longform_name(data['results'][0][\n 'department']) + ', '\n alexaResponse += 'on ' + data['results'][0]['date'] + ', '\n alexaResponse += data['results'][0]['title'] + '... '\n alexaResponse += 'You can say text me link or next item'\n session['attributes']['result_number'] = result_number + 1\n session['attributes']['result_url'] = data['results'][0]['url']\n reprompt_text = (\n \"I'm sorry - I didn't understand. 
You should say text me link or next item\"\n )\n card_title = 'LA Board Latest Motions Message'\n greeting_string = alexaResponse\n return build_response(session['attributes'], build_speechlet_response(\n card_title, greeting_string, reprompt_text, False))\n\n\ndef get_next_agenda_response(session):\n print('Initial session attributes are ' + str(session['attributes']))\n host = 'http://bos.lacounty.gov/Board-Meeting/Board-Agendas'\n url = host\n page = parse(url)\n nodes = page.xpath(\"//div[a[text()='View Agenda']]\")\n latest_agenda_node = nodes[0]\n headline = latest_agenda_node.find('ul').xpath('string()').strip()\n print(headline)\n agenda_url = latest_agenda_node.find('a[@href]').attrib['href']\n print('http://bos.lacounty.gov' + agenda_url)\n agenda_heading = headline\n session['attributes']['result_url'\n ] = 'http://bos.lacounty.gov' + agenda_url\n card_title = 'Agenda'\n greeting_string = ('I have a link for the ' + agenda_heading +\n \". Say text me and I'll send it to you.\")\n reprompt = 'Say text me to receive a link to the agenda.'\n return build_response(session['attributes'], build_speechlet_response(\n card_title, greeting_string, reprompt, False))\n\n\ndef text_url_to_number(session, intent):\n if 'phone_number' not in session['attributes'] and 'value' not in intent[\n 'slots']['phoneNumber']:\n greeting_string = (\n 'Say your nine digit phone number, including the area code')\n card_title = \"What's your phone number?\"\n reprompt_text = (\n \"I didn't understand. Please say your nine digit mobile phone number.\"\n )\n return build_response(session['attributes'],\n build_speechlet_response(card_title, greeting_string,\n reprompt_text, False))\n else:\n number = intent['slots']['phoneNumber']['value']\n if 'result_url' not in session['attributes']:\n session['attributes']['result_url'\n ] = 'http://portal.lacounty.gov/wps/portal/omd'\n url = session['attributes']['result_url']\n session['attributes']['phone_number'] = number\n sns_client = boto3.client('sns')\n response = sns_client.publish(PhoneNumber='1' + str(number),\n Message=\n \"Thank you for using the LA Board of Supervisors Skill. Here's your URL: \"\n + url)\n greeting_string = 'Sent text message to ' + ' '.join(number)\n card_title = 'Sent motion URL via text message'\n reprompt_text = (\n \"I didn't understand. Please say your nine digit mobile phone number.\"\n )\n return build_response(session['attributes'],\n build_speechlet_response(card_title, greeting_string,\n reprompt_text, True))\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n session['attributes'] = {}\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef handle_session_end_request():\n card_title = 'County of LA Board of Supervisors Skill- Thanks'\n speech_output = (\n 'Thank you for using the County of LA Board of Supervisors Skill. 
See you next time!'\n )\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they want \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if intent_name == 'GetLatestAgendaIntent':\n return get_next_agenda_response(session)\n elif intent_name == 'GetLatestMotionsIntent':\n return get_next_motions_response(session)\n elif intent_name == 'GetNextMotionIntent':\n return get_next_motions_response(session)\n elif intent_name == 'SetPhoneNumberIntent':\n return text_url_to_number(session, intent)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef lambda_handler(event, context):\n print('Test!')\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return handle_session_end_request()\n",
"step-4": "import requests\nimport json\nimport boto3\nfrom lxml.html import parse\nCardTitlePrefix = 'Greeting'\n\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n \"\"\"\n Build a speechlet JSON representation of the title, output text, \n reprompt text & end of session\n \"\"\"\n return {'outputSpeech': {'type': 'PlainText', 'text': output}, 'card':\n {'type': 'Simple', 'title': CardTitlePrefix + ' - ' + title,\n 'content': output}, 'reprompt': {'outputSpeech': {'type':\n 'PlainText', 'text': reprompt_text}}, 'shouldEndSession':\n should_end_session}\n\n\ndef build_response(session_attributes, speechlet_response):\n \"\"\"\n Build the full response JSON from the speechlet response\n \"\"\"\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n welcome_response = (\n 'Welcome to the L.A. Board of Supervisors Skill. You can say, give me recent motions or give me the latest agenda.'\n )\n print(welcome_response)\n session_attributes = {}\n card_title = 'Hello'\n speech_output = welcome_response\n reprompt_text = (\n \"I'm sorry - I didn't understand. You should say give me latest motions.\"\n )\n should_end_session = True\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef replace_with_longform_name(name):\n if name == 'LASD':\n longformName = \"Los Angeles County Sheriff's Department\"\n elif name == 'DMH':\n longformName = 'Department of Mental Health'\n else:\n longformName = name\n return longformName\n\n\ndef get_next_motions_response(session):\n print('Initial session attributes are ' + str(session['attributes']))\n if 'result_number' not in session['attributes']:\n print('Second session attributes are ' + str(session['attributes']))\n session['attributes']['result_number'] = 1\n print('Value is ' + str(session['attributes']['result_number']))\n print('Final session attributes are ' + str(session['attributes']))\n result_number = session['attributes']['result_number']\n host = 'http://api.lacounty.gov'\n url = (host +\n '/searchAPIWeb/searchapi?type=bcsearch&database=OMD&SearchTerm=1&title=1&content=1&PStart='\n + str(result_number) + '&PEnd=' + str(result_number) +\n '&_=1509121047612')\n response = requests.get(url)\n data = json.loads(response.text)\n alexaResponse = ''\n if result_number == 1:\n alexaResponse = (\n 'Here is the latest correspondence before the L.A. board (both upcoming and past): '\n )\n alexaResponse += str(result_number\n ) + ': From the ' + replace_with_longform_name(data['results'][0][\n 'department']) + ', '\n alexaResponse += 'on ' + data['results'][0]['date'] + ', '\n alexaResponse += data['results'][0]['title'] + '... '\n alexaResponse += 'You can say text me link or next item'\n session['attributes']['result_number'] = result_number + 1\n session['attributes']['result_url'] = data['results'][0]['url']\n reprompt_text = (\n \"I'm sorry - I didn't understand. 
You should say text me link or next item\"\n )\n card_title = 'LA Board Latest Motions Message'\n greeting_string = alexaResponse\n return build_response(session['attributes'], build_speechlet_response(\n card_title, greeting_string, reprompt_text, False))\n\n\ndef get_next_agenda_response(session):\n print('Initial session attributes are ' + str(session['attributes']))\n host = 'http://bos.lacounty.gov/Board-Meeting/Board-Agendas'\n url = host\n page = parse(url)\n nodes = page.xpath(\"//div[a[text()='View Agenda']]\")\n latest_agenda_node = nodes[0]\n headline = latest_agenda_node.find('ul').xpath('string()').strip()\n print(headline)\n agenda_url = latest_agenda_node.find('a[@href]').attrib['href']\n print('http://bos.lacounty.gov' + agenda_url)\n agenda_heading = headline\n session['attributes']['result_url'\n ] = 'http://bos.lacounty.gov' + agenda_url\n card_title = 'Agenda'\n greeting_string = ('I have a link for the ' + agenda_heading +\n \". Say text me and I'll send it to you.\")\n reprompt = 'Say text me to receive a link to the agenda.'\n return build_response(session['attributes'], build_speechlet_response(\n card_title, greeting_string, reprompt, False))\n\n\ndef text_url_to_number(session, intent):\n if 'phone_number' not in session['attributes'] and 'value' not in intent[\n 'slots']['phoneNumber']:\n greeting_string = (\n 'Say your nine digit phone number, including the area code')\n card_title = \"What's your phone number?\"\n reprompt_text = (\n \"I didn't understand. Please say your nine digit mobile phone number.\"\n )\n return build_response(session['attributes'],\n build_speechlet_response(card_title, greeting_string,\n reprompt_text, False))\n else:\n number = intent['slots']['phoneNumber']['value']\n if 'result_url' not in session['attributes']:\n session['attributes']['result_url'\n ] = 'http://portal.lacounty.gov/wps/portal/omd'\n url = session['attributes']['result_url']\n session['attributes']['phone_number'] = number\n sns_client = boto3.client('sns')\n response = sns_client.publish(PhoneNumber='1' + str(number),\n Message=\n \"Thank you for using the LA Board of Supervisors Skill. Here's your URL: \"\n + url)\n greeting_string = 'Sent text message to ' + ' '.join(number)\n card_title = 'Sent motion URL via text message'\n reprompt_text = (\n \"I didn't understand. Please say your nine digit mobile phone number.\"\n )\n return build_response(session['attributes'],\n build_speechlet_response(card_title, greeting_string,\n reprompt_text, True))\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n session['attributes'] = {}\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef handle_session_end_request():\n card_title = 'County of LA Board of Supervisors Skill- Thanks'\n speech_output = (\n 'Thank you for using the County of LA Board of Supervisors Skill. 
See you next time!'\n )\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they want \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if intent_name == 'GetLatestAgendaIntent':\n return get_next_agenda_response(session)\n elif intent_name == 'GetLatestMotionsIntent':\n return get_next_motions_response(session)\n elif intent_name == 'GetNextMotionIntent':\n return get_next_motions_response(session)\n elif intent_name == 'SetPhoneNumberIntent':\n return text_url_to_number(session, intent)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef lambda_handler(event, context):\n print('Test!')\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return handle_session_end_request()\n",
"step-5": "# -*- coding: utf-8 -*-\nimport requests\nimport json\nimport boto3\nfrom lxml.html import parse\n\nCardTitlePrefix = \"Greeting\"\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n \"\"\"\n Build a speechlet JSON representation of the title, output text, \n reprompt text & end of session\n \"\"\"\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': CardTitlePrefix + \" - \" + title,\n 'content': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n \ndef build_response(session_attributes, speechlet_response):\n \"\"\"\n Build the full response JSON from the speechlet response\n \"\"\"\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\ndef get_welcome_response():\n welcome_response= \"Welcome to the L.A. Board of Supervisors Skill. You can say, give me recent motions or give me the latest agenda.\"\n print(welcome_response);\n\n session_attributes = {}\n card_title = \"Hello\"\n speech_output = welcome_response;\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"I'm sorry - I didn't understand. You should say give me latest motions.\"\n should_end_session = True\n return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))\n\ndef replace_with_longform_name(name):\n\n if name == \"LASD\":\n longformName = \"Los Angeles County Sheriff's Department\"\n elif name == \"DMH\":\n longformName = \"Department of Mental Health\"\n else:\n longformName = name;\n\n return longformName;\n\n\ndef get_next_motions_response(session):\n \n print(\"Initial session attributes are \"+str(session['attributes']));\n\n if \"result_number\" not in session['attributes']:\n print(\"Second session attributes are \"+str(session['attributes']));\n session['attributes']['result_number'] = 1;\n print(\"Value is \"+str(session['attributes']['result_number']));\n print(\"Final session attributes are \"+str(session['attributes']))\n\n result_number = session['attributes']['result_number'];\n host = \"http://api.lacounty.gov\";\n\n url = host + \"/searchAPIWeb/searchapi?type=bcsearch&database=OMD&\" \\\n \"SearchTerm=1&title=1&content=1&PStart=\" + str(result_number) +\"&PEnd=\" + str(result_number) +\"&_=1509121047612\"\n\n response = requests.get(url);\n #print(response.text);\n data = json.loads(response.text)\n\n alexaResponse = \"\";\n if(result_number == 1):\n alexaResponse = \"Here is the latest correspondence before the L.A. board (both upcoming and past): \"\n\n alexaResponse += str(result_number)+\": From the \"+replace_with_longform_name(data[\"results\"][0][\"department\"])+ \", \"\n alexaResponse += \"on \"+data[\"results\"][0][\"date\"]+\", \"\n alexaResponse += data[\"results\"][0][\"title\"]+\"... \"\n \n alexaResponse += \"You can say text me link or next item\"\n \n session['attributes']['result_number'] = result_number + 1;\n session['attributes']['result_url'] = data[\"results\"][0][\"url\"];\n \n #text_url_to_number(session);\n reprompt_text = \"I'm sorry - I didn't understand. 
You should say text me link or next item\"\n \n card_title = \"LA Board Latest Motions Message\";\n greeting_string = alexaResponse;\n return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt_text, False))\n \ndef get_next_agenda_response(session):\n \n print(\"Initial session attributes are \"+str(session['attributes']));\n \n host = \"http://bos.lacounty.gov/Board-Meeting/Board-Agendas\";\n url = host;\n page = parse(url)\n nodes = page.xpath(\"//div[a[text()='View Agenda']]\");\n latest_agenda_node = nodes[0];\n headline = latest_agenda_node.find(\"ul\").xpath(\"string()\").strip();\n \n print(headline);\n agenda_url = latest_agenda_node.find(\"a[@href]\").attrib['href'];\n print(\"http://bos.lacounty.gov\"+agenda_url)\n \n agenda_heading = headline;\n #session['attributes']['result_url']\n session['attributes']['result_url'] = \"http://bos.lacounty.gov\"+agenda_url;\n card_title = \"Agenda\";\n greeting_string = \"I have a link for the \"+agenda_heading+\". Say text me and I'll send it to you.\";\n reprompt = \"Say text me to receive a link to the agenda.\"\n\n return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt, False))\n \n \ndef text_url_to_number(session, intent):\n \n if \"phone_number\" not in session['attributes'] and \"value\" not in intent['slots']['phoneNumber']:\n greeting_string = \"Say your nine digit phone number, including the area code\";\n card_title = \"What's your phone number?\";\n reprompt_text = \"I didn't understand. Please say your nine digit mobile phone number.\"\n return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt_text, False))\n else:\n number = intent['slots']['phoneNumber']['value'];\n if \"result_url\" not in session['attributes']:\n session['attributes']['result_url'] = 'http://portal.lacounty.gov/wps/portal/omd';\n \n url = session['attributes']['result_url'];\n session['attributes']['phone_number'] = number;\n \n sns_client = boto3.client('sns')\n response = sns_client.publish(\n PhoneNumber='1'+str(number), \n Message=\"Thank you for using the LA Board of Supervisors Skill. Here's your URL: \"+url\n )\n greeting_string = \"Sent text message to \"+ \" \".join(number);\n card_title = \"Sent motion URL via text message\";\n reprompt_text = \"I didn't understand. Please say your nine digit mobile phone number.\"\n return build_response(session['attributes'], build_speechlet_response(card_title, greeting_string, reprompt_text, True))\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n \n #session.attributes['result_number'] = 1\n session['attributes'] = {}\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\ndef handle_session_end_request():\n card_title = \"County of LA Board of Supervisors Skill- Thanks\"\n speech_output = \"Thank you for using the County of LA Board of Supervisors Skill. 
See you next time!\"\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title, speech_output, None, should_end_session));\n \ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they want \"\"\"\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()\n \ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n \n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # Dispatch to your skill's intent handlers\n if intent_name == \"GetLatestAgendaIntent\":\n return get_next_agenda_response(session)\n elif intent_name == \"GetLatestMotionsIntent\":\n return get_next_motions_response(session)\n elif intent_name == \"GetNextMotionIntent\":\n return get_next_motions_response(session)\n elif intent_name == \"SetPhoneNumberIntent\":\n return text_url_to_number(session, intent);\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")\n\ndef lambda_handler(event, context):\n print(\"Test!\")\n \n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n \n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return handle_session_end_request()\n",
"step-ids": [
8,
11,
13,
14,
15
]
}
|
[
8,
11,
13,
14,
15
] |
from sand_game.Environment import Environment
from sand_game.behaviours.Behaviour import Behaviour
class EphemeralBehaviour(Behaviour):
"""Removes the particle after one frame
"""
def behave(env: Environment, loc: tuple[int, int]) ->tuple[int, int]:
env.set(loc[0], loc[1], None)
|
normal
|
{
"blob_id": "2728c3ab26fbdbaac9c47054eafe1c114341f6f2",
"index": 7736,
"step-1": "<mask token>\n\n\nclass EphemeralBehaviour(Behaviour):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass EphemeralBehaviour(Behaviour):\n <mask token>\n\n def behave(env: Environment, loc: tuple[int, int]) ->tuple[int, int]:\n env.set(loc[0], loc[1], None)\n",
"step-3": "<mask token>\n\n\nclass EphemeralBehaviour(Behaviour):\n \"\"\"Removes the particle after one frame\n \"\"\"\n\n def behave(env: Environment, loc: tuple[int, int]) ->tuple[int, int]:\n env.set(loc[0], loc[1], None)\n",
"step-4": "from sand_game.Environment import Environment\nfrom sand_game.behaviours.Behaviour import Behaviour\n\n\nclass EphemeralBehaviour(Behaviour):\n \"\"\"Removes the particle after one frame\n \"\"\"\n\n def behave(env: Environment, loc: tuple[int, int]) ->tuple[int, int]:\n env.set(loc[0], loc[1], None)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def unescape(text):
    return text.replace('&#39;', "'").replace('&lt;', '<').replace('&gt;', '>')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def unescape(text):
    return text.replace('&#39;', "'").replace('&lt;', '<').replace('&gt;', '>')
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
@client.event
async def on_message(message):
if message.content.startswith(translate_command):
lang = message.content[len(translate_command):message.content.find(' ')
]
ttt = message.content[len(translate_command) + len(lang) + 1:]
s = ttt.find(id_start)
while s != -1:
e = ttt.find('>', s)
ttt = ttt[:s] + client.get_user(int(ttt[s + len(id_start):e])
).name + ttt[e:]
s = ttt.find(id_start)
body = {'q': ttt, 'langpair': lang + '|en' if len(lang) == 2 else
lang[:2] + '|' + lang[2:], 'de': CONTACT_EMAIL}
r = requests.get('https://api.mymemory.translated.net/get', params=body
)
message_sent = await message.channel.send(unescape(r.json()[
'responseData']['translatedText']))
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '❌'
try:
reaction, user = await client.wait_for('reaction_add', timeout=
600.0, check=check)
except asyncio.TimeoutError:
pass
else:
await message_sent.delete()
client.run(TOKEN)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TOKEN = 'TOKEN'
CONTACT_EMAIL = None
translate_command = '$t'
id_start = '<@!'
client = discord.Client()
def unescape(text):
    return text.replace('&#39;', "'").replace('&lt;', '<').replace('&gt;', '>')
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
@client.event
async def on_message(message):
if message.content.startswith(translate_command):
lang = message.content[len(translate_command):message.content.find(' ')
]
ttt = message.content[len(translate_command) + len(lang) + 1:]
s = ttt.find(id_start)
while s != -1:
e = ttt.find('>', s)
ttt = ttt[:s] + client.get_user(int(ttt[s + len(id_start):e])
).name + ttt[e:]
s = ttt.find(id_start)
body = {'q': ttt, 'langpair': lang + '|en' if len(lang) == 2 else
lang[:2] + '|' + lang[2:], 'de': CONTACT_EMAIL}
r = requests.get('https://api.mymemory.translated.net/get', params=body
)
message_sent = await message.channel.send(unescape(r.json()[
'responseData']['translatedText']))
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '❌'
try:
reaction, user = await client.wait_for('reaction_add', timeout=
600.0, check=check)
except asyncio.TimeoutError:
pass
else:
await message_sent.delete()
client.run(TOKEN)
<|reserved_special_token_1|>
import discord, requests
from random import choice
TOKEN = 'TOKEN'
CONTACT_EMAIL = None
translate_command = '$t'
id_start = '<@!'
client = discord.Client()
def unescape(text):
    return text.replace('&#39;', "'").replace('&lt;', '<').replace('&gt;', '>')
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
@client.event
async def on_message(message):
if message.content.startswith(translate_command):
lang = message.content[len(translate_command):message.content.find(' ')
]
ttt = message.content[len(translate_command) + len(lang) + 1:]
s = ttt.find(id_start)
while s != -1:
e = ttt.find('>', s)
ttt = ttt[:s] + client.get_user(int(ttt[s + len(id_start):e])
).name + ttt[e:]
s = ttt.find(id_start)
body = {'q': ttt, 'langpair': lang + '|en' if len(lang) == 2 else
lang[:2] + '|' + lang[2:], 'de': CONTACT_EMAIL}
r = requests.get('https://api.mymemory.translated.net/get', params=body
)
message_sent = await message.channel.send(unescape(r.json()[
'responseData']['translatedText']))
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '❌'
try:
reaction, user = await client.wait_for('reaction_add', timeout=
600.0, check=check)
except asyncio.TimeoutError:
pass
else:
await message_sent.delete()
client.run(TOKEN)
<|reserved_special_token_1|>
import discord, requests
from random import choice
TOKEN = 'TOKEN'
CONTACT_EMAIL = None #'Contact email for getting 10000 words/day instead of 1000'
translate_command = '$t'
id_start = '<@!'
client = discord.Client()
def unescape(text):
    return text.replace('&#39;', '\'').replace('&lt;','<').replace('&gt;', '>') # to improve
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
@client.event
async def on_message(message):
if message.content.startswith(translate_command):
lang = message.content[len(translate_command):message.content.find(' ')]
ttt = message.content[len(translate_command)+len(lang)+1:]
s = ttt.find(id_start)
while s != -1:
e = ttt.find('>',s)
ttt = ttt[:s]+client.get_user(int(ttt[s+len(id_start):e])).name+ttt[e:]
s = ttt.find(id_start)
body = {
'q': ttt,
'langpair': lang+'|en' if len(lang) == 2 else lang[:2]+'|'+lang[2:],
'de': CONTACT_EMAIL
}
r = requests.get('https://api.mymemory.translated.net/get', params=body)
message_sent = await message.channel.send(unescape(r.json()['responseData']['translatedText']))
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '❌'
try:
reaction, user = await client.wait_for('reaction_add', timeout=600.0, check=check)
except asyncio.TimeoutError:
pass
else:
await message_sent.delete()
client.run(TOKEN)
|
flexible
|
{
"blob_id": "1ab69874a89311b22220dda541dfe03462a98a55",
"index": 2243,
"step-1": "<mask token>\n\n\ndef unescape(text):\n return text.replace(''', \"'\").replace('<', '<').replace('>', '>')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef unescape(text):\n return text.replace(''', \"'\").replace('<', '<').replace('>', '>')\n\n\[email protected]\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\n\[email protected]\nasync def on_message(message):\n if message.content.startswith(translate_command):\n lang = message.content[len(translate_command):message.content.find(' ')\n ]\n ttt = message.content[len(translate_command) + len(lang) + 1:]\n s = ttt.find(id_start)\n while s != -1:\n e = ttt.find('>', s)\n ttt = ttt[:s] + client.get_user(int(ttt[s + len(id_start):e])\n ).name + ttt[e:]\n s = ttt.find(id_start)\n body = {'q': ttt, 'langpair': lang + '|en' if len(lang) == 2 else \n lang[:2] + '|' + lang[2:], 'de': CONTACT_EMAIL}\n r = requests.get('https://api.mymemory.translated.net/get', params=body\n )\n message_sent = await message.channel.send(unescape(r.json()[\n 'responseData']['translatedText']))\n\n def check(reaction, user):\n return user == message.author and str(reaction.emoji) == '❌'\n try:\n reaction, user = await client.wait_for('reaction_add', timeout=\n 600.0, check=check)\n except asyncio.TimeoutError:\n pass\n else:\n await message_sent.delete()\n\n\nclient.run(TOKEN)\n",
"step-3": "<mask token>\nTOKEN = 'TOKEN'\nCONTACT_EMAIL = None\ntranslate_command = '$t'\nid_start = '<@!'\nclient = discord.Client()\n\n\ndef unescape(text):\n return text.replace(''', \"'\").replace('<', '<').replace('>', '>')\n\n\[email protected]\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\n\[email protected]\nasync def on_message(message):\n if message.content.startswith(translate_command):\n lang = message.content[len(translate_command):message.content.find(' ')\n ]\n ttt = message.content[len(translate_command) + len(lang) + 1:]\n s = ttt.find(id_start)\n while s != -1:\n e = ttt.find('>', s)\n ttt = ttt[:s] + client.get_user(int(ttt[s + len(id_start):e])\n ).name + ttt[e:]\n s = ttt.find(id_start)\n body = {'q': ttt, 'langpair': lang + '|en' if len(lang) == 2 else \n lang[:2] + '|' + lang[2:], 'de': CONTACT_EMAIL}\n r = requests.get('https://api.mymemory.translated.net/get', params=body\n )\n message_sent = await message.channel.send(unescape(r.json()[\n 'responseData']['translatedText']))\n\n def check(reaction, user):\n return user == message.author and str(reaction.emoji) == '❌'\n try:\n reaction, user = await client.wait_for('reaction_add', timeout=\n 600.0, check=check)\n except asyncio.TimeoutError:\n pass\n else:\n await message_sent.delete()\n\n\nclient.run(TOKEN)\n",
"step-4": "import discord, requests\nfrom random import choice\nTOKEN = 'TOKEN'\nCONTACT_EMAIL = None\ntranslate_command = '$t'\nid_start = '<@!'\nclient = discord.Client()\n\n\ndef unescape(text):\n return text.replace(''', \"'\").replace('<', '<').replace('>', '>')\n\n\[email protected]\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\n\[email protected]\nasync def on_message(message):\n if message.content.startswith(translate_command):\n lang = message.content[len(translate_command):message.content.find(' ')\n ]\n ttt = message.content[len(translate_command) + len(lang) + 1:]\n s = ttt.find(id_start)\n while s != -1:\n e = ttt.find('>', s)\n ttt = ttt[:s] + client.get_user(int(ttt[s + len(id_start):e])\n ).name + ttt[e:]\n s = ttt.find(id_start)\n body = {'q': ttt, 'langpair': lang + '|en' if len(lang) == 2 else \n lang[:2] + '|' + lang[2:], 'de': CONTACT_EMAIL}\n r = requests.get('https://api.mymemory.translated.net/get', params=body\n )\n message_sent = await message.channel.send(unescape(r.json()[\n 'responseData']['translatedText']))\n\n def check(reaction, user):\n return user == message.author and str(reaction.emoji) == '❌'\n try:\n reaction, user = await client.wait_for('reaction_add', timeout=\n 600.0, check=check)\n except asyncio.TimeoutError:\n pass\n else:\n await message_sent.delete()\n\n\nclient.run(TOKEN)\n",
"step-5": "import discord, requests\r\nfrom random import choice\r\n\r\nTOKEN = 'TOKEN'\r\nCONTACT_EMAIL = None #'Contact email for getting 10000 words/day instead of 1000'\r\n\r\ntranslate_command = '$t'\r\nid_start = '<@!'\r\n\r\nclient = discord.Client()\r\n\r\ndef unescape(text):\r\n return text.replace(''', '\\'').replace('<','<').replace('>', '>') # to improve\r\n\r\[email protected]\r\nasync def on_ready():\r\n print(f'{client.user} has connected to Discord!')\r\n\r\[email protected]\r\nasync def on_message(message):\r\n if message.content.startswith(translate_command):\r\n lang = message.content[len(translate_command):message.content.find(' ')]\r\n ttt = message.content[len(translate_command)+len(lang)+1:]\r\n s = ttt.find(id_start)\r\n while s != -1:\r\n e = ttt.find('>',s)\r\n ttt = ttt[:s]+client.get_user(int(ttt[s+len(id_start):e])).name+ttt[e:]\r\n s = ttt.find(id_start)\r\n body = {\r\n 'q': ttt,\r\n 'langpair': lang+'|en' if len(lang) == 2 else lang[:2]+'|'+lang[2:],\r\n 'de': CONTACT_EMAIL\r\n }\r\n r = requests.get('https://api.mymemory.translated.net/get', params=body)\r\n \r\n message_sent = await message.channel.send(unescape(r.json()['responseData']['translatedText']))\r\n \r\n def check(reaction, user):\r\n return user == message.author and str(reaction.emoji) == '❌'\r\n \r\n try:\r\n reaction, user = await client.wait_for('reaction_add', timeout=600.0, check=check)\r\n except asyncio.TimeoutError:\r\n pass\r\n else:\r\n await message_sent.delete()\r\n\r\nclient.run(TOKEN)\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from IPython import embed
from selenium import webdriver
b = webdriver.Firefox()
embed()
|
normal
|
{
"blob_id": "9aa54f1259aceb052cfba74cedcfadfe68778ebd",
"index": 1020,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nembed()\n",
"step-3": "<mask token>\nb = webdriver.Firefox()\nembed()\n",
"step-4": "from IPython import embed\nfrom selenium import webdriver\nb = webdriver.Firefox()\nembed()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# this is just to test with ilp_polytope
import polytope
polytope.ilp_polytope.test2()
|
normal
|
{
"blob_id": "d2fce15636e43ca618c39c5c963bbf0c3a6a3886",
"index": 4444,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npolytope.ilp_polytope.test2()\n",
"step-3": "import polytope\npolytope.ilp_polytope.test2()\n",
"step-4": "# this is just to test with ilp_polytope\nimport polytope\n\npolytope.ilp_polytope.test2()\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def check_ok(boat, taken_positions):
boat.sort()
for i in range(len(boat)):
if boat[i] in taken_positions:
boat = [-1]
break
elif boat[i] > 99 or boat[i] < 0:
boat = [-1]
break
elif boat[i] % 10 == 9 and i < len(boat) - 1:
if boat[i + 1] % 10 == 0:
boat = [-1]
break
if i != 0:
if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:
boat = [-1]
break
return boat
def check_shot(shot, ships, hit, miss, comp, sinked_boats):
cond = 0
for i in range(len(ships)):
if shot in ships[i]:
ships[i].remove(shot)
if len(ships[i]) > 0:
hit.append(shot)
cond = 1
else:
comp.append(shot)
cond = 2
sinked_boats += 1
if cond == 0:
miss.append(shot)
return ships, hit, miss, comp, cond, sinked_boats
<|reserved_special_token_0|>
def check_empty(ships):
return all([(not elem) for elem in ships])
<|reserved_special_token_0|>
def create_ships_u(taken_positions, num_boats):
ships = []
for len_of_boat in num_boats:
ship, taken_positions = get_ship(len_of_boat, taken_positions)
ships.append(ship)
return ships, taken_positions
<|reserved_special_token_0|>
def create_ships_c(taken_positions, num_boats):
ships = []
for len_of_boat in num_boats:
boat_position = [-1]
while -1 in boat_position:
boat_start = randrange(99)
boat_direction = randrange(1, 4)
boat_position = create_boat(len_of_boat, boat_start,
boat_direction, taken_positions)
ships.append(boat_position)
taken_positions += boat_position
return ships, taken_positions
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions):
boat = []
if boat_direction == 1:
for i in range(len_of_boat):
boat.append(boat_start - i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 2:
for i in range(len_of_boat):
boat.append(boat_start + i)
boat = check_ok(boat, taken_positions)
elif boat_direction == 3:
for i in range(len_of_boat):
boat.append(boat_start + i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 4:
for i in range(len_of_boat):
boat.append(boat_start - i)
boat = check_ok(boat, taken_positions)
return boat
def get_shot_comp(guesses, tactics):
while True:
try:
if len(tactics) > 0:
shot = tactics[0]
else:
shot = randrange(99)
if shot not in guesses:
guesses.append(shot)
break
except:
print('incorrect - please enter integer only')
return shot, guesses
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_ok(boat, taken_positions):
boat.sort()
for i in range(len(boat)):
if boat[i] in taken_positions:
boat = [-1]
break
elif boat[i] > 99 or boat[i] < 0:
boat = [-1]
break
elif boat[i] % 10 == 9 and i < len(boat) - 1:
if boat[i + 1] % 10 == 0:
boat = [-1]
break
if i != 0:
if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:
boat = [-1]
break
return boat
def check_shot(shot, ships, hit, miss, comp, sinked_boats):
cond = 0
for i in range(len(ships)):
if shot in ships[i]:
ships[i].remove(shot)
if len(ships[i]) > 0:
hit.append(shot)
cond = 1
else:
comp.append(shot)
cond = 2
sinked_boats += 1
if cond == 0:
miss.append(shot)
return ships, hit, miss, comp, cond, sinked_boats
def create_playground(hit, miss, comp):
print(' battleship')
print(' 0 1 2 3 4 5 6 7 8 9')
block = 0
for i in range(10):
row = ''
for j in range(10):
character = '_ '
if block in miss:
character = 'x '
elif block in hit:
character = 'o '
elif block in comp:
character = 'Q '
row += character
block += 1
print(i, ' ', row)
print('')
def check_empty(ships):
return all([(not elem) for elem in ships])
<|reserved_special_token_0|>
def create_ships_u(taken_positions, num_boats):
ships = []
for len_of_boat in num_boats:
ship, taken_positions = get_ship(len_of_boat, taken_positions)
ships.append(ship)
return ships, taken_positions
<|reserved_special_token_0|>
def get_shot_user(guesses):
while True:
try:
shot = int(input('Enter your shot: '))
if shot < 0 or shot > 99:
shot = int(input('Enter your shot:'))
elif shot in guesses:
print('already guessed - please enter again')
else:
return shot
except:
print('incorrect - please enter integer only')
<|reserved_special_token_0|>
def create_ships_c(taken_positions, num_boats):
ships = []
for len_of_boat in num_boats:
boat_position = [-1]
while -1 in boat_position:
boat_start = randrange(99)
boat_direction = randrange(1, 4)
boat_position = create_boat(len_of_boat, boat_start,
boat_direction, taken_positions)
ships.append(boat_position)
taken_positions += boat_position
return ships, taken_positions
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions):
boat = []
if boat_direction == 1:
for i in range(len_of_boat):
boat.append(boat_start - i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 2:
for i in range(len_of_boat):
boat.append(boat_start + i)
boat = check_ok(boat, taken_positions)
elif boat_direction == 3:
for i in range(len_of_boat):
boat.append(boat_start + i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 4:
for i in range(len_of_boat):
boat.append(boat_start - i)
boat = check_ok(boat, taken_positions)
return boat
def get_shot_comp(guesses, tactics):
while True:
try:
if len(tactics) > 0:
shot = tactics[0]
else:
shot = randrange(99)
if shot not in guesses:
guesses.append(shot)
break
except:
print('incorrect - please enter integer only')
return shot, guesses
def calculate_tactics(shot, tactics, guesses, hit):
temp = []
if len(tactics) < 1:
temp = [shot - 1, shot + 1, shot - 10, shot + 10]
elif shot - 1 in hit:
temp = [shot + 1]
for num in [2, 3, 4, 5, 6, 7, 8]:
if shot - num not in hit:
temp.append(shot - num)
break
elif shot + 1 in hit:
temp = [shot - 1]
for num in [2, 3, 4, 5, 6, 7, 8]:
if shot + num not in hit:
temp.append(shot + num)
break
elif shot - 10 in hit:
temp = [shot + 10]
for num in [20, 30, 40, 50, 60, 70, 80]:
if shot - num not in hit:
temp.append(shot - num)
break
elif shot + 10 in hit:
temp = [shot - 10]
for num in [20, 30, 40, 50, 60, 70, 80]:
if shot + num not in hit:
temp.append(shot + num)
break
candidate = []
for i in range(len(temp)):
if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:
candidate.append(temp[i])
random.shuffle(candidate)
return candidate
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_ok(boat, taken_positions):
boat.sort()
for i in range(len(boat)):
if boat[i] in taken_positions:
boat = [-1]
break
elif boat[i] > 99 or boat[i] < 0:
boat = [-1]
break
elif boat[i] % 10 == 9 and i < len(boat) - 1:
if boat[i + 1] % 10 == 0:
boat = [-1]
break
if i != 0:
if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:
boat = [-1]
break
return boat
def check_shot(shot, ships, hit, miss, comp, sinked_boats):
cond = 0
for i in range(len(ships)):
if shot in ships[i]:
ships[i].remove(shot)
if len(ships[i]) > 0:
hit.append(shot)
cond = 1
else:
comp.append(shot)
cond = 2
sinked_boats += 1
if cond == 0:
miss.append(shot)
return ships, hit, miss, comp, cond, sinked_boats
def create_playground(hit, miss, comp):
print(' battleship')
print(' 0 1 2 3 4 5 6 7 8 9')
block = 0
for i in range(10):
row = ''
for j in range(10):
character = '_ '
if block in miss:
character = 'x '
elif block in hit:
character = 'o '
elif block in comp:
character = 'Q '
row += character
block += 1
print(i, ' ', row)
print('')
def check_empty(ships):
return all([(not elem) for elem in ships])
<|reserved_special_token_0|>
def create_ships_u(taken_positions, num_boats):
ships = []
for len_of_boat in num_boats:
ship, taken_positions = get_ship(len_of_boat, taken_positions)
ships.append(ship)
return ships, taken_positions
<|reserved_special_token_0|>
def get_ship(len_of_boat, taken_positions):
while True:
ship = []
print('enter your ship of length', len_of_boat)
for i in range(len_of_boat):
while True:
try:
boat_num = input('please enter a number: ')
ship.append(int(boat_num))
except ValueError:
print('wrong type of input')
continue
else:
break
ship = check_ok(ship, taken_positions)
if -1 not in ship:
taken_positions += ship
break
else:
print('invalid number - please enter again')
return ship, taken_positions
def get_shot_user(guesses):
while True:
try:
shot = int(input('Enter your shot: '))
if shot < 0 or shot > 99:
shot = int(input('Enter your shot:'))
elif shot in guesses:
print('already guessed - please enter again')
else:
return shot
except:
print('incorrect - please enter integer only')
<|reserved_special_token_0|>
def create_ships_c(taken_positions, num_boats):
ships = []
for len_of_boat in num_boats:
boat_position = [-1]
while -1 in boat_position:
boat_start = randrange(99)
boat_direction = randrange(1, 4)
boat_position = create_boat(len_of_boat, boat_start,
boat_direction, taken_positions)
ships.append(boat_position)
taken_positions += boat_position
return ships, taken_positions
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions):
boat = []
if boat_direction == 1:
for i in range(len_of_boat):
boat.append(boat_start - i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 2:
for i in range(len_of_boat):
boat.append(boat_start + i)
boat = check_ok(boat, taken_positions)
elif boat_direction == 3:
for i in range(len_of_boat):
boat.append(boat_start + i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 4:
for i in range(len_of_boat):
boat.append(boat_start - i)
boat = check_ok(boat, taken_positions)
return boat
def get_shot_comp(guesses, tactics):
while True:
try:
if len(tactics) > 0:
shot = tactics[0]
else:
shot = randrange(99)
if shot not in guesses:
guesses.append(shot)
break
except:
print('incorrect - please enter integer only')
return shot, guesses
def calculate_tactics(shot, tactics, guesses, hit):
temp = []
if len(tactics) < 1:
temp = [shot - 1, shot + 1, shot - 10, shot + 10]
elif shot - 1 in hit:
temp = [shot + 1]
for num in [2, 3, 4, 5, 6, 7, 8]:
if shot - num not in hit:
temp.append(shot - num)
break
elif shot + 1 in hit:
temp = [shot - 1]
for num in [2, 3, 4, 5, 6, 7, 8]:
if shot + num not in hit:
temp.append(shot + num)
break
elif shot - 10 in hit:
temp = [shot + 10]
for num in [20, 30, 40, 50, 60, 70, 80]:
if shot - num not in hit:
temp.append(shot - num)
break
elif shot + 10 in hit:
temp = [shot - 10]
for num in [20, 30, 40, 50, 60, 70, 80]:
if shot + num not in hit:
temp.append(shot + num)
break
candidate = []
for i in range(len(temp)):
if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:
candidate.append(temp[i])
random.shuffle(candidate)
return candidate
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_ok(boat, taken_positions):
boat.sort()
for i in range(len(boat)):
if boat[i] in taken_positions:
boat = [-1]
break
elif boat[i] > 99 or boat[i] < 0:
boat = [-1]
break
elif boat[i] % 10 == 9 and i < len(boat) - 1:
if boat[i + 1] % 10 == 0:
boat = [-1]
break
if i != 0:
if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:
boat = [-1]
break
return boat
def check_shot(shot, ships, hit, miss, comp, sinked_boats):
cond = 0
for i in range(len(ships)):
if shot in ships[i]:
ships[i].remove(shot)
if len(ships[i]) > 0:
hit.append(shot)
cond = 1
else:
comp.append(shot)
cond = 2
sinked_boats += 1
if cond == 0:
miss.append(shot)
return ships, hit, miss, comp, cond, sinked_boats
def create_playground(hit, miss, comp):
print(' battleship')
print(' 0 1 2 3 4 5 6 7 8 9')
block = 0
for i in range(10):
row = ''
for j in range(10):
character = '_ '
if block in miss:
character = 'x '
elif block in hit:
character = 'o '
elif block in comp:
character = 'Q '
row += character
block += 1
print(i, ' ', row)
print('')
def check_empty(ships):
return all([(not elem) for elem in ships])
<|reserved_special_token_0|>
def create_ships_u(taken_positions, num_boats):
ships = []
for len_of_boat in num_boats:
ship, taken_positions = get_ship(len_of_boat, taken_positions)
ships.append(ship)
return ships, taken_positions
def create_playground_u(taken_positions):
print(' battleships ')
print(' 0 1 2 3 4 5 6 7 8 9')
place = 0
for x in range(10):
row = ''
for y in range(10):
ch = ' _ '
if place in taken_positions:
ch = ' o '
row = row + ch
place = place + 1
print(x, ' ', row)
def get_ship(len_of_boat, taken_positions):
while True:
ship = []
print('enter your ship of length', len_of_boat)
for i in range(len_of_boat):
while True:
try:
boat_num = input('please enter a number: ')
ship.append(int(boat_num))
except ValueError:
print('wrong type of input')
continue
else:
break
ship = check_ok(ship, taken_positions)
if -1 not in ship:
taken_positions += ship
break
else:
print('invalid number - please enter again')
return ship, taken_positions
def get_shot_user(guesses):
while True:
try:
shot = int(input('Enter your shot: '))
if shot < 0 or shot > 99:
shot = int(input('Enter your shot:'))
elif shot in guesses:
print('already guessed - please enter again')
else:
return shot
except:
print('incorrect - please enter integer only')
<|reserved_special_token_0|>
def create_ships_c(taken_positions, num_boats):
ships = []
for len_of_boat in num_boats:
boat_position = [-1]
while -1 in boat_position:
boat_start = randrange(99)
boat_direction = randrange(1, 4)
boat_position = create_boat(len_of_boat, boat_start,
boat_direction, taken_positions)
ships.append(boat_position)
taken_positions += boat_position
return ships, taken_positions
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions):
boat = []
if boat_direction == 1:
for i in range(len_of_boat):
boat.append(boat_start - i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 2:
for i in range(len_of_boat):
boat.append(boat_start + i)
boat = check_ok(boat, taken_positions)
elif boat_direction == 3:
for i in range(len_of_boat):
boat.append(boat_start + i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 4:
for i in range(len_of_boat):
boat.append(boat_start - i)
boat = check_ok(boat, taken_positions)
return boat
def get_shot_comp(guesses, tactics):
while True:
try:
if len(tactics) > 0:
shot = tactics[0]
else:
shot = randrange(99)
if shot not in guesses:
guesses.append(shot)
break
except:
print('incorrect - please enter integer only')
return shot, guesses
def calculate_tactics(shot, tactics, guesses, hit):
temp = []
if len(tactics) < 1:
temp = [shot - 1, shot + 1, shot - 10, shot + 10]
elif shot - 1 in hit:
temp = [shot + 1]
for num in [2, 3, 4, 5, 6, 7, 8]:
if shot - num not in hit:
temp.append(shot - num)
break
elif shot + 1 in hit:
temp = [shot - 1]
for num in [2, 3, 4, 5, 6, 7, 8]:
if shot + num not in hit:
temp.append(shot + num)
break
elif shot - 10 in hit:
temp = [shot + 10]
for num in [20, 30, 40, 50, 60, 70, 80]:
if shot - num not in hit:
temp.append(shot - num)
break
elif shot + 10 in hit:
temp = [shot - 10]
for num in [20, 30, 40, 50, 60, 70, 80]:
if shot + num not in hit:
temp.append(shot + num)
break
candidate = []
for i in range(len(temp)):
if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:
candidate.append(temp[i])
random.shuffle(candidate)
return candidate
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from random import randrange
import random
"""
both user and computer funcs:
"""
def check_ok(boat, taken_positions):
# input: boat, taken_positions
# this func checks if the boat outside the playground or the position of the boat is already in taken_position
# return: boat. boat will returned as [-1] or its specific position
boat.sort()
for i in range(len(boat)):
if boat[i] in taken_positions:
#this condition checks if the block boat[i] is already in the list taken_positions
boat = [-1]
break
elif boat[i] > 99 or boat[i] < 0:
#this condition checks border 1 and 3
boat = [-1]
break
elif boat[i] % 10 == 9 and i < len(boat) - 1:
#this condition checks border 2 and 4
if boat[i + 1] % 10 == 0:
boat = [-1]
break
if i != 0:
# this condition checks if there is any hole in the boat
if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:
boat = [-1]
break
return boat
def check_shot(shot, ships, hit, miss, comp, sinked_boats):
# input: shot, all the boats (ships), hit, miss, comp, sinked_boats
# this func initially assumes that the shot is missed (cond = 0)
# given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships
# if yes, remove the block of the boat that is hitted by the shot
# append the shot to hit or comp. If comp, sinked_boats += 1
# if not, append the shot to miss
# return: all the boats (ships), hit, miss, comp, cond, sinked_boats
cond = 0 # miss
for i in range(len(ships)):
if shot in ships[i]:
ships[i].remove(shot)
if len(ships[i]) > 0:
hit.append(shot)
cond = 1 # hit
else:
comp.append(shot)
cond = 2 # comp
sinked_boats += 1
if cond == 0: # miss
miss.append(shot)
return ships, hit, miss, comp, cond, sinked_boats
def create_playground(hit, miss, comp):
# input: hit, miss, comp
# this func creates the playground with the status of each block
# print the playground
print(" battleship")
print(" 0 1 2 3 4 5 6 7 8 9")
block = 0 #this variable keep track of the spot of the block
for i in range(10):
#create each row
row = ""
for j in range(10):
#create each spot on the specific row
character = "_ "
if block in miss:
character = "x "
elif block in hit:
character = "o "
elif block in comp:
character = "Q "
row += character
block += 1 #the block var increments 1 after each character is add to row
print(i, " ", row)
print("")
def check_empty(ships):
# input: ships
# [] = False, [#have element] = True
# this func checks each ship in the 2D list ships
# if ship is empty, return True, and vice versa
# if all ships are empty, return True, else return False
# return True or False
return all([not elem for elem in ships])
"""
user - 2 funcs:
"""
def create_ships_u(taken_positions, num_boats):
# input: num_boats
# this func has a loop that makes all boats,
# which calls the get_ship(len_of_boat, taken_positions) that creates a single boat
# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
ship, taken_positions = get_ship(len_of_boat, taken_positions)
ships.append(ship)
return ships, taken_positions
def create_playground_u(taken_positions):
print(" battleships ")
print(" 0 1 2 3 4 5 6 7 8 9")
place = 0
for x in range(10):
row = ""
for y in range(10):
ch = " _ "
if place in taken_positions:
ch = " o "
row = row + ch
place = place + 1
print(x," ",row)
def get_ship(len_of_boat, taken_positions):
# input: len_of_boat, taken_positions
# this func gets the boat's position from the user's input
# this func checks both the type of the input(is it int) and if the boat is inside playground/in taken_positions/in correct order
# return a valid ship
while True:
ship = []
print("enter your ship of length", len_of_boat)
for i in range(len_of_boat):
while True:
try:
boat_num = input("please enter a number: ")
ship.append(int(boat_num))
except ValueError: # better try again... Return to the start of the loop
print("wrong type of input")
continue
                else: # it is a correct input, and we're ready to exit the loop
break
ship = check_ok(ship, taken_positions)
if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break
taken_positions += ship
break
else:
print("invalid number - please enter again")
return ship, taken_positions
def get_shot_user(guesses):
# input: guesses is the combined list of hit, miss, comp
# this func asks the user to enter the shot, then checks the validity of the shot
# return: the valid shot
while True:
try:
shot = int(input("Enter your shot: "))
if shot < 0 or shot > 99:
shot = int(input("Enter your shot:"))
elif shot in guesses:
print("already guessed - please enter again")
else:
return shot
except:
print("incorrect - please enter integer only")
"""
computer - 1 funcs:
"""
def create_ships_c(taken_positions, num_boats):
# input: num_boats
# this func has a loop that makes all boats,
# which calls the create_boat() that creates a single boat
# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
boat_position = [-1] #create the initial position of every boat is [-1]
while -1 in boat_position:
            boat_start = randrange(100) #boat starting point (0-99)
            boat_direction = randrange(1, 5) #{1: "up", 2: "right", 3: "down", 4: "left"}
boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat
#a new boat is created after finishing the while loop
ships.append(boat_position)
taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions
return ships, taken_positions
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions):
# input: len_of_boat, boat_start, boat_direction, taken_positions
# this func initializes boat = []
# with len_of_boat, boat_start, boat_direction, this func create the position of the boat
# calls check_ok(boat, taken_positions) to see if the boat is outside the playground or its position is already in taken_positions
# return: boat. boat will be returned as [-1] or its specific position
boat = []
if boat_direction == 1:
for i in range(len_of_boat):
boat.append(boat_start - i * 10) # already have the position of boat after this line
boat = check_ok(boat, taken_positions)
elif boat_direction == 2:
for i in range(len_of_boat):
boat.append(boat_start + i)
boat = check_ok(boat, taken_positions)
elif boat_direction == 3:
for i in range(len_of_boat):
boat.append(boat_start + i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 4:
for i in range(len_of_boat):
boat.append(boat_start - i)
boat = check_ok(boat, taken_positions)
return boat
def get_shot_comp(guesses, tactics):
# input: guesses (all moves), tactics(which is the list of all valid possible moves for the shot)
# in the first move, tactics = []
# this func checks if len(tactics) > 0
# if yes, pick shot = tactics[0]
# if no, pick shot = randrange(99)
# this func checks if shot is not in guesses (which is the list of all moves)
# if yes, guess.append(shot), and break
# return: the valid shot, guesses
while True:
try:
if len(tactics) > 0:
shot = tactics[0]
else:
                shot = randrange(100)
if shot not in guesses:
guesses.append(shot)
break
except:
print("incorrect - please enter integer only")
return shot, guesses
def calculate_tactics(shot, tactics, guesses, hit):
# input: shot, tactics, guesses, hit
# this function takes the newly shot, and changes the tactics list accordingly
# the list temp is the possible positions that the next shot can be
# if the shot hits the first time, len(tactics) = 0. Then, temp is the list contains 4 blocks around the shot
# else, the list temp will be created based on the last 2 shots
# candidate is the list of valid possible shots that is created from temp
# shuffle the order of elements inside candidate
# return: candidate (candidate is tactics)
temp = []
if len(tactics) < 1:
# got 1 hit the first time
temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be
else:
# got at least 2 hits
# checks to see if the 4 spots around is in hit
if shot - 1 in hit: # east
temp = [shot + 1]
for num in [2, 3, 4, 5, 6, 7, 8]:
if shot - num not in hit:
temp.append(shot - num)
break
elif shot + 1 in hit: # west
temp = [shot - 1]
for num in [2, 3, 4, 5, 6, 7, 8]:
if shot + num not in hit:
temp.append(shot + num)
break
elif shot - 10 in hit: # south
temp = [shot + 10]
for num in [20, 30, 40, 50, 60, 70, 80]:
if shot - num not in hit:
temp.append(shot - num)
break
elif shot + 10 in hit: # north. Ex: first shot is 50, next shot is 40
temp = [shot - 10]
for num in [20, 30, 40, 50, 60, 70, 80]:
if shot + num not in hit:
temp.append(shot + num)
break
candidate = [] # list of valid places that the next shot could be
for i in range(len(temp)):
if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1: #checks the validity of places in temp
candidate.append(temp[i])
random.shuffle(candidate) # shuffle the element order of the list candidate
return candidate
"""
main program:
"""
num_boats = [5, 4, 3, 3, 2, 2] # this list contains all boats. Each boat is represented by its length
# before game
# computer - 1
hit1 = []
miss1 = []
comp1 = []
guesses1 = []
cond1 = 0
tactics1 = [] # list of possible moves after a boat is hit. After a boat is sunk, tactics resets to []
taken_positions1 = []
sinked_boats1 = 0
# user - 2
hit2 = []
miss2 = []
comp2 = []
guesses2 = []
cond2 = 0
tactics2 = []
taken_positions2 = []
sinked_boats2 = 0
# computer creates ships for player 1
ships1, taken_positions1 = create_ships_c(taken_positions1, num_boats)
# user creates boat for player 2 - show board
ships2, taken_positions2 = create_ships_u(taken_positions2, num_boats)
create_playground_u(taken_positions2)
# loop for user and computer takes turn to shoot, and repeat until finding a winner:
turns = 0
while True:
turns += 1
# USER SHOOTS: using 1 because it is checking the data of computer
guesses1 = hit1 + miss1 + comp1
shot1 = get_shot_user(guesses1)
ships1, hit1, miss1, comp1, cond1, sinked_boats1 = check_shot(shot1, ships1, hit1, miss1, comp1, sinked_boats1)
create_playground(hit1, miss1, comp1)
# check if all of the computer ships are empty:
if check_empty(ships1):
print("end of game - winner in", turns)
break
# COMPUTER SHOOTS:
guesses2 = hit2 + miss2 + comp2
shot2, guesses2 = get_shot_comp(guesses2, tactics2)
ships2, hit2, miss2, comp2, cond2, sinked_boats2 = check_shot(shot2, ships2, hit2, miss2, comp2, sinked_boats2)
create_playground(hit2, miss2, comp2)
if cond2 == 1:
# got 1 hit
tactics2 = calculate_tactics(shot2, tactics2, guesses2, hit2)
elif cond2 == 2:
# comp, and sunk the boat
# reset tactics = []
tactics2 = []
elif len(tactics2) > 0: #len(tactics) > 0 means that there are still possible moves
# got 1 hit, then miss
# remove the newly shot from tactics
tactics2.pop(0)
# in case all 3 statements above are False, which means there is no hit in the first place, tactics is still []
# check if all of the computer ships are empty:
if check_empty(ships2):
print("end of game - computer wins in", turns)
break
# after both the user and computer shoot, start a new loop:
|
flexible
|
{
"blob_id": "95584dfdb232be7f507dc9d29ed2f1d95fa2b653",
"index": 9642,
"step-1": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\n<mask token>\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\ndef create_playground(hit, miss, comp):\n print(' battleship')\n print(' 0 1 2 3 4 5 6 7 8 9')\n block = 0\n for i in range(10):\n row = ''\n for j in range(10):\n character = '_ '\n if block in miss:\n character = 'x '\n elif block in hit:\n character = 'o '\n elif block in comp:\n character = 'Q '\n row += character\n block += 1\n print(i, ' ', row)\n print('')\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\n<mask token>\n\n\ndef get_shot_user(guesses):\n while True:\n try:\n shot = int(input('Enter your shot: '))\n if shot < 0 or shot > 99:\n shot = int(input('Enter your shot:'))\n elif shot in guesses:\n print('already guessed - please enter again')\n else:\n return shot\n except:\n print('incorrect - please enter integer only')\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\ndef calculate_tactics(shot, tactics, guesses, hit):\n temp = []\n if len(tactics) < 1:\n temp = [shot - 1, shot + 1, shot - 10, shot + 10]\n elif shot - 1 in hit:\n temp = [shot + 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 1 in hit:\n temp = [shot - 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot + num not in hit:\n temp.append(shot + num)\n 
break\n elif shot - 10 in hit:\n temp = [shot + 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 10 in hit:\n temp = [shot - 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n candidate = []\n for i in range(len(temp)):\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:\n candidate.append(temp[i])\n random.shuffle(candidate)\n return candidate\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\ndef create_playground(hit, miss, comp):\n print(' battleship')\n print(' 0 1 2 3 4 5 6 7 8 9')\n block = 0\n for i in range(10):\n row = ''\n for j in range(10):\n character = '_ '\n if block in miss:\n character = 'x '\n elif block in hit:\n character = 'o '\n elif block in comp:\n character = 'Q '\n row += character\n block += 1\n print(i, ' ', row)\n print('')\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\n<mask token>\n\n\ndef get_ship(len_of_boat, taken_positions):\n while True:\n ship = []\n print('enter your ship of length', len_of_boat)\n for i in range(len_of_boat):\n while True:\n try:\n boat_num = input('please enter a number: ')\n ship.append(int(boat_num))\n except ValueError:\n print('wrong type of input')\n continue\n else:\n break\n ship = check_ok(ship, taken_positions)\n if -1 not in ship:\n taken_positions += ship\n break\n else:\n print('invalid number - please enter again')\n return ship, taken_positions\n\n\ndef get_shot_user(guesses):\n while True:\n try:\n shot = int(input('Enter your shot: '))\n if shot < 0 or shot > 99:\n shot = int(input('Enter your shot:'))\n elif shot in guesses:\n print('already guessed - please enter again')\n else:\n return shot\n except:\n print('incorrect - please enter integer only')\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n 
break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\ndef calculate_tactics(shot, tactics, guesses, hit):\n temp = []\n if len(tactics) < 1:\n temp = [shot - 1, shot + 1, shot - 10, shot + 10]\n elif shot - 1 in hit:\n temp = [shot + 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 1 in hit:\n temp = [shot - 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n elif shot - 10 in hit:\n temp = [shot + 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 10 in hit:\n temp = [shot - 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n candidate = []\n for i in range(len(temp)):\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:\n candidate.append(temp[i])\n random.shuffle(candidate)\n return candidate\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\ndef create_playground(hit, miss, comp):\n print(' battleship')\n print(' 0 1 2 3 4 5 6 7 8 9')\n block = 0\n for i in range(10):\n row = ''\n for j in range(10):\n character = '_ '\n if block in miss:\n character = 'x '\n elif block in hit:\n character = 'o '\n elif block in comp:\n character = 'Q '\n row += character\n block += 1\n print(i, ' ', row)\n print('')\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\ndef create_playground_u(taken_positions):\n print(' battleships ')\n print(' 0 1 2 3 4 5 6 7 8 9')\n place = 0\n for x in range(10):\n row = ''\n for y in range(10):\n ch = ' _ '\n if place in taken_positions:\n ch = ' o '\n row = row + ch\n place = place + 1\n print(x, ' ', row)\n\n\ndef get_ship(len_of_boat, taken_positions):\n while True:\n ship = []\n print('enter your ship of length', len_of_boat)\n for i in range(len_of_boat):\n while True:\n try:\n boat_num = input('please enter a number: ')\n ship.append(int(boat_num))\n except ValueError:\n print('wrong type of input')\n continue\n else:\n break\n ship = check_ok(ship, taken_positions)\n if -1 not in ship:\n taken_positions += ship\n break\n else:\n print('invalid number - please enter again')\n return ship, taken_positions\n\n\ndef get_shot_user(guesses):\n while True:\n try:\n shot = int(input('Enter your shot: '))\n if shot < 0 or shot > 99:\n shot = int(input('Enter your shot:'))\n elif shot in guesses:\n print('already guessed - please enter again')\n else:\n return shot\n except:\n print('incorrect - please enter integer only')\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n 
boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\ndef calculate_tactics(shot, tactics, guesses, hit):\n temp = []\n if len(tactics) < 1:\n temp = [shot - 1, shot + 1, shot - 10, shot + 10]\n elif shot - 1 in hit:\n temp = [shot + 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 1 in hit:\n temp = [shot - 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n elif shot - 10 in hit:\n temp = [shot + 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 10 in hit:\n temp = [shot - 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n candidate = []\n for i in range(len(temp)):\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:\n candidate.append(temp[i])\n random.shuffle(candidate)\n return candidate\n\n\n<mask token>\n",
"step-5": "from random import randrange\r\nimport random\r\n\r\n\"\"\"\r\nboth user and computer funcs:\r\n\"\"\"\r\ndef check_ok(boat, taken_positions):\r\n# input: boat, taken_positions \r\n# this func checks if the boat outside the playground or the position of the boat is already in taken_position\r\n# return: boat. boat will returned as [-1] or its specific position\r\n boat.sort()\r\n for i in range(len(boat)):\r\n if boat[i] in taken_positions:\r\n #this condition checks if the block boat[i] is already in the list taken_positions\r\n boat = [-1]\r\n break \r\n elif boat[i] > 99 or boat[i] < 0:\r\n #this condition checks border 1 and 3\r\n boat = [-1]\r\n break\r\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\r\n #this condition checks border 2 and 4\r\n if boat[i + 1] % 10 == 0:\r\n boat = [-1]\r\n break\r\n \r\n if i != 0:\r\n # this condition checks if there is any hole in the boat\r\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\r\n boat = [-1]\r\n break\r\n return boat \r\n\r\n\r\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\r\n# input: shot, all the boats (ships), hit, miss, comp, sinked_boats\r\n# this func initially assumes that the shot is missed (cond = 0)\r\n# given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships \r\n# if yes, remove the block of the boat that is hitted by the shot\r\n# append the shot to hit or comp. If comp, sinked_boats += 1\r\n# if not, append the shot to miss\r\n# return: all the boats (ships), hit, miss, comp, cond, sinked_boats\r\n cond = 0 # miss\r\n for i in range(len(ships)):\r\n if shot in ships[i]:\r\n ships[i].remove(shot)\r\n if len(ships[i]) > 0:\r\n hit.append(shot)\r\n cond = 1 # hit\r\n else:\r\n comp.append(shot)\r\n cond = 2 # comp\r\n sinked_boats += 1 \r\n if cond == 0: # miss\r\n miss.append(shot) \r\n return ships, hit, miss, comp, cond, sinked_boats\r\n\r\n\r\ndef create_playground(hit, miss, comp):\r\n# input: hit, miss, comp\r\n# this func creates the playground with the status of each block \r\n# print the playground\r\n print(\" battleship\")\r\n print(\" 0 1 2 3 4 5 6 7 8 9\")\r\n \r\n block = 0 #this variable keep track of the spot of the block\r\n for i in range(10):\r\n #create each row\r\n row = \"\"\r\n for j in range(10):\r\n #create each spot on the specific row\r\n character = \"_ \"\r\n if block in miss:\r\n character = \"x \"\r\n elif block in hit:\r\n character = \"o \" \r\n elif block in comp:\r\n character = \"Q \"\r\n row += character\r\n block += 1 #the block var increments 1 after each character is add to row\r\n print(i, \" \", row)\r\n print(\"\")\r\n\r\n\r\ndef check_empty(ships):\r\n# input: ships\r\n# [] = False, [#have element] = True\r\n# this func checks each ship in the 2D list ships\r\n# if ship is empty, return True, and vice versa\r\n# if all ships are empty, return True, else return False\r\n# return True or False \r\n return all([not elem for elem in ships])\r\n\r\n\r\n\"\"\"\r\nuser - 2 funcs:\r\n\"\"\"\r\ndef create_ships_u(taken_positions, num_boats):\r\n# input: num_boats\r\n# this func has a loop that makes all boats,\r\n# which calls the get_ship(len_of_boat, taken_positions) that creates a single boat\r\n# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats\r\n ships = [] #this is a 2D list contains the positions of all boats\r\n for len_of_boat in num_boats:\r\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\r\n ships.append(ship)\r\n return 
ships, taken_positions\r\n\r\n \r\ndef create_playground_u(taken_positions):\r\n print(\" battleships \")\r\n print(\" 0 1 2 3 4 5 6 7 8 9\")\r\n \r\n place = 0\r\n for x in range(10):\r\n row = \"\"\r\n for y in range(10):\r\n ch = \" _ \"\r\n if place in taken_positions:\r\n ch = \" o \" \r\n row = row + ch\r\n place = place + 1\r\n \r\n print(x,\" \",row)\r\n\r\n\r\ndef get_ship(len_of_boat, taken_positions):\r\n# input: len_of_boat, taken_positions\r\n# this func gets the boat's position from the user's input\r\n# this func checks both the type of the input(is it int) and if the boat is inside playground/in taken_positions/in correct order \r\n# return a valid ship \r\n while True:\r\n ship = []\r\n print(\"enter your ship of length\", len_of_boat)\r\n for i in range(len_of_boat):\r\n while True:\r\n try:\r\n boat_num = input(\"please enter a number: \")\r\n ship.append(int(boat_num))\r\n except ValueError: # better try again... Return to the start of the loop\r\n print(\"wrong type of input\")\r\n continue\r\n else: # is is a correct input, and we're ready to exit the loop\r\n break\r\n ship = check_ok(ship, taken_positions)\r\n\r\n if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break\r\n taken_positions += ship\r\n break\r\n else:\r\n print(\"invalid number - please enter again\")\r\n return ship, taken_positions\r\n\r\n\r\ndef get_shot_user(guesses):\r\n# input: guesses is the combined list of hit, miss, comp\r\n# this funcs asks the user to enter the shot, then checks the validity of the shot \r\n# return: the valid shot\r\n while True:\r\n try:\r\n shot = int(input(\"Enter your shot: \"))\r\n if shot < 0 or shot > 99:\r\n shot = int(input(\"Enter your shot:\"))\r\n elif shot in guesses:\r\n print(\"already guessed - please enter again\")\r\n else:\r\n return shot\r\n except:\r\n print(\"incorrect - please enter integer only\")\r\n\r\n\r\n\"\"\"\r\ncomputer - 1 funcs:\r\n\"\"\"\r\ndef create_ships_c(taken_positions, num_boats):\r\n# input: num_boats\r\n# this funcs has a loop that makes all boats,\r\n# which calls the create_boat() that creates a single boat\r\n# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats\r\n ships = [] #this is a 2D list contains the positions of all boats\r\n for len_of_boat in num_boats:\r\n boat_position = [-1] #create the initial position of every boat is [-1]\r\n while -1 in boat_position:\r\n boat_start = randrange(99) #boat starting point\r\n boat_direction = randrange(1, 4) #{1: \"up\", 2: \"right\", 3: \"down\", 4: \"left\"}\r\n boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat\r\n #a new boat is created after finishing the while loop\r\n ships.append(boat_position)\r\n taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions\r\n return ships, taken_positions\r\n\r\n\r\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\r\n# input: len_of_boat, boat_start, boat_direction, taken_positions\r\n# this func initializes boat = []\r\n# with len_of_boat, boat_start, boat_direction, this func create the position of the boat\r\n# calls check_ok(boat, taken_positions) to see if the boat outside playground or the position of the boat is already in taken_position\r\n# return: boat. 
boat will returned as [-1] or its specific position\r\n boat = []\r\n if boat_direction == 1:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start - i * 10) # already have the position of boat after this line\r\n boat = check_ok(boat, taken_positions)\r\n elif boat_direction == 2:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start + i)\r\n boat = check_ok(boat, taken_positions)\r\n elif boat_direction == 3:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start + i * 10)\r\n boat = check_ok(boat, taken_positions)\r\n elif boat_direction == 4:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start - i)\r\n boat = check_ok(boat, taken_positions)\r\n return boat\r\n\r\n\r\ndef get_shot_comp(guesses, tactics):\r\n# input: guesses (all moves), tactics(which is the list of all valid possible moves for the shot)\r\n# in the first mơve, tactics = []\r\n# this func checks if len(tactics) > 0\r\n# if yes, pick shot = tactics[0]\r\n# if no, pick shot = randrange(99)\r\n# this func check if shot not in guesses(which is the list of all moves) \r\n# if yes, guess.append(shot), and break\r\n# return: the valid shot, guesses\r\n while True:\r\n try:\r\n if len(tactics) > 0:\r\n shot = tactics[0]\r\n else:\r\n shot = randrange(99)\r\n \r\n if shot not in guesses:\r\n guesses.append(shot)\r\n break\r\n except:\r\n print(\"incorrect - please enter integer only\")\r\n return shot, guesses\r\n\r\n\r\ndef calculate_tactics(shot, tactics, guesses, hit):\r\n# input: shot, tactics, guesses, hit\r\n# this function takes the newly shot, and changes the tactics list accordingly\r\n# the list temp is the possible positions that the next shot can be\r\n# if the shot hits the first time, len(tactics) = 0. Then, temp is the list contains 4 blocks around the shot\r\n# else, the list temp will be created based on the last 2 shots\r\n# candidate is the list of valid possible shots that is created from temp\r\n# shuffle the order of elements inside candidate\r\n# return: candidate (candidate is tactics)\r\n temp = []\r\n if len(tactics) < 1:\r\n # got 1 hit the first time \r\n temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be \r\n else: \r\n # got at least 2 hits \r\n # checks to see if the 4 spots around is in hit\r\n if shot - 1 in hit: # east\r\n temp = [shot + 1]\r\n for num in [2, 3, 4, 5, 6, 7, 8]:\r\n if shot - num not in hit:\r\n temp.append(shot - num) \r\n break\r\n\r\n elif shot + 1 in hit: # west\r\n temp = [shot - 1]\r\n for num in [2, 3, 4, 5, 6, 7, 8]:\r\n if shot + num not in hit:\r\n temp.append(shot + num) \r\n break\r\n \r\n elif shot - 10 in hit: # south\r\n temp = [shot + 10]\r\n for num in [20, 30, 40, 50, 60, 70, 80]:\r\n if shot - num not in hit:\r\n temp.append(shot - num) \r\n break\r\n \r\n elif shot + 10 in hit: # north. Ex: first shot is 50, next shot is 40\r\n temp = [shot - 10]\r\n for num in [20, 30, 40, 50, 60, 70, 80]:\r\n if shot + num not in hit:\r\n temp.append(shot + num) \r\n break\r\n \r\n candidate = [] # list of valid places that the next shot could be\r\n for i in range(len(temp)):\r\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1: #checks the validity of places in temp\r\n candidate.append(temp[i])\r\n random.shuffle(candidate) # shuffle the element order of the list candidate\r\n return candidate\r\n\r\n\r\n\r\n\"\"\"\r\nmain program:\r\n\"\"\"\r\nnum_boats = [5, 4, 3, 3, 2, 2] # this list contains all boats. 
Each boat is represented by its length \r\n\r\n# before game\r\n# computer - 1\r\nhit1 = []\r\nmiss1 = []\r\ncomp1 = []\r\nguesses1 = []\r\ncond1 = 0\r\ntactics1 = [] # list of possible moves after a boat is hitted. After a boat is sunked, tactics reset to []\r\ntaken_positions1 = []\r\nsinked_boats1 = []\r\n\r\n# user - 2\r\nhit2 = []\r\nmiss2 = []\r\ncomp2 = []\r\nguesses2 = []\r\ncond2 = 0\r\ntactics2 = []\r\ntaken_positions2 = []\r\nsinked_boats2 = []\r\n\r\n# computer creates ships for player 1\r\nships1, taken_positions1 = create_ships_c(taken_positions1, num_boats) \r\n# user creates boat for player 2 - show board\r\nships2, taken_positions2 = create_ships_u(taken_positions2, num_boats)\r\ncreate_playground_u(taken_positions2)\r\n\r\n# loop for user and computer takes turn to shoot, and repeat until finding a winner:\r\nturns = 0\r\nwhile True: \r\n turns += 1\r\n\r\n# USER SHOOTS: using 1 because it is checking the data of computer\r\n guesses1 = hit1 + miss1 + comp1\r\n shot1 = get_shot_user(guesses1)\r\n ships1, hit1, miss1, comp1, cond1, sinked_boats1 = check_shot(shot1, ships1, hit1, miss1, comp1, sinked_boats1)\r\n create_playground(hit1, miss1, comp1)\r\n\r\n# check if all of the computer ships are empty:\r\n if check_empty(ships1):\r\n print(\"end of game - winner in\", turns)\r\n break\r\n\r\n# COMPUTER SHOOTS:\r\n guesses2 = hit2 + miss2 + comp2\r\n shot2, guesses2 = get_shot_comp(guesses2, tactics2) \r\n ships2, hit2, miss2, comp2, cond2, sinked_boats2 = check_shot(shot2, ships2, hit2, miss2, comp2, sinked_boats2)\r\n create_playground(hit2, miss2, comp2)\r\n\r\n if cond2 == 1:\r\n # got 1 hit\r\n tactics2 = calculate_tactics(shot2, tactics2, guesses2, hit2)\r\n elif cond2 == 2:\r\n # comp, and sunk the boat\r\n # reset tactics = []\r\n tactics2 = []\r\n elif len(tactics2) > 0: #len(tactics) > 0 means that there are still possible moves\r\n # got 1 hit, then miss\r\n # remove the newly shot from tactics\r\n tactics2.pop(0)\r\n # in case all 3 statements above are False, which means there is no hit in the first place, tactics is still []\r\n\r\n# check if all of the computer ships are empty:\r\n if check_empty(ships2):\r\n print(\"end of game - computer wins in\", turns)\r\n break\r\n\r\n# after both the user and computer shoot, start a new loop:\r\n\r\n",
"step-ids": [
7,
10,
11,
12,
16
]
}
|
[
7,
10,
11,
12,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def firstMissingPositive(nums):
if len(nums) == 0:
return 1
if len(nums) == 1:
if nums[0] == 1:
return 2
else:
return 1
nums.sort()
current = 1
nums = [ele for ele in nums if ele > 0]
if len(nums) == 0:
return 1
if len(nums) == 1:
if nums[0] == 1:
return 2
else:
return 1
for i in range(len(nums) - 1):
if current != nums[i]:
return 1
else:
while i < len(nums) - 1 and (nums[i] + 1 == nums[i + 1] or nums
[i] == nums[i + 1]):
i += 1
if i == len(nums) - 2 and nums[i] + 1 == nums[i + 1]:
return nums[i + 1] + 1
else:
return nums[i] + 1
return 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def firstMissingPositive(nums):
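    # find the smallest missing positive integer by sorting nums and scanning its positive values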
if len(nums) == 0:
return 1
if len(nums) == 1:
if nums[0] == 1:
return 2
else:
return 1
nums.sort()
current = 1
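    # drop zeros and negatives; only positive values can affect the answer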
nums = [ele for ele in nums if ele > 0]
if len(nums) == 0:
return 1
if len(nums) == 1:
if nums[0] == 1:
return 2
else:
return 1
for i in range(len(nums) - 1):
if current != nums[i]:
return 1
else:
while i < len(nums) - 1 and (nums[i] + 1 == nums[i + 1] or nums
[i] == nums[i + 1]):
i += 1
if i == len(nums) - 2 and nums[i] + 1 == nums[i + 1]:
return nums[i + 1] + 1
else:
return nums[i] + 1
return 1
print(firstMissingPositive([1, 1000]))
print(firstMissingPositive([1, 0]))
print(firstMissingPositive([-1, -2]))
print(firstMissingPositive([1, 2, 0]))
print(firstMissingPositive([3, 4, -1, 1]))
print(firstMissingPositive([7, 8, 9, 11, 12]))
|
flexible
|
{
"blob_id": "89addbf2c49d568250cd5a48d3fdb73914ce50c4",
"index": 2899,
"step-1": "<mask token>\n",
"step-2": "def firstMissingPositive(nums):\n if len(nums) == 0:\n return 1\n if len(nums) == 1:\n if nums[0] == 1:\n return 2\n else:\n return 1\n nums.sort()\n current = 1\n nums = [ele for ele in nums if ele > 0]\n if len(nums) == 0:\n return 1\n if len(nums) == 1:\n if nums[0] == 1:\n return 2\n else:\n return 1\n for i in range(len(nums) - 1):\n if current != nums[i]:\n return 1\n else:\n while i < len(nums) - 1 and (nums[i] + 1 == nums[i + 1] or nums\n [i] == nums[i + 1]):\n i += 1\n if i == len(nums) - 2 and nums[i] + 1 == nums[i + 1]:\n return nums[i + 1] + 1\n else:\n return nums[i] + 1\n return 1\n\n\n<mask token>\n",
"step-3": "def firstMissingPositive(nums):\n if len(nums) == 0:\n return 1\n if len(nums) == 1:\n if nums[0] == 1:\n return 2\n else:\n return 1\n nums.sort()\n current = 1\n nums = [ele for ele in nums if ele > 0]\n if len(nums) == 0:\n return 1\n if len(nums) == 1:\n if nums[0] == 1:\n return 2\n else:\n return 1\n for i in range(len(nums) - 1):\n if current != nums[i]:\n return 1\n else:\n while i < len(nums) - 1 and (nums[i] + 1 == nums[i + 1] or nums\n [i] == nums[i + 1]):\n i += 1\n if i == len(nums) - 2 and nums[i] + 1 == nums[i + 1]:\n return nums[i + 1] + 1\n else:\n return nums[i] + 1\n return 1\n\n\nprint(firstMissingPositive([1, 1000]))\nprint(firstMissingPositive([1, 0]))\nprint(firstMissingPositive([-1, -2]))\nprint(firstMissingPositive([1, 2, 0]))\nprint(firstMissingPositive([3, 4, -1, 1]))\nprint(firstMissingPositive([7, 8, 9, 11, 12]))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
websocket_urlpatterns = [url('^account/home', consumers.
NotificationConsumer), url('^fund/(?P<fund>[\\w-]+)', consumers.
NotificationConsumer), url('^websockets', consumers.StreamConsumer)]
<|reserved_special_token_1|>
from django.conf.urls import url
from . import consumers
websocket_urlpatterns = [url('^account/home', consumers.
NotificationConsumer), url('^fund/(?P<fund>[\\w-]+)', consumers.
NotificationConsumer), url('^websockets', consumers.StreamConsumer)]
<|reserved_special_token_1|>
from django.conf.urls import url
from . import consumers
websocket_urlpatterns = [
url(r'^account/home', consumers.NotificationConsumer),
url(r'^fund/(?P<fund>[\w-]+)', consumers.NotificationConsumer),
url(r'^websockets', consumers.StreamConsumer),
]
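
# A minimal sketch (an assumption, not part of the original file) of how these
# patterns are typically mounted in a Channels ASGI config (e.g. asgi.py):
#
#   from channels.routing import ProtocolTypeRouter, URLRouter
#   from channels.auth import AuthMiddlewareStack
#   from myapp import routing  # hypothetical app/module name
#
#   application = ProtocolTypeRouter({
#       "websocket": AuthMiddlewareStack(
#           URLRouter(routing.websocket_urlpatterns)
#       ),
#   })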
|
flexible
|
{
"blob_id": "7ab9c530035185ee2250f3f6ce8cde87bdfd9803",
"index": 5295,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwebsocket_urlpatterns = [url('^account/home', consumers.\n NotificationConsumer), url('^fund/(?P<fund>[\\\\w-]+)', consumers.\n NotificationConsumer), url('^websockets', consumers.StreamConsumer)]\n",
"step-3": "from django.conf.urls import url\nfrom . import consumers\nwebsocket_urlpatterns = [url('^account/home', consumers.\n NotificationConsumer), url('^fund/(?P<fund>[\\\\w-]+)', consumers.\n NotificationConsumer), url('^websockets', consumers.StreamConsumer)]\n",
"step-4": "from django.conf.urls import url\n\nfrom . import consumers\n\nwebsocket_urlpatterns = [\n url(r'^account/home', consumers.NotificationConsumer),\n url(r'^fund/(?P<fund>[\\w-]+)', consumers.NotificationConsumer),\n url(r'^websockets', consumers.StreamConsumer),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def digit_sum(x):
sum = 0
while x != 0:
sum = sum + x % 10
x = x // 10
return sum
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def digit_sum(x):
sum = 0
while x != 0:
sum = sum + x % 10
x = x // 10
return sum
for i in sys.stdin:
test_num = int(i)
if test_num == 0:
break
count = 11
while digit_sum(test_num) != digit_sum(count * test_num):
count = count + 1
print('{}'.format(count))
<|reserved_special_token_1|>
import sys
def digit_sum(x):
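    # return the sum of the decimal digits of x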
sum = 0
while x != 0:
sum = sum + x % 10
x = x // 10
return sum
for i in sys.stdin:
test_num = int(i)
if test_num == 0:
break
count = 11
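    # count up from 11 (presumably the problem's required lower bound) until
    # count * test_num has the same digit sum as test_num itself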
while digit_sum(test_num) != digit_sum(count * test_num):
count = count + 1
print('{}'.format(count))
|
flexible
|
{
"blob_id": "0d37b6f0ea8854f9d4d4cd2ff235fa39bab7cc12",
"index": 6549,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef digit_sum(x):\n sum = 0\n while x != 0:\n sum = sum + x % 10\n x = x // 10\n return sum\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef digit_sum(x):\n sum = 0\n while x != 0:\n sum = sum + x % 10\n x = x // 10\n return sum\n\n\nfor i in sys.stdin:\n test_num = int(i)\n if test_num == 0:\n break\n count = 11\n while digit_sum(test_num) != digit_sum(count * test_num):\n count = count + 1\n print('{}'.format(count))\n",
"step-4": "import sys\n\n\ndef digit_sum(x):\n sum = 0\n while x != 0:\n sum = sum + x % 10\n x = x // 10\n return sum\n\n\nfor i in sys.stdin:\n test_num = int(i)\n if test_num == 0:\n break\n count = 11\n while digit_sum(test_num) != digit_sum(count * test_num):\n count = count + 1\n print('{}'.format(count))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# Rhino Motor Driver (RMCS 2303) - Basic Modbus Communication
# -----------------------------------------------------------
"""
BSD 3-Clause License
Copyright (c) 2021, Rajesh Subramanian
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import time
import traceback
import minimalmodbus as modbus
import rhino_params as rhino
class Controller:
def __init__(self, port_name, slave_address):
# Parameters
self.__instrument = modbus.Instrument(port_name, slave_address, modbus.MODE_ASCII)
self.__instrument.serial.baudrate = 9600
self.__instrument.serial.parity = modbus.serial.PARITY_NONE
self.__instrument.bytesize = 8
self.__instrument.stopbits = 1
self.__instrument.timeout = 5 # seconds
self.__instrument.write_timeout = 5 # seconds
self.__instrument.clear_buffers_before_each_transaction = True
# self.__instrument.close_port_after_each_call = True
        self.__time_delay = 0.001  # default: 1 ms
self.__lock_resource = False # To prevent issuing simultaneous commands to RMCS2303 motor controller. Eg.
# trying to read encoder value while writing motor enable command
self.name = self.extract_name_from_port_name(port_name)
self.__status_rotation_direction = 0
self.__CW = 1 # clockwise rotation status
self.__CCW = -1 # counter clockwise rotation status
self.__IDLE = 0 # no rotation status
# Functions
self.__set_lines_per_rotation(rhino.LINES_PER_ROTATION_DEFAULT)
self.brake()
self.__go_home()
self.set_acceleration(rhino.ACCELERATION_DEFAULT)
self.set_speed(rhino.SPEED_DEFAULT)
# Private Functions
# -----------------
@staticmethod
def __convert_unsigned32_to_signed32(unsigned32_data):
# UInt32 range: 0 to 4294967295
# Int32 range: -2147483648 to 2147483647
mid_uint32 = 2147483648
if unsigned32_data is not None:
signed32_data = int(unsigned32_data - mid_uint32)
return signed32_data
@staticmethod
def __convert_signed32_to_signed16(signed32_data):
# Int16 range: -32768 to 32767
signed16_data = signed32_data >> 16
return signed16_data
def __read_from_register(self, message_list):
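        # read a single holding register, waiting until no other command holds the bus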
while True: # Attempt sending message until the controller is free
try:
if not self.__lock_resource: # Check if controller is in use
self.__lock_resource = True
data = self.__instrument.read_register(message_list[0], message_list[1], message_list[2])
time.sleep(self.__time_delay)
self.__lock_resource = False
return data
except KeyboardInterrupt:
print("Keyboard Interrupt: " + self.name)
except modbus.ModbusException as e:
print("ModbusException at " + self.name + ": " + str(e))
except modbus.serial.SerialException as e:
print("Modbus Serial Exception at " + self.name + ": " + str(e))
except modbus.InvalidResponseError as e:
print("Modbus Invalid Response Exception at " + self.name + ": " + str(e))
except Exception as e:
print("Motor Driver Exception at " + self.name + ": " + str(e))
print(traceback.format_exc())
time.sleep(self.__time_delay)
def __read_from_registers(self, message_list):
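        # read two consecutive 16-bit registers and combine them (LSB first) into one 32-bit value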
while True: # Attempt sending message until the controller is free
try:
if not self.__lock_resource: # Check if controller is in use
self.__lock_resource = True
register_size = 16
data = self.__instrument.read_registers(message_list[0], message_list[1], message_list[2])
lsb = data[0]
msb = data[1]
combined_data = (msb << register_size) + lsb # combining two 16 bit values into one 32 bit value
time.sleep(self.__time_delay)
self.__lock_resource = False
return combined_data
'''
# combine two registers and create a long integer
def combine_two_registers(self, reg):
if reg[1] > 32767:
long_reg = (65535 - reg[1])
b = long_reg << 16
out = (b + 65535 - reg[0]) * -1
else:
long_reg = reg[1]
b = long_reg << 16
out = b + reg[0]
return out
'''
except KeyboardInterrupt:
print("Keyboard Interrupt: " + self.name)
except modbus.ModbusException as e:
print("ModbusException at " + self.name + ": " + str(e))
except modbus.serial.SerialException as e:
print("Modbus Serial Exception at " + self.name + ": " + str(e))
except modbus.InvalidResponseError as e:
print("Modbus Invalid Response Exception at " + self.name + ": " + str(e))
except Exception as e:
print("Motor Driver Exception at " + self.name + ": " + str(e))
print(traceback.format_exc())
time.sleep(self.__time_delay)
def __write_to_register(self, message_list):
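        # write one register (address, value, decimals, function code), waiting until the bus is free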
while True: # Attempt sending message until the controller is free
try:
if not self.__lock_resource: # Check if controller is in use
self.__lock_resource = True
self.__instrument.write_register(message_list[0], message_list[1], message_list[2], message_list[3])
time.sleep(self.__time_delay)
self.__lock_resource = False
return
except KeyboardInterrupt:
print("Keyboard Interrupt: " + self.name)
except modbus.ModbusException as e:
print("ModbusException at " + self.name + ": " + str(e))
except modbus.serial.SerialException as e:
print("Modbus Serial Exception at " + self.name + ": " + str(e))
except modbus.InvalidResponseError as e:
print("Modbus Invalid Response Exception at " + self.name + ": " + str(e))
except Exception as e:
print("Motor Driver Exception at " + self.name + ": " + str(e))
print(traceback.format_exc())
time.sleep(self.__time_delay)
def __go_home(self):
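        # drive to the preset home position using the message defined in rhino_params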
message = rhino.HOME_POSITION_MESSAGE
self.__write_to_register(message)
def __set_lines_per_rotation(self, lines_per_rotation):
message = rhino.LINES_PER_ROTATION_MESSAGE
message[rhino.DATA_INDEX] = lines_per_rotation
self.__write_to_register(message)
# Public Functions
# ----------------
@staticmethod
def extract_name_from_port_name(port_name):
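        # e.g. "/dev/ttyUSB0" -> "ttyUSB0"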
chars = port_name.split("/")
name = chars[len(chars) - 1]
return name
@staticmethod
def convert_rad_per_sec_to_rpm(radians_per_sec):
# Formula: rpm = rad/sec * 9.549297
rpm = radians_per_sec * 9.549297
rpm_scaled = rpm * rhino.GEAR_RATIO
return rpm_scaled
@staticmethod
def convert_rpm_to_rad_per_sec(rpm):
# Formula: rad/sec = rpm * 0.10472
radians_per_sec = rpm * 0.10472
radians_per_sec_scaled = radians_per_sec / rhino.GEAR_RATIO
return radians_per_sec_scaled
def set_speed(self, speed):
speed_rpm = abs(int(self.convert_rad_per_sec_to_rpm(speed)))
if speed_rpm > rhino.SPEED_MAX:
speed_rpm = rhino.SPEED_MAX
if speed_rpm < rhino.SPEED_MIN:
speed_rpm = rhino.SPEED_MIN
message = rhino.SPEED_MESSAGE
message[rhino.DATA_INDEX] = speed_rpm
self.__write_to_register(message)
def set_acceleration(self, acceleration):
if acceleration > rhino.ACCELERATION_MAX:
acceleration = rhino.ACCELERATION_MAX
if acceleration < rhino.ACCELERATION_MIN:
acceleration = rhino.ACCELERATION_MIN
message = rhino.ACCELERATION_MESSAGE
message[rhino.DATA_INDEX] = acceleration
self.__write_to_register(message)
def turn_motor_cw(self):
message = rhino.TURN_MOTOR_CW_MESSAGE
self.__write_to_register(message)
self.__status_rotation_direction = self.__CW
def turn_motor_ccw(self):
message = rhino.TURN_MOTOR_CCW_MESSAGE
self.__write_to_register(message)
self.__status_rotation_direction = self.__CCW
def stop_rotation_cw(self):
message = rhino.STOP_MOTOR_CW_MESSAGE
self.__write_to_register(message)
self.__status_rotation_direction = self.__IDLE
def stop_rotation_ccw(self):
message = rhino.STOP_MOTOR_CCW_MESSAGE
self.__write_to_register(message)
self.__status_rotation_direction = self.__IDLE
def stop_rotation(self):
message = rhino.STOP_MESSAGE
self.__write_to_register(message)
self.__status_rotation_direction = self.__IDLE
def emergency_stop(self):
message = rhino.EMERGENCY_STOP_MESSAGE
self.__write_to_register(message)
self.__status_rotation_direction = self.__IDLE
def get_position_32bit(self):
message = rhino.POSITION_FEEDBACK_MESSAGE
position = self.__read_from_registers(message)
# position = self.__convert_unsigned32_to_signed32(position)
return position
def get_position_16bit(self):
message = rhino.POSITION_FEEDBACK_MESSAGE
position = self.__read_from_registers(message)
position_32bit = self.__convert_unsigned32_to_signed32(position)
position_16bit = self.__convert_signed32_to_signed16(position_32bit)
return position_16bit
def get_position_raw(self):
message = rhino.POSITION_FEEDBACK_MESSAGE
position = self.__read_from_registers(message)
return position
def get_speed(self):
message = rhino.SPEED_FEEDBACK_MESSAGE
speed = self.__read_from_register(message)
speed = self.__convert_unsigned32_to_signed32(speed)
return speed
def brake_cw(self):
message = rhino.BRAKE_CW_MESSAGE
self.__write_to_register(message)
self.__status_rotation_direction = self.__IDLE
def brake_ccw(self):
message = rhino.BRAKE_CCW_MESSAGE
self.__write_to_register(message)
self.__status_rotation_direction = self.__IDLE
def brake(self):
if self.__status_rotation_direction == self.__CW:
self.brake_cw()
print(self.name + ": Brake CW")
self.__status_rotation_direction = self.__IDLE
elif self.__status_rotation_direction == self.__CCW:
self.brake_ccw()
print(self.name + ": Brake CCW")
self.__status_rotation_direction = self.__IDLE
elif self.__status_rotation_direction == self.__IDLE:
print(self.name + ": Motor idle")
else:
print(self.name + ": Motor Unknown Rotation Status")
|
normal
|
{
"blob_id": "df3dcbf3c8d621f5db2a07765a0a28e7626387d9",
"index": 3485,
"step-1": "<mask token>\n\n\nclass Controller:\n\n def __init__(self, port_name, slave_address):\n self.__instrument = modbus.Instrument(port_name, slave_address,\n modbus.MODE_ASCII)\n self.__instrument.serial.baudrate = 9600\n self.__instrument.serial.parity = modbus.serial.PARITY_NONE\n self.__instrument.bytesize = 8\n self.__instrument.stopbits = 1\n self.__instrument.timeout = 5\n self.__instrument.write_timeout = 5\n self.__instrument.clear_buffers_before_each_transaction = True\n self.__time_delay = 0.001\n self.__lock_resource = False\n self.name = self.extract_name_from_port_name(port_name)\n self.__status_rotation_direction = 0\n self.__CW = 1\n self.__CCW = -1\n self.__IDLE = 0\n self.__set_lines_per_rotation(rhino.LINES_PER_ROTATION_DEFAULT)\n self.brake()\n self.__go_home()\n self.set_acceleration(rhino.ACCELERATION_DEFAULT)\n self.set_speed(rhino.SPEED_DEFAULT)\n <mask token>\n\n @staticmethod\n def __convert_signed32_to_signed16(signed32_data):\n signed16_data = signed32_data >> 16\n return signed16_data\n <mask token>\n\n def __read_from_registers(self, message_list):\n while True:\n try:\n if not self.__lock_resource:\n self.__lock_resource = True\n register_size = 16\n data = self.__instrument.read_registers(message_list[0],\n message_list[1], message_list[2])\n lsb = data[0]\n msb = data[1]\n combined_data = (msb << register_size) + lsb\n time.sleep(self.__time_delay)\n self.__lock_resource = False\n return combined_data\n \"\"\"\n # combine two registers and create a long integer\n def combine_two_registers(self, reg):\n if reg[1] > 32767:\n long_reg = (65535 - reg[1])\n b = long_reg << 16\n out = (b + 65535 - reg[0]) * -1\n else:\n long_reg = reg[1]\n b = long_reg << 16\n out = b + reg[0]\n return out\n \"\"\"\n except KeyboardInterrupt:\n print('Keyboard Interrupt: ' + self.name)\n except modbus.ModbusException as e:\n print('ModbusException at ' + self.name + ': ' + str(e))\n except modbus.serial.SerialException as e:\n print('Modbus Serial Exception at ' + self.name + ': ' + str(e)\n )\n except modbus.InvalidResponseError as e:\n print('Modbus Invalid Response Exception at ' + self.name +\n ': ' + str(e))\n except Exception as e:\n print('Motor Driver Exception at ' + self.name + ': ' + str(e))\n print(traceback.format_exc())\n time.sleep(self.__time_delay)\n <mask token>\n\n def __go_home(self):\n message = rhino.HOME_POSITION_MESSAGE\n self.__write_to_register(message)\n\n def __set_lines_per_rotation(self, lines_per_rotation):\n message = rhino.LINES_PER_ROTATION_MESSAGE\n message[rhino.DATA_INDEX] = lines_per_rotation\n self.__write_to_register(message)\n <mask token>\n\n @staticmethod\n def convert_rad_per_sec_to_rpm(radians_per_sec):\n rpm = radians_per_sec * 9.549297\n rpm_scaled = rpm * rhino.GEAR_RATIO\n return rpm_scaled\n <mask token>\n\n def set_speed(self, speed):\n speed_rpm = abs(int(self.convert_rad_per_sec_to_rpm(speed)))\n if speed_rpm > rhino.SPEED_MAX:\n speed_rpm = rhino.SPEED_MAX\n if speed_rpm < rhino.SPEED_MIN:\n speed_rpm = rhino.SPEED_MIN\n message = rhino.SPEED_MESSAGE\n message[rhino.DATA_INDEX] = speed_rpm\n self.__write_to_register(message)\n\n def set_acceleration(self, acceleration):\n if acceleration > rhino.ACCELERATION_MAX:\n acceleration = rhino.ACCELERATION_MAX\n if acceleration < rhino.ACCELERATION_MIN:\n acceleration = rhino.ACCELERATION_MIN\n message = rhino.ACCELERATION_MESSAGE\n message[rhino.DATA_INDEX] = acceleration\n self.__write_to_register(message)\n <mask token>\n\n def turn_motor_ccw(self):\n message = 
rhino.TURN_MOTOR_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__CCW\n\n def stop_rotation_cw(self):\n message = rhino.STOP_MOTOR_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def stop_rotation_ccw(self):\n message = rhino.STOP_MOTOR_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def stop_rotation(self):\n message = rhino.STOP_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def emergency_stop(self):\n message = rhino.EMERGENCY_STOP_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n <mask token>\n <mask token>\n\n def get_position_raw(self):\n message = rhino.POSITION_FEEDBACK_MESSAGE\n position = self.__read_from_registers(message)\n return position\n <mask token>\n\n def brake_cw(self):\n message = rhino.BRAKE_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Controller:\n\n def __init__(self, port_name, slave_address):\n self.__instrument = modbus.Instrument(port_name, slave_address,\n modbus.MODE_ASCII)\n self.__instrument.serial.baudrate = 9600\n self.__instrument.serial.parity = modbus.serial.PARITY_NONE\n self.__instrument.bytesize = 8\n self.__instrument.stopbits = 1\n self.__instrument.timeout = 5\n self.__instrument.write_timeout = 5\n self.__instrument.clear_buffers_before_each_transaction = True\n self.__time_delay = 0.001\n self.__lock_resource = False\n self.name = self.extract_name_from_port_name(port_name)\n self.__status_rotation_direction = 0\n self.__CW = 1\n self.__CCW = -1\n self.__IDLE = 0\n self.__set_lines_per_rotation(rhino.LINES_PER_ROTATION_DEFAULT)\n self.brake()\n self.__go_home()\n self.set_acceleration(rhino.ACCELERATION_DEFAULT)\n self.set_speed(rhino.SPEED_DEFAULT)\n\n @staticmethod\n def __convert_unsigned32_to_signed32(unsigned32_data):\n mid_uint32 = 2147483648\n if unsigned32_data is not None:\n signed32_data = int(unsigned32_data - mid_uint32)\n return signed32_data\n\n @staticmethod\n def __convert_signed32_to_signed16(signed32_data):\n signed16_data = signed32_data >> 16\n return signed16_data\n <mask token>\n\n def __read_from_registers(self, message_list):\n while True:\n try:\n if not self.__lock_resource:\n self.__lock_resource = True\n register_size = 16\n data = self.__instrument.read_registers(message_list[0],\n message_list[1], message_list[2])\n lsb = data[0]\n msb = data[1]\n combined_data = (msb << register_size) + lsb\n time.sleep(self.__time_delay)\n self.__lock_resource = False\n return combined_data\n \"\"\"\n # combine two registers and create a long integer\n def combine_two_registers(self, reg):\n if reg[1] > 32767:\n long_reg = (65535 - reg[1])\n b = long_reg << 16\n out = (b + 65535 - reg[0]) * -1\n else:\n long_reg = reg[1]\n b = long_reg << 16\n out = b + reg[0]\n return out\n \"\"\"\n except KeyboardInterrupt:\n print('Keyboard Interrupt: ' + self.name)\n except modbus.ModbusException as e:\n print('ModbusException at ' + self.name + ': ' + str(e))\n except modbus.serial.SerialException as e:\n print('Modbus Serial Exception at ' + self.name + ': ' + str(e)\n )\n except modbus.InvalidResponseError as e:\n print('Modbus Invalid Response Exception at ' + self.name +\n ': ' + str(e))\n except Exception as e:\n print('Motor Driver Exception at ' + self.name + ': ' + str(e))\n print(traceback.format_exc())\n time.sleep(self.__time_delay)\n <mask token>\n\n def __go_home(self):\n message = rhino.HOME_POSITION_MESSAGE\n self.__write_to_register(message)\n\n def __set_lines_per_rotation(self, lines_per_rotation):\n message = rhino.LINES_PER_ROTATION_MESSAGE\n message[rhino.DATA_INDEX] = lines_per_rotation\n self.__write_to_register(message)\n <mask token>\n\n @staticmethod\n def convert_rad_per_sec_to_rpm(radians_per_sec):\n rpm = radians_per_sec * 9.549297\n rpm_scaled = rpm * rhino.GEAR_RATIO\n return rpm_scaled\n <mask token>\n\n def set_speed(self, speed):\n speed_rpm = abs(int(self.convert_rad_per_sec_to_rpm(speed)))\n if speed_rpm > rhino.SPEED_MAX:\n speed_rpm = rhino.SPEED_MAX\n if speed_rpm < rhino.SPEED_MIN:\n speed_rpm = rhino.SPEED_MIN\n message = rhino.SPEED_MESSAGE\n message[rhino.DATA_INDEX] = speed_rpm\n self.__write_to_register(message)\n\n def set_acceleration(self, acceleration):\n if acceleration > rhino.ACCELERATION_MAX:\n acceleration = rhino.ACCELERATION_MAX\n if acceleration < rhino.ACCELERATION_MIN:\n acceleration = 
rhino.ACCELERATION_MIN\n message = rhino.ACCELERATION_MESSAGE\n message[rhino.DATA_INDEX] = acceleration\n self.__write_to_register(message)\n <mask token>\n\n def turn_motor_ccw(self):\n message = rhino.TURN_MOTOR_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__CCW\n\n def stop_rotation_cw(self):\n message = rhino.STOP_MOTOR_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def stop_rotation_ccw(self):\n message = rhino.STOP_MOTOR_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def stop_rotation(self):\n message = rhino.STOP_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def emergency_stop(self):\n message = rhino.EMERGENCY_STOP_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n <mask token>\n <mask token>\n\n def get_position_raw(self):\n message = rhino.POSITION_FEEDBACK_MESSAGE\n position = self.__read_from_registers(message)\n return position\n\n def get_speed(self):\n message = rhino.SPEED_FEEDBACK_MESSAGE\n speed = self.__read_from_register(message)\n speed = self.__convert_unsigned32_to_signed32(speed)\n return speed\n\n def brake_cw(self):\n message = rhino.BRAKE_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def brake_ccw(self):\n message = rhino.BRAKE_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Controller:\n\n def __init__(self, port_name, slave_address):\n self.__instrument = modbus.Instrument(port_name, slave_address,\n modbus.MODE_ASCII)\n self.__instrument.serial.baudrate = 9600\n self.__instrument.serial.parity = modbus.serial.PARITY_NONE\n self.__instrument.bytesize = 8\n self.__instrument.stopbits = 1\n self.__instrument.timeout = 5\n self.__instrument.write_timeout = 5\n self.__instrument.clear_buffers_before_each_transaction = True\n self.__time_delay = 0.001\n self.__lock_resource = False\n self.name = self.extract_name_from_port_name(port_name)\n self.__status_rotation_direction = 0\n self.__CW = 1\n self.__CCW = -1\n self.__IDLE = 0\n self.__set_lines_per_rotation(rhino.LINES_PER_ROTATION_DEFAULT)\n self.brake()\n self.__go_home()\n self.set_acceleration(rhino.ACCELERATION_DEFAULT)\n self.set_speed(rhino.SPEED_DEFAULT)\n\n @staticmethod\n def __convert_unsigned32_to_signed32(unsigned32_data):\n mid_uint32 = 2147483648\n if unsigned32_data is not None:\n signed32_data = int(unsigned32_data - mid_uint32)\n return signed32_data\n\n @staticmethod\n def __convert_signed32_to_signed16(signed32_data):\n signed16_data = signed32_data >> 16\n return signed16_data\n\n def __read_from_register(self, message_list):\n while True:\n try:\n if not self.__lock_resource:\n self.__lock_resource = True\n data = self.__instrument.read_register(message_list[0],\n message_list[1], message_list[2])\n time.sleep(self.__time_delay)\n self.__lock_resource = False\n return data\n except KeyboardInterrupt:\n print('Keyboard Interrupt: ' + self.name)\n except modbus.ModbusException as e:\n print('ModbusException at ' + self.name + ': ' + str(e))\n except modbus.serial.SerialException as e:\n print('Modbus Serial Exception at ' + self.name + ': ' + str(e)\n )\n except modbus.InvalidResponseError as e:\n print('Modbus Invalid Response Exception at ' + self.name +\n ': ' + str(e))\n except Exception as e:\n print('Motor Driver Exception at ' + self.name + ': ' + str(e))\n print(traceback.format_exc())\n time.sleep(self.__time_delay)\n\n def __read_from_registers(self, message_list):\n while True:\n try:\n if not self.__lock_resource:\n self.__lock_resource = True\n register_size = 16\n data = self.__instrument.read_registers(message_list[0],\n message_list[1], message_list[2])\n lsb = data[0]\n msb = data[1]\n combined_data = (msb << register_size) + lsb\n time.sleep(self.__time_delay)\n self.__lock_resource = False\n return combined_data\n \"\"\"\n # combine two registers and create a long integer\n def combine_two_registers(self, reg):\n if reg[1] > 32767:\n long_reg = (65535 - reg[1])\n b = long_reg << 16\n out = (b + 65535 - reg[0]) * -1\n else:\n long_reg = reg[1]\n b = long_reg << 16\n out = b + reg[0]\n return out\n \"\"\"\n except KeyboardInterrupt:\n print('Keyboard Interrupt: ' + self.name)\n except modbus.ModbusException as e:\n print('ModbusException at ' + self.name + ': ' + str(e))\n except modbus.serial.SerialException as e:\n print('Modbus Serial Exception at ' + self.name + ': ' + str(e)\n )\n except modbus.InvalidResponseError as e:\n print('Modbus Invalid Response Exception at ' + self.name +\n ': ' + str(e))\n except Exception as e:\n print('Motor Driver Exception at ' + self.name + ': ' + str(e))\n print(traceback.format_exc())\n time.sleep(self.__time_delay)\n <mask token>\n\n def __go_home(self):\n message = rhino.HOME_POSITION_MESSAGE\n self.__write_to_register(message)\n\n def __set_lines_per_rotation(self, lines_per_rotation):\n message = 
rhino.LINES_PER_ROTATION_MESSAGE\n message[rhino.DATA_INDEX] = lines_per_rotation\n self.__write_to_register(message)\n\n @staticmethod\n def extract_name_from_port_name(port_name):\n chars = port_name.split('/')\n name = chars[len(chars) - 1]\n return name\n\n @staticmethod\n def convert_rad_per_sec_to_rpm(radians_per_sec):\n rpm = radians_per_sec * 9.549297\n rpm_scaled = rpm * rhino.GEAR_RATIO\n return rpm_scaled\n\n @staticmethod\n def convert_rpm_to_rad_per_sec(rpm):\n radians_per_sec = rpm * 0.10472\n radians_per_sec_scaled = radians_per_sec / rhino.GEAR_RATIO\n return radians_per_sec_scaled\n\n def set_speed(self, speed):\n speed_rpm = abs(int(self.convert_rad_per_sec_to_rpm(speed)))\n if speed_rpm > rhino.SPEED_MAX:\n speed_rpm = rhino.SPEED_MAX\n if speed_rpm < rhino.SPEED_MIN:\n speed_rpm = rhino.SPEED_MIN\n message = rhino.SPEED_MESSAGE\n message[rhino.DATA_INDEX] = speed_rpm\n self.__write_to_register(message)\n\n def set_acceleration(self, acceleration):\n if acceleration > rhino.ACCELERATION_MAX:\n acceleration = rhino.ACCELERATION_MAX\n if acceleration < rhino.ACCELERATION_MIN:\n acceleration = rhino.ACCELERATION_MIN\n message = rhino.ACCELERATION_MESSAGE\n message[rhino.DATA_INDEX] = acceleration\n self.__write_to_register(message)\n\n def turn_motor_cw(self):\n message = rhino.TURN_MOTOR_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__CW\n\n def turn_motor_ccw(self):\n message = rhino.TURN_MOTOR_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__CCW\n\n def stop_rotation_cw(self):\n message = rhino.STOP_MOTOR_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def stop_rotation_ccw(self):\n message = rhino.STOP_MOTOR_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def stop_rotation(self):\n message = rhino.STOP_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def emergency_stop(self):\n message = rhino.EMERGENCY_STOP_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n <mask token>\n\n def get_position_16bit(self):\n message = rhino.POSITION_FEEDBACK_MESSAGE\n position = self.__read_from_registers(message)\n position_32bit = self.__convert_unsigned32_to_signed32(position)\n position_16bit = self.__convert_signed32_to_signed16(position_32bit)\n return position_16bit\n\n def get_position_raw(self):\n message = rhino.POSITION_FEEDBACK_MESSAGE\n position = self.__read_from_registers(message)\n return position\n\n def get_speed(self):\n message = rhino.SPEED_FEEDBACK_MESSAGE\n speed = self.__read_from_register(message)\n speed = self.__convert_unsigned32_to_signed32(speed)\n return speed\n\n def brake_cw(self):\n message = rhino.BRAKE_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def brake_ccw(self):\n message = rhino.BRAKE_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass Controller:\n\n def __init__(self, port_name, slave_address):\n self.__instrument = modbus.Instrument(port_name, slave_address,\n modbus.MODE_ASCII)\n self.__instrument.serial.baudrate = 9600\n self.__instrument.serial.parity = modbus.serial.PARITY_NONE\n self.__instrument.bytesize = 8\n self.__instrument.stopbits = 1\n self.__instrument.timeout = 5\n self.__instrument.write_timeout = 5\n self.__instrument.clear_buffers_before_each_transaction = True\n self.__time_delay = 0.001\n self.__lock_resource = False\n self.name = self.extract_name_from_port_name(port_name)\n self.__status_rotation_direction = 0\n self.__CW = 1\n self.__CCW = -1\n self.__IDLE = 0\n self.__set_lines_per_rotation(rhino.LINES_PER_ROTATION_DEFAULT)\n self.brake()\n self.__go_home()\n self.set_acceleration(rhino.ACCELERATION_DEFAULT)\n self.set_speed(rhino.SPEED_DEFAULT)\n\n @staticmethod\n def __convert_unsigned32_to_signed32(unsigned32_data):\n mid_uint32 = 2147483648\n if unsigned32_data is not None:\n signed32_data = int(unsigned32_data - mid_uint32)\n return signed32_data\n\n @staticmethod\n def __convert_signed32_to_signed16(signed32_data):\n signed16_data = signed32_data >> 16\n return signed16_data\n\n def __read_from_register(self, message_list):\n while True:\n try:\n if not self.__lock_resource:\n self.__lock_resource = True\n data = self.__instrument.read_register(message_list[0],\n message_list[1], message_list[2])\n time.sleep(self.__time_delay)\n self.__lock_resource = False\n return data\n except KeyboardInterrupt:\n print('Keyboard Interrupt: ' + self.name)\n except modbus.ModbusException as e:\n print('ModbusException at ' + self.name + ': ' + str(e))\n except modbus.serial.SerialException as e:\n print('Modbus Serial Exception at ' + self.name + ': ' + str(e)\n )\n except modbus.InvalidResponseError as e:\n print('Modbus Invalid Response Exception at ' + self.name +\n ': ' + str(e))\n except Exception as e:\n print('Motor Driver Exception at ' + self.name + ': ' + str(e))\n print(traceback.format_exc())\n time.sleep(self.__time_delay)\n\n def __read_from_registers(self, message_list):\n while True:\n try:\n if not self.__lock_resource:\n self.__lock_resource = True\n register_size = 16\n data = self.__instrument.read_registers(message_list[0],\n message_list[1], message_list[2])\n lsb = data[0]\n msb = data[1]\n combined_data = (msb << register_size) + lsb\n time.sleep(self.__time_delay)\n self.__lock_resource = False\n return combined_data\n \"\"\"\n # combine two registers and create a long integer\n def combine_two_registers(self, reg):\n if reg[1] > 32767:\n long_reg = (65535 - reg[1])\n b = long_reg << 16\n out = (b + 65535 - reg[0]) * -1\n else:\n long_reg = reg[1]\n b = long_reg << 16\n out = b + reg[0]\n return out\n \"\"\"\n except KeyboardInterrupt:\n print('Keyboard Interrupt: ' + self.name)\n except modbus.ModbusException as e:\n print('ModbusException at ' + self.name + ': ' + str(e))\n except modbus.serial.SerialException as e:\n print('Modbus Serial Exception at ' + self.name + ': ' + str(e)\n )\n except modbus.InvalidResponseError as e:\n print('Modbus Invalid Response Exception at ' + self.name +\n ': ' + str(e))\n except Exception as e:\n print('Motor Driver Exception at ' + self.name + ': ' + str(e))\n print(traceback.format_exc())\n time.sleep(self.__time_delay)\n\n def __write_to_register(self, message_list):\n while True:\n try:\n if not self.__lock_resource:\n self.__lock_resource = True\n self.__instrument.write_register(message_list[0],\n 
message_list[1], message_list[2], message_list[3])\n time.sleep(self.__time_delay)\n self.__lock_resource = False\n return\n except KeyboardInterrupt:\n print('Keyboard Interrupt: ' + self.name)\n except modbus.ModbusException as e:\n print('ModbusException at ' + self.name + ': ' + str(e))\n except modbus.serial.SerialException as e:\n print('Modbus Serial Exception at ' + self.name + ': ' + str(e)\n )\n except modbus.InvalidResponseError as e:\n print('Modbus Invalid Response Exception at ' + self.name +\n ': ' + str(e))\n except Exception as e:\n print('Motor Driver Exception at ' + self.name + ': ' + str(e))\n print(traceback.format_exc())\n time.sleep(self.__time_delay)\n\n def __go_home(self):\n message = rhino.HOME_POSITION_MESSAGE\n self.__write_to_register(message)\n\n def __set_lines_per_rotation(self, lines_per_rotation):\n message = rhino.LINES_PER_ROTATION_MESSAGE\n message[rhino.DATA_INDEX] = lines_per_rotation\n self.__write_to_register(message)\n\n @staticmethod\n def extract_name_from_port_name(port_name):\n chars = port_name.split('/')\n name = chars[len(chars) - 1]\n return name\n\n @staticmethod\n def convert_rad_per_sec_to_rpm(radians_per_sec):\n rpm = radians_per_sec * 9.549297\n rpm_scaled = rpm * rhino.GEAR_RATIO\n return rpm_scaled\n\n @staticmethod\n def convert_rpm_to_rad_per_sec(rpm):\n radians_per_sec = rpm * 0.10472\n radians_per_sec_scaled = radians_per_sec / rhino.GEAR_RATIO\n return radians_per_sec_scaled\n\n def set_speed(self, speed):\n speed_rpm = abs(int(self.convert_rad_per_sec_to_rpm(speed)))\n if speed_rpm > rhino.SPEED_MAX:\n speed_rpm = rhino.SPEED_MAX\n if speed_rpm < rhino.SPEED_MIN:\n speed_rpm = rhino.SPEED_MIN\n message = rhino.SPEED_MESSAGE\n message[rhino.DATA_INDEX] = speed_rpm\n self.__write_to_register(message)\n\n def set_acceleration(self, acceleration):\n if acceleration > rhino.ACCELERATION_MAX:\n acceleration = rhino.ACCELERATION_MAX\n if acceleration < rhino.ACCELERATION_MIN:\n acceleration = rhino.ACCELERATION_MIN\n message = rhino.ACCELERATION_MESSAGE\n message[rhino.DATA_INDEX] = acceleration\n self.__write_to_register(message)\n\n def turn_motor_cw(self):\n message = rhino.TURN_MOTOR_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__CW\n\n def turn_motor_ccw(self):\n message = rhino.TURN_MOTOR_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__CCW\n\n def stop_rotation_cw(self):\n message = rhino.STOP_MOTOR_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def stop_rotation_ccw(self):\n message = rhino.STOP_MOTOR_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def stop_rotation(self):\n message = rhino.STOP_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def emergency_stop(self):\n message = rhino.EMERGENCY_STOP_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def get_position_32bit(self):\n message = rhino.POSITION_FEEDBACK_MESSAGE\n position = self.__read_from_registers(message)\n return position\n\n def get_position_16bit(self):\n message = rhino.POSITION_FEEDBACK_MESSAGE\n position = self.__read_from_registers(message)\n position_32bit = self.__convert_unsigned32_to_signed32(position)\n position_16bit = self.__convert_signed32_to_signed16(position_32bit)\n return position_16bit\n\n def get_position_raw(self):\n message = 
rhino.POSITION_FEEDBACK_MESSAGE\n position = self.__read_from_registers(message)\n return position\n\n def get_speed(self):\n message = rhino.SPEED_FEEDBACK_MESSAGE\n speed = self.__read_from_register(message)\n speed = self.__convert_unsigned32_to_signed32(speed)\n return speed\n\n def brake_cw(self):\n message = rhino.BRAKE_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def brake_ccw(self):\n message = rhino.BRAKE_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def brake(self):\n if self.__status_rotation_direction == self.__CW:\n self.brake_cw()\n print(self.name + ': Brake CW')\n self.__status_rotation_direction = self.__IDLE\n elif self.__status_rotation_direction == self.__CCW:\n self.brake_ccw()\n print(self.name + ': Brake CCW')\n self.__status_rotation_direction = self.__IDLE\n elif self.__status_rotation_direction == self.__IDLE:\n print(self.name + ': Motor idle')\n else:\n print(self.name + ': Motor Unknown Rotation Status')\n",
"step-5": "#!/usr/bin/env python3\n\n# Rhino Motor Driver (RMCS 2303) - Basic Modbus Communication\n# -----------------------------------------------------------\n\n\"\"\"\n BSD 3-Clause License\n\n Copyright (c) 2021, Rajesh Subramanian\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport time\nimport traceback\nimport minimalmodbus as modbus\nimport rhino_params as rhino\n\n\nclass Controller:\n def __init__(self, port_name, slave_address):\n # Parameters\n self.__instrument = modbus.Instrument(port_name, slave_address, modbus.MODE_ASCII)\n self.__instrument.serial.baudrate = 9600\n self.__instrument.serial.parity = modbus.serial.PARITY_NONE\n self.__instrument.bytesize = 8\n self.__instrument.stopbits = 1\n self.__instrument.timeout = 5 # seconds\n self.__instrument.write_timeout = 5 # seconds\n self.__instrument.clear_buffers_before_each_transaction = True\n # self.__instrument.close_port_after_each_call = True\n self.__time_delay = 0.001 #0.001 # default: 1 ms\n self.__lock_resource = False # To prevent issuing simultaneous commands to RMCS2303 motor controller. 
Eg.\n # trying to read encoder value while writing motor enable command\n self.name = self.extract_name_from_port_name(port_name)\n self.__status_rotation_direction = 0\n self.__CW = 1 # clockwise rotation status\n self.__CCW = -1 # counter clockwise rotation status\n self.__IDLE = 0 # no rotation status\n\n # Functions\n self.__set_lines_per_rotation(rhino.LINES_PER_ROTATION_DEFAULT)\n self.brake()\n self.__go_home()\n self.set_acceleration(rhino.ACCELERATION_DEFAULT)\n self.set_speed(rhino.SPEED_DEFAULT)\n\n # Private Functions\n # -----------------\n @staticmethod\n def __convert_unsigned32_to_signed32(unsigned32_data):\n # UInt32 range: 0 to 4294967295\n # Int32 range: -2147483648 to 2147483647\n mid_uint32 = 2147483648\n if unsigned32_data is not None:\n signed32_data = int(unsigned32_data - mid_uint32)\n return signed32_data\n\n @staticmethod\n def __convert_signed32_to_signed16(signed32_data):\n # Int16 range: -32768 to 32767\n signed16_data = signed32_data >> 16\n return signed16_data\n\n def __read_from_register(self, message_list):\n while True: # Attempt sending message until the controller is free\n try:\n if not self.__lock_resource: # Check if controller is in use\n self.__lock_resource = True\n data = self.__instrument.read_register(message_list[0], message_list[1], message_list[2])\n time.sleep(self.__time_delay)\n self.__lock_resource = False\n return data\n except KeyboardInterrupt:\n print(\"Keyboard Interrupt: \" + self.name)\n except modbus.ModbusException as e:\n print(\"ModbusException at \" + self.name + \": \" + str(e))\n except modbus.serial.SerialException as e:\n print(\"Modbus Serial Exception at \" + self.name + \": \" + str(e))\n except modbus.InvalidResponseError as e:\n print(\"Modbus Invalid Response Exception at \" + self.name + \": \" + str(e))\n except Exception as e:\n print(\"Motor Driver Exception at \" + self.name + \": \" + str(e))\n print(traceback.format_exc())\n time.sleep(self.__time_delay)\n\n def __read_from_registers(self, message_list):\n while True: # Attempt sending message until the controller is free\n try:\n if not self.__lock_resource: # Check if controller is in use\n self.__lock_resource = True\n register_size = 16\n data = self.__instrument.read_registers(message_list[0], message_list[1], message_list[2])\n lsb = data[0]\n msb = data[1]\n combined_data = (msb << register_size) + lsb # combining two 16 bit values into one 32 bit value\n time.sleep(self.__time_delay)\n self.__lock_resource = False\n return combined_data\n '''\n # combine two registers and create a long integer\n def combine_two_registers(self, reg):\n if reg[1] > 32767:\n long_reg = (65535 - reg[1])\n b = long_reg << 16\n out = (b + 65535 - reg[0]) * -1\n else:\n long_reg = reg[1]\n b = long_reg << 16\n out = b + reg[0]\n return out\n '''\n except KeyboardInterrupt:\n print(\"Keyboard Interrupt: \" + self.name)\n except modbus.ModbusException as e:\n print(\"ModbusException at \" + self.name + \": \" + str(e))\n except modbus.serial.SerialException as e:\n print(\"Modbus Serial Exception at \" + self.name + \": \" + str(e))\n except modbus.InvalidResponseError as e:\n print(\"Modbus Invalid Response Exception at \" + self.name + \": \" + str(e))\n except Exception as e:\n print(\"Motor Driver Exception at \" + self.name + \": \" + str(e))\n print(traceback.format_exc())\n time.sleep(self.__time_delay)\n\n def __write_to_register(self, message_list):\n while True: # Attempt sending message until the controller is free\n try:\n if not self.__lock_resource: # Check if 
controller is in use\n self.__lock_resource = True\n self.__instrument.write_register(message_list[0], message_list[1], message_list[2], message_list[3])\n time.sleep(self.__time_delay)\n self.__lock_resource = False\n return\n except KeyboardInterrupt:\n print(\"Keyboard Interrupt: \" + self.name)\n except modbus.ModbusException as e:\n print(\"ModbusException at \" + self.name + \": \" + str(e))\n except modbus.serial.SerialException as e:\n print(\"Modbus Serial Exception at \" + self.name + \": \" + str(e))\n except modbus.InvalidResponseError as e:\n print(\"Modbus Invalid Response Exception at \" + self.name + \": \" + str(e))\n except Exception as e:\n print(\"Motor Driver Exception at \" + self.name + \": \" + str(e))\n print(traceback.format_exc())\n\n time.sleep(self.__time_delay)\n\n def __go_home(self):\n message = rhino.HOME_POSITION_MESSAGE\n self.__write_to_register(message)\n\n def __set_lines_per_rotation(self, lines_per_rotation):\n message = rhino.LINES_PER_ROTATION_MESSAGE\n message[rhino.DATA_INDEX] = lines_per_rotation\n self.__write_to_register(message)\n\n # Public Functions\n # ----------------\n @staticmethod\n def extract_name_from_port_name(port_name):\n chars = port_name.split(\"/\")\n name = chars[len(chars) - 1]\n return name\n\n @staticmethod\n def convert_rad_per_sec_to_rpm(radians_per_sec):\n # Formula: rpm = rad/sec * 9.549297\n rpm = radians_per_sec * 9.549297\n rpm_scaled = rpm * rhino.GEAR_RATIO\n return rpm_scaled\n\n @staticmethod\n def convert_rpm_to_rad_per_sec(rpm):\n # Formula: rad/sec = rpm * 0.10472\n radians_per_sec = rpm * 0.10472\n radians_per_sec_scaled = radians_per_sec / rhino.GEAR_RATIO\n return radians_per_sec_scaled\n\n def set_speed(self, speed):\n speed_rpm = abs(int(self.convert_rad_per_sec_to_rpm(speed)))\n if speed_rpm > rhino.SPEED_MAX:\n speed_rpm = rhino.SPEED_MAX\n if speed_rpm < rhino.SPEED_MIN:\n speed_rpm = rhino.SPEED_MIN\n message = rhino.SPEED_MESSAGE\n message[rhino.DATA_INDEX] = speed_rpm\n self.__write_to_register(message)\n\n def set_acceleration(self, acceleration):\n if acceleration > rhino.ACCELERATION_MAX:\n acceleration = rhino.ACCELERATION_MAX\n if acceleration < rhino.ACCELERATION_MIN:\n acceleration = rhino.ACCELERATION_MIN\n message = rhino.ACCELERATION_MESSAGE\n message[rhino.DATA_INDEX] = acceleration\n self.__write_to_register(message)\n\n def turn_motor_cw(self):\n message = rhino.TURN_MOTOR_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__CW\n\n def turn_motor_ccw(self):\n message = rhino.TURN_MOTOR_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__CCW\n\n def stop_rotation_cw(self):\n message = rhino.STOP_MOTOR_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def stop_rotation_ccw(self):\n message = rhino.STOP_MOTOR_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def stop_rotation(self):\n message = rhino.STOP_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def emergency_stop(self):\n message = rhino.EMERGENCY_STOP_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def get_position_32bit(self):\n message = rhino.POSITION_FEEDBACK_MESSAGE\n position = self.__read_from_registers(message)\n # position = self.__convert_unsigned32_to_signed32(position)\n return position\n\n def get_position_16bit(self):\n message = 
rhino.POSITION_FEEDBACK_MESSAGE\n position = self.__read_from_registers(message)\n position_32bit = self.__convert_unsigned32_to_signed32(position)\n position_16bit = self.__convert_signed32_to_signed16(position_32bit)\n return position_16bit\n\n def get_position_raw(self):\n message = rhino.POSITION_FEEDBACK_MESSAGE\n position = self.__read_from_registers(message)\n return position\n\n def get_speed(self):\n message = rhino.SPEED_FEEDBACK_MESSAGE\n speed = self.__read_from_register(message)\n speed = self.__convert_unsigned32_to_signed32(speed)\n return speed\n\n def brake_cw(self):\n message = rhino.BRAKE_CW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def brake_ccw(self):\n message = rhino.BRAKE_CCW_MESSAGE\n self.__write_to_register(message)\n self.__status_rotation_direction = self.__IDLE\n\n def brake(self):\n if self.__status_rotation_direction == self.__CW:\n self.brake_cw()\n print(self.name + \": Brake CW\")\n self.__status_rotation_direction = self.__IDLE\n elif self.__status_rotation_direction == self.__CCW:\n self.brake_ccw()\n print(self.name + \": Brake CCW\")\n self.__status_rotation_direction = self.__IDLE\n elif self.__status_rotation_direction == self.__IDLE:\n print(self.name + \": Motor idle\")\n else:\n print(self.name + \": Motor Unknown Rotation Status\")\n",
"step-ids": [
16,
19,
24,
27,
29
]
}
|
[
16,
19,
24,
27,
29
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(' O dobro de {} é {}'.format(n, n * 2))
print(' O triplo de {} é {}'.format(n, n * 3))
print(' A Raiz quadrada de {} é {}'.format(n, n * n))
<|reserved_special_token_1|>
n = int(input('Digite um número inteiro: '))
print(' O dobro de {} é {}'.format(n, n * 2))
print(' O triplo de {} é {}'.format(n, n * 3))
print(' A Raiz quadrada de {} é {}'.format(n, n * n))
|
flexible
|
{
"blob_id": "c0ad3d642f28cb11a8225d4d011dbb241bd88432",
"index": 1661,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(' O dobro de {} é {}'.format(n, n * 2))\nprint(' O triplo de {} é {}'.format(n, n * 3))\nprint(' A Raiz quadrada de {} é {}'.format(n, n * n))\n",
"step-3": "n = int(input('Digite um número inteiro: '))\nprint(' O dobro de {} é {}'.format(n, n * 2))\nprint(' O triplo de {} é {}'.format(n, n * 3))\nprint(' A Raiz quadrada de {} é {}'.format(n, n * n))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
from django.core.management import call_command
from django.http import JsonResponse
from django.test import TestCase
from django.urls import reverse
URLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']
class GetJsonData(TestCase):
def test_post_not_login_no_pk(self):
for url in URLS:
response = self.client.get(reverse(url))
self.check_redirect(response)
def check_redirect(self, response):
self.assertEqual(response.status_code, 200)
self.assertEqual(type(response), JsonResponse)
class UnLoginGetArticleJsonTestCase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
call_command('loaddata', 'fixtures/auth.json', verbosity=0)
call_command('loaddata', 'fixtures/dump.json', verbosity=0)
def test_article_success_data(self):
url = reverse('api_v1:articles')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('description', data[0])
self.assertIn('category_id', data[0])
self.assertIn('user_id', data[0])
self.assertIn('image', data[0])
def test_get_main_category_json_data(self):
url = reverse('api_v1:main_categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
def test_get_json_category_success_data(self):
url = reverse('api_v1:categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('parent_id', data[0])
|
normal
|
{
"blob_id": "676caabb103f67c631bc191b11ab0d2d8ab25d1e",
"index": 5803,
"step-1": "<mask token>\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-2": "<mask token>\n\n\nclass GetJsonData(TestCase):\n <mask token>\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-3": "<mask token>\n\n\nclass GetJsonData(TestCase):\n\n def test_post_not_login_no_pk(self):\n for url in URLS:\n response = self.client.get(reverse(url))\n self.check_redirect(response)\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-4": "<mask token>\nURLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']\n\n\nclass GetJsonData(TestCase):\n\n def test_post_not_login_no_pk(self):\n for url in URLS:\n response = self.client.get(reverse(url))\n self.check_redirect(response)\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-5": "import json\n\nfrom django.core.management import call_command\nfrom django.http import JsonResponse\nfrom django.test import TestCase\nfrom django.urls import reverse\n\n\nURLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']\n\n\nclass GetJsonData(TestCase):\n def test_post_not_login_no_pk(self):\n for url in URLS:\n response = self.client.get(reverse(url))\n self.check_redirect(response)\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-ids": [
5,
7,
8,
9,
11
]
}
|
[
5,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
class RoughLightGame:
def __init__(self, game_map, width, height, **kwargs):
self.map = game_map
self.width = width
self.height = height
self.objects = kwargs.get('objects', list())
self.start = kwargs.get('start', utils.Vector(0, 0))
self.player = kwargs.get('player', None)
if not self.player:
self.player = objects.Player(self.start, b'@', WHITE, self.map,
STARTING_LIFE, fov=20)
self.objects.append(self.player)
count = 0
for room in self.map.rooms:
label = objects.Object(room.get_center(), chr(ord('a') + count),
WHITE, True, False)
self.objects.append(label)
count += 1
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def move_player(self, direction):
if not self.is_blocked(self.player.location + direction):
self.player.move(direction)
<|reserved_special_token_0|>
def get_area(self, width, height):
return self.map.get_area(width, height, self.player.location)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RoughLightGame:
def __init__(self, game_map, width, height, **kwargs):
self.map = game_map
self.width = width
self.height = height
self.objects = kwargs.get('objects', list())
self.start = kwargs.get('start', utils.Vector(0, 0))
self.player = kwargs.get('player', None)
if not self.player:
self.player = objects.Player(self.start, b'@', WHITE, self.map,
STARTING_LIFE, fov=20)
self.objects.append(self.player)
count = 0
for room in self.map.rooms:
label = objects.Object(room.get_center(), chr(ord('a') + count),
WHITE, True, False)
self.objects.append(label)
count += 1
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def move_player(self, direction):
if not self.is_blocked(self.player.location + direction):
self.player.move(direction)
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.blocks and object.location == location for object in
self.objects)
def get_area(self, width, height):
return self.map.get_area(width, height, self.player.location)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RoughLightGame:
def __init__(self, game_map, width, height, **kwargs):
self.map = game_map
self.width = width
self.height = height
self.objects = kwargs.get('objects', list())
self.start = kwargs.get('start', utils.Vector(0, 0))
self.player = kwargs.get('player', None)
if not self.player:
self.player = objects.Player(self.start, b'@', WHITE, self.map,
STARTING_LIFE, fov=20)
self.objects.append(self.player)
count = 0
for room in self.map.rooms:
label = objects.Object(room.get_center(), chr(ord('a') + count),
WHITE, True, False)
self.objects.append(label)
count += 1
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.location == location and object.blocks for object in
self.objects)
<|reserved_special_token_0|>
def move_player(self, direction):
if not self.is_blocked(self.player.location + direction):
self.player.move(direction)
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.blocks and object.location == location for object in
self.objects)
def get_area(self, width, height):
return self.map.get_area(width, height, self.player.location)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
START = 0, 0
STARTING_LIFE = 10
WHITE = 255, 255, 255
class RoughLightGame:
def __init__(self, game_map, width, height, **kwargs):
self.map = game_map
self.width = width
self.height = height
self.objects = kwargs.get('objects', list())
self.start = kwargs.get('start', utils.Vector(0, 0))
self.player = kwargs.get('player', None)
if not self.player:
self.player = objects.Player(self.start, b'@', WHITE, self.map,
STARTING_LIFE, fov=20)
self.objects.append(self.player)
count = 0
for room in self.map.rooms:
label = objects.Object(room.get_center(), chr(ord('a') + count),
WHITE, True, False)
self.objects.append(label)
count += 1
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.location == location and object.blocks for object in
self.objects)
def visible_objects(self):
res = []
for object in self.objects:
if object.visible and object.location in self.player.seen:
if self.map.in_area(self.width, self.height, object.
location, self.player.location):
res.append(object)
return reversed(res)
def move_player(self, direction):
if not self.is_blocked(self.player.location + direction):
self.player.move(direction)
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.blocks and object.location == location for object in
self.objects)
def get_area(self, width, height):
return self.map.get_area(width, height, self.player.location)
<|reserved_special_token_1|>
from . import utils
from . import objects
START = (0, 0)
STARTING_LIFE = 10
WHITE = (255, 255, 255)
class RoughLightGame:
def __init__(self, game_map, width, height, **kwargs):
self.map = game_map
self.width = width
self.height = height
self.objects = kwargs.get('objects', list())
self.start = kwargs.get('start', utils.Vector(0, 0))
# player initialization
self.player = kwargs.get('player', None)
if not self.player:
self.player = objects.Player(self.start, b'@', WHITE,
self.map, STARTING_LIFE, fov=20)
self.objects.append(self.player)
# Add room lables to map
count = 0
for room in self.map.rooms:
label = objects.Object(room.get_center(), chr(ord('a')+count), WHITE, True, False)
self.objects.append(label)
count += 1
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.location == location and object.blocks for object in self.objects)
def visible_objects(self):
res = []
for object in self.objects:
if object.visible and object.location in self.player.seen:
if self.map.in_area(self.width, self.height, object.location, self.player.location):
res.append(object)
return reversed(res)
def move_player(self, direction):
if not self.is_blocked(self.player.location + direction):
self.player.move(direction)
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.blocks and object.location == location for object in self.objects)
def get_area(self, width, height):
# Get the current area the player is in based on desired size and players location
return self.map.get_area(width, height, self.player.location)
|
flexible
|
{
"blob_id": "5f089c3e67452fe6d14f96a70d792bc0d056b375",
"index": 9227,
"step-1": "<mask token>\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n <mask token>\n <mask token>\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n <mask token>\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-2": "<mask token>\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n <mask token>\n <mask token>\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.blocks and object.location == location for object in\n self.objects)\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-3": "<mask token>\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.location == location and object.blocks for object in\n self.objects)\n <mask token>\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.blocks and object.location == location for object in\n self.objects)\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-4": "<mask token>\nSTART = 0, 0\nSTARTING_LIFE = 10\nWHITE = 255, 255, 255\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.location == location and object.blocks for object in\n self.objects)\n\n def visible_objects(self):\n res = []\n for object in self.objects:\n if object.visible and object.location in self.player.seen:\n if self.map.in_area(self.width, self.height, object.\n location, self.player.location):\n res.append(object)\n return reversed(res)\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.blocks and object.location == location for object in\n self.objects)\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-5": "from . import utils\nfrom . import objects\n\nSTART = (0, 0)\nSTARTING_LIFE = 10\n\nWHITE = (255, 255, 255)\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n\n self.map = game_map\n self.width = width\n self.height = height\n\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n\n # player initialization\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE,\n self.map, STARTING_LIFE, fov=20)\n\n self.objects.append(self.player)\n\n # Add room lables to map\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a')+count), WHITE, True, False)\n self.objects.append(label)\n count += 1\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n\n return any(object.location == location and object.blocks for object in self.objects)\n\n\n def visible_objects(self):\n res = []\n for object in self.objects:\n if object.visible and object.location in self.player.seen:\n if self.map.in_area(self.width, self.height, object.location, self.player.location):\n res.append(object)\n return reversed(res)\n \n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n\n return any(object.blocks and object.location == location for object in self.objects)\n\n def get_area(self, width, height):\n # Get the current area the player is in based on desired size and players location\n return self.map.get_area(width, height, self.player.location)\n\n\n",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(list(result))
<|reserved_special_token_0|>
print(list(result))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
even_integers = lambda a: a % 2 == 0
input = [11, 4, 5, 8, 9, 2, 12]
result = filter(even_integers, input)
print(list(result))
input = [3, 5, 7]
result = filter(even_integers, input)
print(list(result))
<|reserved_special_token_1|>
'''
filter_items = lambda a : a[0] == 'b'
fruits = ["apple", "banana", "pear", "orange"]
result = filter(filter_items, fruits)
print(list(result))
'''
'''
Given a list of integers, return the even integers in the list.
input = [11, 4, 5, 8, 9, 2, 12]
output = [4, 8, 2, 12]
input = [3, 5, 7]
output = []
'''
# even_integers = lambda a : a / 2 == 0
even_integers = lambda a : a % 2 == 0
input = [11, 4, 5, 8, 9, 2, 12]
result = filter(even_integers, input)
print(list(result))
input = [3, 5, 7]
result = filter(even_integers, input)
print(list(result))
|
flexible
|
{
"blob_id": "7d9032b2426dbf3c285b99efa78be38d8f76ec24",
"index": 1933,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(list(result))\n<mask token>\nprint(list(result))\n",
"step-3": "<mask token>\neven_integers = lambda a: a % 2 == 0\ninput = [11, 4, 5, 8, 9, 2, 12]\nresult = filter(even_integers, input)\nprint(list(result))\ninput = [3, 5, 7]\nresult = filter(even_integers, input)\nprint(list(result))\n",
"step-4": "'''\nfilter_items = lambda a : a[0] == 'b'\n\nfruits = [\"apple\", \"banana\", \"pear\", \"orange\"]\nresult = filter(filter_items, fruits)\nprint(list(result))\n'''\n\n'''\nGiven a list of integers, return the even integers in the list.\n\ninput = [11, 4, 5, 8, 9, 2, 12]\noutput = [4, 8, 2, 12]\n\ninput = [3, 5, 7]\noutput = []\n'''\n\n# even_integers = lambda a : a / 2 == 0\neven_integers = lambda a : a % 2 == 0\n\ninput = [11, 4, 5, 8, 9, 2, 12]\nresult = filter(even_integers, input)\nprint(list(result))\n\ninput = [3, 5, 7]\nresult = filter(even_integers, input)\nprint(list(result))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class _TimeIT(object):
<|reserved_special_token_0|>
def __init__(self, func, args_list, kwargs_dict, setup_line_list,
check_too_fast, run_sec, name, perf_counter_reference_time):
""" Constructor. See class doc string.
"""
self.func = func
self.orig_func_name = getattr(self.func, '__name__', self.func)
self.args_list = args_list.copy()
self.kwargs_dict = kwargs_dict.copy()
self.setup_line_list = setup_line_list
self.check_too_fast = check_too_fast
self.run_sec = run_sec
self.name = name
self.perf_counter_reference_time = perf_counter_reference_time
if callable(self.func):
_ns = {}
self.src = self.__get_final_inner_function()
if (self.run_sec is not None and self.run_sec != -1 and self.
run_sec < 0.1):
raise Err('_TimeIT.__init__()',
'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'
.format(self.run_sec))
_code = compile(self.src, 'benchmarkit-src', 'exec')
exec(_code, globals(), _ns)
self.inner = _ns['inner']
else:
raise ValueError('<func>: is not a `callable` type: <{}>'.
format(self.func))
def benchmark_it(self, with_gc):
""" Returns timing result for the `func code block`
.. note::
By default, timeit() temporarily turns off garbage collection during the timing.
The advantage of this approach is that it makes independent timings more comparable.
         The disadvantage is that GC may be an important component of the performance of the function being measured.
         If so, GC can be re-enabled by passing with_gc=True
Returns:
dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec
- loops: how many times the `func code block` was executed (looped over)
- all_loops_time_sec: the total time in seconds for all loops:
only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime
            - avg_loop_sec: average loop time in seconds: this should mostly be used as the measured time:
               if there were only a very low number of loops - one might want to increase the `run_sec` and rerun it
            - best_loop_sec / second_best_loop_sec: time in seconds for the two fastest of all loops
            - worst_loop_sec / second_worst_loop_sec: time in seconds for the two slowest of all loops
Raises:
SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1
"""
if self.run_sec is None:
benchmark_result = self.src
elif with_gc:
gc_old = gc.isenabled()
gc.enable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if not gc_old:
gc.disable()
else:
gc_old = gc.isenabled()
gc.disable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if gc_old:
gc.enable()
return benchmark_result
def __get_final_inner_function(self):
""" Returns a string of an generated inner function with the code body from: func
Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict
.. warnings:: the `func` function may not have any return statements: but any inner function can have one
Returns:
str: generated inner function
Raises:
SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation
"""
has_block_speedit = False
_start_block_stripped_line = ''
start_tag_block_speedit = 0
end_tag_block_speedit = 0
func_line, lnum = getsourcelines(self.func)
sig = signature(self.func)
indent_ = None
func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())
func_body = func_line[1:]
search_docstring = False
first_none_docstring_idx = 0
for idx, line_orig in enumerate(func_body):
rstripped_line = line_orig.rstrip()
if rstripped_line:
stripped_codeline = rstripped_line.lstrip()
if stripped_codeline[0] == '#':
if not ('::SPEEDIT::' in stripped_codeline or
'**SPEEDIT**' in stripped_codeline):
continue
if search_docstring:
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = False
continue
else:
codebody_indent = len(rstripped_line) - len(
stripped_codeline)
indent_ = codebody_indent - func_def_indent
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = True
continue
first_none_docstring_idx = idx
break
adjusted_func_code_line = []
for line_orig in func_body[first_none_docstring_idx:]:
if line_orig:
rstrip_line = line_orig.rstrip()
if rstrip_line:
stripped_line = rstrip_line.lstrip()
if stripped_line[0] == '#':
if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in
stripped_line):
has_block_speedit = True
else:
continue
line_indentation = len(rstrip_line) - len(stripped_line)
if line_indentation % indent_ != 0:
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: ERROR: indentation must be a multiple of the second function line: <{}>
seems we encountered a wrong indented line: line_indentation: <{}>
{}"""
.format(self.orig_func_name, indent_,
line_indentation, line_orig))
line_indentation_level = int((line_indentation -
func_def_indent) / indent_) + 1
if has_block_speedit:
if '::SPEEDIT::' in stripped_line:
if (start_tag_block_speedit !=
end_tag_block_speedit):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an END-TAG <**SPEEDIT**>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
start_tag_block_speedit += 1
_start_block_stripped_line = stripped_line
elif '**SPEEDIT**' in stripped_line:
if (end_tag_block_speedit !=
start_tag_block_speedit - 1):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an START-TAG <::SPEEDIT::>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(' ' *
line_indentation_level +
'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'
.format(_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
end_tag_block_speedit += 1
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
if has_block_speedit:
if start_tag_block_speedit != end_tag_block_speedit:
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'.format(
_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
else:
adjusted_func_code_line.insert(0,
' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
final_param_line = []
for param, value in sig.parameters.items():
if value.kind == value.POSITIONAL_OR_KEYWORD:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.POSITIONAL_ONLY:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
raise Err('_TimeIT.get_final_inner_function()',
'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'
.format(param, value.kind))
elif value.kind == value.VAR_POSITIONAL:
parameter_line = '{} = {}'.format(param, self.args_list)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.KEYWORD_ONLY:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = value.default
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.VAR_KEYWORD:
parameter_line = '{} = {}'.format(param, self.kwargs_dict)
final_param_line.append(' ' * 2 + parameter_line)
else:
continue
final_setup_lines = []
for setup_line in self.setup_line_list:
setup_line = setup_line.strip()
if setup_line:
final_setup_lines.append(' ' + setup_line)
final_inner_function_lines = [
'def inner(): # orig function name: <{}>'.format(self.
orig_func_name),
' from time import perf_counter as _speeit_prefix__perf_counter',
'', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',
' # ==================== START SETUP LINES ==================== #'
, '']
final_inner_function_lines.extend(final_setup_lines)
inner_function_lines_part2 = ['',
' # ==================== END SETUP LINES ==================== #',
'',
' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'
, ' _speeit_prefix__check_reference_time = {}'.format(self.
perf_counter_reference_time), ' _speeit_prefix__loops = 0',
' _speeit_prefix__all_loops_time_sec = 0.0',
' _speeit_prefix__avg_loop_sec = 0.0',
' _speeit_prefix__best_loop_sec = 99999999999.0',
' _speeit_prefix__second_best_loop_sec = 99999999999.0',
' _speeit_prefix__worst_loop_sec = 0.0',
' _speeit_prefix__second_worst_loop_sec = 0.0',
' if _speeit_prefix__run_sec is None:', ' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,'
, ' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', ' elif _speeit_prefix__run_sec == -1:',
' # only run it once',
' _speeit_prefix__run_once = True', ' else:',
' _speeit_prefix__run_once = False',
' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'
, ' while True:', ' _speeit_prefix__loops += 1',
' _speeit_prefix__result_time = 0', '',
' # ==================== START CODE BLOCK ==================== #'
, '']
final_inner_function_lines.extend(inner_function_lines_part2)
final_inner_function_lines.extend(final_param_line)
final_inner_function_lines.extend(adjusted_func_code_line)
inner_function_lines_rest = ['',
' # ==================== END CODE BLOCK ==================== #'
, '',
' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'
,
' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'
,
' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'
,
' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec'
,
' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'
, ' if _speeit_prefix__run_once:', ' break',
' # check if we have to get out',
' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'
, ' break',
' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'
,
' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',
' _speeit_prefix__second_best_loop_sec = -1.0',
' if _speeit_prefix__second_worst_loop_sec == 0.0:',
' _speeit_prefix__second_worst_loop_sec = -1.0',
' return {', ' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', '']
final_inner_function_lines.extend(inner_function_lines_rest)
return '\n'.join(final_inner_function_lines)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
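# Sketch of the gc enable/disable pattern that benchmark_it() applies around a
# timed run (standalone illustration using only the standard library; the helper
# name `time_once` is hypothetical and not part of SpeedIT).
import gc
from time import perf_counter


def time_once(fn, *args, with_gc=False, **kwargs):
    gc_old = gc.isenabled()
    if with_gc:
        gc.enable()
    else:
        gc.disable()
    try:
        start = perf_counter()
        fn(*args, **kwargs)
        return perf_counter() - start
    finally:
        # restore the collector to the state it was in before timing
        if gc_old:
            gc.enable()
        else:
            gc.disable()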
<|reserved_special_token_0|>
class _TimeIT(object):
""" Class for timing execution speed of function code.
Partially based on code from python timeit.py
This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`
This avoids calling into the function itself
Args:
func (function):
.. warning:: the `func` function may not have any return statements: but any inner function can have one
OK
.. code-block:: python
def example_formal_func_inner(data_):
shuffle(data_)
def fninner(x):
return x[1]
result = sorted(data_.items(), key=fninner)
del result
NOT OK
.. code-block:: python
def example_pep265(data_):
shuffle(data_)
result = sorted(data_.items(), key=itemgetter(1))
return result
func_positional_arguments (list): positional arguments for the function
func_keyword_arguments (dict): any keyword arguments for the function
       setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
this part is executed once before the actual `func code block` enters the loop
.. warning:: no multiline string or indented code line
check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)
- if run_sec is -1: then the generated function source code is only run once
- if run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
name (str): the name used for the output `name` part
perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()
"""
def __init__(self, func, args_list, kwargs_dict, setup_line_list,
check_too_fast, run_sec, name, perf_counter_reference_time):
""" Constructor. See class doc string.
"""
self.func = func
self.orig_func_name = getattr(self.func, '__name__', self.func)
self.args_list = args_list.copy()
self.kwargs_dict = kwargs_dict.copy()
self.setup_line_list = setup_line_list
self.check_too_fast = check_too_fast
self.run_sec = run_sec
self.name = name
self.perf_counter_reference_time = perf_counter_reference_time
if callable(self.func):
_ns = {}
self.src = self.__get_final_inner_function()
if (self.run_sec is not None and self.run_sec != -1 and self.
run_sec < 0.1):
raise Err('_TimeIT.__init__()',
'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'
.format(self.run_sec))
_code = compile(self.src, 'benchmarkit-src', 'exec')
exec(_code, globals(), _ns)
self.inner = _ns['inner']
else:
raise ValueError('<func>: is not a `callable` type: <{}>'.
format(self.func))
def benchmark_it(self, with_gc):
""" Returns timing result for the `func code block`
.. note::
By default, timeit() temporarily turns off garbage collection during the timing.
The advantage of this approach is that it makes independent timings more comparable.
         The disadvantage is that GC may be an important component of the performance of the function being measured.
         If so, GC can be re-enabled by passing with_gc=True
Returns:
dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec
- loops: how many times the `func code block` was executed (looped over)
- all_loops_time_sec: the total time in seconds for all loops:
only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime
            - avg_loop_sec: average loop time in seconds: this should mostly be used as the measured time:
               if there were only a very low number of loops - one might want to increase the `run_sec` and rerun it
            - best_loop_sec / second_best_loop_sec: time in seconds for the two fastest of all loops
            - worst_loop_sec / second_worst_loop_sec: time in seconds for the two slowest of all loops
Raises:
SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1
"""
if self.run_sec is None:
benchmark_result = self.src
elif with_gc:
gc_old = gc.isenabled()
gc.enable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if not gc_old:
gc.disable()
else:
gc_old = gc.isenabled()
gc.disable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if gc_old:
gc.enable()
return benchmark_result
def __get_final_inner_function(self):
""" Returns a string of an generated inner function with the code body from: func
Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict
.. warnings:: the `func` function may not have any return statements: but any inner function can have one
Returns:
str: generated inner function
Raises:
SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation
"""
has_block_speedit = False
_start_block_stripped_line = ''
start_tag_block_speedit = 0
end_tag_block_speedit = 0
func_line, lnum = getsourcelines(self.func)
sig = signature(self.func)
indent_ = None
func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())
func_body = func_line[1:]
search_docstring = False
first_none_docstring_idx = 0
for idx, line_orig in enumerate(func_body):
rstripped_line = line_orig.rstrip()
if rstripped_line:
stripped_codeline = rstripped_line.lstrip()
if stripped_codeline[0] == '#':
if not ('::SPEEDIT::' in stripped_codeline or
'**SPEEDIT**' in stripped_codeline):
continue
if search_docstring:
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = False
continue
else:
codebody_indent = len(rstripped_line) - len(
stripped_codeline)
indent_ = codebody_indent - func_def_indent
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = True
continue
first_none_docstring_idx = idx
break
adjusted_func_code_line = []
for line_orig in func_body[first_none_docstring_idx:]:
if line_orig:
rstrip_line = line_orig.rstrip()
if rstrip_line:
stripped_line = rstrip_line.lstrip()
if stripped_line[0] == '#':
if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in
stripped_line):
has_block_speedit = True
else:
continue
line_indentation = len(rstrip_line) - len(stripped_line)
if line_indentation % indent_ != 0:
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: ERROR: indentation must be a multiple of the second function line: <{}>
seems we encountered a wrong indented line: line_indentation: <{}>
{}"""
.format(self.orig_func_name, indent_,
line_indentation, line_orig))
line_indentation_level = int((line_indentation -
func_def_indent) / indent_) + 1
if has_block_speedit:
if '::SPEEDIT::' in stripped_line:
if (start_tag_block_speedit !=
end_tag_block_speedit):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an END-TAG <**SPEEDIT**>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
start_tag_block_speedit += 1
_start_block_stripped_line = stripped_line
elif '**SPEEDIT**' in stripped_line:
if (end_tag_block_speedit !=
start_tag_block_speedit - 1):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an START-TAG <::SPEEDIT::>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(' ' *
line_indentation_level +
'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'
.format(_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
end_tag_block_speedit += 1
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
if has_block_speedit:
if start_tag_block_speedit != end_tag_block_speedit:
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'.format(
_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
else:
adjusted_func_code_line.insert(0,
' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
final_param_line = []
for param, value in sig.parameters.items():
if value.kind == value.POSITIONAL_OR_KEYWORD:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.POSITIONAL_ONLY:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
raise Err('_TimeIT.get_final_inner_function()',
'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'
.format(param, value.kind))
elif value.kind == value.VAR_POSITIONAL:
parameter_line = '{} = {}'.format(param, self.args_list)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.KEYWORD_ONLY:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = value.default
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.VAR_KEYWORD:
parameter_line = '{} = {}'.format(param, self.kwargs_dict)
final_param_line.append(' ' * 2 + parameter_line)
else:
continue
final_setup_lines = []
for setup_line in self.setup_line_list:
setup_line = setup_line.strip()
if setup_line:
final_setup_lines.append(' ' + setup_line)
final_inner_function_lines = [
'def inner(): # orig function name: <{}>'.format(self.
orig_func_name),
' from time import perf_counter as _speeit_prefix__perf_counter',
'', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',
' # ==================== START SETUP LINES ==================== #'
, '']
final_inner_function_lines.extend(final_setup_lines)
inner_function_lines_part2 = ['',
' # ==================== END SETUP LINES ==================== #',
'',
' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'
, ' _speeit_prefix__check_reference_time = {}'.format(self.
perf_counter_reference_time), ' _speeit_prefix__loops = 0',
' _speeit_prefix__all_loops_time_sec = 0.0',
' _speeit_prefix__avg_loop_sec = 0.0',
' _speeit_prefix__best_loop_sec = 99999999999.0',
' _speeit_prefix__second_best_loop_sec = 99999999999.0',
' _speeit_prefix__worst_loop_sec = 0.0',
' _speeit_prefix__second_worst_loop_sec = 0.0',
' if _speeit_prefix__run_sec is None:', ' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,'
, ' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', ' elif _speeit_prefix__run_sec == -1:',
' # only run it once',
' _speeit_prefix__run_once = True', ' else:',
' _speeit_prefix__run_once = False',
' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'
, ' while True:', ' _speeit_prefix__loops += 1',
' _speeit_prefix__result_time = 0', '',
' # ==================== START CODE BLOCK ==================== #'
, '']
final_inner_function_lines.extend(inner_function_lines_part2)
final_inner_function_lines.extend(final_param_line)
final_inner_function_lines.extend(adjusted_func_code_line)
inner_function_lines_rest = ['',
' # ==================== END CODE BLOCK ==================== #'
, '',
' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'
,
' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'
,
' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'
,
' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec'
,
' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'
, ' if _speeit_prefix__run_once:', ' break',
' # check if we have to get out',
' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'
, ' break',
' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'
,
' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',
' _speeit_prefix__second_best_loop_sec = -1.0',
' if _speeit_prefix__second_worst_loop_sec == 0.0:',
' _speeit_prefix__second_worst_loop_sec = -1.0',
' return {', ' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', '']
final_inner_function_lines.extend(inner_function_lines_rest)
return '\n'.join(final_inner_function_lines)
def speedit_benchmark(func_dict, setup_line_list, use_func_name=True,
output_in_sec=False, benchmarkit__with_gc=False,
benchmarkit__check_too_fast=True, benchmarkit__rank_by='best',
benchmarkit__run_sec=1, benchmarkit__repeat=3):
""" Returns one txt string for the ready comparison table: format is conform with reStructuredText
Usage:
.. code-block:: python
func_dict = {
'function_f1': (function_f1, [act_one_hamlet], {}),
'function_f2': (function_f2, [act_one_hamlet], {}),
'function_f3': (function_f3, [act_one_hamlet], {}),
}
setup_line_list = [
'from random import shuffle',
'from os.path import abspath, dirname, join',
'MY_CONSTANT = 15'
]
benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)
Args:
func_dict (dict): mapping function names to functions
value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)
       setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
          .. warning:: no multiline string or indented code line
       use_func_name (bool): if True the function name will be used in the output `name`; if False the `func_dict key` will be used in the output `name`
       output_in_sec (int): if True the output is kept in seconds; if False it is transformed to:
second (s)
millisecond (ms) One thousandth of one second
microsecond (µs) One millionth of one second
nanosecond (ns) One billionth of one second
benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing
       benchmarkit__check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
benchmarkit__rank_by (str): `best` or `average`
benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec
- if benchmarkit__run_sec is -1: then the generated function source code is only run once
- if benchmarkit__run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
benchmarkit__repeat (int): how often everything is repeated
This is a convenience variable that calls the whole setup repeatedly
Returns:
       str: ready to print or write to file: the table format conforms to reStructuredText
Raises:
SpeedIT.Err
"""
if not func_dict:
raise Err('speedit_benchmark()',
'At least one function must be defined in `func_dict`: <{}>'.
format(func_dict))
if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':
raise Err('speedit_benchmark()',
'<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'
.format(benchmarkit__rank_by))
if benchmarkit__repeat < 1:
raise Err('speedit_benchmark()',
'<benchmarkit__repeat> must be greater than <0> We got: <{}>'.
format(benchmarkit__repeat))
all_final_lines = []
perf_counter_reference_time = _helper_get_perf_counter_reference_time()
if benchmarkit__run_sec is None:
all_final_lines.extend([
'================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================'
, '', ''])
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_, func_positional_arguments,
func_keyword_arguments, setup_line_list,
benchmarkit__check_too_fast, benchmarkit__run_sec, name,
perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)
all_final_lines.extend([
'===================== function name: <{}>'.format(
func_name), '', benchmark_result, '', ''])
else:
title_line = (
'SpeedIT: `BenchmarkIT` for: <{}> functions. benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '
.format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)
)
for repeat_all in range(benchmarkit__repeat):
table = []
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_,
func_positional_arguments, func_keyword_arguments,
setup_line_list, benchmarkit__check_too_fast,
benchmarkit__run_sec, name, perf_counter_reference_time
).benchmark_it(with_gc=benchmarkit__with_gc)
table.append(benchmark_result)
if benchmarkit__rank_by == 'best':
table = sorted(table, key=itemgetter('best_loop_sec'))
compare_reference = table[0]['best_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'best_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
elif benchmarkit__rank_by == 'average':
table = sorted(table, key=itemgetter('avg_loop_sec'))
compare_reference = table[0]['avg_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'avg_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
header_mapping = [('name', 'name'), ('rank-{}'.format(
benchmarkit__rank_by), 'rank'), ('compare %', 'compare'), (
'num. loops', 'loops'), ('avg_loop', 'avg_loop_sec'), (
'best_loop', 'best_loop_sec'), ('second_best_loop',
'second_best_loop_sec'), ('worst_loop', 'worst_loop_sec'),
('second_worst_loop', 'second_worst_loop_sec'), (
'all_loops time', 'all_loops_time_sec')]
all_final_lines.extend(get_table_rst_formatted_lines(table,
header_mapping, title_line))
all_final_lines.extend(['', ''])
return '\n'.join(all_final_lines)
<|reserved_special_token_1|>
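# Usage sketch assembled from the speedit_benchmark() docstring above. It assumes
# the SpeedIT package is installed and exposes BenchmarkIT as that docstring implies;
# `sort_by_value` is a hypothetical example function, and check_too_fast is disabled
# because the toy workload may be too fast to measure reliably.
from SpeedIT import BenchmarkIT


def sort_by_value(data_):
    result = sorted(data_.items(), key=lambda item: item[1])
    del result


func_dict = {'sort_by_value': (sort_by_value, [{'a': 3, 'b': 1, 'c': 2}], {})}
setup_line_list = ['MY_CONSTANT = 15']

print(BenchmarkIT.speedit_benchmark(func_dict, setup_line_list,
                                    output_in_sec=True,
                                    benchmarkit__check_too_fast=False,
                                    benchmarkit__run_sec=0.5,
                                    benchmarkit__repeat=1))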
<|reserved_special_token_0|>
def _helper_get_perf_counter_reference_time():
""" Helper: Returns 2 times: the smallest difference of calling perf_counter() immediately after each other a couple of times
Returns:
float: 2 times the smallest difference of calling perf_counter() immediately after each other a couple of times
"""
_result_time = 99999999999.0
for y_ in range(50):
for x_ in range(3000):
temp_start = perf_counter()
temp_time = perf_counter() - temp_start
if temp_time < _result_time:
_result_time = temp_time
return _result_time * 2
class _TimeIT(object):
""" Class for timing execution speed of function code.
Partially based on code from python timeit.py
This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`
This avoids calling into the function itself
Args:
func (function):
.. warning:: the `func` function may not have any return statements: but any inner function can have one
OK
.. code-block:: python
def example_formal_func_inner(data_):
shuffle(data_)
def fninner(x):
return x[1]
result = sorted(data_.items(), key=fninner)
del result
NOT OK
.. code-block:: python
def example_pep265(data_):
shuffle(data_)
result = sorted(data_.items(), key=itemgetter(1))
return result
func_positional_arguments (list): positional arguments for the function
func_keyword_arguments (dict): any keyword arguments for the function
       setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
this part is executed once before the actual `func code block` enters the loop
.. warning:: no multiline string or indented code line
check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)
- if run_sec is -1: then the generated function source code is only run once
- if run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
name (str): the name used for the output `name` part
perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()
"""
def __init__(self, func, args_list, kwargs_dict, setup_line_list,
check_too_fast, run_sec, name, perf_counter_reference_time):
""" Constructor. See class doc string.
"""
self.func = func
self.orig_func_name = getattr(self.func, '__name__', self.func)
self.args_list = args_list.copy()
self.kwargs_dict = kwargs_dict.copy()
self.setup_line_list = setup_line_list
self.check_too_fast = check_too_fast
self.run_sec = run_sec
self.name = name
self.perf_counter_reference_time = perf_counter_reference_time
if callable(self.func):
_ns = {}
self.src = self.__get_final_inner_function()
if (self.run_sec is not None and self.run_sec != -1 and self.
run_sec < 0.1):
raise Err('_TimeIT.__init__()',
'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'
.format(self.run_sec))
_code = compile(self.src, 'benchmarkit-src', 'exec')
exec(_code, globals(), _ns)
self.inner = _ns['inner']
else:
raise ValueError('<func>: is not a `callable` type: <{}>'.
format(self.func))
def benchmark_it(self, with_gc):
""" Returns timing result for the `func code block`
.. note::
By default, timeit() temporarily turns off garbage collection during the timing.
The advantage of this approach is that it makes independent timings more comparable.
         The disadvantage is that GC may be an important component of the performance of the function being measured.
         If so, GC can be re-enabled by passing with_gc=True
Returns:
dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec
- loops: how many times the `func code block` was executed (looped over)
- all_loops_time_sec: the total time in seconds for all loops:
only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime
            - avg_loop_sec: average loop time in seconds: this should mostly be used as the measured time:
               if there were only a very low number of loops - one might want to increase the `run_sec` and rerun it
            - best_loop_sec / second_best_loop_sec: time in seconds for the two fastest of all loops
            - worst_loop_sec / second_worst_loop_sec: time in seconds for the two slowest of all loops
Raises:
SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1
"""
if self.run_sec is None:
benchmark_result = self.src
elif with_gc:
gc_old = gc.isenabled()
gc.enable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if not gc_old:
gc.disable()
else:
gc_old = gc.isenabled()
gc.disable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if gc_old:
gc.enable()
return benchmark_result
def __get_final_inner_function(self):
""" Returns a string of an generated inner function with the code body from: func
Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict
.. warnings:: the `func` function may not have any return statements: but any inner function can have one
Returns:
str: generated inner function
Raises:
SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation
"""
has_block_speedit = False
_start_block_stripped_line = ''
start_tag_block_speedit = 0
end_tag_block_speedit = 0
func_line, lnum = getsourcelines(self.func)
sig = signature(self.func)
indent_ = None
func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())
func_body = func_line[1:]
search_docstring = False
first_none_docstring_idx = 0
for idx, line_orig in enumerate(func_body):
rstripped_line = line_orig.rstrip()
if rstripped_line:
stripped_codeline = rstripped_line.lstrip()
if stripped_codeline[0] == '#':
if not ('::SPEEDIT::' in stripped_codeline or
'**SPEEDIT**' in stripped_codeline):
continue
if search_docstring:
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = False
continue
else:
codebody_indent = len(rstripped_line) - len(
stripped_codeline)
indent_ = codebody_indent - func_def_indent
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = True
continue
first_none_docstring_idx = idx
break
adjusted_func_code_line = []
for line_orig in func_body[first_none_docstring_idx:]:
if line_orig:
rstrip_line = line_orig.rstrip()
if rstrip_line:
stripped_line = rstrip_line.lstrip()
if stripped_line[0] == '#':
if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in
stripped_line):
has_block_speedit = True
else:
continue
line_indentation = len(rstrip_line) - len(stripped_line)
if line_indentation % indent_ != 0:
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: ERROR: indentation must be a multiple of the second function line: <{}>
seems we encountered a wrong indented line: line_indentation: <{}>
{}"""
.format(self.orig_func_name, indent_,
line_indentation, line_orig))
line_indentation_level = int((line_indentation -
func_def_indent) / indent_) + 1
if has_block_speedit:
if '::SPEEDIT::' in stripped_line:
if (start_tag_block_speedit !=
end_tag_block_speedit):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an END-TAG <**SPEEDIT**>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
start_tag_block_speedit += 1
_start_block_stripped_line = stripped_line
elif '**SPEEDIT**' in stripped_line:
if (end_tag_block_speedit !=
start_tag_block_speedit - 1):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an START-TAG <::SPEEDIT::>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(' ' *
line_indentation_level +
'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'
.format(_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
end_tag_block_speedit += 1
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
if has_block_speedit:
if start_tag_block_speedit != end_tag_block_speedit:
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'.format(
_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
else:
adjusted_func_code_line.insert(0,
' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
final_param_line = []
for param, value in sig.parameters.items():
if value.kind == value.POSITIONAL_OR_KEYWORD:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.POSITIONAL_ONLY:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
raise Err('_TimeIT.get_final_inner_function()',
'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'
.format(param, value.kind))
elif value.kind == value.VAR_POSITIONAL:
parameter_line = '{} = {}'.format(param, self.args_list)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.KEYWORD_ONLY:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = value.default
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.VAR_KEYWORD:
parameter_line = '{} = {}'.format(param, self.kwargs_dict)
final_param_line.append(' ' * 2 + parameter_line)
else:
continue
final_setup_lines = []
for setup_line in self.setup_line_list:
setup_line = setup_line.strip()
if setup_line:
final_setup_lines.append(' ' + setup_line)
final_inner_function_lines = [
'def inner(): # orig function name: <{}>'.format(self.
orig_func_name),
' from time import perf_counter as _speeit_prefix__perf_counter',
'', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',
' # ==================== START SETUP LINES ==================== #'
, '']
final_inner_function_lines.extend(final_setup_lines)
inner_function_lines_part2 = ['',
' # ==================== END SETUP LINES ==================== #',
'',
' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'
, ' _speeit_prefix__check_reference_time = {}'.format(self.
perf_counter_reference_time), ' _speeit_prefix__loops = 0',
' _speeit_prefix__all_loops_time_sec = 0.0',
' _speeit_prefix__avg_loop_sec = 0.0',
' _speeit_prefix__best_loop_sec = 99999999999.0',
' _speeit_prefix__second_best_loop_sec = 99999999999.0',
' _speeit_prefix__worst_loop_sec = 0.0',
' _speeit_prefix__second_worst_loop_sec = 0.0',
' if _speeit_prefix__run_sec is None:', ' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,'
, ' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', ' elif _speeit_prefix__run_sec == -1:',
' # only run it once',
' _speeit_prefix__run_once = True', ' else:',
' _speeit_prefix__run_once = False',
' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'
, ' while True:', ' _speeit_prefix__loops += 1',
' _speeit_prefix__result_time = 0', '',
' # ==================== START CODE BLOCK ==================== #'
, '']
final_inner_function_lines.extend(inner_function_lines_part2)
final_inner_function_lines.extend(final_param_line)
final_inner_function_lines.extend(adjusted_func_code_line)
inner_function_lines_rest = ['',
' # ==================== END CODE BLOCK ==================== #'
, '',
' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'
,
' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'
,
' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'
,
' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec'
,
' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'
, ' if _speeit_prefix__run_once:', ' break',
' # check if we have to get out',
' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'
, ' break',
' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'
,
' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',
' _speeit_prefix__second_best_loop_sec = -1.0',
' if _speeit_prefix__second_worst_loop_sec == 0.0:',
' _speeit_prefix__second_worst_loop_sec = -1.0',
' return {', ' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', '']
final_inner_function_lines.extend(inner_function_lines_rest)
return '\n'.join(final_inner_function_lines)
def speedit_benchmark(func_dict, setup_line_list, use_func_name=True,
output_in_sec=False, benchmarkit__with_gc=False,
benchmarkit__check_too_fast=True, benchmarkit__rank_by='best',
benchmarkit__run_sec=1, benchmarkit__repeat=3):
""" Returns one txt string for the ready comparison table: format is conform with reStructuredText
Usage:
.. code-block:: python
func_dict = {
'function_f1': (function_f1, [act_one_hamlet], {}),
'function_f2': (function_f2, [act_one_hamlet], {}),
'function_f3': (function_f3, [act_one_hamlet], {}),
}
setup_line_list = [
'from random import shuffle',
'from os.path import abspath, dirname, join',
'MY_CONSTANT = 15'
]
benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)
Args:
func_dict (dict): mapping function names to functions
value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)
       setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
          .. warning:: no multiline string or indented code line
       use_func_name (bool): if True the function name will be used in the output `name`; if False the `func_dict key` will be used in the output `name`
       output_in_sec (int): if True the output is kept in seconds; if False it is transformed to:
second (s)
millisecond (ms) One thousandth of one second
microsecond (µs) One millionth of one second
nanosecond (ns) One billionth of one second
benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing
       benchmarkit__check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
benchmarkit__rank_by (str): `best` or `average`
benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec
- if benchmarkit__run_sec is -1: then the generated function source code is only run once
- if benchmarkit__run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
benchmarkit__repeat (int): how often everything is repeated
This is a convenience variable that calls the whole setup repeatedly
Returns:
       str: ready to print or write to file: the table format conforms to reStructuredText
Raises:
SpeedIT.Err
"""
if not func_dict:
raise Err('speedit_benchmark()',
'At least one function must be defined in `func_dict`: <{}>'.
format(func_dict))
if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':
raise Err('speedit_benchmark()',
'<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'
.format(benchmarkit__rank_by))
if benchmarkit__repeat < 1:
raise Err('speedit_benchmark()',
'<benchmarkit__repeat> must be greater than <0> We got: <{}>'.
format(benchmarkit__repeat))
all_final_lines = []
perf_counter_reference_time = _helper_get_perf_counter_reference_time()
if benchmarkit__run_sec is None:
all_final_lines.extend([
'================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================'
, '', ''])
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_, func_positional_arguments,
func_keyword_arguments, setup_line_list,
benchmarkit__check_too_fast, benchmarkit__run_sec, name,
perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)
all_final_lines.extend([
'===================== function name: <{}>'.format(
func_name), '', benchmark_result, '', ''])
else:
title_line = (
'SpeedIT: `BenchmarkIT` for: <{}> functions. benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '
.format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)
)
for repeat_all in range(benchmarkit__repeat):
table = []
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_,
func_positional_arguments, func_keyword_arguments,
setup_line_list, benchmarkit__check_too_fast,
benchmarkit__run_sec, name, perf_counter_reference_time
).benchmark_it(with_gc=benchmarkit__with_gc)
table.append(benchmark_result)
if benchmarkit__rank_by == 'best':
table = sorted(table, key=itemgetter('best_loop_sec'))
compare_reference = table[0]['best_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'best_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
elif benchmarkit__rank_by == 'average':
table = sorted(table, key=itemgetter('avg_loop_sec'))
compare_reference = table[0]['avg_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'avg_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
header_mapping = [('name', 'name'), ('rank-{}'.format(
benchmarkit__rank_by), 'rank'), ('compare %', 'compare'), (
'num. loops', 'loops'), ('avg_loop', 'avg_loop_sec'), (
'best_loop', 'best_loop_sec'), ('second_best_loop',
'second_best_loop_sec'), ('worst_loop', 'worst_loop_sec'),
('second_worst_loop', 'second_worst_loop_sec'), (
'all_loops time', 'all_loops_time_sec')]
all_final_lines.extend(get_table_rst_formatted_lines(table,
header_mapping, title_line))
all_final_lines.extend(['', ''])
return '\n'.join(all_final_lines)
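# Illustrative sketch: with benchmarkit__run_sec=None only the generated source of the
# `func code block` is returned, which is handy for checking what actually gets timed.
# The function `inspect_me` and its arguments are invented for this example and are not
# part of the SpeedIT package.
if __name__ == '__main__':
    def inspect_me(values):
        total = sum(values)
        del total
    print(speedit_benchmark({'inspect_me': (inspect_me, [[1, 2, 3]], {})}, [],
        benchmarkit__run_sec=None))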
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import gc
from inspect import signature, getsourcelines
from operator import itemgetter
from time import perf_counter
from SpeedIT.ProjectErr import Err
from SpeedIT.Utils import format_time, get_table_rst_formatted_lines
def _helper_get_perf_counter_reference_time():
""" Helper: Returns 2 times: the smallest difference of calling perf_counter() immediately after each other a couple of times
Returns:
float: 2 times the smallest difference of calling perf_counter() immediately after each other a couple of times
"""
_result_time = 99999999999.0
for y_ in range(50):
for x_ in range(3000):
temp_start = perf_counter()
temp_time = perf_counter() - temp_start
if temp_time < _result_time:
_result_time = temp_time
return _result_time * 2
class _TimeIT(object):
""" Class for timing execution speed of function code.
Partially based on code from python timeit.py
This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`
This avoids calling into the function itself
Args:
func (function):
.. warning:: the `func` function may not have any return statements: but any inner function can have one
OK
.. code-block:: python
def example_formal_func_inner(data_):
shuffle(data_)
def fninner(x):
return x[1]
result = sorted(data_.items(), key=fninner)
del result
NOT OK
.. code-block:: python
def example_pep265(data_):
shuffle(data_)
result = sorted(data_.items(), key=itemgetter(1))
return result
func_positional_arguments (list): positional arguments for the function
func_keyword_arguments (dict): any keyword arguments for the function
      setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
this part is executed once before the actual `func code block` enters the loop
.. warning:: no multiline string or indented code line
check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)
- if run_sec is -1: then the generated function source code is only run once
- if run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
name (str): the name used for the output `name` part
perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()
"""
def __init__(self, func, args_list, kwargs_dict, setup_line_list,
check_too_fast, run_sec, name, perf_counter_reference_time):
""" Constructor. See class doc string.
"""
self.func = func
self.orig_func_name = getattr(self.func, '__name__', self.func)
self.args_list = args_list.copy()
self.kwargs_dict = kwargs_dict.copy()
self.setup_line_list = setup_line_list
self.check_too_fast = check_too_fast
self.run_sec = run_sec
self.name = name
self.perf_counter_reference_time = perf_counter_reference_time
if callable(self.func):
_ns = {}
self.src = self.__get_final_inner_function()
if (self.run_sec is not None and self.run_sec != -1 and self.
run_sec < 0.1):
raise Err('_TimeIT.__init__()',
'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'
.format(self.run_sec))
_code = compile(self.src, 'benchmarkit-src', 'exec')
exec(_code, globals(), _ns)
self.inner = _ns['inner']
else:
raise ValueError('<func>: is not a `callable` type: <{}>'.
format(self.func))
def benchmark_it(self, with_gc):
""" Returns timing result for the `func code block`
.. note::
By default, timeit() temporarily turns off garbage collection during the timing.
The advantage of this approach is that it makes independent timings more comparable.
         The disadvantage is that GC may be an important component of the performance of the function being measured.
         If so, GC can be re-enabled by passing with_gc=True
Returns:
dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec
- loops: how many times the `func code block` was executed (looped over)
- all_loops_time_sec: the total time in seconds for all loops:
only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime
            - avg_loop_sec: average loop time in seconds: this should mostly be used as the measured time:
               if there were only a very low number of loops - one might want to increase the `run_sec` and rerun it
            - best_loop_sec / second_best_loop_sec: time in seconds for the two fastest of all loops
            - worst_loop_sec / second_worst_loop_sec: time in seconds for the two slowest of all loops
Raises:
SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1
"""
if self.run_sec is None:
benchmark_result = self.src
elif with_gc:
gc_old = gc.isenabled()
gc.enable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if not gc_old:
gc.disable()
else:
gc_old = gc.isenabled()
gc.disable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if gc_old:
gc.enable()
return benchmark_result
    def __get_final_inner_function(self):
        """ Returns a string of a generated inner function with the code body from: func
Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict
      .. warning:: the `func` function may not have any return statements: but any inner function can have one
Returns:
str: generated inner function
Raises:
SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation
"""
has_block_speedit = False
_start_block_stripped_line = ''
start_tag_block_speedit = 0
end_tag_block_speedit = 0
func_line, lnum = getsourcelines(self.func)
sig = signature(self.func)
indent_ = None
func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())
func_body = func_line[1:]
search_docstring = False
first_none_docstring_idx = 0
for idx, line_orig in enumerate(func_body):
rstripped_line = line_orig.rstrip()
if rstripped_line:
stripped_codeline = rstripped_line.lstrip()
if stripped_codeline[0] == '#':
if not ('::SPEEDIT::' in stripped_codeline or
'**SPEEDIT**' in stripped_codeline):
continue
if search_docstring:
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = False
continue
else:
codebody_indent = len(rstripped_line) - len(
stripped_codeline)
indent_ = codebody_indent - func_def_indent
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = True
continue
first_none_docstring_idx = idx
break
adjusted_func_code_line = []
for line_orig in func_body[first_none_docstring_idx:]:
if line_orig:
rstrip_line = line_orig.rstrip()
if rstrip_line:
stripped_line = rstrip_line.lstrip()
if stripped_line[0] == '#':
if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in
stripped_line):
has_block_speedit = True
else:
continue
line_indentation = len(rstrip_line) - len(stripped_line)
if line_indentation % indent_ != 0:
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: ERROR: indentation must be a multiple of the second function line: <{}>
seems we encountered a wrong indented line: line_indentation: <{}>
{}"""
.format(self.orig_func_name, indent_,
line_indentation, line_orig))
line_indentation_level = int((line_indentation -
func_def_indent) / indent_) + 1
if has_block_speedit:
if '::SPEEDIT::' in stripped_line:
if (start_tag_block_speedit !=
end_tag_block_speedit):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an END-TAG <**SPEEDIT**>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
start_tag_block_speedit += 1
_start_block_stripped_line = stripped_line
elif '**SPEEDIT**' in stripped_line:
if (end_tag_block_speedit !=
start_tag_block_speedit - 1):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an START-TAG <::SPEEDIT::>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(' ' *
line_indentation_level +
'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'
.format(_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
end_tag_block_speedit += 1
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
if has_block_speedit:
if start_tag_block_speedit != end_tag_block_speedit:
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'.format(
_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
else:
adjusted_func_code_line.insert(0,
' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
final_param_line = []
for param, value in sig.parameters.items():
if value.kind == value.POSITIONAL_OR_KEYWORD:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.POSITIONAL_ONLY:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
raise Err('_TimeIT.get_final_inner_function()',
'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'
.format(param, value.kind))
elif value.kind == value.VAR_POSITIONAL:
parameter_line = '{} = {}'.format(param, self.args_list)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.KEYWORD_ONLY:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = value.default
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.VAR_KEYWORD:
parameter_line = '{} = {}'.format(param, self.kwargs_dict)
final_param_line.append(' ' * 2 + parameter_line)
else:
continue
final_setup_lines = []
for setup_line in self.setup_line_list:
setup_line = setup_line.strip()
if setup_line:
final_setup_lines.append(' ' + setup_line)
final_inner_function_lines = [
'def inner(): # orig function name: <{}>'.format(self.
orig_func_name),
' from time import perf_counter as _speeit_prefix__perf_counter',
'', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',
' # ==================== START SETUP LINES ==================== #'
, '']
final_inner_function_lines.extend(final_setup_lines)
inner_function_lines_part2 = ['',
' # ==================== END SETUP LINES ==================== #',
'',
' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'
, ' _speeit_prefix__check_reference_time = {}'.format(self.
perf_counter_reference_time), ' _speeit_prefix__loops = 0',
' _speeit_prefix__all_loops_time_sec = 0.0',
' _speeit_prefix__avg_loop_sec = 0.0',
' _speeit_prefix__best_loop_sec = 99999999999.0',
' _speeit_prefix__second_best_loop_sec = 99999999999.0',
' _speeit_prefix__worst_loop_sec = 0.0',
' _speeit_prefix__second_worst_loop_sec = 0.0',
' if _speeit_prefix__run_sec is None:', ' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,'
, ' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', ' elif _speeit_prefix__run_sec == -1:',
' # only run it once',
' _speeit_prefix__run_once = True', ' else:',
' _speeit_prefix__run_once = False',
' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'
, ' while True:', ' _speeit_prefix__loops += 1',
' _speeit_prefix__result_time = 0', '',
' # ==================== START CODE BLOCK ==================== #'
, '']
final_inner_function_lines.extend(inner_function_lines_part2)
final_inner_function_lines.extend(final_param_line)
final_inner_function_lines.extend(adjusted_func_code_line)
inner_function_lines_rest = ['',
' # ==================== END CODE BLOCK ==================== #'
, '',
' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'
,
' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'
,
' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'
,
' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec'
,
' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'
, ' if _speeit_prefix__run_once:', ' break',
' # check if we have to get out',
' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'
, ' break',
' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'
,
' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',
' _speeit_prefix__second_best_loop_sec = -1.0',
' if _speeit_prefix__second_worst_loop_sec == 0.0:',
' _speeit_prefix__second_worst_loop_sec = -1.0',
' return {', ' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', '']
final_inner_function_lines.extend(inner_function_lines_rest)
return '\n'.join(final_inner_function_lines)
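# Illustrative sketch of the ::SPEEDIT:: / **SPEEDIT** comment tags handled above: only the
# code between the two tags contributes to the measured time, while the lines before the
# START tag still run on every loop but are not timed. `example_partial_timing` is an
# invented name and not part of the SpeedIT package.
def example_partial_timing(data_):
    prepared = sorted(data_)
    # ::SPEEDIT::
    total = sum(x * x for x in prepared)
    # **SPEEDIT**
    del total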
def speedit_benchmark(func_dict, setup_line_list, use_func_name=True,
output_in_sec=False, benchmarkit__with_gc=False,
benchmarkit__check_too_fast=True, benchmarkit__rank_by='best',
benchmarkit__run_sec=1, benchmarkit__repeat=3):
""" Returns one txt string for the ready comparison table: format is conform with reStructuredText
Usage:
.. code-block:: python
func_dict = {
'function_f1': (function_f1, [act_one_hamlet], {}),
'function_f2': (function_f2, [act_one_hamlet], {}),
'function_f3': (function_f3, [act_one_hamlet], {}),
}
setup_line_list = [
'from random import shuffle',
'from os.path import abspath, dirname, join',
'MY_CONSTANT = 15'
]
benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)
Args:
func_dict (dict): mapping function names to functions
value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)
      setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
.. warning:: no multiline string or indented code line
      use_func_name (bool): if True the function name will be used in the output `name`; if False the `func_dict key` will be used in the output `name`
      output_in_sec (bool): if True the output is kept in seconds; if False it is transformed to:
second (s)
millisecond (ms) One thousandth of one second
microsecond (µs) One millionth of one second
nanosecond (ns) One billionth of one second
benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing
      benchmarkit__check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
benchmarkit__rank_by (str): `best` or `average`
benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec
- if benchmarkit__run_sec is -1: then the generated function source code is only run once
- if benchmarkit__run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
benchmarkit__repeat (int): how often everything is repeated
This is a convenience variable that calls the whole setup repeatedly
Returns:
str: ready to print or write to file: table format is conform with reStructuredText
Raises:
SpeedIT.Err
"""
if not func_dict:
raise Err('speedit_benchmark()',
'At least one function must be defined in `func_dict`: <{}>'.
format(func_dict))
if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':
raise Err('speedit_benchmark()',
'<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'
.format(benchmarkit__rank_by))
if benchmarkit__repeat < 1:
raise Err('speedit_benchmark()',
'<benchmarkit__repeat> must be greater than <0> We got: <{}>'.
format(benchmarkit__repeat))
all_final_lines = []
perf_counter_reference_time = _helper_get_perf_counter_reference_time()
if benchmarkit__run_sec is None:
all_final_lines.extend([
'================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================'
, '', ''])
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_, func_positional_arguments,
func_keyword_arguments, setup_line_list,
benchmarkit__check_too_fast, benchmarkit__run_sec, name,
perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)
all_final_lines.extend([
'===================== function name: <{}>'.format(
func_name), '', benchmark_result, '', ''])
else:
title_line = (
'SpeedIT: `BenchmarkIT` for: <{}> functions. benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '
.format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)
)
for repeat_all in range(benchmarkit__repeat):
table = []
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_,
func_positional_arguments, func_keyword_arguments,
setup_line_list, benchmarkit__check_too_fast,
benchmarkit__run_sec, name, perf_counter_reference_time
).benchmark_it(with_gc=benchmarkit__with_gc)
table.append(benchmark_result)
if benchmarkit__rank_by == 'best':
table = sorted(table, key=itemgetter('best_loop_sec'))
compare_reference = table[0]['best_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'best_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
elif benchmarkit__rank_by == 'average':
table = sorted(table, key=itemgetter('avg_loop_sec'))
compare_reference = table[0]['avg_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'avg_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
header_mapping = [('name', 'name'), ('rank-{}'.format(
benchmarkit__rank_by), 'rank'), ('compare %', 'compare'), (
'num. loops', 'loops'), ('avg_loop', 'avg_loop_sec'), (
'best_loop', 'best_loop_sec'), ('second_best_loop',
'second_best_loop_sec'), ('worst_loop', 'worst_loop_sec'),
('second_worst_loop', 'second_worst_loop_sec'), (
'all_loops time', 'all_loops_time_sec')]
all_final_lines.extend(get_table_rst_formatted_lines(table,
header_mapping, title_line))
all_final_lines.extend(['', ''])
return '\n'.join(all_final_lines)
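# Rough sketch of the table string speedit_benchmark() builds (values invented; the column
# names come from `header_mapping` above and the exact layout is produced by
# get_table_rst_formatted_lines):
#
#   name          rank-best   compare %   num. loops   avg_loop    best_loop   ...   all_loops time
#   function_f2   1           100.000     3,702        261.63 us   260.85 us   ...   968.55 ms
#   function_f1   2           114.637     3,234        299.92 us   299.04 us   ...   969.94 ms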
<|reserved_special_token_1|>
""" Benchmark module: can also compare multiple functions
"""
import gc
from inspect import (
signature,
getsourcelines
)
from operator import itemgetter
from time import perf_counter
from SpeedIT.ProjectErr import Err
from SpeedIT.Utils import (
format_time,
get_table_rst_formatted_lines
)
def _helper_get_perf_counter_reference_time():
""" Helper: Returns 2 times: the smallest difference of calling perf_counter() immediately after each other a couple of times
Returns:
float: 2 times the smallest difference of calling perf_counter() immediately after each other a couple of times
"""
_result_time = 99999999999.0
for y_ in range(50):
for x_ in range(3000):
temp_start = perf_counter()
temp_time = perf_counter() - temp_start
if temp_time < _result_time:
_result_time = temp_time
return _result_time * 2
class _TimeIT(object):
""" Class for timing execution speed of function code.
Partially based on code from python timeit.py
This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`
This avoids calling into the function itself
Args:
func (function):
.. warning:: the `func` function may not have any return statements: but any inner function can have one
OK
.. code-block:: python
def example_formal_func_inner(data_):
shuffle(data_)
def fninner(x):
return x[1]
result = sorted(data_.items(), key=fninner)
del result
NOT OK
.. code-block:: python
def example_pep265(data_):
shuffle(data_)
result = sorted(data_.items(), key=itemgetter(1))
return result
func_positional_arguments (list): positional arguments for the function
func_keyword_arguments (dict): any keyword arguments for the function
      setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
this part is executed once before the actual `func code block` enters the loop
.. warning:: no multiline string or indented code line
check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)
- if run_sec is -1: then the generated function source code is only run once
- if run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
name (str): the name used for the output `name` part
perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()
"""
def __init__(self, func, args_list, kwargs_dict, setup_line_list, check_too_fast, run_sec, name, perf_counter_reference_time):
""" Constructor. See class doc string.
"""
self.func = func
self.orig_func_name = getattr(self.func, "__name__", self.func)
self.args_list = args_list.copy()
self.kwargs_dict = kwargs_dict.copy()
self.setup_line_list = setup_line_list
self.check_too_fast = check_too_fast
self.run_sec = run_sec
self.name = name
self.perf_counter_reference_time = perf_counter_reference_time
if callable(self.func):
_ns = {}
self.src = self.__get_final_inner_function()
if self.run_sec is not None and self.run_sec != -1 and self.run_sec < 0.1:
raise Err('_TimeIT.__init__()', 'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'.format(self.run_sec))
_code = compile(self.src, 'benchmarkit-src', "exec")
exec(_code, globals(), _ns)
self.inner = _ns["inner"]
else:
raise ValueError('<func>: is not a `callable` type: <{}>'.format(self.func))
def benchmark_it(self, with_gc):
""" Returns timing result for the `func code block`
.. note::
By default, timeit() temporarily turns off garbage collection during the timing.
The advantage of this approach is that it makes independent timings more comparable.
         The disadvantage is that GC may be an important component of the performance of the function being measured.
         If so, GC can be re-enabled by passing with_gc=True
Returns:
dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec
- loops: how many times the `func code block` was executed (looped over)
- all_loops_time_sec: the total time in seconds for all loops:
only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime
            - avg_loop_sec: average loop time in seconds: this should mostly be used as the measured time:
               if there were only a very low number of loops - one might want to increase the `run_sec` and rerun it
            - best_loop_sec / second_best_loop_sec: time in seconds for the two fastest of all loops
            - worst_loop_sec / second_worst_loop_sec: time in seconds for the two slowest of all loops
Raises:
SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1
"""
if self.run_sec is None:
benchmark_result = self.src
elif with_gc:
gc_old = gc.isenabled()
gc.enable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if not gc_old:
gc.disable()
else:
gc_old = gc.isenabled()
gc.disable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if gc_old:
gc.enable()
return benchmark_result
   def __get_final_inner_function(self):
      """ Returns a string of a generated inner function with the code body from: func
Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict
      .. warning:: the `func` function may not have any return statements: but any inner function can have one
Returns:
str: generated inner function
Raises:
SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation
"""
has_block_speedit = False
_start_block_stripped_line = ''
start_tag_block_speedit = 0
end_tag_block_speedit = 0
func_line, lnum = getsourcelines(self.func)
sig = signature(self.func)
indent_ = None
func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())
func_body = func_line[1:]
search_docstring = False
# PREPARE: remove docstring and get final indentation
first_none_docstring_idx = 0
for idx, line_orig in enumerate(func_body):
rstripped_line = line_orig.rstrip()
if rstripped_line:
stripped_codeline = rstripped_line.lstrip()
if stripped_codeline[0] == '#': # remove comment lines
if not ('::SPEEDIT::' in stripped_codeline or '**SPEEDIT**' in stripped_codeline):
continue
if search_docstring:
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3] == "'''":
search_docstring = False
continue
else:
codebody_indent = len(rstripped_line) - len(stripped_codeline)
indent_ = codebody_indent - func_def_indent
# Check if we have a docstring
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3] == "'''":
search_docstring = True
continue
first_none_docstring_idx = idx
break
# do the func code body
adjusted_func_code_line = []
for line_orig in func_body[first_none_docstring_idx:]:
# remove empty
if line_orig:
# get indentation check it is a multiple of indent_
rstrip_line = line_orig.rstrip()
if rstrip_line:
stripped_line = rstrip_line.lstrip()
if stripped_line[0] == '#': # remove comment lines: keep any with ::SPEEDIT::
if '::SPEEDIT::' in stripped_line or '**SPEEDIT**' in stripped_line:
has_block_speedit = True
else:
continue
line_indentation = len(rstrip_line) - len(stripped_line)
if line_indentation % indent_ != 0:
raise Err('_TimeIT.get_final_inner_function', '<{}>: ERROR: indentation must be a multiple of the second function line: <{}>\n seems we encountered a wrong indented line: line_indentation: <{}>\n {}'.format(self.orig_func_name, indent_, line_indentation, line_orig))
line_indentation_level = int((line_indentation - func_def_indent) / indent_) + 1 # need one extra level
if has_block_speedit:
if '::SPEEDIT::' in stripped_line:
if start_tag_block_speedit != end_tag_block_speedit:
# expected END Tag
raise Err('_TimeIT.get_final_inner_function', '<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an END-TAG <**SPEEDIT**>: \n {}'.format(self.orig_func_name, has_block_speedit, line_orig))
adjusted_func_code_line.append((' ' * line_indentation_level) + '_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added')
start_tag_block_speedit += 1
_start_block_stripped_line = stripped_line
elif '**SPEEDIT**' in stripped_line:
if end_tag_block_speedit != start_tag_block_speedit - 1:
# expected START TAG
raise Err('_TimeIT.get_final_inner_function', '<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an START-TAG <::SPEEDIT::>: \n {}'.format(self.orig_func_name, has_block_speedit, line_orig))
# Do this inner result
adjusted_func_code_line.append((' ' * line_indentation_level) + '_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added')
if self.check_too_fast:
adjusted_func_code_line.append((' ' * line_indentation_level) + 'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'.format(self.orig_func_name) + ' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n ' + ' _start_block_stripped_line: <{}>'.format(_start_block_stripped_line) + '".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added')
end_tag_block_speedit += 1
else:
adjusted_func_code_line.append((' ' * line_indentation_level) + stripped_line)
else:
adjusted_func_code_line.append((' ' * line_indentation_level) + stripped_line)
# CHECK: LAST END TAG
# e.g. if a function body ends with an END-TAG this is not returned by: inspect.getsourcelines(self.func)
if has_block_speedit:
if start_tag_block_speedit != end_tag_block_speedit:
# Do the last inner result: ADDING an END-TAG
adjusted_func_code_line.append(' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added')
if self.check_too_fast:
adjusted_func_code_line.append(' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'.format(self.orig_func_name) + ' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n ' + ' _start_block_stripped_line: <{}>'.format(_start_block_stripped_line) + '".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added')
# add the normal perf_counter time lines
else:
adjusted_func_code_line.insert(0, ' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added')
adjusted_func_code_line.append(' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added')
if self.check_too_fast:
adjusted_func_code_line.append(' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'.format(self.orig_func_name) + ' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added')
# Do the arguments
final_param_line = []
for param, value in sig.parameters.items():
if value.kind == value.POSITIONAL_OR_KEYWORD:
# check if we have a keyword
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else: # use the positional
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append((' ' * 2) + parameter_line)
elif value.kind == value.POSITIONAL_ONLY:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append((' ' * 2) + parameter_line)
# TODO: From docs: 3.4 Python has no explicit syntax for defining positional-only parameters, but many built-in and extension module functions (especially those that accept only one or two parameters) accept them.
raise Err('_TimeIT.get_final_inner_function()', 'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'.format(param, value.kind))
elif value.kind == value.VAR_POSITIONAL: # do the remaining POSITIONAL arguments
parameter_line = '{} = {}'.format(param, self.args_list)
final_param_line.append((' ' * 2) + parameter_line)
elif value.kind == value.KEYWORD_ONLY:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else: # use the default
value_to_set = value.default
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append((' ' * 2) + parameter_line)
elif value.kind == value.VAR_KEYWORD: # do the remaining KEYWORD arguments
parameter_line = '{} = {}'.format(param, self.kwargs_dict)
final_param_line.append((' ' * 2) + parameter_line)
else:
continue
# do self.setup_line_list
final_setup_lines = []
for setup_line in self.setup_line_list:
setup_line = setup_line.strip()
if setup_line:
final_setup_lines.append(' ' + setup_line)
final_inner_function_lines = [
'def inner(): # orig function name: <{}>'.format(self.orig_func_name),
' from time import perf_counter as _speeit_prefix__perf_counter',
'',
' _speeit_prefix__run_sec = {}'.format(self.run_sec),
'',
' # ==================== START SETUP LINES ==================== #',
'',
]
final_inner_function_lines.extend(final_setup_lines)
inner_function_lines_part2 = [
'',
' # ==================== END SETUP LINES ==================== #',
'',
' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times',
' _speeit_prefix__check_reference_time = {}'.format(self.perf_counter_reference_time),
' _speeit_prefix__loops = 0',
' _speeit_prefix__all_loops_time_sec = 0.0',
' _speeit_prefix__avg_loop_sec = 0.0',
' _speeit_prefix__best_loop_sec = 99999999999.0',
' _speeit_prefix__second_best_loop_sec = 99999999999.0',
' _speeit_prefix__worst_loop_sec = 0.0',
' _speeit_prefix__second_worst_loop_sec = 0.0',
' if _speeit_prefix__run_sec is None:',
' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,',
' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec',
' }',
' elif _speeit_prefix__run_sec == -1:',
' # only run it once',
' _speeit_prefix__run_once = True',
' else:',
' _speeit_prefix__run_once = False',
' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()',
' while True:',
' _speeit_prefix__loops += 1',
' _speeit_prefix__result_time = 0',
'',
' # ==================== START CODE BLOCK ==================== #',
'',
]
final_inner_function_lines.extend(inner_function_lines_part2)
final_inner_function_lines.extend(final_param_line)
final_inner_function_lines.extend(adjusted_func_code_line)
inner_function_lines_rest = [
'',
' # ==================== END CODE BLOCK ==================== #',
'',
' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time',
' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:',
' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec',
' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time',
' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:',
' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec',
' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time',
' if _speeit_prefix__run_once:',
' break',
' # check if we have to get out',
' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:',
' break',
' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops',
' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',
' _speeit_prefix__second_best_loop_sec = -1.0',
' if _speeit_prefix__second_worst_loop_sec == 0.0:',
' _speeit_prefix__second_worst_loop_sec = -1.0',
' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,',
' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec',
' }',
''
]
final_inner_function_lines.extend(inner_function_lines_rest)
return '\n'.join(final_inner_function_lines)
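# Rough sketch of one result dict returned by _TimeIT.benchmark_it() (values invented for
# illustration; the keys mirror the dict built in the generated inner() function above,
# plus the 'name' key added in benchmark_it()):
#
#    {'name': 'some_function', 'loops': 3702, 'all_loops_time_sec': 0.9685,
#     'avg_loop_sec': 0.0002616, 'best_loop_sec': 0.0002608, 'second_best_loop_sec': 0.000261,
#     'worst_loop_sec': 0.0002911, 'second_worst_loop_sec': 0.0002897}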
def speedit_benchmark(func_dict, setup_line_list, use_func_name=True, output_in_sec=False, benchmarkit__with_gc=False, benchmarkit__check_too_fast=True, benchmarkit__rank_by='best', benchmarkit__run_sec=1, benchmarkit__repeat=3):
""" Returns one txt string for the ready comparison table: format is conform with reStructuredText
Usage:
.. code-block:: python
func_dict = {
'function_f1': (function_f1, [act_one_hamlet], {}),
'function_f2': (function_f2, [act_one_hamlet], {}),
'function_f3': (function_f3, [act_one_hamlet], {}),
}
setup_line_list = [
'from random import shuffle',
'from os.path import abspath, dirname, join',
'MY_CONSTANT = 15'
]
benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)
Args:
func_dict (dict): mapping function names to functions
value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)
      setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
.. warning:: no multiline string or indented code line
      use_func_name (bool): if True the function name will be used in the output `name`; if False the `func_dict key` will be used in the output `name`
      output_in_sec (bool): if True the output is kept in seconds; if False it is transformed to:
second (s)
millisecond (ms) One thousandth of one second
microsecond (µs) One millionth of one second
nanosecond (ns) One billionth of one second
benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing
      benchmarkit__check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
benchmarkit__rank_by (str): `best` or `average`
benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec
- if benchmarkit__run_sec is -1: then the generated function source code is only run once
- if benchmarkit__run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
benchmarkit__repeat (int): how often everything is repeated
This is a convenience variable that calls the whole setup repeatedly
Returns:
str: ready to print or write to file: table format is conform with reStructuredText
Raises:
SpeedIT.Err
"""
if not func_dict:
raise Err('speedit_benchmark()', 'At least one function must be defined in `func_dict`: <{}>'.format(func_dict))
if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':
raise Err('speedit_benchmark()', '<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'.format(benchmarkit__rank_by))
if benchmarkit__repeat < 1:
raise Err('speedit_benchmark()', '<benchmarkit__repeat> must be greater than <0> We got: <{}>'.format(benchmarkit__repeat))
all_final_lines = []
# get once the perf_counter_reference_time
perf_counter_reference_time = _helper_get_perf_counter_reference_time()
if benchmarkit__run_sec is None:
all_final_lines.extend([
'================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================',
'',
''
])
# Run all only once and get the code
for func_name, (function_, func_positional_arguments, func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, "__name__", function_)
else:
name = func_name
benchmark_result = _TimeIT(function_, func_positional_arguments, func_keyword_arguments, setup_line_list, benchmarkit__check_too_fast, benchmarkit__run_sec, name, perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)
all_final_lines.extend([
'===================== function name: <{}>'.format(func_name),
'',
benchmark_result,
'',
'',
])
else:
title_line = 'SpeedIT: `BenchmarkIT` for: <{}> functions. benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '.format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)
for repeat_all in range(benchmarkit__repeat):
table = []
for func_name, (function_, func_positional_arguments, func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, "__name__", function_)
else:
name = func_name
benchmark_result = _TimeIT(function_, func_positional_arguments, func_keyword_arguments, setup_line_list, benchmarkit__check_too_fast, benchmarkit__run_sec, name, perf_counter_reference_time).benchmark_it(with_gc=benchmarkit__with_gc)
table.append(benchmark_result)
if benchmarkit__rank_by == 'best':
table = sorted(table, key=itemgetter('best_loop_sec'))
compare_reference = table[0]['best_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format((dict_['best_loop_sec'] / compare_reference) * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_['avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_['best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_['worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_['avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_['best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_['worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_['all_loops_time_sec'])
elif benchmarkit__rank_by == 'average':
table = sorted(table, key=itemgetter('avg_loop_sec'))
compare_reference = table[0]['avg_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format((dict_['avg_loop_sec'] / compare_reference) * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_['avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_['best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_['worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_['avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_['best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_['worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_['all_loops_time_sec'])
header_mapping = [
('name', 'name'),
('rank-{}'.format(benchmarkit__rank_by), 'rank'),
('compare %', 'compare'),
('num. loops', 'loops'),
('avg_loop', 'avg_loop_sec'),
('best_loop', 'best_loop_sec'),
('second_best_loop', 'second_best_loop_sec'),
('worst_loop', 'worst_loop_sec'),
('second_worst_loop', 'second_worst_loop_sec'),
('all_loops time', 'all_loops_time_sec')
]
all_final_lines.extend(get_table_rst_formatted_lines(table, header_mapping, title_line))
all_final_lines.extend([
'',
'',
])
return '\n'.join(all_final_lines)
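if __name__ == '__main__':
   # Minimal self-contained usage sketch; all names and values below are invented for
   # illustration and are not part of the SpeedIT package itself.
   def demo_sum_loop():
      total = 0
      for n_ in DEMO_DATA:  # DEMO_DATA is provided via the setup lines below
         total += n_
      del total
   def demo_sum_builtin():
      total = sum(DEMO_DATA)
      del total
   demo_func_dict = {
      'demo_sum_loop': (demo_sum_loop, [], {}),
      'demo_sum_builtin': (demo_sum_builtin, [], {}),
   }
   demo_setup_line_list = ['DEMO_DATA = list(range(10000))']
   print(speedit_benchmark(demo_func_dict, demo_setup_line_list, benchmarkit__run_sec=0.5, benchmarkit__repeat=1))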
|
flexible
|
{
"blob_id": "b2d3ebe4b1ce8f6f0fde8495fb90542080b810ce",
"index": 1390,
"step-1": "<mask token>\n\n\nclass _TimeIT(object):\n <mask token>\n\n def __init__(self, func, args_list, kwargs_dict, setup_line_list,\n check_too_fast, run_sec, name, perf_counter_reference_time):\n \"\"\" Constructor. See class doc string.\n \"\"\"\n self.func = func\n self.orig_func_name = getattr(self.func, '__name__', self.func)\n self.args_list = args_list.copy()\n self.kwargs_dict = kwargs_dict.copy()\n self.setup_line_list = setup_line_list\n self.check_too_fast = check_too_fast\n self.run_sec = run_sec\n self.name = name\n self.perf_counter_reference_time = perf_counter_reference_time\n if callable(self.func):\n _ns = {}\n self.src = self.__get_final_inner_function()\n if (self.run_sec is not None and self.run_sec != -1 and self.\n run_sec < 0.1):\n raise Err('_TimeIT.__init__()',\n 'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'\n .format(self.run_sec))\n _code = compile(self.src, 'benchmarkit-src', 'exec')\n exec(_code, globals(), _ns)\n self.inner = _ns['inner']\n else:\n raise ValueError('<func>: is not a `callable` type: <{}>'.\n format(self.func))\n\n def benchmark_it(self, with_gc):\n \"\"\" Returns timing result for the `func code block`\n\n .. note::\n By default, timeit() temporarily turns off garbage collection during the timing.\n The advantage of this approach is that it makes independent timings more comparable.\n This disadvantage is that GC may be an important component of the performance of the function being measured.\n If so, GC can be re-enabled as the with_gc=True\n\n Returns:\n dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec\n\n - loops: how many times the `func code block` was executed (looped over)\n - all_loops_time_sec: the total time in seconds for all loops:\n only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime\n - avg_loop_sec: average loop time in seconds: this should be mostly used as measure time:\n if there where only a very low number of loops - one might want to increase the `run_sec` and rerun it\n - two_best_loop_sec: time in seconds for the two fastest of all loops\n - two_worst_loop_sec: time in seconds for the two slowest of all loops\n\n Raises:\n SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1\n \"\"\"\n if self.run_sec is None:\n benchmark_result = self.src\n elif with_gc:\n gc_old = gc.isenabled()\n gc.enable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if not gc_old:\n gc.disable()\n else:\n gc_old = gc.isenabled()\n gc.disable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if gc_old:\n gc.enable()\n return benchmark_result\n\n def __get_final_inner_function(self):\n \"\"\" Returns a string of an generated inner function with the code body from: func\n\n Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict\n\n .. 
warnings:: the `func` function may not have any return statements: but any inner function can have one\n\n Returns:\n str: generated inner function\n\n Raises:\n SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation\n \"\"\"\n has_block_speedit = False\n _start_block_stripped_line = ''\n start_tag_block_speedit = 0\n end_tag_block_speedit = 0\n func_line, lnum = getsourcelines(self.func)\n sig = signature(self.func)\n indent_ = None\n func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())\n func_body = func_line[1:]\n search_docstring = False\n first_none_docstring_idx = 0\n for idx, line_orig in enumerate(func_body):\n rstripped_line = line_orig.rstrip()\n if rstripped_line:\n stripped_codeline = rstripped_line.lstrip()\n if stripped_codeline[0] == '#':\n if not ('::SPEEDIT::' in stripped_codeline or \n '**SPEEDIT**' in stripped_codeline):\n continue\n if search_docstring:\n if stripped_codeline[0:3] == '\"\"\"' or stripped_codeline[0:3\n ] == \"'''\":\n search_docstring = False\n continue\n else:\n codebody_indent = len(rstripped_line) - len(\n stripped_codeline)\n indent_ = codebody_indent - func_def_indent\n if stripped_codeline[0:3] == '\"\"\"' or stripped_codeline[0:3\n ] == \"'''\":\n search_docstring = True\n continue\n first_none_docstring_idx = idx\n break\n adjusted_func_code_line = []\n for line_orig in func_body[first_none_docstring_idx:]:\n if line_orig:\n rstrip_line = line_orig.rstrip()\n if rstrip_line:\n stripped_line = rstrip_line.lstrip()\n if stripped_line[0] == '#':\n if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in\n stripped_line):\n has_block_speedit = True\n else:\n continue\n line_indentation = len(rstrip_line) - len(stripped_line)\n if line_indentation % indent_ != 0:\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: ERROR: indentation must be a multiple of the second function line: <{}>\n seems we encountered a wrong indented line: line_indentation: <{}>\n {}\"\"\"\n .format(self.orig_func_name, indent_,\n line_indentation, line_orig))\n line_indentation_level = int((line_indentation -\n func_def_indent) / indent_) + 1\n if has_block_speedit:\n if '::SPEEDIT::' in stripped_line:\n if (start_tag_block_speedit !=\n end_tag_block_speedit):\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an END-TAG <**SPEEDIT**>: \n {}\"\"\"\n .format(self.orig_func_name,\n has_block_speedit, line_orig))\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n '_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'\n )\n start_tag_block_speedit += 1\n _start_block_stripped_line = stripped_line\n elif '**SPEEDIT**' in stripped_line:\n if (end_tag_block_speedit != \n start_tag_block_speedit - 1):\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an START-TAG <::SPEEDIT::>: \n {}\"\"\"\n .format(self.orig_func_name,\n has_block_speedit, line_orig))\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n '_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n 'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too 
fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\\\n '\n + ' _start_block_stripped_line: <{}>'\n .format(_start_block_stripped_line) +\n '\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n end_tag_block_speedit += 1\n else:\n adjusted_func_code_line.append(' ' *\n line_indentation_level + stripped_line)\n else:\n adjusted_func_code_line.append(' ' *\n line_indentation_level + stripped_line)\n if has_block_speedit:\n if start_tag_block_speedit != end_tag_block_speedit:\n adjusted_func_code_line.append(\n ' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(\n ' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\\\n '\n + ' _start_block_stripped_line: <{}>'.format(\n _start_block_stripped_line) +\n '\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n else:\n adjusted_func_code_line.insert(0,\n ' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'\n )\n adjusted_func_code_line.append(\n ' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(\n ' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n final_param_line = []\n for param, value in sig.parameters.items():\n if value.kind == value.POSITIONAL_OR_KEYWORD:\n if param in self.kwargs_dict:\n value_to_set = self.kwargs_dict.pop(param)\n else:\n value_to_set = self.args_list.pop(0)\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.POSITIONAL_ONLY:\n value_to_set = self.args_list.pop(0)\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n raise Err('_TimeIT.get_final_inner_function()',\n 'POSITIONAL_ONLY !! not sure what to do .. 
check in future if needed: param: <{}> value.kind: <{}>'\n .format(param, value.kind))\n elif value.kind == value.VAR_POSITIONAL:\n parameter_line = '{} = {}'.format(param, self.args_list)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.KEYWORD_ONLY:\n if param in self.kwargs_dict:\n value_to_set = self.kwargs_dict.pop(param)\n else:\n value_to_set = value.default\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.VAR_KEYWORD:\n parameter_line = '{} = {}'.format(param, self.kwargs_dict)\n final_param_line.append(' ' * 2 + parameter_line)\n else:\n continue\n final_setup_lines = []\n for setup_line in self.setup_line_list:\n setup_line = setup_line.strip()\n if setup_line:\n final_setup_lines.append(' ' + setup_line)\n final_inner_function_lines = [\n 'def inner(): # orig function name: <{}>'.format(self.\n orig_func_name),\n ' from time import perf_counter as _speeit_prefix__perf_counter',\n '', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',\n ' # ==================== START SETUP LINES ==================== #'\n , '']\n final_inner_function_lines.extend(final_setup_lines)\n inner_function_lines_part2 = ['',\n ' # ==================== END SETUP LINES ==================== #',\n '',\n ' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'\n , ' _speeit_prefix__check_reference_time = {}'.format(self.\n perf_counter_reference_time), ' _speeit_prefix__loops = 0',\n ' _speeit_prefix__all_loops_time_sec = 0.0',\n ' _speeit_prefix__avg_loop_sec = 0.0',\n ' _speeit_prefix__best_loop_sec = 99999999999.0',\n ' _speeit_prefix__second_best_loop_sec = 99999999999.0',\n ' _speeit_prefix__worst_loop_sec = 0.0',\n ' _speeit_prefix__second_worst_loop_sec = 0.0',\n ' if _speeit_prefix__run_sec is None:', ' return {',\n ' \"loops\": _speeit_prefix__loops,',\n ' \"all_loops_time_sec\": _speeit_prefix__all_loops_time_sec,'\n , ' \"avg_loop_sec\": _speeit_prefix__avg_loop_sec,',\n ' \"best_loop_sec\": _speeit_prefix__best_loop_sec,',\n ' \"second_best_loop_sec\": _speeit_prefix__second_best_loop_sec,'\n , ' \"worst_loop_sec\": _speeit_prefix__worst_loop_sec,',\n ' \"second_worst_loop_sec\": _speeit_prefix__second_worst_loop_sec'\n , ' }', ' elif _speeit_prefix__run_sec == -1:',\n ' # only run it once',\n ' _speeit_prefix__run_once = True', ' else:',\n ' _speeit_prefix__run_once = False',\n ' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'\n , ' while True:', ' _speeit_prefix__loops += 1',\n ' _speeit_prefix__result_time = 0', '',\n ' # ==================== START CODE BLOCK ==================== #'\n , '']\n final_inner_function_lines.extend(inner_function_lines_part2)\n final_inner_function_lines.extend(final_param_line)\n final_inner_function_lines.extend(adjusted_func_code_line)\n inner_function_lines_rest = ['',\n ' # ==================== END CODE BLOCK ==================== #'\n , '',\n ' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'\n ,\n ' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'\n ,\n ' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'\n ,\n ' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'\n ,\n ' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'\n ,\n ' _speeit_prefix__second_worst_loop_sec = 
_speeit_prefix__worst_loop_sec'\n ,\n ' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'\n , ' if _speeit_prefix__run_once:', ' break',\n ' # check if we have to get out',\n ' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'\n , ' break',\n ' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'\n ,\n ' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',\n ' _speeit_prefix__second_best_loop_sec = -1.0',\n ' if _speeit_prefix__second_worst_loop_sec == 0.0:',\n ' _speeit_prefix__second_worst_loop_sec = -1.0',\n ' return {', ' \"loops\": _speeit_prefix__loops,',\n ' \"all_loops_time_sec\": _speeit_prefix__all_loops_time_sec,',\n ' \"avg_loop_sec\": _speeit_prefix__avg_loop_sec,',\n ' \"best_loop_sec\": _speeit_prefix__best_loop_sec,',\n ' \"second_best_loop_sec\": _speeit_prefix__second_best_loop_sec,'\n , ' \"worst_loop_sec\": _speeit_prefix__worst_loop_sec,',\n ' \"second_worst_loop_sec\": _speeit_prefix__second_worst_loop_sec'\n , ' }', '']\n final_inner_function_lines.extend(inner_function_lines_rest)\n return '\\n'.join(final_inner_function_lines)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass _TimeIT(object):\n \"\"\" Class for timing execution speed of function code.\n\n Partially based on code from python timeit.py\n\n This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`\n This avoids calling into the function itself\n\n Args:\n func (function):\n\n .. warning:: the `func` function may not have any return statements: but any inner function can have one\n\n OK\n\n .. code-block:: python\n\n def example_formal_func_inner(data_):\n shuffle(data_)\n def fninner(x):\n return x[1]\n result = sorted(data_.items(), key=fninner)\n del result\n\n NOT OK\n\n .. code-block:: python\n\n def example_pep265(data_):\n shuffle(data_)\n result = sorted(data_.items(), key=itemgetter(1))\n return result\n\n func_positional_arguments (list): positional arguments for the function\n func_keyword_arguments (dict): any keyword arguments for the function\n setup_line_list (list): of strings with import lines needed by the functions any global data ect..\n this part is executed once before the actual `func code block` enters the loop\n\n .. warning:: no multiline string or indented code line\n\n check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.\n\n - Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n\n .. seealso:: _helper_get_perf_counter_reference_time()\n\n run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)\n\n - if run_sec is -1: then the generated function source code is only run once\n\n - if run_sec is None: then the generated function source code is only printed\n this is mainly useful to see the exact final `func code block` which will be timed.\n\n name (str): the name used for the output `name` part\n\n perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()\n \"\"\"\n\n def __init__(self, func, args_list, kwargs_dict, setup_line_list,\n check_too_fast, run_sec, name, perf_counter_reference_time):\n \"\"\" Constructor. See class doc string.\n \"\"\"\n self.func = func\n self.orig_func_name = getattr(self.func, '__name__', self.func)\n self.args_list = args_list.copy()\n self.kwargs_dict = kwargs_dict.copy()\n self.setup_line_list = setup_line_list\n self.check_too_fast = check_too_fast\n self.run_sec = run_sec\n self.name = name\n self.perf_counter_reference_time = perf_counter_reference_time\n if callable(self.func):\n _ns = {}\n self.src = self.__get_final_inner_function()\n if (self.run_sec is not None and self.run_sec != -1 and self.\n run_sec < 0.1):\n raise Err('_TimeIT.__init__()',\n 'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'\n .format(self.run_sec))\n _code = compile(self.src, 'benchmarkit-src', 'exec')\n exec(_code, globals(), _ns)\n self.inner = _ns['inner']\n else:\n raise ValueError('<func>: is not a `callable` type: <{}>'.\n format(self.func))\n\n def benchmark_it(self, with_gc):\n \"\"\" Returns timing result for the `func code block`\n\n .. 
note::\n By default, timeit() temporarily turns off garbage collection during the timing.\n The advantage of this approach is that it makes independent timings more comparable.\n This disadvantage is that GC may be an important component of the performance of the function being measured.\n If so, GC can be re-enabled as the with_gc=True\n\n Returns:\n dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec\n\n - loops: how many times the `func code block` was executed (looped over)\n - all_loops_time_sec: the total time in seconds for all loops:\n only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime\n - avg_loop_sec: average loop time in seconds: this should be mostly used as measure time:\n if there where only a very low number of loops - one might want to increase the `run_sec` and rerun it\n - two_best_loop_sec: time in seconds for the two fastest of all loops\n - two_worst_loop_sec: time in seconds for the two slowest of all loops\n\n Raises:\n SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1\n \"\"\"\n if self.run_sec is None:\n benchmark_result = self.src\n elif with_gc:\n gc_old = gc.isenabled()\n gc.enable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if not gc_old:\n gc.disable()\n else:\n gc_old = gc.isenabled()\n gc.disable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if gc_old:\n gc.enable()\n return benchmark_result\n\n def __get_final_inner_function(self):\n \"\"\" Returns a string of an generated inner function with the code body from: func\n\n Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict\n\n .. 
warnings:: the `func` function may not have any return statements: but any inner function can have one\n\n Returns:\n str: generated inner function\n\n Raises:\n SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation\n \"\"\"\n has_block_speedit = False\n _start_block_stripped_line = ''\n start_tag_block_speedit = 0\n end_tag_block_speedit = 0\n func_line, lnum = getsourcelines(self.func)\n sig = signature(self.func)\n indent_ = None\n func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())\n func_body = func_line[1:]\n search_docstring = False\n first_none_docstring_idx = 0\n for idx, line_orig in enumerate(func_body):\n rstripped_line = line_orig.rstrip()\n if rstripped_line:\n stripped_codeline = rstripped_line.lstrip()\n if stripped_codeline[0] == '#':\n if not ('::SPEEDIT::' in stripped_codeline or \n '**SPEEDIT**' in stripped_codeline):\n continue\n if search_docstring:\n if stripped_codeline[0:3] == '\"\"\"' or stripped_codeline[0:3\n ] == \"'''\":\n search_docstring = False\n continue\n else:\n codebody_indent = len(rstripped_line) - len(\n stripped_codeline)\n indent_ = codebody_indent - func_def_indent\n if stripped_codeline[0:3] == '\"\"\"' or stripped_codeline[0:3\n ] == \"'''\":\n search_docstring = True\n continue\n first_none_docstring_idx = idx\n break\n adjusted_func_code_line = []\n for line_orig in func_body[first_none_docstring_idx:]:\n if line_orig:\n rstrip_line = line_orig.rstrip()\n if rstrip_line:\n stripped_line = rstrip_line.lstrip()\n if stripped_line[0] == '#':\n if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in\n stripped_line):\n has_block_speedit = True\n else:\n continue\n line_indentation = len(rstrip_line) - len(stripped_line)\n if line_indentation % indent_ != 0:\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: ERROR: indentation must be a multiple of the second function line: <{}>\n seems we encountered a wrong indented line: line_indentation: <{}>\n {}\"\"\"\n .format(self.orig_func_name, indent_,\n line_indentation, line_orig))\n line_indentation_level = int((line_indentation -\n func_def_indent) / indent_) + 1\n if has_block_speedit:\n if '::SPEEDIT::' in stripped_line:\n if (start_tag_block_speedit !=\n end_tag_block_speedit):\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an END-TAG <**SPEEDIT**>: \n {}\"\"\"\n .format(self.orig_func_name,\n has_block_speedit, line_orig))\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n '_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'\n )\n start_tag_block_speedit += 1\n _start_block_stripped_line = stripped_line\n elif '**SPEEDIT**' in stripped_line:\n if (end_tag_block_speedit != \n start_tag_block_speedit - 1):\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an START-TAG <::SPEEDIT::>: \n {}\"\"\"\n .format(self.orig_func_name,\n has_block_speedit, line_orig))\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n '_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n 'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too 
fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\\\n '\n + ' _start_block_stripped_line: <{}>'\n .format(_start_block_stripped_line) +\n '\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n end_tag_block_speedit += 1\n else:\n adjusted_func_code_line.append(' ' *\n line_indentation_level + stripped_line)\n else:\n adjusted_func_code_line.append(' ' *\n line_indentation_level + stripped_line)\n if has_block_speedit:\n if start_tag_block_speedit != end_tag_block_speedit:\n adjusted_func_code_line.append(\n ' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(\n ' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\\\n '\n + ' _start_block_stripped_line: <{}>'.format(\n _start_block_stripped_line) +\n '\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n else:\n adjusted_func_code_line.insert(0,\n ' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'\n )\n adjusted_func_code_line.append(\n ' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(\n ' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n final_param_line = []\n for param, value in sig.parameters.items():\n if value.kind == value.POSITIONAL_OR_KEYWORD:\n if param in self.kwargs_dict:\n value_to_set = self.kwargs_dict.pop(param)\n else:\n value_to_set = self.args_list.pop(0)\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.POSITIONAL_ONLY:\n value_to_set = self.args_list.pop(0)\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n raise Err('_TimeIT.get_final_inner_function()',\n 'POSITIONAL_ONLY !! not sure what to do .. 
check in future if needed: param: <{}> value.kind: <{}>'\n .format(param, value.kind))\n elif value.kind == value.VAR_POSITIONAL:\n parameter_line = '{} = {}'.format(param, self.args_list)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.KEYWORD_ONLY:\n if param in self.kwargs_dict:\n value_to_set = self.kwargs_dict.pop(param)\n else:\n value_to_set = value.default\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.VAR_KEYWORD:\n parameter_line = '{} = {}'.format(param, self.kwargs_dict)\n final_param_line.append(' ' * 2 + parameter_line)\n else:\n continue\n final_setup_lines = []\n for setup_line in self.setup_line_list:\n setup_line = setup_line.strip()\n if setup_line:\n final_setup_lines.append(' ' + setup_line)\n final_inner_function_lines = [\n 'def inner(): # orig function name: <{}>'.format(self.\n orig_func_name),\n ' from time import perf_counter as _speeit_prefix__perf_counter',\n '', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',\n ' # ==================== START SETUP LINES ==================== #'\n , '']\n final_inner_function_lines.extend(final_setup_lines)\n inner_function_lines_part2 = ['',\n ' # ==================== END SETUP LINES ==================== #',\n '',\n ' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'\n , ' _speeit_prefix__check_reference_time = {}'.format(self.\n perf_counter_reference_time), ' _speeit_prefix__loops = 0',\n ' _speeit_prefix__all_loops_time_sec = 0.0',\n ' _speeit_prefix__avg_loop_sec = 0.0',\n ' _speeit_prefix__best_loop_sec = 99999999999.0',\n ' _speeit_prefix__second_best_loop_sec = 99999999999.0',\n ' _speeit_prefix__worst_loop_sec = 0.0',\n ' _speeit_prefix__second_worst_loop_sec = 0.0',\n ' if _speeit_prefix__run_sec is None:', ' return {',\n ' \"loops\": _speeit_prefix__loops,',\n ' \"all_loops_time_sec\": _speeit_prefix__all_loops_time_sec,'\n , ' \"avg_loop_sec\": _speeit_prefix__avg_loop_sec,',\n ' \"best_loop_sec\": _speeit_prefix__best_loop_sec,',\n ' \"second_best_loop_sec\": _speeit_prefix__second_best_loop_sec,'\n , ' \"worst_loop_sec\": _speeit_prefix__worst_loop_sec,',\n ' \"second_worst_loop_sec\": _speeit_prefix__second_worst_loop_sec'\n , ' }', ' elif _speeit_prefix__run_sec == -1:',\n ' # only run it once',\n ' _speeit_prefix__run_once = True', ' else:',\n ' _speeit_prefix__run_once = False',\n ' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'\n , ' while True:', ' _speeit_prefix__loops += 1',\n ' _speeit_prefix__result_time = 0', '',\n ' # ==================== START CODE BLOCK ==================== #'\n , '']\n final_inner_function_lines.extend(inner_function_lines_part2)\n final_inner_function_lines.extend(final_param_line)\n final_inner_function_lines.extend(adjusted_func_code_line)\n inner_function_lines_rest = ['',\n ' # ==================== END CODE BLOCK ==================== #'\n , '',\n ' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'\n ,\n ' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'\n ,\n ' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'\n ,\n ' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'\n ,\n ' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'\n ,\n ' _speeit_prefix__second_worst_loop_sec = 
_speeit_prefix__worst_loop_sec'\n ,\n ' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'\n , ' if _speeit_prefix__run_once:', ' break',\n ' # check if we have to get out',\n ' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'\n , ' break',\n ' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'\n ,\n ' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',\n ' _speeit_prefix__second_best_loop_sec = -1.0',\n ' if _speeit_prefix__second_worst_loop_sec == 0.0:',\n ' _speeit_prefix__second_worst_loop_sec = -1.0',\n ' return {', ' \"loops\": _speeit_prefix__loops,',\n ' \"all_loops_time_sec\": _speeit_prefix__all_loops_time_sec,',\n ' \"avg_loop_sec\": _speeit_prefix__avg_loop_sec,',\n ' \"best_loop_sec\": _speeit_prefix__best_loop_sec,',\n ' \"second_best_loop_sec\": _speeit_prefix__second_best_loop_sec,'\n , ' \"worst_loop_sec\": _speeit_prefix__worst_loop_sec,',\n ' \"second_worst_loop_sec\": _speeit_prefix__second_worst_loop_sec'\n , ' }', '']\n final_inner_function_lines.extend(inner_function_lines_rest)\n return '\\n'.join(final_inner_function_lines)\n\n\ndef speedit_benchmark(func_dict, setup_line_list, use_func_name=True,\n output_in_sec=False, benchmarkit__with_gc=False,\n benchmarkit__check_too_fast=True, benchmarkit__rank_by='best',\n benchmarkit__run_sec=1, benchmarkit__repeat=3):\n \"\"\" Returns one txt string for the ready comparison table: format is conform with reStructuredText\n\n Usage:\n\n .. code-block:: python\n\n func_dict = {\n 'function_f1': (function_f1, [act_one_hamlet], {}),\n 'function_f2': (function_f2, [act_one_hamlet], {}),\n 'function_f3': (function_f3, [act_one_hamlet], {}),\n }\n\n setup_line_list = [\n 'from random import shuffle',\n 'from os.path import abspath, dirname, join',\n 'MY_CONSTANT = 15'\n ]\n\n benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)\n\n Args:\n func_dict (dict): mapping function names to functions\n value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)\n setup_line_list (list): of strings with import lines needed by the functions any global data ect..\n\n .. warning:: no multiline string or indented code line\n\n use_func_name (bool): if True the function name will be used in the output `name` if False the `func_dict key` will be used in the the output `name`\n\n output_in_sec (int): if true the output is keep in seconds if false it is transformed to:\n second (s)\n millisecond (ms) One thousandth of one second\n microsecond (µs) One millionth of one second\n nanosecond (ns) One billionth of one second\n\n benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing\n\n benchmarkit__check_too_fast(bool): if True and aa code block is timed faster than a `Reference-Time` an Exception is raised.\n\n - Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n .. 
seealso:: _helper_get_perf_counter_reference_time()\n\n benchmarkit__rank_by (str): `best` or `average`\n\n benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec\n\n - if benchmarkit__run_sec is -1: then the generated function source code is only run once\n\n - if benchmarkit__run_sec is None: then the generated function source code is only printed\n this is mainly useful to see the exact final `func code block` which will be timed.\n\n benchmarkit__repeat (int): how often everything is repeated\n This is a convenience variable that calls the whole setup repeatedly\n\n Returns:\n str: ready to print or write to file: table format is conform with reStructuredText\n\n Raises:\n SpeedIT.Err\n \"\"\"\n if not func_dict:\n raise Err('speedit_benchmark()',\n 'At least one function must be defined in `func_dict`: <{}>'.\n format(func_dict))\n if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':\n raise Err('speedit_benchmark()',\n '<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'\n .format(benchmarkit__rank_by))\n if benchmarkit__repeat < 1:\n raise Err('speedit_benchmark()',\n '<benchmarkit__repeat> must be greater than <0> We got: <{}>'.\n format(benchmarkit__repeat))\n all_final_lines = []\n perf_counter_reference_time = _helper_get_perf_counter_reference_time()\n if benchmarkit__run_sec is None:\n all_final_lines.extend([\n '================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================'\n , '', ''])\n for func_name, (function_, func_positional_arguments,\n func_keyword_arguments) in sorted(func_dict.items()):\n if use_func_name:\n name = getattr(function_, '__name__', function_)\n else:\n name = func_name\n benchmark_result = _TimeIT(function_, func_positional_arguments,\n func_keyword_arguments, setup_line_list,\n benchmarkit__check_too_fast, benchmarkit__run_sec, name,\n perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)\n all_final_lines.extend([\n '===================== function name: <{}>'.format(\n func_name), '', benchmark_result, '', ''])\n else:\n title_line = (\n 'SpeedIT: `BenchmarkIT` for: <{}> functions. 
benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '\n .format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)\n )\n for repeat_all in range(benchmarkit__repeat):\n table = []\n for func_name, (function_, func_positional_arguments,\n func_keyword_arguments) in sorted(func_dict.items()):\n if use_func_name:\n name = getattr(function_, '__name__', function_)\n else:\n name = func_name\n benchmark_result = _TimeIT(function_,\n func_positional_arguments, func_keyword_arguments,\n setup_line_list, benchmarkit__check_too_fast,\n benchmarkit__run_sec, name, perf_counter_reference_time\n ).benchmark_it(with_gc=benchmarkit__with_gc)\n table.append(benchmark_result)\n if benchmarkit__rank_by == 'best':\n table = sorted(table, key=itemgetter('best_loop_sec'))\n compare_reference = table[0]['best_loop_sec']\n for idx, dict_ in enumerate(table):\n dict_['compare'] = '{:,.3f}'.format(dict_[\n 'best_loop_sec'] / compare_reference * 100.0)\n dict_['rank'] = '{:,}'.format(idx + 1)\n dict_['loops'] = '{:,}'.format(dict_['loops'])\n if output_in_sec:\n dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = '{:.11f}'.format(dict_[\n 'best_loop_sec'])\n if dict_['second_best_loop_sec'] == -1.0:\n dict_['second_best_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_best_loop_sec'] = '{:.11f}'.format(\n dict_['second_best_loop_sec'])\n dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[\n 'worst_loop_sec'])\n if dict_['second_worst_loop_sec'] == -1.0:\n dict_['second_worst_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_worst_loop_sec'] = '{:.11f}'.format(\n dict_['second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_\n ['all_loops_time_sec'])\n else:\n dict_['avg_loop_sec'] = format_time(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = format_time(dict_[\n 'best_loop_sec'])\n dict_['second_best_loop_sec'] = format_time(dict_[\n 'second_best_loop_sec'])\n dict_['worst_loop_sec'] = format_time(dict_[\n 'worst_loop_sec'])\n dict_['second_worst_loop_sec'] = format_time(dict_[\n 'second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = format_time(dict_[\n 'all_loops_time_sec'])\n elif benchmarkit__rank_by == 'average':\n table = sorted(table, key=itemgetter('avg_loop_sec'))\n compare_reference = table[0]['avg_loop_sec']\n for idx, dict_ in enumerate(table):\n dict_['compare'] = '{:,.3f}'.format(dict_[\n 'avg_loop_sec'] / compare_reference * 100.0)\n dict_['rank'] = '{:,}'.format(idx + 1)\n dict_['loops'] = '{:,}'.format(dict_['loops'])\n if output_in_sec:\n dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = '{:.11f}'.format(dict_[\n 'best_loop_sec'])\n if dict_['second_best_loop_sec'] == -1.0:\n dict_['second_best_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_best_loop_sec'] = '{:.11f}'.format(\n dict_['second_best_loop_sec'])\n dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[\n 'worst_loop_sec'])\n if dict_['second_worst_loop_sec'] == -1.0:\n dict_['second_worst_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_worst_loop_sec'] = '{:.11f}'.format(\n dict_['second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_\n ['all_loops_time_sec'])\n else:\n dict_['avg_loop_sec'] = format_time(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = format_time(dict_[\n 'best_loop_sec'])\n dict_['second_best_loop_sec'] = format_time(dict_[\n 'second_best_loop_sec'])\n dict_['worst_loop_sec'] = format_time(dict_[\n 'worst_loop_sec'])\n dict_['second_worst_loop_sec'] = 
format_time(dict_[\n 'second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = format_time(dict_[\n 'all_loops_time_sec'])\n header_mapping = [('name', 'name'), ('rank-{}'.format(\n benchmarkit__rank_by), 'rank'), ('compare %', 'compare'), (\n 'num. loops', 'loops'), ('avg_loop', 'avg_loop_sec'), (\n 'best_loop', 'best_loop_sec'), ('second_best_loop',\n 'second_best_loop_sec'), ('worst_loop', 'worst_loop_sec'),\n ('second_worst_loop', 'second_worst_loop_sec'), (\n 'all_loops time', 'all_loops_time_sec')]\n all_final_lines.extend(get_table_rst_formatted_lines(table,\n header_mapping, title_line))\n all_final_lines.extend(['', ''])\n return '\\n'.join(all_final_lines)\n",
"step-3": "<mask token>\n\n\ndef _helper_get_perf_counter_reference_time():\n \"\"\" Helper: Returns 2 times: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n Returns:\n float: 2 times the smallest difference of calling perf_counter() immediately after each other a couple of times\n \"\"\"\n _result_time = 99999999999.0\n for y_ in range(50):\n for x_ in range(3000):\n temp_start = perf_counter()\n temp_time = perf_counter() - temp_start\n if temp_time < _result_time:\n _result_time = temp_time\n return _result_time * 2\n\n\nclass _TimeIT(object):\n \"\"\" Class for timing execution speed of function code.\n\n Partially based on code from python timeit.py\n\n This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`\n This avoids calling into the function itself\n\n Args:\n func (function):\n\n .. warning:: the `func` function may not have any return statements: but any inner function can have one\n\n OK\n\n .. code-block:: python\n\n def example_formal_func_inner(data_):\n shuffle(data_)\n def fninner(x):\n return x[1]\n result = sorted(data_.items(), key=fninner)\n del result\n\n NOT OK\n\n .. code-block:: python\n\n def example_pep265(data_):\n shuffle(data_)\n result = sorted(data_.items(), key=itemgetter(1))\n return result\n\n func_positional_arguments (list): positional arguments for the function\n func_keyword_arguments (dict): any keyword arguments for the function\n setup_line_list (list): of strings with import lines needed by the functions any global data ect..\n this part is executed once before the actual `func code block` enters the loop\n\n .. warning:: no multiline string or indented code line\n\n check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.\n\n - Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n\n .. seealso:: _helper_get_perf_counter_reference_time()\n\n run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)\n\n - if run_sec is -1: then the generated function source code is only run once\n\n - if run_sec is None: then the generated function source code is only printed\n this is mainly useful to see the exact final `func code block` which will be timed.\n\n name (str): the name used for the output `name` part\n\n perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()\n \"\"\"\n\n def __init__(self, func, args_list, kwargs_dict, setup_line_list,\n check_too_fast, run_sec, name, perf_counter_reference_time):\n \"\"\" Constructor. 
See class doc string.\n \"\"\"\n self.func = func\n self.orig_func_name = getattr(self.func, '__name__', self.func)\n self.args_list = args_list.copy()\n self.kwargs_dict = kwargs_dict.copy()\n self.setup_line_list = setup_line_list\n self.check_too_fast = check_too_fast\n self.run_sec = run_sec\n self.name = name\n self.perf_counter_reference_time = perf_counter_reference_time\n if callable(self.func):\n _ns = {}\n self.src = self.__get_final_inner_function()\n if (self.run_sec is not None and self.run_sec != -1 and self.\n run_sec < 0.1):\n raise Err('_TimeIT.__init__()',\n 'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'\n .format(self.run_sec))\n _code = compile(self.src, 'benchmarkit-src', 'exec')\n exec(_code, globals(), _ns)\n self.inner = _ns['inner']\n else:\n raise ValueError('<func>: is not a `callable` type: <{}>'.\n format(self.func))\n\n def benchmark_it(self, with_gc):\n \"\"\" Returns timing result for the `func code block`\n\n .. note::\n By default, timeit() temporarily turns off garbage collection during the timing.\n The advantage of this approach is that it makes independent timings more comparable.\n This disadvantage is that GC may be an important component of the performance of the function being measured.\n If so, GC can be re-enabled as the with_gc=True\n\n Returns:\n dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec\n\n - loops: how many times the `func code block` was executed (looped over)\n - all_loops_time_sec: the total time in seconds for all loops:\n only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime\n - avg_loop_sec: average loop time in seconds: this should be mostly used as measure time:\n if there where only a very low number of loops - one might want to increase the `run_sec` and rerun it\n - two_best_loop_sec: time in seconds for the two fastest of all loops\n - two_worst_loop_sec: time in seconds for the two slowest of all loops\n\n Raises:\n SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1\n \"\"\"\n if self.run_sec is None:\n benchmark_result = self.src\n elif with_gc:\n gc_old = gc.isenabled()\n gc.enable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if not gc_old:\n gc.disable()\n else:\n gc_old = gc.isenabled()\n gc.disable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if gc_old:\n gc.enable()\n return benchmark_result\n\n def __get_final_inner_function(self):\n \"\"\" Returns a string of an generated inner function with the code body from: func\n\n Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict\n\n .. 
warnings:: the `func` function may not have any return statements: but any inner function can have one\n\n Returns:\n str: generated inner function\n\n Raises:\n SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation\n \"\"\"\n has_block_speedit = False\n _start_block_stripped_line = ''\n start_tag_block_speedit = 0\n end_tag_block_speedit = 0\n func_line, lnum = getsourcelines(self.func)\n sig = signature(self.func)\n indent_ = None\n func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())\n func_body = func_line[1:]\n search_docstring = False\n first_none_docstring_idx = 0\n for idx, line_orig in enumerate(func_body):\n rstripped_line = line_orig.rstrip()\n if rstripped_line:\n stripped_codeline = rstripped_line.lstrip()\n if stripped_codeline[0] == '#':\n if not ('::SPEEDIT::' in stripped_codeline or \n '**SPEEDIT**' in stripped_codeline):\n continue\n if search_docstring:\n if stripped_codeline[0:3] == '\"\"\"' or stripped_codeline[0:3\n ] == \"'''\":\n search_docstring = False\n continue\n else:\n codebody_indent = len(rstripped_line) - len(\n stripped_codeline)\n indent_ = codebody_indent - func_def_indent\n if stripped_codeline[0:3] == '\"\"\"' or stripped_codeline[0:3\n ] == \"'''\":\n search_docstring = True\n continue\n first_none_docstring_idx = idx\n break\n adjusted_func_code_line = []\n for line_orig in func_body[first_none_docstring_idx:]:\n if line_orig:\n rstrip_line = line_orig.rstrip()\n if rstrip_line:\n stripped_line = rstrip_line.lstrip()\n if stripped_line[0] == '#':\n if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in\n stripped_line):\n has_block_speedit = True\n else:\n continue\n line_indentation = len(rstrip_line) - len(stripped_line)\n if line_indentation % indent_ != 0:\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: ERROR: indentation must be a multiple of the second function line: <{}>\n seems we encountered a wrong indented line: line_indentation: <{}>\n {}\"\"\"\n .format(self.orig_func_name, indent_,\n line_indentation, line_orig))\n line_indentation_level = int((line_indentation -\n func_def_indent) / indent_) + 1\n if has_block_speedit:\n if '::SPEEDIT::' in stripped_line:\n if (start_tag_block_speedit !=\n end_tag_block_speedit):\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an END-TAG <**SPEEDIT**>: \n {}\"\"\"\n .format(self.orig_func_name,\n has_block_speedit, line_orig))\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n '_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'\n )\n start_tag_block_speedit += 1\n _start_block_stripped_line = stripped_line\n elif '**SPEEDIT**' in stripped_line:\n if (end_tag_block_speedit != \n start_tag_block_speedit - 1):\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an START-TAG <::SPEEDIT::>: \n {}\"\"\"\n .format(self.orig_func_name,\n has_block_speedit, line_orig))\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n '_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n 'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too 
fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\\\n '\n + ' _start_block_stripped_line: <{}>'\n .format(_start_block_stripped_line) +\n '\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n end_tag_block_speedit += 1\n else:\n adjusted_func_code_line.append(' ' *\n line_indentation_level + stripped_line)\n else:\n adjusted_func_code_line.append(' ' *\n line_indentation_level + stripped_line)\n if has_block_speedit:\n if start_tag_block_speedit != end_tag_block_speedit:\n adjusted_func_code_line.append(\n ' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(\n ' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\\\n '\n + ' _start_block_stripped_line: <{}>'.format(\n _start_block_stripped_line) +\n '\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n else:\n adjusted_func_code_line.insert(0,\n ' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'\n )\n adjusted_func_code_line.append(\n ' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(\n ' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n final_param_line = []\n for param, value in sig.parameters.items():\n if value.kind == value.POSITIONAL_OR_KEYWORD:\n if param in self.kwargs_dict:\n value_to_set = self.kwargs_dict.pop(param)\n else:\n value_to_set = self.args_list.pop(0)\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.POSITIONAL_ONLY:\n value_to_set = self.args_list.pop(0)\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n raise Err('_TimeIT.get_final_inner_function()',\n 'POSITIONAL_ONLY !! not sure what to do .. 
check in future if needed: param: <{}> value.kind: <{}>'\n .format(param, value.kind))\n elif value.kind == value.VAR_POSITIONAL:\n parameter_line = '{} = {}'.format(param, self.args_list)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.KEYWORD_ONLY:\n if param in self.kwargs_dict:\n value_to_set = self.kwargs_dict.pop(param)\n else:\n value_to_set = value.default\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.VAR_KEYWORD:\n parameter_line = '{} = {}'.format(param, self.kwargs_dict)\n final_param_line.append(' ' * 2 + parameter_line)\n else:\n continue\n final_setup_lines = []\n for setup_line in self.setup_line_list:\n setup_line = setup_line.strip()\n if setup_line:\n final_setup_lines.append(' ' + setup_line)\n final_inner_function_lines = [\n 'def inner(): # orig function name: <{}>'.format(self.\n orig_func_name),\n ' from time import perf_counter as _speeit_prefix__perf_counter',\n '', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',\n ' # ==================== START SETUP LINES ==================== #'\n , '']\n final_inner_function_lines.extend(final_setup_lines)\n inner_function_lines_part2 = ['',\n ' # ==================== END SETUP LINES ==================== #',\n '',\n ' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'\n , ' _speeit_prefix__check_reference_time = {}'.format(self.\n perf_counter_reference_time), ' _speeit_prefix__loops = 0',\n ' _speeit_prefix__all_loops_time_sec = 0.0',\n ' _speeit_prefix__avg_loop_sec = 0.0',\n ' _speeit_prefix__best_loop_sec = 99999999999.0',\n ' _speeit_prefix__second_best_loop_sec = 99999999999.0',\n ' _speeit_prefix__worst_loop_sec = 0.0',\n ' _speeit_prefix__second_worst_loop_sec = 0.0',\n ' if _speeit_prefix__run_sec is None:', ' return {',\n ' \"loops\": _speeit_prefix__loops,',\n ' \"all_loops_time_sec\": _speeit_prefix__all_loops_time_sec,'\n , ' \"avg_loop_sec\": _speeit_prefix__avg_loop_sec,',\n ' \"best_loop_sec\": _speeit_prefix__best_loop_sec,',\n ' \"second_best_loop_sec\": _speeit_prefix__second_best_loop_sec,'\n , ' \"worst_loop_sec\": _speeit_prefix__worst_loop_sec,',\n ' \"second_worst_loop_sec\": _speeit_prefix__second_worst_loop_sec'\n , ' }', ' elif _speeit_prefix__run_sec == -1:',\n ' # only run it once',\n ' _speeit_prefix__run_once = True', ' else:',\n ' _speeit_prefix__run_once = False',\n ' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'\n , ' while True:', ' _speeit_prefix__loops += 1',\n ' _speeit_prefix__result_time = 0', '',\n ' # ==================== START CODE BLOCK ==================== #'\n , '']\n final_inner_function_lines.extend(inner_function_lines_part2)\n final_inner_function_lines.extend(final_param_line)\n final_inner_function_lines.extend(adjusted_func_code_line)\n inner_function_lines_rest = ['',\n ' # ==================== END CODE BLOCK ==================== #'\n , '',\n ' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'\n ,\n ' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'\n ,\n ' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'\n ,\n ' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'\n ,\n ' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'\n ,\n ' _speeit_prefix__second_worst_loop_sec = 
_speeit_prefix__worst_loop_sec'\n ,\n ' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'\n , ' if _speeit_prefix__run_once:', ' break',\n ' # check if we have to get out',\n ' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'\n , ' break',\n ' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'\n ,\n ' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',\n ' _speeit_prefix__second_best_loop_sec = -1.0',\n ' if _speeit_prefix__second_worst_loop_sec == 0.0:',\n ' _speeit_prefix__second_worst_loop_sec = -1.0',\n ' return {', ' \"loops\": _speeit_prefix__loops,',\n ' \"all_loops_time_sec\": _speeit_prefix__all_loops_time_sec,',\n ' \"avg_loop_sec\": _speeit_prefix__avg_loop_sec,',\n ' \"best_loop_sec\": _speeit_prefix__best_loop_sec,',\n ' \"second_best_loop_sec\": _speeit_prefix__second_best_loop_sec,'\n , ' \"worst_loop_sec\": _speeit_prefix__worst_loop_sec,',\n ' \"second_worst_loop_sec\": _speeit_prefix__second_worst_loop_sec'\n , ' }', '']\n final_inner_function_lines.extend(inner_function_lines_rest)\n return '\\n'.join(final_inner_function_lines)\n\n\ndef speedit_benchmark(func_dict, setup_line_list, use_func_name=True,\n output_in_sec=False, benchmarkit__with_gc=False,\n benchmarkit__check_too_fast=True, benchmarkit__rank_by='best',\n benchmarkit__run_sec=1, benchmarkit__repeat=3):\n \"\"\" Returns one txt string for the ready comparison table: format is conform with reStructuredText\n\n Usage:\n\n .. code-block:: python\n\n func_dict = {\n 'function_f1': (function_f1, [act_one_hamlet], {}),\n 'function_f2': (function_f2, [act_one_hamlet], {}),\n 'function_f3': (function_f3, [act_one_hamlet], {}),\n }\n\n setup_line_list = [\n 'from random import shuffle',\n 'from os.path import abspath, dirname, join',\n 'MY_CONSTANT = 15'\n ]\n\n benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)\n\n Args:\n func_dict (dict): mapping function names to functions\n value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)\n setup_line_list (list): of strings with import lines needed by the functions any global data ect..\n\n .. warning:: no multiline string or indented code line\n\n use_func_name (bool): if True the function name will be used in the output `name` if False the `func_dict key` will be used in the the output `name`\n\n output_in_sec (int): if true the output is keep in seconds if false it is transformed to:\n second (s)\n millisecond (ms) One thousandth of one second\n microsecond (µs) One millionth of one second\n nanosecond (ns) One billionth of one second\n\n benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing\n\n benchmarkit__check_too_fast(bool): if True and aa code block is timed faster than a `Reference-Time` an Exception is raised.\n\n - Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n .. 
seealso:: _helper_get_perf_counter_reference_time()\n\n benchmarkit__rank_by (str): `best` or `average`\n\n benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec\n\n - if benchmarkit__run_sec is -1: then the generated function source code is only run once\n\n - if benchmarkit__run_sec is None: then the generated function source code is only printed\n this is mainly useful to see the exact final `func code block` which will be timed.\n\n benchmarkit__repeat (int): how often everything is repeated\n This is a convenience variable that calls the whole setup repeatedly\n\n Returns:\n str: ready to print or write to file: table format is conform with reStructuredText\n\n Raises:\n SpeedIT.Err\n \"\"\"\n if not func_dict:\n raise Err('speedit_benchmark()',\n 'At least one function must be defined in `func_dict`: <{}>'.\n format(func_dict))\n if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':\n raise Err('speedit_benchmark()',\n '<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'\n .format(benchmarkit__rank_by))\n if benchmarkit__repeat < 1:\n raise Err('speedit_benchmark()',\n '<benchmarkit__repeat> must be greater than <0> We got: <{}>'.\n format(benchmarkit__repeat))\n all_final_lines = []\n perf_counter_reference_time = _helper_get_perf_counter_reference_time()\n if benchmarkit__run_sec is None:\n all_final_lines.extend([\n '================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================'\n , '', ''])\n for func_name, (function_, func_positional_arguments,\n func_keyword_arguments) in sorted(func_dict.items()):\n if use_func_name:\n name = getattr(function_, '__name__', function_)\n else:\n name = func_name\n benchmark_result = _TimeIT(function_, func_positional_arguments,\n func_keyword_arguments, setup_line_list,\n benchmarkit__check_too_fast, benchmarkit__run_sec, name,\n perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)\n all_final_lines.extend([\n '===================== function name: <{}>'.format(\n func_name), '', benchmark_result, '', ''])\n else:\n title_line = (\n 'SpeedIT: `BenchmarkIT` for: <{}> functions. 
benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '\n .format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)\n )\n for repeat_all in range(benchmarkit__repeat):\n table = []\n for func_name, (function_, func_positional_arguments,\n func_keyword_arguments) in sorted(func_dict.items()):\n if use_func_name:\n name = getattr(function_, '__name__', function_)\n else:\n name = func_name\n benchmark_result = _TimeIT(function_,\n func_positional_arguments, func_keyword_arguments,\n setup_line_list, benchmarkit__check_too_fast,\n benchmarkit__run_sec, name, perf_counter_reference_time\n ).benchmark_it(with_gc=benchmarkit__with_gc)\n table.append(benchmark_result)\n if benchmarkit__rank_by == 'best':\n table = sorted(table, key=itemgetter('best_loop_sec'))\n compare_reference = table[0]['best_loop_sec']\n for idx, dict_ in enumerate(table):\n dict_['compare'] = '{:,.3f}'.format(dict_[\n 'best_loop_sec'] / compare_reference * 100.0)\n dict_['rank'] = '{:,}'.format(idx + 1)\n dict_['loops'] = '{:,}'.format(dict_['loops'])\n if output_in_sec:\n dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = '{:.11f}'.format(dict_[\n 'best_loop_sec'])\n if dict_['second_best_loop_sec'] == -1.0:\n dict_['second_best_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_best_loop_sec'] = '{:.11f}'.format(\n dict_['second_best_loop_sec'])\n dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[\n 'worst_loop_sec'])\n if dict_['second_worst_loop_sec'] == -1.0:\n dict_['second_worst_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_worst_loop_sec'] = '{:.11f}'.format(\n dict_['second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_\n ['all_loops_time_sec'])\n else:\n dict_['avg_loop_sec'] = format_time(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = format_time(dict_[\n 'best_loop_sec'])\n dict_['second_best_loop_sec'] = format_time(dict_[\n 'second_best_loop_sec'])\n dict_['worst_loop_sec'] = format_time(dict_[\n 'worst_loop_sec'])\n dict_['second_worst_loop_sec'] = format_time(dict_[\n 'second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = format_time(dict_[\n 'all_loops_time_sec'])\n elif benchmarkit__rank_by == 'average':\n table = sorted(table, key=itemgetter('avg_loop_sec'))\n compare_reference = table[0]['avg_loop_sec']\n for idx, dict_ in enumerate(table):\n dict_['compare'] = '{:,.3f}'.format(dict_[\n 'avg_loop_sec'] / compare_reference * 100.0)\n dict_['rank'] = '{:,}'.format(idx + 1)\n dict_['loops'] = '{:,}'.format(dict_['loops'])\n if output_in_sec:\n dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = '{:.11f}'.format(dict_[\n 'best_loop_sec'])\n if dict_['second_best_loop_sec'] == -1.0:\n dict_['second_best_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_best_loop_sec'] = '{:.11f}'.format(\n dict_['second_best_loop_sec'])\n dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[\n 'worst_loop_sec'])\n if dict_['second_worst_loop_sec'] == -1.0:\n dict_['second_worst_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_worst_loop_sec'] = '{:.11f}'.format(\n dict_['second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_\n ['all_loops_time_sec'])\n else:\n dict_['avg_loop_sec'] = format_time(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = format_time(dict_[\n 'best_loop_sec'])\n dict_['second_best_loop_sec'] = format_time(dict_[\n 'second_best_loop_sec'])\n dict_['worst_loop_sec'] = format_time(dict_[\n 'worst_loop_sec'])\n dict_['second_worst_loop_sec'] = 
format_time(dict_[\n 'second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = format_time(dict_[\n 'all_loops_time_sec'])\n header_mapping = [('name', 'name'), ('rank-{}'.format(\n benchmarkit__rank_by), 'rank'), ('compare %', 'compare'), (\n 'num. loops', 'loops'), ('avg_loop', 'avg_loop_sec'), (\n 'best_loop', 'best_loop_sec'), ('second_best_loop',\n 'second_best_loop_sec'), ('worst_loop', 'worst_loop_sec'),\n ('second_worst_loop', 'second_worst_loop_sec'), (\n 'all_loops time', 'all_loops_time_sec')]\n all_final_lines.extend(get_table_rst_formatted_lines(table,\n header_mapping, title_line))\n all_final_lines.extend(['', ''])\n return '\\n'.join(all_final_lines)\n",
"step-4": "<mask token>\nimport gc\nfrom inspect import signature, getsourcelines\nfrom operator import itemgetter\nfrom time import perf_counter\nfrom SpeedIT.ProjectErr import Err\nfrom SpeedIT.Utils import format_time, get_table_rst_formatted_lines\n\n\ndef _helper_get_perf_counter_reference_time():\n \"\"\" Helper: Returns 2 times: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n Returns:\n float: 2 times the smallest difference of calling perf_counter() immediately after each other a couple of times\n \"\"\"\n _result_time = 99999999999.0\n for y_ in range(50):\n for x_ in range(3000):\n temp_start = perf_counter()\n temp_time = perf_counter() - temp_start\n if temp_time < _result_time:\n _result_time = temp_time\n return _result_time * 2\n\n\nclass _TimeIT(object):\n \"\"\" Class for timing execution speed of function code.\n\n Partially based on code from python timeit.py\n\n This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`\n This avoids calling into the function itself\n\n Args:\n func (function):\n\n .. warning:: the `func` function may not have any return statements: but any inner function can have one\n\n OK\n\n .. code-block:: python\n\n def example_formal_func_inner(data_):\n shuffle(data_)\n def fninner(x):\n return x[1]\n result = sorted(data_.items(), key=fninner)\n del result\n\n NOT OK\n\n .. code-block:: python\n\n def example_pep265(data_):\n shuffle(data_)\n result = sorted(data_.items(), key=itemgetter(1))\n return result\n\n func_positional_arguments (list): positional arguments for the function\n func_keyword_arguments (dict): any keyword arguments for the function\n setup_line_list (list): of strings with import lines needed by the functions any global data ect..\n this part is executed once before the actual `func code block` enters the loop\n\n .. warning:: no multiline string or indented code line\n\n check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.\n\n - Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n\n .. seealso:: _helper_get_perf_counter_reference_time()\n\n run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)\n\n - if run_sec is -1: then the generated function source code is only run once\n\n - if run_sec is None: then the generated function source code is only printed\n this is mainly useful to see the exact final `func code block` which will be timed.\n\n name (str): the name used for the output `name` part\n\n perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()\n \"\"\"\n\n def __init__(self, func, args_list, kwargs_dict, setup_line_list,\n check_too_fast, run_sec, name, perf_counter_reference_time):\n \"\"\" Constructor. 
See class doc string.\n \"\"\"\n self.func = func\n self.orig_func_name = getattr(self.func, '__name__', self.func)\n self.args_list = args_list.copy()\n self.kwargs_dict = kwargs_dict.copy()\n self.setup_line_list = setup_line_list\n self.check_too_fast = check_too_fast\n self.run_sec = run_sec\n self.name = name\n self.perf_counter_reference_time = perf_counter_reference_time\n if callable(self.func):\n _ns = {}\n self.src = self.__get_final_inner_function()\n if (self.run_sec is not None and self.run_sec != -1 and self.\n run_sec < 0.1):\n raise Err('_TimeIT.__init__()',\n 'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'\n .format(self.run_sec))\n _code = compile(self.src, 'benchmarkit-src', 'exec')\n exec(_code, globals(), _ns)\n self.inner = _ns['inner']\n else:\n raise ValueError('<func>: is not a `callable` type: <{}>'.\n format(self.func))\n\n def benchmark_it(self, with_gc):\n \"\"\" Returns timing result for the `func code block`\n\n .. note::\n By default, timeit() temporarily turns off garbage collection during the timing.\n The advantage of this approach is that it makes independent timings more comparable.\n This disadvantage is that GC may be an important component of the performance of the function being measured.\n If so, GC can be re-enabled as the with_gc=True\n\n Returns:\n dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec\n\n - loops: how many times the `func code block` was executed (looped over)\n - all_loops_time_sec: the total time in seconds for all loops:\n only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime\n - avg_loop_sec: average loop time in seconds: this should be mostly used as measure time:\n if there where only a very low number of loops - one might want to increase the `run_sec` and rerun it\n - two_best_loop_sec: time in seconds for the two fastest of all loops\n - two_worst_loop_sec: time in seconds for the two slowest of all loops\n\n Raises:\n SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1\n \"\"\"\n if self.run_sec is None:\n benchmark_result = self.src\n elif with_gc:\n gc_old = gc.isenabled()\n gc.enable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if not gc_old:\n gc.disable()\n else:\n gc_old = gc.isenabled()\n gc.disable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if gc_old:\n gc.enable()\n return benchmark_result\n\n def __get_final_inner_function(self):\n \"\"\" Returns a string of an generated inner function with the code body from: func\n\n Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict\n\n .. 
warnings:: the `func` function may not have any return statements: but any inner function can have one\n\n Returns:\n str: generated inner function\n\n Raises:\n SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation\n \"\"\"\n has_block_speedit = False\n _start_block_stripped_line = ''\n start_tag_block_speedit = 0\n end_tag_block_speedit = 0\n func_line, lnum = getsourcelines(self.func)\n sig = signature(self.func)\n indent_ = None\n func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())\n func_body = func_line[1:]\n search_docstring = False\n first_none_docstring_idx = 0\n for idx, line_orig in enumerate(func_body):\n rstripped_line = line_orig.rstrip()\n if rstripped_line:\n stripped_codeline = rstripped_line.lstrip()\n if stripped_codeline[0] == '#':\n if not ('::SPEEDIT::' in stripped_codeline or \n '**SPEEDIT**' in stripped_codeline):\n continue\n if search_docstring:\n if stripped_codeline[0:3] == '\"\"\"' or stripped_codeline[0:3\n ] == \"'''\":\n search_docstring = False\n continue\n else:\n codebody_indent = len(rstripped_line) - len(\n stripped_codeline)\n indent_ = codebody_indent - func_def_indent\n if stripped_codeline[0:3] == '\"\"\"' or stripped_codeline[0:3\n ] == \"'''\":\n search_docstring = True\n continue\n first_none_docstring_idx = idx\n break\n adjusted_func_code_line = []\n for line_orig in func_body[first_none_docstring_idx:]:\n if line_orig:\n rstrip_line = line_orig.rstrip()\n if rstrip_line:\n stripped_line = rstrip_line.lstrip()\n if stripped_line[0] == '#':\n if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in\n stripped_line):\n has_block_speedit = True\n else:\n continue\n line_indentation = len(rstrip_line) - len(stripped_line)\n if line_indentation % indent_ != 0:\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: ERROR: indentation must be a multiple of the second function line: <{}>\n seems we encountered a wrong indented line: line_indentation: <{}>\n {}\"\"\"\n .format(self.orig_func_name, indent_,\n line_indentation, line_orig))\n line_indentation_level = int((line_indentation -\n func_def_indent) / indent_) + 1\n if has_block_speedit:\n if '::SPEEDIT::' in stripped_line:\n if (start_tag_block_speedit !=\n end_tag_block_speedit):\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an END-TAG <**SPEEDIT**>: \n {}\"\"\"\n .format(self.orig_func_name,\n has_block_speedit, line_orig))\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n '_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'\n )\n start_tag_block_speedit += 1\n _start_block_stripped_line = stripped_line\n elif '**SPEEDIT**' in stripped_line:\n if (end_tag_block_speedit != \n start_tag_block_speedit - 1):\n raise Err('_TimeIT.get_final_inner_function',\n \"\"\"<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an START-TAG <::SPEEDIT::>: \n {}\"\"\"\n .format(self.orig_func_name,\n has_block_speedit, line_orig))\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n '_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(' ' *\n line_indentation_level +\n 'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too 
fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\\\n '\n + ' _start_block_stripped_line: <{}>'\n .format(_start_block_stripped_line) +\n '\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n end_tag_block_speedit += 1\n else:\n adjusted_func_code_line.append(' ' *\n line_indentation_level + stripped_line)\n else:\n adjusted_func_code_line.append(' ' *\n line_indentation_level + stripped_line)\n if has_block_speedit:\n if start_tag_block_speedit != end_tag_block_speedit:\n adjusted_func_code_line.append(\n ' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(\n ' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\\\n '\n + ' _start_block_stripped_line: <{}>'.format(\n _start_block_stripped_line) +\n '\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n else:\n adjusted_func_code_line.insert(0,\n ' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'\n )\n adjusted_func_code_line.append(\n ' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'\n )\n if self.check_too_fast:\n adjusted_func_code_line.append(\n ' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'\n .format(self.orig_func_name) +\n ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'\n )\n final_param_line = []\n for param, value in sig.parameters.items():\n if value.kind == value.POSITIONAL_OR_KEYWORD:\n if param in self.kwargs_dict:\n value_to_set = self.kwargs_dict.pop(param)\n else:\n value_to_set = self.args_list.pop(0)\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.POSITIONAL_ONLY:\n value_to_set = self.args_list.pop(0)\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n raise Err('_TimeIT.get_final_inner_function()',\n 'POSITIONAL_ONLY !! not sure what to do .. 
check in future if needed: param: <{}> value.kind: <{}>'\n .format(param, value.kind))\n elif value.kind == value.VAR_POSITIONAL:\n parameter_line = '{} = {}'.format(param, self.args_list)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.KEYWORD_ONLY:\n if param in self.kwargs_dict:\n value_to_set = self.kwargs_dict.pop(param)\n else:\n value_to_set = value.default\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append(' ' * 2 + parameter_line)\n elif value.kind == value.VAR_KEYWORD:\n parameter_line = '{} = {}'.format(param, self.kwargs_dict)\n final_param_line.append(' ' * 2 + parameter_line)\n else:\n continue\n final_setup_lines = []\n for setup_line in self.setup_line_list:\n setup_line = setup_line.strip()\n if setup_line:\n final_setup_lines.append(' ' + setup_line)\n final_inner_function_lines = [\n 'def inner(): # orig function name: <{}>'.format(self.\n orig_func_name),\n ' from time import perf_counter as _speeit_prefix__perf_counter',\n '', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',\n ' # ==================== START SETUP LINES ==================== #'\n , '']\n final_inner_function_lines.extend(final_setup_lines)\n inner_function_lines_part2 = ['',\n ' # ==================== END SETUP LINES ==================== #',\n '',\n ' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'\n , ' _speeit_prefix__check_reference_time = {}'.format(self.\n perf_counter_reference_time), ' _speeit_prefix__loops = 0',\n ' _speeit_prefix__all_loops_time_sec = 0.0',\n ' _speeit_prefix__avg_loop_sec = 0.0',\n ' _speeit_prefix__best_loop_sec = 99999999999.0',\n ' _speeit_prefix__second_best_loop_sec = 99999999999.0',\n ' _speeit_prefix__worst_loop_sec = 0.0',\n ' _speeit_prefix__second_worst_loop_sec = 0.0',\n ' if _speeit_prefix__run_sec is None:', ' return {',\n ' \"loops\": _speeit_prefix__loops,',\n ' \"all_loops_time_sec\": _speeit_prefix__all_loops_time_sec,'\n , ' \"avg_loop_sec\": _speeit_prefix__avg_loop_sec,',\n ' \"best_loop_sec\": _speeit_prefix__best_loop_sec,',\n ' \"second_best_loop_sec\": _speeit_prefix__second_best_loop_sec,'\n , ' \"worst_loop_sec\": _speeit_prefix__worst_loop_sec,',\n ' \"second_worst_loop_sec\": _speeit_prefix__second_worst_loop_sec'\n , ' }', ' elif _speeit_prefix__run_sec == -1:',\n ' # only run it once',\n ' _speeit_prefix__run_once = True', ' else:',\n ' _speeit_prefix__run_once = False',\n ' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'\n , ' while True:', ' _speeit_prefix__loops += 1',\n ' _speeit_prefix__result_time = 0', '',\n ' # ==================== START CODE BLOCK ==================== #'\n , '']\n final_inner_function_lines.extend(inner_function_lines_part2)\n final_inner_function_lines.extend(final_param_line)\n final_inner_function_lines.extend(adjusted_func_code_line)\n inner_function_lines_rest = ['',\n ' # ==================== END CODE BLOCK ==================== #'\n , '',\n ' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'\n ,\n ' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'\n ,\n ' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'\n ,\n ' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'\n ,\n ' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'\n ,\n ' _speeit_prefix__second_worst_loop_sec = 
_speeit_prefix__worst_loop_sec'\n ,\n ' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'\n , ' if _speeit_prefix__run_once:', ' break',\n ' # check if we have to get out',\n ' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'\n , ' break',\n ' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'\n ,\n ' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',\n ' _speeit_prefix__second_best_loop_sec = -1.0',\n ' if _speeit_prefix__second_worst_loop_sec == 0.0:',\n ' _speeit_prefix__second_worst_loop_sec = -1.0',\n ' return {', ' \"loops\": _speeit_prefix__loops,',\n ' \"all_loops_time_sec\": _speeit_prefix__all_loops_time_sec,',\n ' \"avg_loop_sec\": _speeit_prefix__avg_loop_sec,',\n ' \"best_loop_sec\": _speeit_prefix__best_loop_sec,',\n ' \"second_best_loop_sec\": _speeit_prefix__second_best_loop_sec,'\n , ' \"worst_loop_sec\": _speeit_prefix__worst_loop_sec,',\n ' \"second_worst_loop_sec\": _speeit_prefix__second_worst_loop_sec'\n , ' }', '']\n final_inner_function_lines.extend(inner_function_lines_rest)\n return '\\n'.join(final_inner_function_lines)\n\n\ndef speedit_benchmark(func_dict, setup_line_list, use_func_name=True,\n output_in_sec=False, benchmarkit__with_gc=False,\n benchmarkit__check_too_fast=True, benchmarkit__rank_by='best',\n benchmarkit__run_sec=1, benchmarkit__repeat=3):\n \"\"\" Returns one txt string for the ready comparison table: format is conform with reStructuredText\n\n Usage:\n\n .. code-block:: python\n\n func_dict = {\n 'function_f1': (function_f1, [act_one_hamlet], {}),\n 'function_f2': (function_f2, [act_one_hamlet], {}),\n 'function_f3': (function_f3, [act_one_hamlet], {}),\n }\n\n setup_line_list = [\n 'from random import shuffle',\n 'from os.path import abspath, dirname, join',\n 'MY_CONSTANT = 15'\n ]\n\n benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)\n\n Args:\n func_dict (dict): mapping function names to functions\n value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)\n setup_line_list (list): of strings with import lines needed by the functions any global data ect..\n\n .. warning:: no multiline string or indented code line\n\n use_func_name (bool): if True the function name will be used in the output `name` if False the `func_dict key` will be used in the the output `name`\n\n output_in_sec (int): if true the output is keep in seconds if false it is transformed to:\n second (s)\n millisecond (ms) One thousandth of one second\n microsecond (µs) One millionth of one second\n nanosecond (ns) One billionth of one second\n\n benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing\n\n benchmarkit__check_too_fast(bool): if True and aa code block is timed faster than a `Reference-Time` an Exception is raised.\n\n - Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n .. 
seealso:: _helper_get_perf_counter_reference_time()\n\n benchmarkit__rank_by (str): `best` or `average`\n\n benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec\n\n - if benchmarkit__run_sec is -1: then the generated function source code is only run once\n\n - if benchmarkit__run_sec is None: then the generated function source code is only printed\n this is mainly useful to see the exact final `func code block` which will be timed.\n\n benchmarkit__repeat (int): how often everything is repeated\n This is a convenience variable that calls the whole setup repeatedly\n\n Returns:\n str: ready to print or write to file: table format is conform with reStructuredText\n\n Raises:\n SpeedIT.Err\n \"\"\"\n if not func_dict:\n raise Err('speedit_benchmark()',\n 'At least one function must be defined in `func_dict`: <{}>'.\n format(func_dict))\n if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':\n raise Err('speedit_benchmark()',\n '<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'\n .format(benchmarkit__rank_by))\n if benchmarkit__repeat < 1:\n raise Err('speedit_benchmark()',\n '<benchmarkit__repeat> must be greater than <0> We got: <{}>'.\n format(benchmarkit__repeat))\n all_final_lines = []\n perf_counter_reference_time = _helper_get_perf_counter_reference_time()\n if benchmarkit__run_sec is None:\n all_final_lines.extend([\n '================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================'\n , '', ''])\n for func_name, (function_, func_positional_arguments,\n func_keyword_arguments) in sorted(func_dict.items()):\n if use_func_name:\n name = getattr(function_, '__name__', function_)\n else:\n name = func_name\n benchmark_result = _TimeIT(function_, func_positional_arguments,\n func_keyword_arguments, setup_line_list,\n benchmarkit__check_too_fast, benchmarkit__run_sec, name,\n perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)\n all_final_lines.extend([\n '===================== function name: <{}>'.format(\n func_name), '', benchmark_result, '', ''])\n else:\n title_line = (\n 'SpeedIT: `BenchmarkIT` for: <{}> functions. 
benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '\n .format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)\n )\n for repeat_all in range(benchmarkit__repeat):\n table = []\n for func_name, (function_, func_positional_arguments,\n func_keyword_arguments) in sorted(func_dict.items()):\n if use_func_name:\n name = getattr(function_, '__name__', function_)\n else:\n name = func_name\n benchmark_result = _TimeIT(function_,\n func_positional_arguments, func_keyword_arguments,\n setup_line_list, benchmarkit__check_too_fast,\n benchmarkit__run_sec, name, perf_counter_reference_time\n ).benchmark_it(with_gc=benchmarkit__with_gc)\n table.append(benchmark_result)\n if benchmarkit__rank_by == 'best':\n table = sorted(table, key=itemgetter('best_loop_sec'))\n compare_reference = table[0]['best_loop_sec']\n for idx, dict_ in enumerate(table):\n dict_['compare'] = '{:,.3f}'.format(dict_[\n 'best_loop_sec'] / compare_reference * 100.0)\n dict_['rank'] = '{:,}'.format(idx + 1)\n dict_['loops'] = '{:,}'.format(dict_['loops'])\n if output_in_sec:\n dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = '{:.11f}'.format(dict_[\n 'best_loop_sec'])\n if dict_['second_best_loop_sec'] == -1.0:\n dict_['second_best_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_best_loop_sec'] = '{:.11f}'.format(\n dict_['second_best_loop_sec'])\n dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[\n 'worst_loop_sec'])\n if dict_['second_worst_loop_sec'] == -1.0:\n dict_['second_worst_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_worst_loop_sec'] = '{:.11f}'.format(\n dict_['second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_\n ['all_loops_time_sec'])\n else:\n dict_['avg_loop_sec'] = format_time(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = format_time(dict_[\n 'best_loop_sec'])\n dict_['second_best_loop_sec'] = format_time(dict_[\n 'second_best_loop_sec'])\n dict_['worst_loop_sec'] = format_time(dict_[\n 'worst_loop_sec'])\n dict_['second_worst_loop_sec'] = format_time(dict_[\n 'second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = format_time(dict_[\n 'all_loops_time_sec'])\n elif benchmarkit__rank_by == 'average':\n table = sorted(table, key=itemgetter('avg_loop_sec'))\n compare_reference = table[0]['avg_loop_sec']\n for idx, dict_ in enumerate(table):\n dict_['compare'] = '{:,.3f}'.format(dict_[\n 'avg_loop_sec'] / compare_reference * 100.0)\n dict_['rank'] = '{:,}'.format(idx + 1)\n dict_['loops'] = '{:,}'.format(dict_['loops'])\n if output_in_sec:\n dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = '{:.11f}'.format(dict_[\n 'best_loop_sec'])\n if dict_['second_best_loop_sec'] == -1.0:\n dict_['second_best_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_best_loop_sec'] = '{:.11f}'.format(\n dict_['second_best_loop_sec'])\n dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[\n 'worst_loop_sec'])\n if dict_['second_worst_loop_sec'] == -1.0:\n dict_['second_worst_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_worst_loop_sec'] = '{:.11f}'.format(\n dict_['second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_\n ['all_loops_time_sec'])\n else:\n dict_['avg_loop_sec'] = format_time(dict_[\n 'avg_loop_sec'])\n dict_['best_loop_sec'] = format_time(dict_[\n 'best_loop_sec'])\n dict_['second_best_loop_sec'] = format_time(dict_[\n 'second_best_loop_sec'])\n dict_['worst_loop_sec'] = format_time(dict_[\n 'worst_loop_sec'])\n dict_['second_worst_loop_sec'] = 
format_time(dict_[\n 'second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = format_time(dict_[\n 'all_loops_time_sec'])\n header_mapping = [('name', 'name'), ('rank-{}'.format(\n benchmarkit__rank_by), 'rank'), ('compare %', 'compare'), (\n 'num. loops', 'loops'), ('avg_loop', 'avg_loop_sec'), (\n 'best_loop', 'best_loop_sec'), ('second_best_loop',\n 'second_best_loop_sec'), ('worst_loop', 'worst_loop_sec'),\n ('second_worst_loop', 'second_worst_loop_sec'), (\n 'all_loops time', 'all_loops_time_sec')]\n all_final_lines.extend(get_table_rst_formatted_lines(table,\n header_mapping, title_line))\n all_final_lines.extend(['', ''])\n return '\\n'.join(all_final_lines)\n",
"step-5": "\"\"\" Benchmark module: can also compare multiple functions\n\"\"\"\nimport gc\nfrom inspect import (\n signature,\n getsourcelines\n)\nfrom operator import itemgetter\nfrom time import perf_counter\n\nfrom SpeedIT.ProjectErr import Err\nfrom SpeedIT.Utils import (\n format_time,\n get_table_rst_formatted_lines\n)\n\n\n\ndef _helper_get_perf_counter_reference_time():\n \"\"\" Helper: Returns 2 times: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n Returns:\n float: 2 times the smallest difference of calling perf_counter() immediately after each other a couple of times\n \"\"\"\n _result_time = 99999999999.0\n for y_ in range(50):\n for x_ in range(3000):\n temp_start = perf_counter()\n temp_time = perf_counter() - temp_start\n if temp_time < _result_time:\n _result_time = temp_time\n return _result_time * 2\n\n\nclass _TimeIT(object):\n \"\"\" Class for timing execution speed of function code.\n\n Partially based on code from python timeit.py\n\n This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`\n This avoids calling into the function itself\n\n Args:\n func (function):\n\n .. warning:: the `func` function may not have any return statements: but any inner function can have one\n\n OK\n\n .. code-block:: python\n\n def example_formal_func_inner(data_):\n shuffle(data_)\n def fninner(x):\n return x[1]\n result = sorted(data_.items(), key=fninner)\n del result\n\n NOT OK\n\n .. code-block:: python\n\n def example_pep265(data_):\n shuffle(data_)\n result = sorted(data_.items(), key=itemgetter(1))\n return result\n\n func_positional_arguments (list): positional arguments for the function\n func_keyword_arguments (dict): any keyword arguments for the function\n setup_line_list (list): of strings with import lines needed by the functions any global data ect..\n this part is executed once before the actual `func code block` enters the loop\n\n .. warning:: no multiline string or indented code line\n\n check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.\n\n - Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n\n .. seealso:: _helper_get_perf_counter_reference_time()\n\n run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)\n\n - if run_sec is -1: then the generated function source code is only run once\n\n - if run_sec is None: then the generated function source code is only printed\n this is mainly useful to see the exact final `func code block` which will be timed.\n\n name (str): the name used for the output `name` part\n\n perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()\n \"\"\"\n\n def __init__(self, func, args_list, kwargs_dict, setup_line_list, check_too_fast, run_sec, name, perf_counter_reference_time):\n \"\"\" Constructor. 
See class doc string.\n \"\"\"\n self.func = func\n self.orig_func_name = getattr(self.func, \"__name__\", self.func)\n self.args_list = args_list.copy()\n self.kwargs_dict = kwargs_dict.copy()\n self.setup_line_list = setup_line_list\n self.check_too_fast = check_too_fast\n self.run_sec = run_sec\n self.name = name\n self.perf_counter_reference_time = perf_counter_reference_time\n if callable(self.func):\n _ns = {}\n self.src = self.__get_final_inner_function()\n if self.run_sec is not None and self.run_sec != -1 and self.run_sec < 0.1:\n raise Err('_TimeIT.__init__()', 'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'.format(self.run_sec))\n\n _code = compile(self.src, 'benchmarkit-src', \"exec\")\n exec(_code, globals(), _ns)\n self.inner = _ns[\"inner\"]\n else:\n raise ValueError('<func>: is not a `callable` type: <{}>'.format(self.func))\n\n\n def benchmark_it(self, with_gc):\n \"\"\" Returns timing result for the `func code block`\n\n .. note::\n By default, timeit() temporarily turns off garbage collection during the timing.\n The advantage of this approach is that it makes independent timings more comparable.\n This disadvantage is that GC may be an important component of the performance of the function being measured.\n If so, GC can be re-enabled as the with_gc=True\n\n Returns:\n dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec\n\n - loops: how many times the `func code block` was executed (looped over)\n - all_loops_time_sec: the total time in seconds for all loops:\n only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime\n - avg_loop_sec: average loop time in seconds: this should be mostly used as measure time:\n if there where only a very low number of loops - one might want to increase the `run_sec` and rerun it\n - two_best_loop_sec: time in seconds for the two fastest of all loops\n - two_worst_loop_sec: time in seconds for the two slowest of all loops\n\n Raises:\n SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1\n \"\"\"\n if self.run_sec is None:\n benchmark_result = self.src\n elif with_gc:\n gc_old = gc.isenabled()\n gc.enable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if not gc_old:\n gc.disable()\n else:\n gc_old = gc.isenabled()\n gc.disable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if gc_old:\n gc.enable()\n return benchmark_result\n\n def __get_final_inner_function(self):\n \"\"\" Returns a string of an generated inner function with the code body from: func\n\n Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict\n\n .. 
warnings:: the `func` function may not have any return statements: but any inner function can have one\n\n Returns:\n str: generated inner function\n\n Raises:\n SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation\n \"\"\"\n has_block_speedit = False\n _start_block_stripped_line = ''\n start_tag_block_speedit = 0\n end_tag_block_speedit = 0\n\n func_line, lnum = getsourcelines(self.func)\n sig = signature(self.func)\n indent_ = None\n func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())\n func_body = func_line[1:]\n search_docstring = False\n\n # PREPARE: remove docstring and get final indentation\n first_none_docstring_idx = 0\n for idx, line_orig in enumerate(func_body):\n rstripped_line = line_orig.rstrip()\n if rstripped_line:\n stripped_codeline = rstripped_line.lstrip()\n if stripped_codeline[0] == '#': # remove comment lines\n if not ('::SPEEDIT::' in stripped_codeline or '**SPEEDIT**' in stripped_codeline):\n continue\n if search_docstring:\n if stripped_codeline[0:3] == '\"\"\"' or stripped_codeline[0:3] == \"'''\":\n search_docstring = False\n continue\n else:\n codebody_indent = len(rstripped_line) - len(stripped_codeline)\n indent_ = codebody_indent - func_def_indent\n # Check if we have a docstring\n if stripped_codeline[0:3] == '\"\"\"' or stripped_codeline[0:3] == \"'''\":\n search_docstring = True\n continue\n first_none_docstring_idx = idx\n break\n\n # do the func code body\n adjusted_func_code_line = []\n for line_orig in func_body[first_none_docstring_idx:]:\n # remove empty\n if line_orig:\n # get indentation check it is a multiple of indent_\n rstrip_line = line_orig.rstrip()\n if rstrip_line:\n stripped_line = rstrip_line.lstrip()\n if stripped_line[0] == '#': # remove comment lines: keep any with ::SPEEDIT::\n if '::SPEEDIT::' in stripped_line or '**SPEEDIT**' in stripped_line:\n has_block_speedit = True\n else:\n continue\n line_indentation = len(rstrip_line) - len(stripped_line)\n if line_indentation % indent_ != 0:\n raise Err('_TimeIT.get_final_inner_function', '<{}>: ERROR: indentation must be a multiple of the second function line: <{}>\\n seems we encountered a wrong indented line: line_indentation: <{}>\\n {}'.format(self.orig_func_name, indent_, line_indentation, line_orig))\n line_indentation_level = int((line_indentation - func_def_indent) / indent_) + 1 # need one extra level\n\n if has_block_speedit:\n if '::SPEEDIT::' in stripped_line:\n if start_tag_block_speedit != end_tag_block_speedit:\n # expected END Tag\n raise Err('_TimeIT.get_final_inner_function', '<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\\n Expected an END-TAG <**SPEEDIT**>: \\n {}'.format(self.orig_func_name, has_block_speedit, line_orig))\n adjusted_func_code_line.append((' ' * line_indentation_level) + '_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added')\n start_tag_block_speedit += 1\n _start_block_stripped_line = stripped_line\n elif '**SPEEDIT**' in stripped_line:\n if end_tag_block_speedit != start_tag_block_speedit - 1:\n # expected START TAG\n raise Err('_TimeIT.get_final_inner_function', '<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\\n Expected an START-TAG <::SPEEDIT::>: \\n {}'.format(self.orig_func_name, has_block_speedit, line_orig))\n # Do this inner result\n adjusted_func_code_line.append((' ' * line_indentation_level) + '_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # 
**SPEEDIT**END internally added')\n if self.check_too_fast:\n adjusted_func_code_line.append((' ' * line_indentation_level) + 'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'.format(self.orig_func_name) + ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\\\n ' + ' _start_block_stripped_line: <{}>'.format(_start_block_stripped_line) + '\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added')\n end_tag_block_speedit += 1\n else:\n adjusted_func_code_line.append((' ' * line_indentation_level) + stripped_line)\n else:\n adjusted_func_code_line.append((' ' * line_indentation_level) + stripped_line)\n\n # CHECK: LAST END TAG\n # e.g. if a function body ends with an END-TAG this is not returned by: inspect.getsourcelines(self.func)\n if has_block_speedit:\n if start_tag_block_speedit != end_tag_block_speedit:\n # Do the last inner result: ADDING an END-TAG\n adjusted_func_code_line.append(' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added')\n if self.check_too_fast:\n adjusted_func_code_line.append(' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'.format(self.orig_func_name) + ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\\\n ' + ' _start_block_stripped_line: <{}>'.format(_start_block_stripped_line) + '\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added')\n\n # add the normal perf_counter time lines\n else:\n adjusted_func_code_line.insert(0, ' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added')\n adjusted_func_code_line.append(' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added')\n\n if self.check_too_fast:\n adjusted_func_code_line.append(' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception(\"in function: <{}>'.format(self.orig_func_name) + ' code block: too fast to measure:\\\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added')\n\n # Do the arguments\n final_param_line = []\n for param, value in sig.parameters.items():\n if value.kind == value.POSITIONAL_OR_KEYWORD:\n # check if we have a keyword\n if param in self.kwargs_dict:\n value_to_set = self.kwargs_dict.pop(param)\n else: # use the positional\n value_to_set = self.args_list.pop(0)\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append((' ' * 2) + parameter_line)\n elif value.kind == value.POSITIONAL_ONLY:\n value_to_set = self.args_list.pop(0)\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append((' ' * 2) + parameter_line)\n # TODO: From docs: 3.4 Python has no explicit syntax for defining positional-only parameters, but many built-in and extension module functions (especially 
those that accept only one or two parameters) accept them.\n raise Err('_TimeIT.get_final_inner_function()', 'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'.format(param, value.kind))\n elif value.kind == value.VAR_POSITIONAL: # do the remaining POSITIONAL arguments\n parameter_line = '{} = {}'.format(param, self.args_list)\n final_param_line.append((' ' * 2) + parameter_line)\n elif value.kind == value.KEYWORD_ONLY:\n if param in self.kwargs_dict:\n value_to_set = self.kwargs_dict.pop(param)\n else: # use the default\n value_to_set = value.default\n if isinstance(value_to_set, str):\n parameter_line = '{} = \"{}\"'.format(param, value_to_set)\n else:\n parameter_line = '{} = {}'.format(param, value_to_set)\n final_param_line.append((' ' * 2) + parameter_line)\n elif value.kind == value.VAR_KEYWORD: # do the remaining KEYWORD arguments\n parameter_line = '{} = {}'.format(param, self.kwargs_dict)\n final_param_line.append((' ' * 2) + parameter_line)\n else:\n continue\n\n # do self.setup_line_list\n final_setup_lines = []\n for setup_line in self.setup_line_list:\n setup_line = setup_line.strip()\n if setup_line:\n final_setup_lines.append(' ' + setup_line)\n\n final_inner_function_lines = [\n 'def inner(): # orig function name: <{}>'.format(self.orig_func_name),\n ' from time import perf_counter as _speeit_prefix__perf_counter',\n '',\n ' _speeit_prefix__run_sec = {}'.format(self.run_sec),\n '',\n ' # ==================== START SETUP LINES ==================== #',\n '',\n ]\n\n final_inner_function_lines.extend(final_setup_lines)\n\n inner_function_lines_part2 = [\n '',\n ' # ==================== END SETUP LINES ==================== #',\n '',\n ' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times',\n ' _speeit_prefix__check_reference_time = {}'.format(self.perf_counter_reference_time),\n ' _speeit_prefix__loops = 0',\n ' _speeit_prefix__all_loops_time_sec = 0.0',\n ' _speeit_prefix__avg_loop_sec = 0.0',\n ' _speeit_prefix__best_loop_sec = 99999999999.0',\n ' _speeit_prefix__second_best_loop_sec = 99999999999.0',\n ' _speeit_prefix__worst_loop_sec = 0.0',\n ' _speeit_prefix__second_worst_loop_sec = 0.0',\n ' if _speeit_prefix__run_sec is None:',\n ' return {',\n ' \"loops\": _speeit_prefix__loops,',\n ' \"all_loops_time_sec\": _speeit_prefix__all_loops_time_sec,',\n ' \"avg_loop_sec\": _speeit_prefix__avg_loop_sec,',\n ' \"best_loop_sec\": _speeit_prefix__best_loop_sec,',\n ' \"second_best_loop_sec\": _speeit_prefix__second_best_loop_sec,',\n ' \"worst_loop_sec\": _speeit_prefix__worst_loop_sec,',\n ' \"second_worst_loop_sec\": _speeit_prefix__second_worst_loop_sec',\n ' }',\n ' elif _speeit_prefix__run_sec == -1:',\n ' # only run it once',\n ' _speeit_prefix__run_once = True',\n ' else:',\n ' _speeit_prefix__run_once = False',\n ' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()',\n ' while True:',\n ' _speeit_prefix__loops += 1',\n ' _speeit_prefix__result_time = 0',\n '',\n ' # ==================== START CODE BLOCK ==================== #',\n '',\n ]\n\n final_inner_function_lines.extend(inner_function_lines_part2)\n\n final_inner_function_lines.extend(final_param_line)\n final_inner_function_lines.extend(adjusted_func_code_line)\n\n inner_function_lines_rest = [\n '',\n ' # ==================== END CODE BLOCK ==================== #',\n '',\n ' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time',\n ' if _speeit_prefix__result_time <= 
_speeit_prefix__best_loop_sec:',\n ' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec',\n ' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time',\n ' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:',\n ' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec',\n ' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time',\n ' if _speeit_prefix__run_once:',\n ' break',\n ' # check if we have to get out',\n ' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:',\n ' break',\n ' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops',\n ' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',\n ' _speeit_prefix__second_best_loop_sec = -1.0',\n ' if _speeit_prefix__second_worst_loop_sec == 0.0:',\n ' _speeit_prefix__second_worst_loop_sec = -1.0',\n ' return {',\n ' \"loops\": _speeit_prefix__loops,',\n ' \"all_loops_time_sec\": _speeit_prefix__all_loops_time_sec,',\n ' \"avg_loop_sec\": _speeit_prefix__avg_loop_sec,',\n ' \"best_loop_sec\": _speeit_prefix__best_loop_sec,',\n ' \"second_best_loop_sec\": _speeit_prefix__second_best_loop_sec,',\n ' \"worst_loop_sec\": _speeit_prefix__worst_loop_sec,',\n ' \"second_worst_loop_sec\": _speeit_prefix__second_worst_loop_sec',\n ' }',\n ''\n ]\n final_inner_function_lines.extend(inner_function_lines_rest)\n\n return '\\n'.join(final_inner_function_lines)\n\n\ndef speedit_benchmark(func_dict, setup_line_list, use_func_name=True, output_in_sec=False, benchmarkit__with_gc=False, benchmarkit__check_too_fast=True, benchmarkit__rank_by='best', benchmarkit__run_sec=1, benchmarkit__repeat=3):\n \"\"\" Returns one txt string for the ready comparison table: format is conform with reStructuredText\n\n Usage:\n\n .. code-block:: python\n\n func_dict = {\n 'function_f1': (function_f1, [act_one_hamlet], {}),\n 'function_f2': (function_f2, [act_one_hamlet], {}),\n 'function_f3': (function_f3, [act_one_hamlet], {}),\n }\n\n setup_line_list = [\n 'from random import shuffle',\n 'from os.path import abspath, dirname, join',\n 'MY_CONSTANT = 15'\n ]\n\n benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)\n\n Args:\n func_dict (dict): mapping function names to functions\n value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)\n setup_line_list (list): of strings with import lines needed by the functions any global data ect..\n\n .. warning:: no multiline string or indented code line\n\n use_func_name (bool): if True the function name will be used in the output `name` if False the `func_dict key` will be used in the the output `name`\n\n output_in_sec (int): if true the output is keep in seconds if false it is transformed to:\n second (s)\n millisecond (ms) One thousandth of one second\n microsecond (µs) One millionth of one second\n nanosecond (ns) One billionth of one second\n\n benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing\n\n benchmarkit__check_too_fast(bool): if True and aa code block is timed faster than a `Reference-Time` an Exception is raised.\n\n - Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times\n\n .. 
seealso:: _helper_get_perf_counter_reference_time()\n\n benchmarkit__rank_by (str): `best` or `average`\n\n benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec\n\n - if benchmarkit__run_sec is -1: then the generated function source code is only run once\n\n - if benchmarkit__run_sec is None: then the generated function source code is only printed\n this is mainly useful to see the exact final `func code block` which will be timed.\n\n benchmarkit__repeat (int): how often everything is repeated\n This is a convenience variable that calls the whole setup repeatedly\n\n Returns:\n str: ready to print or write to file: table format is conform with reStructuredText\n\n Raises:\n SpeedIT.Err\n \"\"\"\n if not func_dict:\n raise Err('speedit_benchmark()', 'At least one function must be defined in `func_dict`: <{}>'.format(func_dict))\n if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':\n raise Err('speedit_benchmark()', '<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'.format(benchmarkit__rank_by))\n if benchmarkit__repeat < 1:\n raise Err('speedit_benchmark()', '<benchmarkit__repeat> must be greater than <0> We got: <{}>'.format(benchmarkit__repeat))\n\n\n all_final_lines = []\n\n # get once the perf_counter_reference_time\n perf_counter_reference_time = _helper_get_perf_counter_reference_time()\n\n if benchmarkit__run_sec is None:\n all_final_lines.extend([\n '================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================',\n '',\n ''\n ])\n # Run all only once and get the code\n for func_name, (function_, func_positional_arguments, func_keyword_arguments) in sorted(func_dict.items()):\n if use_func_name:\n name = getattr(function_, \"__name__\", function_)\n else:\n name = func_name\n benchmark_result = _TimeIT(function_, func_positional_arguments, func_keyword_arguments, setup_line_list, benchmarkit__check_too_fast, benchmarkit__run_sec, name, perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)\n all_final_lines.extend([\n '===================== function name: <{}>'.format(func_name),\n '',\n benchmark_result,\n '',\n '',\n ])\n else:\n title_line = 'SpeedIT: `BenchmarkIT` for: <{}> functions. 
benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '.format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)\n\n for repeat_all in range(benchmarkit__repeat):\n table = []\n for func_name, (function_, func_positional_arguments, func_keyword_arguments) in sorted(func_dict.items()):\n if use_func_name:\n name = getattr(function_, \"__name__\", function_)\n else:\n name = func_name\n benchmark_result = _TimeIT(function_, func_positional_arguments, func_keyword_arguments, setup_line_list, benchmarkit__check_too_fast, benchmarkit__run_sec, name, perf_counter_reference_time).benchmark_it(with_gc=benchmarkit__with_gc)\n table.append(benchmark_result)\n\n if benchmarkit__rank_by == 'best':\n table = sorted(table, key=itemgetter('best_loop_sec'))\n compare_reference = table[0]['best_loop_sec']\n for idx, dict_ in enumerate(table):\n dict_['compare'] = '{:,.3f}'.format((dict_['best_loop_sec'] / compare_reference) * 100.0)\n dict_['rank'] = '{:,}'.format(idx + 1)\n dict_['loops'] = '{:,}'.format(dict_['loops'])\n if output_in_sec:\n dict_['avg_loop_sec'] = '{:.11f}'.format(dict_['avg_loop_sec'])\n dict_['best_loop_sec'] = '{:.11f}'.format(dict_['best_loop_sec'])\n if dict_['second_best_loop_sec'] == -1.0:\n dict_['second_best_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_best_loop_sec'] = '{:.11f}'.format(dict_['second_best_loop_sec'])\n dict_['worst_loop_sec'] = '{:.11f}'.format(dict_['worst_loop_sec'])\n if dict_['second_worst_loop_sec'] == -1.0:\n dict_['second_worst_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_worst_loop_sec'] = '{:.11f}'.format(dict_['second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_['all_loops_time_sec'])\n else:\n dict_['avg_loop_sec'] = format_time(dict_['avg_loop_sec'])\n dict_['best_loop_sec'] = format_time(dict_['best_loop_sec'])\n dict_['second_best_loop_sec'] = format_time(dict_['second_best_loop_sec'])\n dict_['worst_loop_sec'] = format_time(dict_['worst_loop_sec'])\n dict_['second_worst_loop_sec'] = format_time(dict_['second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = format_time(dict_['all_loops_time_sec'])\n elif benchmarkit__rank_by == 'average':\n table = sorted(table, key=itemgetter('avg_loop_sec'))\n compare_reference = table[0]['avg_loop_sec']\n for idx, dict_ in enumerate(table):\n dict_['compare'] = '{:,.3f}'.format((dict_['avg_loop_sec'] / compare_reference) * 100.0)\n dict_['rank'] = '{:,}'.format(idx + 1)\n dict_['loops'] = '{:,}'.format(dict_['loops'])\n if output_in_sec:\n dict_['avg_loop_sec'] = '{:.11f}'.format(dict_['avg_loop_sec'])\n dict_['best_loop_sec'] = '{:.11f}'.format(dict_['best_loop_sec'])\n if dict_['second_best_loop_sec'] == -1.0:\n dict_['second_best_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_best_loop_sec'] = '{:.11f}'.format(dict_['second_best_loop_sec'])\n dict_['worst_loop_sec'] = '{:.11f}'.format(dict_['worst_loop_sec'])\n if dict_['second_worst_loop_sec'] == -1.0:\n dict_['second_worst_loop_sec'] = 'NOT-MEASURED'\n else:\n dict_['second_worst_loop_sec'] = '{:.11f}'.format(dict_['second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_['all_loops_time_sec'])\n else:\n dict_['avg_loop_sec'] = format_time(dict_['avg_loop_sec'])\n dict_['best_loop_sec'] = format_time(dict_['best_loop_sec'])\n dict_['second_best_loop_sec'] = format_time(dict_['second_best_loop_sec'])\n dict_['worst_loop_sec'] = format_time(dict_['worst_loop_sec'])\n dict_['second_worst_loop_sec'] = format_time(dict_['second_worst_loop_sec'])\n dict_['all_loops_time_sec'] = 
format_time(dict_['all_loops_time_sec'])\n\n header_mapping = [\n ('name', 'name'),\n ('rank-{}'.format(benchmarkit__rank_by), 'rank'),\n ('compare %', 'compare'),\n ('num. loops', 'loops'),\n ('avg_loop', 'avg_loop_sec'),\n ('best_loop', 'best_loop_sec'),\n ('second_best_loop', 'second_best_loop_sec'),\n ('worst_loop', 'worst_loop_sec'),\n ('second_worst_loop', 'second_worst_loop_sec'),\n ('all_loops time', 'all_loops_time_sec')\n ]\n\n all_final_lines.extend(get_table_rst_formatted_lines(table, header_mapping, title_line))\n all_final_lines.extend([\n '',\n '',\n ])\n\n return '\\n'.join(all_final_lines)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |