filename | text
---|---
the-stack_0_27874
|
import itertools as it
BIGVALUE = 123 # replace with your input
d_rose_func = {"E": lambda x,y: (x + 1, y),
"N": lambda x,y: (x, y + 1),
"W": lambda x,y: (x - 1, y),
"S": lambda x,y: (x, y - 1),
}
def change_gold(d_coo_values, v_c):
score = 0
for i,j in it.product((-1,0,1), repeat = 2):
if i == 0 and j == 0:
continue  # skip the cell itself; only the 8 neighbours count
try:
score += d_coo_values[(v_c[0] + i, v_c[1] + j)]
except KeyError:
pass  # neighbour not visited yet
return score
def main(star):
d_coo_values = {}
value = 1
value_coords = (0,0)
times = 1
turn = 0
flag = False
d_coo_values[value_coords] = value
for rose in it.cycle(d_rose_func):
if turn == 2:
turn = 0
times += 1
for i in range(times):
value_coords = d_rose_func[rose](*value_coords)
if star == "silver":
value += 1
if star == "gold":
value = change_gold(d_coo_values, value_coords)
d_coo_values[value_coords] = value
if value >= BIGVALUE:
flag = True
break
if flag:
break
turn += 1
if star == "silver":
inverted = {v:k for k,v in d_coo_values.items()}
return sum(map(abs, inverted[BIGVALUE]))
if star == "gold":
return value
print("silver", main("silver"))
print("gold", main("gold"))
|
the-stack_0_27877
|
import os
from functools import reduce  # reduce is a builtin only on Python 2; explicit import needed on Python 3
import numpy as np
from tqdm import trange
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope
from model import Model
from buffer import Buffer
import data.gaze_data as gaze_data
import data.hand_data as hand_data
import data.boxes_data as boxes_data
from utils import imwrite, imread, img_tile
class Trainer(object):
def __init__(self, config, rng):
self.config = config
self.rng = rng
self.task = config.task
self.model_dir = config.model_dir
self.gpu_memory_fraction = config.gpu_memory_fraction
self.log_step = config.log_step
self.max_step = config.max_step
self.K_d = config.K_d
self.K_g = config.K_g
self.initial_K_d = config.initial_K_d
self.initial_K_g = config.initial_K_g
self.checkpoint_secs = config.checkpoint_secs
DataLoader = {
'gaze': gaze_data.DataLoader,
'hand': hand_data.DataLoader,
'boxes': boxes_data.DataLoader,
}[config.data_set]
self.data_loader = DataLoader(config, rng=self.rng)
self.model = Model(config, self.data_loader)
self.history_buffer = Buffer(config, self.rng)
self.summary_ops = {
'test_synthetic_images': {
'summary': tf.summary.image("test_synthetic_images",
self.model.resized_x,
max_outputs=config.max_image_summary),
'output': self.model.resized_x,
},
'test_refined_images': {
'summary': tf.summary.image("test_refined_images",
self.model.denormalized_R_x,
max_outputs=config.max_image_summary),
'output': self.model.denormalized_R_x,
}
}
self.saver = tf.train.Saver()
self.summary_writer = tf.summary.FileWriter(self.model_dir)
sv = tf.train.Supervisor(logdir=self.model_dir,
is_chief=True,
saver=self.saver,
summary_op=None,
summary_writer=self.summary_writer,
save_summaries_secs=300,
save_model_secs=self.checkpoint_secs,
global_step=self.model.discrim_step)
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=self.gpu_memory_fraction,
allow_growth=True) # seems to be not working
sess_config = tf.ConfigProto(allow_soft_placement=True,
gpu_options=gpu_options)
self.sess = sv.prepare_or_wait_for_session(config=sess_config)
def train(self):
print("[*] Training starts...")
self._summary_writer = None
sample_num = reduce(lambda x, y: x * y, self.config.sample_image_grid)
idxs = self.rng.choice(
len(self.data_loader.synthetic_data_paths), sample_num)
test_samples = np.expand_dims(np.stack(
[imread(path) for path in
self.data_loader.synthetic_data_paths[idxs]]
), -1)
def train_refiner(push_buffer=False):
feed_dict = {
self.model.synthetic_batch_size: self.data_loader.batch_size,
}
res = self.model.train_refiner(
self.sess, feed_dict, self._summary_writer, with_output=True)
self._summary_writer = self._get_summary_writer(res)
if push_buffer:
self.history_buffer.push(res['output'])
if res['step'] % self.log_step == 0:
feed_dict = {
self.model.x: test_samples,
}
self._inject_summary(
'test_refined_images', feed_dict, res['step'])
if res['step'] / float(self.log_step) == 1.:
self._inject_summary(
'test_synthetic_images', feed_dict, res['step'])
def train_discrim():
feed_dict = {
self.model.synthetic_batch_size: self.data_loader.batch_size // 2,
self.model.R_x_history: self.history_buffer.sample(),
self.model.y: self.data_loader.next(),
}
res = self.model.train_discrim(
self.sess, feed_dict, self._summary_writer, with_history=True, with_output=False)
self._summary_writer = self._get_summary_writer(res)
for k in trange(self.initial_K_g, desc="Train refiner"):
train_refiner(push_buffer=k > self.initial_K_g * 0.9)
for k in trange(self.initial_K_d, desc="Train discrim"):
train_discrim()
for step in trange(self.max_step, desc="Train both"):
for k in range(self.K_g):
train_refiner(push_buffer=True)
for k in range(self.K_d):
train_discrim()
def test(self):
batch_size = self.data_loader.batch_size
num_epoch = len(self.data_loader.synthetic_data_paths) // batch_size
for idx in trange(num_epoch, desc="Refine all synthetic images"):
feed_dict = {
self.model.synthetic_batch_size: batch_size,
}
res = self.model.test_refiner(
self.sess, feed_dict, None, with_output=True)
for image, filename in zip(res['output'], res['filename']):
basename = os.path.basename(
filename).replace("_cropped", "_refined")
path = os.path.join(self.config.output_model_dir, basename)
imwrite(path, image[:, :, 0])
def _inject_summary(self, tag, feed_dict, step):
summaries = self.sess.run(self.summary_ops[tag], feed_dict)
self.summary_writer.add_summary(summaries['summary'], step)
path = os.path.join(
self.config.sample_model_dir, "{}.png".format(step))
imwrite(path, img_tile(summaries['output'],
tile_shape=self.config.sample_image_grid)[:, :, 0])
def _get_summary_writer(self, result):
if result['step'] % self.log_step == 0:
return self.summary_writer
else:
return None
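# Minimal sketch (not part of the original file) of the alternating schedule that
# Trainer.train() implements: pre-train the refiner, pre-train the discriminator,
# then interleave K_g refiner updates with K_d discriminator updates. The two
# callables are hypothetical stand-ins for the model's train steps.
def _training_schedule(initial_k_g, initial_k_d, max_step, k_g, k_d,
                       refiner_step, discrim_step):
    for _ in range(initial_k_g):   # 1) refiner-only pre-training
        refiner_step()
    for _ in range(initial_k_d):   # 2) discriminator-only pre-training
        discrim_step()
    for _ in range(max_step):      # 3) alternating updates
        for _ in range(k_g):
            refiner_step()
        for _ in range(k_d):
            discrim_step()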
|
the-stack_0_27879
|
import argparse
import torch
import pandas as pd
from fairseq import options, tasks, checkpoint_utils
from fairseq.data import encoders
from tqdm import tqdm
import data
def main(script_args, model_args):
split = script_args.split
predictions = predict(image_id_path=f'{model_args.captions_dir}/{split}-ids.txt',
grid_features_path=f'{model_args.features_dir}/{split}-features-grid',
obj_features_path=f'{model_args.features_dir}/{split}-features-obj',
obj_features_meta_path=f'{model_args.features_dir}/{split}-features-obj/metadata.csv',
model_args=model_args)
if not script_args.no_console_output:
print_predictions(predictions)
if script_args.output:
store_predictions_as_json(predictions, script_args.output)
def predict(image_id_path: str,
grid_features_path: str,
obj_features_path: str,
obj_features_meta_path: str,
model_args) -> pd.DataFrame:
print(model_args)
use_cuda = torch.cuda.is_available() and not model_args.cpu
task = tasks.setup_task(model_args)
captions_dict = task.target_dictionary
models, _model_args = checkpoint_utils.load_model_ensemble(model_args.path.split(':'), task=task)
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if model_args.no_beamable_mm else model_args.beam,
need_attn=model_args.print_alignment,
)
if torch.cuda.is_available() and not model_args.cpu:
model.cuda()
generator = task.build_generator(model_args)
tokenizer = encoders.build_tokenizer(model_args)
bpe = encoders.build_bpe(model_args)
def decode(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
sample_ids = read_sample_ids(model_args.input)
image_ids = data.read_image_ids(image_id_path)
assert_sample_id_validity(sample_ids, image_ids)
if model_args.features == 'grid':
image_ds = data.GridFeaturesDataset(grid_features_path, image_ids)
elif model_args.features == 'obj':
image_md = data.read_image_metadata(obj_features_meta_path)
image_ds = data.ObjectFeaturesDataset(obj_features_path, image_ids, image_md)
else:
raise ValueError(f'Invalid --features option: {model_args.features}')
prediction_ids = []
prediction_results = []
for sample_id in tqdm(sample_ids):
features, locations = image_ds.read_data(sample_id)
length = features.shape[0]
if use_cuda:
features = features.cuda()
locations = locations.cuda()
sample = {
'net_input': {
'src_tokens': features.unsqueeze(0),
'src_locations': locations.unsqueeze(0),
'src_lengths': [length]
}
}
translations = task.inference_step(generator, models, sample)
prediction = decode(captions_dict.string(translations[0][0]['tokens']))
prediction_ids.append(sample_id)
prediction_results.append(prediction)
return pd.DataFrame.from_dict(data={
'image_id': prediction_ids,
'caption': prediction_results
})
def read_sample_ids(sample_id_file: str):
with open(sample_id_file) as f:
sample_ids = set([int(line.rstrip('\n')) for line in f])
return sample_ids
def assert_sample_id_validity(sample_ids: iter, image_ids: iter):
invalid_ids = [i for i in sample_ids if i not in image_ids]
if len(invalid_ids) > 0:
raise ValueError('Input sample ids {} are not present in the specified split.'.format(invalid_ids))
def print_predictions(predictions: pd.DataFrame) -> None:
print('Predictions:')
print('============')
for sample_id, pred in predictions.to_numpy():
print('{}: {}'.format(sample_id, pred))
def store_predictions_as_json(predictions: pd.DataFrame, file_path: str) -> None:
print('\nWriting predictions to file "{}".'.format(file_path))
predictions.to_json(file_path, orient='records')
def cli_main():
script_parser = get_script_parser()
script_args, extra = script_parser.parse_known_args()
parser = options.get_generation_parser(interactive=True, default_task='captioning')
model_args = options.parse_args_and_arch(parser, input_args=extra)
main(script_args, model_args)
def get_script_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--split', choices=['train', 'valid', 'test'], required=True,
help='The dataset split containing the samples provided in the input file (train|valid|test).')
parser.add_argument('--output', type=str,
help='An optional output file used to store the predictions in json-format.')
parser.add_argument('--no-console-output', action='store_true',
help='Suppress printing the prediction results to the console.')
return parser
if __name__ == '__main__':
cli_main()
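# Illustrative note (not part of the original script): read_sample_ids expects a plain
# text file with one integer image id per line. The ids below are hypothetical.
#
#   36184
#   283921
#
# read_sample_ids('ids.txt') would then return the set {36184, 283921}, and each id must
# also appear in the split's ids file, otherwise assert_sample_id_validity raises.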
|
the-stack_0_27880
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets a placement by its id. To determine which ad units
exist, run get_all_placements.py."""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
placement_service = client.GetService('PlacementService', version='v201302')
# Set the id of the placement to get.
placement_id = 'INSERT_PLACEMENT_ID_HERE'
# Get placement.
placement = placement_service.GetPlacement(placement_id)[0]
# Display results.
print ('Placement with id \'%s\', name \'%s\', and status \'%s\' was found.'
% (placement['id'], placement['name'], placement['status']))
|
the-stack_0_27881
|
"""
MAP Client Plugin Step
"""
from PySide2 import QtGui
import json
from mapclient.mountpoints.workflowstep import WorkflowStepMountPoint
from mapclientplugins.pointwiserigidregistrationstep.configuredialog import ConfigureDialog
from gias2.registration import alignment_fitting as AF
from gias2.common import transform3D
from mapclientplugins.pointwiserigidregistrationstep.mayaviregistrationviewerwidget import \
MayaviRegistrationViewerWidget
from gias2.mappluginutils.datatypes import transformations as T
import numpy as np
regMethods = {
'Correspondent Rigid': AF.fitRigid,
'Correspondent Rigid+Scale': AF.fitRigidScale,
'Correspondent Affine': AF.fitAffine,
'ICP Rigid Source-Target': AF.fitDataRigidEPDP,
'ICP Rigid Target-Source': AF.fitDataRigidDPEP,
'ICP Rigid+Scale Source-Target': AF.fitDataRigidScaleEPDP,
'ICP Rigid+Scale Target-Source': AF.fitDataRigidScaleDPEP,
}
regMethodTransforms = {
'Correspondent Rigid': T.RigidTransformAboutPoint,
'Correspondent Rigid+Scale': T.RigidScaleTransformAboutPoint,
'Correspondent Affine': T.AffineTransform,
'ICP Rigid Source-Target': T.RigidTransformAboutPoint,
'ICP Rigid Target-Source': T.RigidTransformAboutPoint,
'ICP Rigid+Scale Source-Target': T.RigidScaleTransformAboutPoint,
'ICP Rigid+Scale Target-Source': T.RigidScaleTransformAboutPoint,
}
class PointWiseRigidRegistrationStep(WorkflowStepMountPoint):
"""
Step for rigid-body and scaling registration of 2 point clouds.
"""
def __init__(self, location):
super(PointWiseRigidRegistrationStep, self).__init__('Point-wise Rigid Registration', location)
self._configured = False # A step cannot be executed until it has been configured.
self._category = 'Registration'
# Add any other initialisation code here:
self._icon = QtGui.QImage(':/pointwiserigidregistrationstep/images/pointwiserigidregicon.png')
# Ports:
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
'http://physiomeproject.org/workflow/1.0/rdf-schema#pointcloud')) # source
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
'http://physiomeproject.org/workflow/1.0/rdf-schema#pointcloud')) # target
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#provides',
'http://physiomeproject.org/workflow/1.0/rdf-schema#pointcloud'))
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#provides',
'ju#geometrictransform'))
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#provides',
'python#float'))
self._config = {}
self._config['identifier'] = ''
self._config['UI Mode'] = True
self._config['Registration Method'] = 'Correspondent Affine'
self._config['Min Relative Error'] = '1e-3'
self._config['Points to Sample'] = '1000'
self._config['Init Trans'] = [0.0, 0.0, 0.0]
self._config['Init Rot'] = [0.0, 0.0, 0.0]
self._config['Init Scale'] = 1.0
self.sourceData = None
self.targetData = None
self.sourceDataAligned = None
self.transform = None
self.RMSE = -1.0
self._widget = None
def execute(self):
"""
Add your code here that will kick off the execution of the step.
Make sure you call the _doneExecution() method when finished. This method
may be connected up to a button in a widget for example.
"""
# Put your execute step code here before calling the '_doneExecution' method.
if self._config['UI Mode']:
self._widget = MayaviRegistrationViewerWidget(
self.sourceData, self.targetData, self._config,
self._register, sorted(regMethods.keys()),
self._manualTransform
)
self._widget._ui.acceptButton.clicked.connect(self._doneExecution)
self._widget._ui.abortButton.clicked.connect(self._abort)
self._widget._ui.resetButton.clicked.connect(self._reset)
self._setCurrentWidget(self._widget)
else:
self._register()
self._doneExecution()
def _makeX0(self):
t0 = self._config['Init Trans']
r0 = [np.deg2rad(x) for x in self._config['Init Rot']]
s0 = self._config['Init Scale']
# auto initialise translation
if t0 == [0, 0, 0]:
t0 = self.targetData.mean(0) - self.sourceData.mean(0)
print('t0, r0, s0:', t0, r0, s0)
reg = self._config['Registration Method']
if reg == 'Correspondent Affine':
return None
elif 'Rigid+Scale' in reg:
return np.hstack([t0, r0, s0])
elif 'Rigid' in reg:
return np.hstack([t0, r0])
else:
return None
def _manualTransform(self):
t0 = self._config['Init Trans']
r0 = [np.deg2rad(x) for x in self._config['Init Rot']]
s0 = self._config['Init Scale']
print('t0, r0, s0:', t0, r0, s0)
# apply transform to source points
T = np.hstack([t0, r0, s0])
self.sourceDataAligned = transform3D.transformRigidScale3DAboutCoM(self.sourceData, T)
self.transform = regMethodTransforms[self._config['Registration Method']](T)
self.transform.setP(self.sourceData.mean(0))
return self.transform, self.sourceDataAligned
def _register(self):
reg = regMethods[self._config['Registration Method']]
xtol = float(self._config['Min Relative Error'])
samples = int(self._config['Points to Sample'])
x0 = self._makeX0()
print('T0:', x0)
if x0 is None:
T, self.sourceDataAligned, \
(rmse0, self.RMSE) = reg(self.sourceData, self.targetData, xtol=xtol,
sample=samples, outputErrors=True)
else:
T, self.sourceDataAligned, \
(rmse0, self.RMSE) = reg(self.sourceData, self.targetData, t0=x0, xtol=xtol,
sample=samples, outputErrors=True)
self.transform = regMethodTransforms[self._config['Registration Method']](T)
if self._config['Registration Method'] != 'Correspondent Affine':
self.transform.setP(self.sourceData.mean(0))
print('Registered...')
print('RMSE:', self.RMSE)
print('T:', T)
return self.transform, self.sourceDataAligned, self.RMSE
def _abort(self):
# self._doneExecution()
raise RuntimeError('registration aborted')
def _reset(self):
self.sourceDataAligned = None
self.transform = None
self.RMSE = None
def setPortData(self, index, dataIn):
"""
Add your code here that will set the appropriate objects for this step.
The index is the index of the port in the port list. If there is only one
uses port for this step then the index can be ignored.
"""
if index == 0:
self.sourceData = np.array(dataIn, dtype=float) # ju#pointcloud
else:
self.targetData = np.array(dataIn, dtype=float) # ju#pointcloud
def getPortData(self, index):
"""
Add your code here that will return the appropriate objects for this step.
The index is the index of the port in the port list. If there is only one
provides port for this step then the index can be ignored.
"""
if index == 2:
portData2 = self.sourceDataAligned # ju#pointcloud
return portData2.tolist()
elif index == 3:
portData3 = self.transform # ju#geometrictransform
return portData3
else:
portData4 = self.RMSE # ju#float
return portData4
def configure(self):
"""
This function will be called when the configure icon on the step is
clicked. It is appropriate to display a configuration dialog at this
time. If the conditions for the configuration of this step are complete
then set:
self._configured = True
"""
dlg = ConfigureDialog(sorted(regMethods.keys()), self._main_window)
dlg.identifierOccursCount = self._identifierOccursCount
dlg.setConfig(self._config)
dlg.validate()
dlg.setModal(True)
if dlg.exec_():
self._config = dlg.getConfig()
self._configured = dlg.validate()
self._configuredObserver()
def getIdentifier(self):
"""
The identifier is a string that must be unique within a workflow.
"""
return self._config['identifier']
def setIdentifier(self, identifier):
"""
The framework will set the identifier for this step when it is loaded.
"""
self._config['identifier'] = identifier
def serialize(self):
"""
Add code to serialize this step to disk. Returns a json string for
mapclient to serialise.
"""
return json.dumps(self._config, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def deserialize(self, string):
"""
Add code to deserialize this step from disk. Parses a json string
given by mapclient
"""
self._config.update(json.loads(string))
self._parseLegacyParams()
d = ConfigureDialog(sorted(regMethods.keys()))
d.identifierOccursCount = self._identifierOccursCount
d.setConfig(self._config)
self._configured = d.validate()
def _parseLegacyParams(self):
"""
Turn strs of lists into lists in config
"""
def _parseStrList(s):
_s1 = s[1:-1]
return [float(x) for x in _s1.split(',')]
if isinstance(self._config['Init Trans'], str):
self._config['Init Trans'] = _parseStrList(self._config['Init Trans'])
if isinstance(self._config['Init Rot'], str):
self._config['Init Rot'] = _parseStrList(self._config['Init Rot'])
if isinstance(self._config['Init Scale'], str):
self._config['Init Scale'] = float(self._config['Init Scale'])
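# Illustrative sketch (not part of the plugin) of the initial-guess vector built by
# _makeX0 above: translation defaults to the difference of the point-cloud centroids,
# rotations are given in radians, and a scale factor is appended for the Rigid+Scale
# methods. The arrays below are hypothetical.
#
#   t0 = target_points.mean(0) - source_points.mean(0)
#   x0_rigid = np.hstack([t0, np.zeros(3)])              # [tx, ty, tz, rx, ry, rz]
#   x0_rigid_scale = np.hstack([t0, np.zeros(3), 1.0])   # ... plus an initial scale of 1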
|
the-stack_0_27882
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.ops import operations as P
from mindspore.common.api import _cell_graph_executor
from mindspore.nn.wrap.cell_wrapper import MicroBatchInterleaved
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul1 = P.MatMul().shard(strategy1)
self.matmul2 = P.MatMul().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul1(x, y)
out = self.matmul2(out, b)
return out
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = P.ReLU()
self.network = network
def construct(self, x, y, b):
predict = self.network(x, y, b)
return self.loss(predict)
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_cell_graph_executor.compile(net, x, y, b)
def test_micro_batch_interleaved():
"""
Feature: test MicroBatchInterleaved in auto parallel.
Description: net with MicroBatchInterleaved in semi auto parallel.
Expectation: compile done without error.
"""
context.set_context(mode=context.GRAPH_MODE)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
context.set_auto_parallel_context(device_num=8, global_rank=0, gradients_mean=True)
strategy1 = ((4, 2), (2, 1))
strategy2 = ((2, 4), (4, 1))
micro_batch_interleaved = 2
net = MicroBatchInterleaved(NetWithLoss(Net(strategy1, strategy2)), micro_batch_interleaved)
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32 * micro_batch_interleaved, 64]), dtype=ms.float32)
b = Tensor(np.ones([64 * micro_batch_interleaved, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
|
the-stack_0_27883
|
"""Test a ODENet on the MNIST dataset."""
# %%
import torch
from torch import nn
from sacred import Experiment
from pytorch_utils.sacred_utils import read_config, get_model_path, import_source
from training_functions import validate
# %%
ex = Experiment('test_mnist')
@ex.config
def input_config():
"""Parameters for sampling using the given model"""
run_dir = 'runs/ODEMnistClassification/8'
epoch = 'latest'
device = 'cpu'
min_end_time = 10
max_end_time = 100
tol = 1e-3
@ex.automain
def main(run_dir,
epoch,
device,
min_end_time,
max_end_time,
tol,
_log):
config = read_config(run_dir)
_log.info(f"Read config from {run_dir}")
model_ing = import_source(run_dir, "model_ingredient")
model = model_ing.make_model(**{**config['model'], 'device':device}, _log=_log)
path = get_model_path(run_dir, epoch)
if isinstance(model, nn.DataParallel):
model.module.load_state_dict(torch.load(path))
else:
model.load_state_dict(torch.load(path, map_location=device))
model = model.eval()
_log.info(f"Loaded state dict from {path}")
if hasattr(model, "odeblock"):
_log.info(f"Updated times to {[min_end_time, max_end_time]}")
model.odeblock.min_end_time = min_end_time
model.odeblock.max_end_time = max_end_time
model.odeblock.atol = tol
model.odeblock.rtol = tol
data_ing = import_source(run_dir, "data_ingredient")
dset, tl, vl, test_loader = data_ing.make_dataloaders(**{**config['dataset'],
'device':device},
_log=_log)
_log.info("Testing model...")
test_loss, test_acc = validate(model, test_loader)
_log.info(f"Test loss = {test_loss:.6f}, Test accuracy = {test_acc:.4f}")
|
the-stack_0_27885
|
""" # flake8: noqa
pythonanywhere
Usage:
pythonanywhere webapps create <domain_name> <python_version> [--api_key=<api_key>] [--user=<user>]
pythonanywhere webapps delete <domain_name> [--api_key=<api_key>] [--user=<user>]
pythonanywhere webapps reload <domain_name> [--api_key=<api_key>] [--user=<user>]
pythonanywhere webapps update <domain_name> [--python_version=<python_version>] [--virtualenv_path=<virtualenv_path>] [--api_key=<api_key>] [--user=<user>]
pythonanywhere static_mapping create <domain_name> <url> <path> [--api_key=<api_key>] [--user=<user>]
pythonanywhere static_mapping delete <domain_name> <static_id> [--api_key=<api_key>] [--user=<user>]
pythonanywhere static_mapping list <domain_name> [--api_key=<api_key>] [--user=<user>]
pythonanywhere static_mapping update <domain_name> <static_id> [--url=<url>] [--path=<path>] [--api_key=<api_key>] [--user=<user>]
pythonanywhere -h | --help
pythonanywhere --version
Options:
-h --help Show this screen.
--version Show version.
Help:
For help using this tool, please open an issue on the Github repository:
https://github.com/cfc603/pythonanywhere-cli
"""
from docopt import docopt
from . import __version__ as VERSION
from .commands import StaticFile, Webapps
COMMANDS = {
"webapps": "Webapps",
"static_mapping": "StaticFile",
}
def main():
options = docopt(__doc__, version=VERSION)
for command, command_class in COMMANDS.items():
if command in options and options[command]:
command_instance = globals()[command_class](options)
command_instance.run()
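# Example invocation (hypothetical domain and credentials), matching the docopt usage above:
#
#   pythonanywhere webapps reload mydomain.pythonanywhere.com --api_key=APIKEY --user=USERNAME
#
# docopt sets options["webapps"] to True, so main() looks up the Webapps class in
# globals() and calls its run() method with the parsed options.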
|
the-stack_0_27886
|
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <[email protected]>
# Copyright 2006 Vadim Gelfer <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import os
import shutil
import stat
from .i18n import _
from .node import (
hex,
nullhex,
nullid,
short,
)
from .pycompat import getattr
from . import (
bookmarks,
bundlerepo,
cacheutil,
cmdutil,
destutil,
discovery,
error,
exchange,
extensions,
httppeer,
localrepo,
lock,
logcmdutil,
logexchange,
merge as mergemod,
mergestate as mergestatemod,
narrowspec,
phases,
pycompat,
requirements,
scmutil,
sshpeer,
statichttprepo,
ui as uimod,
unionrepo,
url,
util,
verify as verifymod,
vfs as vfsmod,
)
from .utils import hashutil
release = lock.release
# shared features
sharedbookmarks = b'bookmarks'
def _local(path):
path = util.expandpath(util.urllocalpath(path))
try:
# we use os.stat() directly here instead of os.path.isfile()
# because the latter started returning `False` on invalid path
# exceptions starting in 3.8 and we care about handling
# invalid paths specially here.
st = os.stat(path)
isfile = stat.S_ISREG(st.st_mode)
# Python 2 raises TypeError, Python 3 ValueError.
except (TypeError, ValueError) as e:
raise error.Abort(
_(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
)
except OSError:
isfile = False
return isfile and bundlerepo or localrepo
def addbranchrevs(lrepo, other, branches, revs):
peer = other.peer() # a courtesy to callers using a localrepo for other
hashbranch, branches = branches
if not hashbranch and not branches:
x = revs or None
if revs:
y = revs[0]
else:
y = None
return x, y
if revs:
revs = list(revs)
else:
revs = []
if not peer.capable(b'branchmap'):
if branches:
raise error.Abort(_(b"remote branch lookup not supported"))
revs.append(hashbranch)
return revs, revs[0]
with peer.commandexecutor() as e:
branchmap = e.callcommand(b'branchmap', {}).result()
def primary(branch):
if branch == b'.':
if not lrepo:
raise error.Abort(_(b"dirstate branch not accessible"))
branch = lrepo.dirstate.branch()
if branch in branchmap:
revs.extend(hex(r) for r in reversed(branchmap[branch]))
return True
else:
return False
for branch in branches:
if not primary(branch):
raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
if hashbranch:
if not primary(hashbranch):
revs.append(hashbranch)
return revs, revs[0]
def parseurl(path, branches=None):
'''parse url#branch, returning (url, (branch, branches))'''
u = util.url(path)
branch = None
if u.fragment:
branch = u.fragment
u.fragment = None
return bytes(u), (branch, branches or [])
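# For example (illustrative): parseurl(b'https://example.org/repo#stable')
# returns (b'https://example.org/repo', (b'stable', [])).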
schemes = {
b'bundle': bundlerepo,
b'union': unionrepo,
b'file': _local,
b'http': httppeer,
b'https': httppeer,
b'ssh': sshpeer,
b'static-http': statichttprepo,
}
def _peerlookup(path):
u = util.url(path)
scheme = u.scheme or b'file'
thing = schemes.get(scheme) or schemes[b'file']
try:
return thing(path)
except TypeError:
# we can't test callable(thing) because 'thing' can be an unloaded
# module that implements __call__
if not util.safehasattr(thing, b'instance'):
raise
return thing
def islocal(repo):
'''return true if repo (or path pointing to repo) is local'''
if isinstance(repo, bytes):
try:
return _peerlookup(repo).islocal(repo)
except AttributeError:
return False
return repo.local()
def openpath(ui, path, sendaccept=True):
'''open path with open if local, url.open if remote'''
pathurl = util.url(path, parsequery=False, parsefragment=False)
if pathurl.islocal():
return util.posixfile(pathurl.localpath(), b'rb')
else:
return url.open(ui, path, sendaccept=sendaccept)
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
def _peerorrepo(
ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
"""return a repository object for the specified path"""
obj = _peerlookup(path).instance(
ui, path, create, intents=intents, createopts=createopts
)
ui = getattr(obj, "ui", ui)
for f in presetupfuncs or []:
f(ui, obj)
ui.log(b'extension', b'- executing reposetup hooks\n')
with util.timedcm('all reposetup') as allreposetupstats:
for name, module in extensions.extensions(ui):
ui.log(b'extension', b' - running reposetup for %s\n', name)
hook = getattr(module, 'reposetup', None)
if hook:
with util.timedcm('reposetup %r', name) as stats:
hook(ui, obj)
ui.log(
b'extension', b' > reposetup for %s took %s\n', name, stats
)
ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
if not obj.local():
for f in wirepeersetupfuncs:
f(ui, obj)
return obj
def repository(
ui,
path=b'',
create=False,
presetupfuncs=None,
intents=None,
createopts=None,
):
"""return a repository object for the specified path"""
peer = _peerorrepo(
ui,
path,
create,
presetupfuncs=presetupfuncs,
intents=intents,
createopts=createopts,
)
repo = peer.local()
if not repo:
raise error.Abort(
_(b"repository '%s' is not local") % (path or peer.url())
)
return repo.filtered(b'visible')
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
'''return a repository peer for the specified path'''
rui = remoteui(uiorrepo, opts)
return _peerorrepo(
rui, path, create, intents=intents, createopts=createopts
).peer()
def defaultdest(source):
"""return default destination of clone if none is given
>>> defaultdest(b'foo')
'foo'
>>> defaultdest(b'/foo/bar')
'bar'
>>> defaultdest(b'/')
''
>>> defaultdest(b'')
''
>>> defaultdest(b'http://example.org/')
''
>>> defaultdest(b'http://example.org/foo/')
'foo'
"""
path = util.url(source).path
if not path:
return b''
return os.path.basename(os.path.normpath(path))
def sharedreposource(repo):
"""Returns repository object for source repository of a shared repo.
If repo is not a shared repository, returns None.
"""
if repo.sharedpath == repo.path:
return None
if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
return repo.srcrepo
# the sharedpath always ends in the .hg; we want the path to the repo
source = repo.vfs.split(repo.sharedpath)[0]
srcurl, branches = parseurl(source)
srcrepo = repository(repo.ui, srcurl)
repo.srcrepo = srcrepo
return srcrepo
def share(
ui,
source,
dest=None,
update=True,
bookmarks=True,
defaultpath=None,
relative=False,
):
'''create a shared repository'''
if not islocal(source):
raise error.Abort(_(b'can only share local repositories'))
if not dest:
dest = defaultdest(source)
else:
dest = ui.expandpath(dest)
if isinstance(source, bytes):
origsource = ui.expandpath(source)
source, branches = parseurl(origsource)
srcrepo = repository(ui, source)
rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
else:
srcrepo = source.local()
checkout = None
shareditems = set()
if bookmarks:
shareditems.add(sharedbookmarks)
r = repository(
ui,
dest,
create=True,
createopts={
b'sharedrepo': srcrepo,
b'sharedrelative': relative,
b'shareditems': shareditems,
},
)
postshare(srcrepo, r, defaultpath=defaultpath)
r = repository(ui, dest)
_postshareupdate(r, update, checkout=checkout)
return r
def _prependsourcehgrc(repo):
"""copies the source repo config and prepend it in current repo .hg/hgrc
on unshare. This is only done if the share was perfomed using share safe
method where we share config of source in shares"""
srcvfs = vfsmod.vfs(repo.sharedpath)
dstvfs = vfsmod.vfs(repo.path)
if not srcvfs.exists(b'hgrc'):
return
currentconfig = b''
if dstvfs.exists(b'hgrc'):
currentconfig = dstvfs.read(b'hgrc')
with dstvfs(b'hgrc', b'wb') as fp:
sourceconfig = srcvfs.read(b'hgrc')
fp.write(b"# Config copied from shared source\n")
fp.write(sourceconfig)
fp.write(b'\n')
fp.write(currentconfig)
def unshare(ui, repo):
"""convert a shared repository to a normal one
Copy the store data to the repo and remove the sharedpath data.
Returns a new repository object representing the unshared repository.
The passed repository object is not usable after this function is
called.
"""
with repo.lock():
# we use locks here because if we race with commit, we
# can end up with extra data in the cloned revlogs that's
# not pointed to by changesets, thus causing verify to
# fail
destlock = copystore(ui, repo, repo.path)
with destlock or util.nullcontextmanager():
if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
# we were sharing .hg/hgrc of the share source with the current
# repo. We need to copy that while unsharing otherwise it can
# disable hooks and other checks
_prependsourcehgrc(repo)
sharefile = repo.vfs.join(b'sharedpath')
util.rename(sharefile, sharefile + b'.old')
repo.requirements.discard(requirements.SHARED_REQUIREMENT)
repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
scmutil.writereporequirements(repo)
# Removing share changes some fundamental properties of the repo instance.
# So we instantiate a new repo object and operate on it rather than
# try to keep the existing repo usable.
newrepo = repository(repo.baseui, repo.root, create=False)
# TODO: figure out how to access subrepos that exist, but were previously
# removed from .hgsub
c = newrepo[b'.']
subs = c.substate
for s in sorted(subs):
c.sub(s).unshare()
localrepo.poisonrepository(repo)
return newrepo
def postshare(sourcerepo, destrepo, defaultpath=None):
"""Called after a new shared repo is created.
The new repo only has a requirements file and pointer to the source.
This function configures additional shared data.
Extensions can wrap this function and write additional entries to
destrepo/.hg/shared to indicate additional pieces of data to be shared.
"""
default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
if default:
template = b'[paths]\ndefault = %s\n'
destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
with destrepo.wlock():
narrowspec.copytoworkingcopy(destrepo)
def _postshareupdate(repo, update, checkout=None):
"""Maybe perform a working directory update after a shared repo is created.
``update`` can be a boolean or a revision to update to.
"""
if not update:
return
repo.ui.status(_(b"updating working directory\n"))
if update is not True:
checkout = update
for test in (checkout, b'default', b'tip'):
if test is None:
continue
try:
uprev = repo.lookup(test)
break
except error.RepoLookupError:
continue
_update(repo, uprev)
def copystore(ui, srcrepo, destpath):
"""copy files from store of srcrepo in destpath
returns destlock
"""
destlock = None
try:
hardlink = None
topic = _(b'linking') if hardlink else _(b'copying')
with ui.makeprogress(topic, unit=_(b'files')) as progress:
num = 0
srcpublishing = srcrepo.publishing()
srcvfs = vfsmod.vfs(srcrepo.sharedpath)
dstvfs = vfsmod.vfs(destpath)
for f in srcrepo.store.copylist():
if srcpublishing and f.endswith(b'phaseroots'):
continue
dstbase = os.path.dirname(f)
if dstbase and not dstvfs.exists(dstbase):
dstvfs.mkdir(dstbase)
if srcvfs.exists(f):
if f.endswith(b'data'):
# 'dstbase' may be empty (e.g. revlog format 0)
lockfile = os.path.join(dstbase, b"lock")
# lock to avoid premature writing to the target
destlock = lock.lock(dstvfs, lockfile)
hardlink, n = util.copyfiles(
srcvfs.join(f), dstvfs.join(f), hardlink, progress
)
num += n
if hardlink:
ui.debug(b"linked %d files\n" % num)
else:
ui.debug(b"copied %d files\n" % num)
return destlock
except: # re-raises
release(destlock)
raise
def clonewithshare(
ui,
peeropts,
sharepath,
source,
srcpeer,
dest,
pull=False,
rev=None,
update=True,
stream=False,
):
"""Perform a clone using a shared repo.
The store for the repository will be located at <sharepath>/.hg. The
specified revisions will be cloned or pulled from "source". A shared repo
will be created at "dest" and a working copy will be created if "update" is
True.
"""
revs = None
if rev:
if not srcpeer.capable(b'lookup'):
raise error.Abort(
_(
b"src repository does not support "
b"revision lookup and so doesn't "
b"support clone by revision"
)
)
# TODO this is batchable.
remoterevs = []
for r in rev:
with srcpeer.commandexecutor() as e:
remoterevs.append(
e.callcommand(
b'lookup',
{
b'key': r,
},
).result()
)
revs = remoterevs
# Obtain a lock before checking for or cloning the pooled repo otherwise
# 2 clients may race creating or populating it.
pooldir = os.path.dirname(sharepath)
# lock class requires the directory to exist.
try:
util.makedir(pooldir, False)
except OSError as e:
if e.errno != errno.EEXIST:
raise
poolvfs = vfsmod.vfs(pooldir)
basename = os.path.basename(sharepath)
with lock.lock(poolvfs, b'%s.lock' % basename):
if os.path.exists(sharepath):
ui.status(
_(b'(sharing from existing pooled repository %s)\n') % basename
)
else:
ui.status(
_(b'(sharing from new pooled repository %s)\n') % basename
)
# Always use pull mode because hardlinks in share mode don't work
# well. Never update because working copies aren't necessary in
# share mode.
clone(
ui,
peeropts,
source,
dest=sharepath,
pull=True,
revs=rev,
update=False,
stream=stream,
)
# Resolve the value to put in [paths] section for the source.
if islocal(source):
defaultpath = os.path.abspath(util.urllocalpath(source))
else:
defaultpath = source
sharerepo = repository(ui, path=sharepath)
destrepo = share(
ui,
sharerepo,
dest=dest,
update=False,
bookmarks=False,
defaultpath=defaultpath,
)
# We need to perform a pull against the dest repo to fetch bookmarks
# and other non-store data that isn't shared by default. In the case of
# non-existing shared repo, this means we pull from the remote twice. This
# is a bit weird. But at the time it was implemented, there wasn't an easy
# way to pull just non-changegroup data.
exchange.pull(destrepo, srcpeer, heads=revs)
_postshareupdate(destrepo, update)
return srcpeer, peer(ui, peeropts, dest)
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
"""copy a cache from srcrepo to destcachedir (if it exists)"""
srcfname = srcrepo.cachevfs.join(fname)
dstfname = os.path.join(dstcachedir, fname)
if os.path.exists(srcfname):
if not os.path.exists(dstcachedir):
os.mkdir(dstcachedir)
util.copyfile(srcfname, dstfname)
def clone(
ui,
peeropts,
source,
dest=None,
pull=False,
revs=None,
update=True,
stream=False,
branch=None,
shareopts=None,
storeincludepats=None,
storeexcludepats=None,
depth=None,
):
"""Make a copy of an existing repository.
Create a copy of an existing repository in a new directory. The
source and destination are URLs, as passed to the repository
function. Returns a pair of repository peers, the source and
newly created destination.
The location of the source is added to the new repository's
.hg/hgrc file, as the default to be used for future pulls and
pushes.
If an exception is raised, the partly cloned/updated destination
repository will be deleted.
Arguments:
source: repository object or URL
dest: URL of destination repository to create (defaults to base
name of source repository)
pull: always pull from source repository, even in local case or if the
server prefers streaming
stream: stream raw data uncompressed from repository (fast over
LAN, slow over WAN)
revs: revision to clone up to (implies pull=True)
update: update working directory after clone completes, if
destination is local repository (True means update to default rev,
anything else is treated as a revision)
branch: branches to clone
shareopts: dict of options to control auto sharing behavior. The "pool" key
activates auto sharing mode and defines the directory for stores. The
"mode" key determines how to construct the directory name of the shared
repository. "identity" means the name is derived from the node of the first
changeset in the repository. "remote" means the name is derived from the
remote's path/URL. Defaults to "identity."
storeincludepats and storeexcludepats: sets of file patterns to include and
exclude in the repository copy, respectively. If not defined, all files
will be included (a "full" clone). Otherwise a "narrow" clone containing
only the requested files will be performed. If ``storeincludepats`` is not
defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
``path:.``. If both are empty sets, no files will be cloned.
"""
if isinstance(source, bytes):
origsource = ui.expandpath(source)
source, branches = parseurl(origsource, branch)
srcpeer = peer(ui, peeropts, source)
else:
srcpeer = source.peer() # in case we were called with a localrepo
branches = (None, branch or [])
origsource = source = srcpeer.url()
revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
if dest is None:
dest = defaultdest(source)
if dest:
ui.status(_(b"destination directory: %s\n") % dest)
else:
dest = ui.expandpath(dest)
dest = util.urllocalpath(dest)
source = util.urllocalpath(source)
if not dest:
raise error.InputError(_(b"empty destination path is not valid"))
destvfs = vfsmod.vfs(dest, expandpath=True)
if destvfs.lexists():
if not destvfs.isdir():
raise error.InputError(_(b"destination '%s' already exists") % dest)
elif destvfs.listdir():
raise error.InputError(_(b"destination '%s' is not empty") % dest)
createopts = {}
narrow = False
if storeincludepats is not None:
narrowspec.validatepatterns(storeincludepats)
narrow = True
if storeexcludepats is not None:
narrowspec.validatepatterns(storeexcludepats)
narrow = True
if narrow:
# Include everything by default if only exclusion patterns defined.
if storeexcludepats and not storeincludepats:
storeincludepats = {b'path:.'}
createopts[b'narrowfiles'] = True
if depth:
createopts[b'shallowfilestore'] = True
if srcpeer.capable(b'lfs-serve'):
# Repository creation honors the config if it disabled the extension, so
# we can't just announce that lfs will be enabled. This check avoids
# saying that lfs will be enabled, and then saying it's an unknown
# feature. The lfs creation option is set in either case so that a
# requirement is added. If the extension is explicitly disabled but the
# requirement is set, the clone aborts early, before transferring any
# data.
createopts[b'lfs'] = True
if extensions.disabled_help(b'lfs'):
ui.status(
_(
b'(remote is using large file support (lfs), but it is '
b'explicitly disabled in the local configuration)\n'
)
)
else:
ui.status(
_(
b'(remote is using large file support (lfs); lfs will '
b'be enabled for this repository)\n'
)
)
shareopts = shareopts or {}
sharepool = shareopts.get(b'pool')
sharenamemode = shareopts.get(b'mode')
if sharepool and islocal(dest):
sharepath = None
if sharenamemode == b'identity':
# Resolve the name from the initial changeset in the remote
# repository. This returns nullid when the remote is empty. It
# raises RepoLookupError if revision 0 is filtered or otherwise
# not available. If we fail to resolve, sharing is not enabled.
try:
with srcpeer.commandexecutor() as e:
rootnode = e.callcommand(
b'lookup',
{
b'key': b'0',
},
).result()
if rootnode != nullid:
sharepath = os.path.join(sharepool, hex(rootnode))
else:
ui.status(
_(
b'(not using pooled storage: '
b'remote appears to be empty)\n'
)
)
except error.RepoLookupError:
ui.status(
_(
b'(not using pooled storage: '
b'unable to resolve identity of remote)\n'
)
)
elif sharenamemode == b'remote':
sharepath = os.path.join(
sharepool, hex(hashutil.sha1(source).digest())
)
else:
raise error.Abort(
_(b'unknown share naming mode: %s') % sharenamemode
)
# TODO this is a somewhat arbitrary restriction.
if narrow:
ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
sharepath = None
if sharepath:
return clonewithshare(
ui,
peeropts,
sharepath,
source,
srcpeer,
dest,
pull=pull,
rev=revs,
update=update,
stream=stream,
)
srclock = destlock = cleandir = None
srcrepo = srcpeer.local()
try:
abspath = origsource
if islocal(origsource):
abspath = os.path.abspath(util.urllocalpath(origsource))
if islocal(dest):
cleandir = dest
copy = False
if (
srcrepo
and srcrepo.cancopy()
and islocal(dest)
and not phases.hassecret(srcrepo)
):
copy = not pull and not revs
# TODO this is a somewhat arbitrary restriction.
if narrow:
copy = False
if copy:
try:
# we use a lock here because if we race with commit, we
# can end up with extra data in the cloned revlogs that's
# not pointed to by changesets, thus causing verify to
# fail
srclock = srcrepo.lock(wait=False)
except error.LockError:
copy = False
if copy:
srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
hgdir = os.path.realpath(os.path.join(dest, b".hg"))
if not os.path.exists(dest):
util.makedirs(dest)
else:
# only clean up directories we create ourselves
cleandir = hgdir
try:
destpath = hgdir
util.makedir(destpath, notindexed=True)
except OSError as inst:
if inst.errno == errno.EEXIST:
cleandir = None
raise error.Abort(
_(b"destination '%s' already exists") % dest
)
raise
destlock = copystore(ui, srcrepo, destpath)
# copy bookmarks over
srcbookmarks = srcrepo.vfs.join(b'bookmarks')
dstbookmarks = os.path.join(destpath, b'bookmarks')
if os.path.exists(srcbookmarks):
util.copyfile(srcbookmarks, dstbookmarks)
dstcachedir = os.path.join(destpath, b'cache')
for cache in cacheutil.cachetocopy(srcrepo):
_copycache(srcrepo, dstcachedir, cache)
# we need to re-init the repo after manually copying the data
# into it
destpeer = peer(srcrepo, peeropts, dest)
srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
else:
try:
# only pass ui when no srcrepo
destpeer = peer(
srcrepo or ui,
peeropts,
dest,
create=True,
createopts=createopts,
)
except OSError as inst:
if inst.errno == errno.EEXIST:
cleandir = None
raise error.Abort(
_(b"destination '%s' already exists") % dest
)
raise
if revs:
if not srcpeer.capable(b'lookup'):
raise error.Abort(
_(
b"src repository does not support "
b"revision lookup and so doesn't "
b"support clone by revision"
)
)
# TODO this is batchable.
remoterevs = []
for rev in revs:
with srcpeer.commandexecutor() as e:
remoterevs.append(
e.callcommand(
b'lookup',
{
b'key': rev,
},
).result()
)
revs = remoterevs
checkout = revs[0]
else:
revs = None
local = destpeer.local()
if local:
if narrow:
with local.wlock(), local.lock():
local.setnarrowpats(storeincludepats, storeexcludepats)
narrowspec.copytoworkingcopy(local)
u = util.url(abspath)
defaulturl = bytes(u)
local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
if not stream:
if pull:
stream = False
else:
stream = None
# internal config: ui.quietbookmarkmove
overrides = {(b'ui', b'quietbookmarkmove'): True}
with local.ui.configoverride(overrides, b'clone'):
exchange.pull(
local,
srcpeer,
revs,
streamclonerequested=stream,
includepats=storeincludepats,
excludepats=storeexcludepats,
depth=depth,
)
elif srcrepo:
# TODO lift restriction once exchange.push() accepts narrow
# push.
if narrow:
raise error.Abort(
_(
b'narrow clone not available for '
b'remote destinations'
)
)
exchange.push(
srcrepo,
destpeer,
revs=revs,
bookmarks=srcrepo._bookmarks.keys(),
)
else:
raise error.Abort(
_(b"clone from remote to remote not supported")
)
cleandir = None
destrepo = destpeer.local()
if destrepo:
template = uimod.samplehgrcs[b'cloned']
u = util.url(abspath)
u.passwd = None
defaulturl = bytes(u)
destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
if ui.configbool(b'experimental', b'remotenames'):
logexchange.pullremotenames(destrepo, srcpeer)
if update:
if update is not True:
with srcpeer.commandexecutor() as e:
checkout = e.callcommand(
b'lookup',
{
b'key': update,
},
).result()
uprev = None
status = None
if checkout is not None:
# Some extensions (at least hg-git and hg-subversion) have
# a peer.lookup() implementation that returns a name instead
# of a nodeid. We work around it here until we've figured
# out a better solution.
if len(checkout) == 20 and checkout in destrepo:
uprev = checkout
elif scmutil.isrevsymbol(destrepo, checkout):
uprev = scmutil.revsymbol(destrepo, checkout).node()
else:
if update is not True:
try:
uprev = destrepo.lookup(update)
except error.RepoLookupError:
pass
if uprev is None:
try:
if destrepo._activebookmark:
uprev = destrepo.lookup(destrepo._activebookmark)
update = destrepo._activebookmark
else:
uprev = destrepo._bookmarks[b'@']
update = b'@'
bn = destrepo[uprev].branch()
if bn == b'default':
status = _(b"updating to bookmark %s\n" % update)
else:
status = (
_(b"updating to bookmark %s on branch %s\n")
) % (update, bn)
except KeyError:
try:
uprev = destrepo.branchtip(b'default')
except error.RepoLookupError:
uprev = destrepo.lookup(b'tip')
if not status:
bn = destrepo[uprev].branch()
status = _(b"updating to branch %s\n") % bn
destrepo.ui.status(status)
_update(destrepo, uprev)
if update in destrepo._bookmarks:
bookmarks.activate(destrepo, update)
if destlock is not None:
release(destlock)
# here is a tiny window where someone could end up writing to the
# repository before the caches are sure to be warm. This is "fine"
# as the only "bad" outcome would be some slowness. That potential
# slowness already affects readers.
with destrepo.lock():
destrepo.updatecaches(full=True)
finally:
release(srclock, destlock)
if cleandir is not None:
shutil.rmtree(cleandir, True)
if srcpeer is not None:
srcpeer.close()
return srcpeer, destpeer
def _showstats(repo, stats, quietempty=False):
if quietempty and stats.isempty():
return
repo.ui.status(
_(
b"%d files updated, %d files merged, "
b"%d files removed, %d files unresolved\n"
)
% (
stats.updatedcount,
stats.mergedcount,
stats.removedcount,
stats.unresolvedcount,
)
)
def updaterepo(repo, node, overwrite, updatecheck=None):
"""Update the working directory to node.
When overwrite is set, changes are clobbered, merged else
returns stats (see pydoc mercurial.merge.applyupdates)"""
repo.ui.deprecwarn(
b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
b'5.7',
)
return mergemod._update(
repo,
node,
branchmerge=False,
force=overwrite,
labels=[b'working copy', b'destination'],
updatecheck=updatecheck,
)
def update(repo, node, quietempty=False, updatecheck=None):
"""update the working directory to node"""
stats = mergemod.update(repo[node], updatecheck=updatecheck)
_showstats(repo, stats, quietempty)
if stats.unresolvedcount:
repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
return stats.unresolvedcount > 0
# naming conflict in clone()
_update = update
def clean(repo, node, show_stats=True, quietempty=False):
"""forcibly switch the working directory to node, clobbering changes"""
stats = mergemod.clean_update(repo[node])
assert stats.unresolvedcount == 0
if show_stats:
_showstats(repo, stats, quietempty)
# naming conflict in updatetotally()
_clean = clean
_VALID_UPDATECHECKS = {
mergemod.UPDATECHECK_ABORT,
mergemod.UPDATECHECK_NONE,
mergemod.UPDATECHECK_LINEAR,
mergemod.UPDATECHECK_NO_CONFLICT,
}
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
"""Update the working directory with extra care for non-file components
This takes care of non-file components below:
:bookmark: might be advanced or (in)activated
This takes arguments below:
:checkout: to which revision the working directory is updated
:brev: a name, which might be a bookmark to be activated after updating
:clean: whether changes in the working directory can be discarded
:updatecheck: how to deal with a dirty working directory
Valid values for updatecheck are the UPDATECHECK_* constants
defined in the merge module. Passing `None` will result in using the
configured default.
* ABORT: abort if the working directory is dirty
* NONE: don't check (merge working directory changes into destination)
* LINEAR: check that update is linear before merging working directory
changes into destination
* NO_CONFLICT: check that the update does not result in file merges
This returns whether conflict is detected at updating or not.
"""
if updatecheck is None:
updatecheck = ui.config(b'commands', b'update.check')
if updatecheck not in _VALID_UPDATECHECKS:
# If not configured, or invalid value configured
updatecheck = mergemod.UPDATECHECK_LINEAR
if updatecheck not in _VALID_UPDATECHECKS:
raise ValueError(
r'Invalid updatecheck value %r (can accept %r)'
% (updatecheck, _VALID_UPDATECHECKS)
)
with repo.wlock():
movemarkfrom = None
warndest = False
if checkout is None:
updata = destutil.destupdate(repo, clean=clean)
checkout, movemarkfrom, brev = updata
warndest = True
if clean:
ret = _clean(repo, checkout)
else:
if updatecheck == mergemod.UPDATECHECK_ABORT:
cmdutil.bailifchanged(repo, merge=False)
updatecheck = mergemod.UPDATECHECK_NONE
ret = _update(repo, checkout, updatecheck=updatecheck)
if not ret and movemarkfrom:
if movemarkfrom == repo[b'.'].node():
pass # no-op update
elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
b = ui.label(repo._activebookmark, b'bookmarks.active')
ui.status(_(b"updating bookmark %s\n") % b)
else:
# this can happen with a non-linear update
b = ui.label(repo._activebookmark, b'bookmarks')
ui.status(_(b"(leaving bookmark %s)\n") % b)
bookmarks.deactivate(repo)
elif brev in repo._bookmarks:
if brev != repo._activebookmark:
b = ui.label(brev, b'bookmarks.active')
ui.status(_(b"(activating bookmark %s)\n") % b)
bookmarks.activate(repo, brev)
elif brev:
if repo._activebookmark:
b = ui.label(repo._activebookmark, b'bookmarks')
ui.status(_(b"(leaving bookmark %s)\n") % b)
bookmarks.deactivate(repo)
if warndest:
destutil.statusotherdests(ui, repo)
return ret
def merge(
ctx,
force=False,
remind=True,
labels=None,
):
"""Branch merge with node, resolving changes. Return true if any
unresolved conflicts."""
repo = ctx.repo()
stats = mergemod.merge(ctx, force=force, labels=labels)
_showstats(repo, stats)
if stats.unresolvedcount:
repo.ui.status(
_(
b"use 'hg resolve' to retry unresolved file merges "
b"or 'hg merge --abort' to abandon\n"
)
)
elif remind:
repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
return stats.unresolvedcount > 0
def abortmerge(ui, repo):
ms = mergestatemod.mergestate.read(repo)
if ms.active():
# there were conflicts
node = ms.localctx.hex()
else:
# there were no conflicts, mergestate was not stored
node = repo[b'.'].hex()
repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
stats = mergemod.clean_update(repo[node])
assert stats.unresolvedcount == 0
_showstats(repo, stats)
def _incoming(
displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
"""
Helper for incoming / gincoming.
displaychlist gets called with
(remoterepo, incomingchangesetlist, displayer) parameters,
and is supposed to contain only code that can't be unified.
"""
source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
other = peer(repo, opts, source)
ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
if revs:
revs = [other.lookup(rev) for rev in revs]
other, chlist, cleanupfn = bundlerepo.getremotechanges(
ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
)
try:
if not chlist:
ui.status(_(b"no changes found\n"))
return subreporecurse()
ui.pager(b'incoming')
displayer = logcmdutil.changesetdisplayer(
ui, other, opts, buffered=buffered
)
displaychlist(other, chlist, displayer)
displayer.close()
finally:
cleanupfn()
subreporecurse()
return 0 # exit code is zero since we found incoming changes
def incoming(ui, repo, source, opts):
def subreporecurse():
ret = 1
if opts.get(b'subrepos'):
ctx = repo[None]
for subpath in sorted(ctx.substate):
sub = ctx.sub(subpath)
ret = min(ret, sub.incoming(ui, source, opts))
return ret
def display(other, chlist, displayer):
limit = logcmdutil.getlimit(opts)
if opts.get(b'newest_first'):
chlist.reverse()
count = 0
for n in chlist:
if limit is not None and count >= limit:
break
parents = [p for p in other.changelog.parents(n) if p != nullid]
if opts.get(b'no_merges') and len(parents) == 2:
continue
count += 1
displayer.show(other[n])
return _incoming(display, subreporecurse, ui, repo, source, opts)
def _outgoing(ui, repo, dest, opts):
path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
if not path:
raise error.Abort(
_(b'default repository not configured!'),
hint=_(b"see 'hg help config.paths'"),
)
dest = path.pushloc or path.loc
branches = path.branch, opts.get(b'branch') or []
ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
if revs:
revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
other = peer(repo, opts, dest)
outgoing = discovery.findcommonoutgoing(
repo, other, revs, force=opts.get(b'force')
)
o = outgoing.missing
if not o:
scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
return o, other
def outgoing(ui, repo, dest, opts):
def recurse():
ret = 1
if opts.get(b'subrepos'):
ctx = repo[None]
for subpath in sorted(ctx.substate):
sub = ctx.sub(subpath)
ret = min(ret, sub.outgoing(ui, dest, opts))
return ret
limit = logcmdutil.getlimit(opts)
o, other = _outgoing(ui, repo, dest, opts)
if not o:
cmdutil.outgoinghooks(ui, repo, other, opts, o)
return recurse()
if opts.get(b'newest_first'):
o.reverse()
ui.pager(b'outgoing')
displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
count = 0
for n in o:
if limit is not None and count >= limit:
break
parents = [p for p in repo.changelog.parents(n) if p != nullid]
if opts.get(b'no_merges') and len(parents) == 2:
continue
count += 1
displayer.show(repo[n])
displayer.close()
cmdutil.outgoinghooks(ui, repo, other, opts, o)
recurse()
return 0 # exit code is zero since we found outgoing changes
def verify(repo, level=None):
"""verify the consistency of a repository"""
ret = verifymod.verify(repo, level=level)
# Broken subrepo references in hidden csets don't seem worth worrying about,
# since they can't be pushed/pulled, and --hidden can be used if they are a
# concern.
# pathto() is needed for -R case
revs = repo.revs(
b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
)
if revs:
repo.ui.status(_(b'checking subrepo links\n'))
for rev in revs:
ctx = repo[rev]
try:
for subpath in ctx.substate:
try:
ret = (
ctx.sub(subpath, allowcreate=False).verify() or ret
)
except error.RepoError as e:
repo.ui.warn(b'%d: %s\n' % (rev, e))
except Exception:
repo.ui.warn(
_(b'.hgsubstate is corrupt in revision %s\n')
% short(ctx.node())
)
return ret
def remoteui(src, opts):
"""build a remote ui from ui or repo and opts"""
if util.safehasattr(src, b'baseui'): # looks like a repository
dst = src.baseui.copy() # drop repo-specific config
src = src.ui # copy target options from repo
else: # assume it's a global ui object
dst = src.copy() # keep all global options
# copy ssh-specific options
for o in b'ssh', b'remotecmd':
v = opts.get(o) or src.config(b'ui', o)
if v:
dst.setconfig(b"ui", o, v, b'copied')
# copy bundle-specific options
r = src.config(b'bundle', b'mainreporoot')
if r:
dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
# copy selected local settings to the remote ui
for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
for key, val in src.configitems(sect):
dst.setconfig(sect, key, val, b'copied')
v = src.config(b'web', b'cacerts')
if v:
dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
return dst
# Files of interest
# Used to check whether the repository has changed, by looking at the mtime
# and size of these files.
foi = [
(b'spath', b'00changelog.i'),
(b'spath', b'phaseroots'), # ! phase can change content at the same size
(b'spath', b'obsstore'),
(b'path', b'bookmarks'), # ! bookmark can change content at the same size
]
class cachedlocalrepo(object):
"""Holds a localrepository that can be cached and reused."""
def __init__(self, repo):
"""Create a new cached repo from an existing repo.
We assume the passed in repo was recently created. If the
repo has changed between when it was created and when it was
turned into a cache, it may not refresh properly.
"""
assert isinstance(repo, localrepo.localrepository)
self._repo = repo
self._state, self.mtime = self._repostate()
self._filtername = repo.filtername
def fetch(self):
"""Refresh (if necessary) and return a repository.
If the cached instance is out of date, it will be recreated
automatically and returned.
Returns a tuple of the repo and a boolean indicating whether a new
repo instance was created.
"""
# We compare the mtimes and sizes of some well-known files to
# determine if the repo changed. This is not precise, as mtimes
# are susceptible to clock skew and imprecise filesystems and
# file content can change while maintaining the same size.
state, mtime = self._repostate()
if state == self._state:
return self._repo, False
repo = repository(self._repo.baseui, self._repo.url())
if self._filtername:
self._repo = repo.filtered(self._filtername)
else:
self._repo = repo.unfiltered()
self._state = state
self.mtime = mtime
return self._repo, True
def _repostate(self):
state = []
maxmtime = -1
for attr, fname in foi:
prefix = getattr(self._repo, attr)
p = os.path.join(prefix, fname)
try:
st = os.stat(p)
except OSError:
st = os.stat(prefix)
state.append((st[stat.ST_MTIME], st.st_size))
maxmtime = max(maxmtime, st[stat.ST_MTIME])
return tuple(state), maxmtime
def copy(self):
"""Obtain a copy of this class instance.
A new localrepository instance is obtained. The new instance should be
completely independent of the original.
"""
repo = repository(self._repo.baseui, self._repo.origroot)
if self._filtername:
repo = repo.filtered(self._filtername)
else:
repo = repo.unfiltered()
c = cachedlocalrepo(repo)
c._state = self._state
c.mtime = self.mtime
return c
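# Editor's illustrative sketch (not part of the original module): a long-running
# consumer such as a web frontend would typically hold one cachedlocalrepo per
# served repository and call fetch() on each request. Names below are placeholders.
#
#     cached = cachedlocalrepo(repository(baseui, b'/path/to/repo'))
#     def handle_request(req):                 # hypothetical request handler
#         repo, fresh = cached.fetch()         # re-reads only if the foi files changed
#         if fresh:
#             req.log(b'repository changed on disk; using a new instance')
#         return respond_with(repo, req)       # hypothetical response builder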
|
the-stack_0_27888
|
import json
from typing import List, Optional, Tuple
from eth_typing.evm import HexAddress
from eth_utils import to_checksum_address
from web3 import Web3
from web3.contract import Contract
from raiden_contracts.constants import (
CONTRACT_MONITORING_SERVICE,
CONTRACT_ONE_TO_N,
CONTRACT_SECRET_REGISTRY,
CONTRACT_SERVICE_REGISTRY,
CONTRACT_TOKEN_NETWORK_REGISTRY,
CONTRACT_USER_DEPOSIT,
DeploymentModule,
)
from raiden_contracts.contract_manager import (
ContractManager,
DeployedContracts,
contracts_deployed_path,
contracts_precompiled_path,
get_contracts_deployment_info,
)
from raiden_contracts.utils.type_aliases import ChainID
class ContractVerifier:
def __init__(self, web3: Web3, contracts_version: Optional[str] = None):
self.web3 = web3
self.contracts_version = contracts_version
self.precompiled_path = contracts_precompiled_path(self.contracts_version)
self.contract_manager = ContractManager(self.precompiled_path)
def verify_deployed_contracts_in_filesystem(self) -> None:
chain_id = ChainID(int(self.web3.version.network))
deployment_data = get_contracts_deployment_info(
chain_id=chain_id,
version=self.contract_manager.contracts_version,
module=DeploymentModule.RAIDEN,
)
deployment_file_path = contracts_deployed_path(
chain_id=chain_id, version=self.contract_manager.contracts_version
)
if deployment_data is None:
raise RuntimeError(f"Deployment data cannot be found at {deployment_file_path}")
if self.verify_deployment_data(deployment_data):
print(
f"Deployment info from {deployment_file_path} has been verified "
"and it is CORRECT."
)
def verify_deployed_service_contracts_in_filesystem(
self,
token_address: HexAddress,
user_deposit_whole_balance_limit: int,
token_network_registry_address: HexAddress,
) -> None:
chain_id = ChainID(int(self.web3.version.network))
deployment_data = get_contracts_deployment_info(
chain_id=chain_id,
version=self.contract_manager.contracts_version,
module=DeploymentModule.SERVICES,
)
deployment_file_path = contracts_deployed_path(
chain_id=chain_id, version=self.contract_manager.contracts_version, services=True
)
if deployment_data is None:
raise RuntimeError(f"Deployment data cannot be found at {deployment_file_path}")
if self.verify_service_contracts_deployment_data(
token_address=token_address,
user_deposit_whole_balance_limit=user_deposit_whole_balance_limit,
deployed_contracts_info=deployment_data,
token_network_registry_address=token_network_registry_address,
):
print(
f"Deployment info from {deployment_file_path} has been verified "
"and it is CORRECT."
)
def store_and_verify_deployment_info_raiden(
self, deployed_contracts_info: DeployedContracts
) -> None:
self._store_deployment_info(deployment_info=deployed_contracts_info, services=False)
self.verify_deployed_contracts_in_filesystem()
def store_and_verify_deployment_info_services(
self,
deployed_contracts_info: DeployedContracts,
token_address: HexAddress,
user_deposit_whole_balance_limit: int,
token_network_registry_address: HexAddress,
) -> None:
self._store_deployment_info(services=True, deployment_info=deployed_contracts_info)
self.verify_deployed_service_contracts_in_filesystem(
token_address=token_address,
user_deposit_whole_balance_limit=user_deposit_whole_balance_limit,
token_network_registry_address=token_network_registry_address,
)
def _store_deployment_info(self, services: bool, deployment_info: DeployedContracts) -> None:
deployment_file_path = contracts_deployed_path(
chain_id=ChainID(int(self.web3.version.network)),
version=self.contracts_version,
services=services,
)
with deployment_file_path.open(mode="w") as target_file:
target_file.write(json.dumps(deployment_info, indent=2))
print(
f'Deployment information for chain id = {deployment_info["chain_id"]} '
f" has been updated at {deployment_file_path}."
)
def verify_deployment_data(self, deployment_data: DeployedContracts) -> bool:
chain_id = int(self.web3.version.network)
if self.contract_manager.contracts_version != deployment_data["contracts_version"]:
raise RuntimeError("Version string mismatch.")
if chain_id != deployment_data["chain_id"]:
raise RuntimeError("chain id mismatch.")
secret_registry, _ = self._verify_deployed_contract(
deployment_data=deployment_data, contract_name=CONTRACT_SECRET_REGISTRY
)
token_network_registry, constructor_arguments = self._verify_deployed_contract(
deployment_data=deployment_data, contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY
)
# We need to also check the constructor parameters against the chain
if (
to_checksum_address(token_network_registry.functions.secret_registry_address().call())
!= secret_registry.address
):
raise RuntimeError("secret_registry_address onchain has an unexpected value.")
if len(constructor_arguments) != 5:
raise RuntimeError(
"TokenNetworkRegistry received a wrong number of constructor arguments."
)
if secret_registry.address != constructor_arguments[0]:
raise RuntimeError(
"TokenNetworkRegistry's constructor received a different SecretRegistry address."
)
if token_network_registry.functions.chain_id().call() != constructor_arguments[1]:
raise RuntimeError("TokenNetwork remembers a wrong chain_id.")
assert (
token_network_registry.functions.settlement_timeout_min().call()
== constructor_arguments[2]
)
assert (
token_network_registry.functions.settlement_timeout_max().call()
== constructor_arguments[3]
)
return True
def _verify_deployed_contract(
self, deployment_data: DeployedContracts, contract_name: str
) -> Tuple[Contract, List]:
""" Verify deployment info against the chain
Verifies:
- the runtime bytecode - precompiled data against the chain
- information stored in deployment_*.json against the chain,
except for the constructor arguments, which have to be checked
separately.
Returns: (onchain_instance, constructor_arguments)
"""
contract_instance = self.contract_instance_from_deployment_data(
deployment_data, contract_name
)
contracts = deployment_data["contracts"]
# Check blockchain transaction hash & block information
receipt = self.web3.eth.getTransactionReceipt(contracts[contract_name]["transaction_hash"])
if receipt["blockNumber"] != contracts[contract_name]["block_number"]:
raise RuntimeError(
f'We have block_number {contracts[contract_name]["block_number"]} in the '
f'deployment info, but {receipt["blockNumber"]} in the transaction receipt '
"from web3."
)
if receipt["gasUsed"] != contracts[contract_name]["gas_cost"]:
raise RuntimeError(
f'We have gasUsed {contracts[contract_name]["gas_cost"]} in the deployment info, '
f'but {receipt["gasUsed"]} in the transaction receipt from web3.'
)
if receipt["contractAddress"] != contracts[contract_name]["address"]:
raise RuntimeError(
f'We have contractAddress {contracts[contract_name]["address"]} in the deployment'
f' info but {receipt["contractAddress"]} in the transaction receipt from web3.'
)
# Check that the deployed bytecode matches the precompiled data
blockchain_bytecode = self.web3.eth.getCode(contract_instance.address).hex()
compiled_bytecode = self.contract_manager.get_runtime_hexcode(contract_name)
if blockchain_bytecode == compiled_bytecode:
print(
f"{contract_name} at {contract_instance.address} "
f"matches the compiled data from contracts.json"
)
else:
raise RuntimeError(f"{contract_name} at {contract_instance.address} has wrong code")
return contract_instance, contracts[contract_name]["constructor_arguments"]
def contract_instance_from_deployment_data(
self, deployment_data: DeployedContracts, contract_name: str
) -> Contract:
contracts = deployment_data["contracts"]
contract_address = contracts[contract_name]["address"]
contract_instance = self.web3.eth.contract(
abi=self.contract_manager.get_contract_abi(contract_name), address=contract_address
)
return contract_instance
def verify_service_contracts_deployment_data(
self,
token_address: HexAddress,
user_deposit_whole_balance_limit: int,
token_network_registry_address: HexAddress,
deployed_contracts_info: DeployedContracts,
) -> bool:
chain_id = int(self.web3.version.network)
assert deployed_contracts_info is not None
if self.contract_manager.contracts_version != deployed_contracts_info["contracts_version"]:
raise RuntimeError("Version string mismatch")
if chain_id != deployed_contracts_info["chain_id"]:
raise RuntimeError("chain_id mismatch")
service_registry, service_registry_constructor_arguments = self._verify_deployed_contract(
deployment_data=deployed_contracts_info, contract_name=CONTRACT_SERVICE_REGISTRY
)
user_deposit, user_deposit_constructor_arguments = self._verify_deployed_contract(
deployment_data=deployed_contracts_info, contract_name=CONTRACT_USER_DEPOSIT
)
one_to_n, one_to_n_constructor_arguments = self._verify_deployed_contract(
deployment_data=deployed_contracts_info, contract_name=CONTRACT_ONE_TO_N
)
monitoring_service, ms_constructor_arguments = self._verify_deployed_contract(
deployed_contracts_info, CONTRACT_MONITORING_SERVICE
)
_verify_service_registry_deployment(
service_registry=service_registry,
constructor_arguments=service_registry_constructor_arguments,
token_address=token_address,
)
_verify_user_deposit_deployment(
user_deposit=user_deposit,
constructor_arguments=user_deposit_constructor_arguments,
token_address=token_address,
user_deposit_whole_balance_limit=user_deposit_whole_balance_limit,
one_to_n_address=one_to_n.address,
monitoring_service_address=monitoring_service.address,
)
_verify_monitoring_service_deployment(
monitoring_service=monitoring_service,
constructor_arguments=ms_constructor_arguments,
token_address=token_address,
service_registry_address=service_registry.address,
user_deposit_address=user_deposit.address,
token_network_registry_address=token_network_registry_address,
)
_verify_one_to_n_deployment(
one_to_n=one_to_n,
constructor_arguments=one_to_n_constructor_arguments,
user_deposit_address=user_deposit.address,
chain_id=chain_id,
service_registry_address=service_registry.address,
)
return True
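# Rough usage sketch (editorial, not from the original file): checking the
# bundled deployment_*.json against a locally running node. The provider URL is
# a placeholder; contracts_version=None selects the default bundled version.
#
#     web3 = Web3(Web3.HTTPProvider("http://localhost:8545"))
#     verifier = ContractVerifier(web3=web3, contracts_version=None)
#     verifier.verify_deployed_contracts_in_filesystem()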
def _verify_user_deposit_deployment(
user_deposit: Contract,
constructor_arguments: List,
token_address: HexAddress,
user_deposit_whole_balance_limit: int,
one_to_n_address: HexAddress,
monitoring_service_address: HexAddress,
) -> None:
""" Check an onchain deployment of UserDeposit and constructor arguments at deployment time """
if len(constructor_arguments) != 2:
raise RuntimeError("UserDeposit has a wrong number of constructor arguments.")
if token_address != constructor_arguments[0]:
raise RuntimeError("UserDeposit received a wrong token address during construction.")
if to_checksum_address(user_deposit.functions.token().call()) != token_address:
raise RuntimeError("UserDeposit has a wrong token address onchain.")
if user_deposit.functions.whole_balance_limit().call() != user_deposit_whole_balance_limit:
raise RuntimeError("UserDeposit has a wrong whole_balance_limit onchain")
if user_deposit_whole_balance_limit != constructor_arguments[1]:
raise RuntimeError("UserDeposit received a wrong whole_balance_limit during construction.")
if to_checksum_address(user_deposit.functions.one_to_n_address().call()) != one_to_n_address:
raise RuntimeError("UserDeposit has a wrong OneToN address onchain.")
onchain_msc_address = to_checksum_address(user_deposit.functions.msc_address().call())
if onchain_msc_address != monitoring_service_address:
raise RuntimeError(
f"MSC address found onchain: {onchain_msc_address}, "
f"expected: {monitoring_service_address}"
)
def _verify_monitoring_service_deployment(
monitoring_service: Contract,
constructor_arguments: List,
token_address: HexAddress,
service_registry_address: HexAddress,
user_deposit_address: HexAddress,
token_network_registry_address: HexAddress,
) -> None:
""" Check an onchain deployment of MonitoringService and constructor arguments """
if len(constructor_arguments) != 4:
raise RuntimeError("MonitoringService has a wrong number of constructor arguments.")
if to_checksum_address(monitoring_service.functions.token().call()) != token_address:
raise RuntimeError("MonitoringService has a wrong token address onchain.")
if token_address != constructor_arguments[0]:
raise RuntimeError("MonitoringService received a wrong token address during construction")
if (
to_checksum_address(monitoring_service.functions.service_registry().call())
!= service_registry_address
):
raise RuntimeError("MonitoringService has a wrong ServiceRegistry address onchain.")
if service_registry_address != constructor_arguments[1]:
raise RuntimeError("MonitoringService received a wrong address during construction.")
if (
to_checksum_address(monitoring_service.functions.user_deposit().call())
!= user_deposit_address
):
raise RuntimeError("MonitoringService has a wrong UserDeposit address onchain.")
if user_deposit_address != constructor_arguments[2]:
raise RuntimeError(
"MonitoringService received a wrong UserDeposit address during construction."
)
if (
to_checksum_address(monitoring_service.functions.token_network_registry().call())
!= token_network_registry_address
):
raise RuntimeError("MonitoringService has a wrong TokenNetworkRegistry address onchain.")
if token_network_registry_address != constructor_arguments[3]:
raise RuntimeError(
"MonitoringService received a wrong TokenNetworkRegistry address during construction."
)
def _verify_one_to_n_deployment(
one_to_n: Contract,
constructor_arguments: List,
user_deposit_address: HexAddress,
service_registry_address: HexAddress,
chain_id: int,
) -> None:
""" Check an onchain deployment of OneToN and constructor arguments """
if to_checksum_address(one_to_n.functions.deposit_contract().call()) != user_deposit_address:
raise RuntimeError("OneToN has a wrong UserDeposit address onchain.")
if user_deposit_address != constructor_arguments[0]:
raise RuntimeError("OneToN received a wrong UserDeposit address during construction.")
if chain_id != constructor_arguments[1]:
raise RuntimeError("OneToN received a wrong chain ID during construction.")
if service_registry_address != constructor_arguments[2]:
raise RuntimeError("OneToN received a wrong ServiceRegistry address during construction.")
if len(constructor_arguments) != 3:
raise RuntimeError("OneToN received a wrong number of constructor arguments.")
def _verify_service_registry_deployment(
service_registry: Contract, constructor_arguments: List, token_address: HexAddress
) -> None:
""" Check an onchain deployment of ServiceRegistry and constructor arguments """
if len(constructor_arguments) != 8:
raise RuntimeError(
"ServiceRegistry was deployed with a wrong number of constructor arguments"
)
if to_checksum_address(service_registry.functions.token().call()) != token_address:
raise RuntimeError("ServiceRegistry has a wrong token address")
if token_address != constructor_arguments[0]:
raise RuntimeError(
f"expected token address {token_address} "
f"but the constructor argument for {CONTRACT_SERVICE_REGISTRY} is "
f"{constructor_arguments[0]}"
)
controller_onchain = to_checksum_address(service_registry.functions.controller().call())
if controller_onchain != constructor_arguments[1]:
raise RuntimeError(
f"the deployment data contains the controller address {constructor_arguments[1]} "
f"but the contract remembers {controller_onchain} onchain."
)
# All other parameters can change after the deployment, so the checks are omitted.
|
the-stack_0_27889
|
import json
import qrcode  # needed for EthAccountViewSet.get_qrcode below
from decimal import *
from django.conf import settings  # WEB3_PROVIDER, ARTIFACT_PATH, UTCOIN_ADDRESS, MEDIA_ROOT
from django.db import transaction
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import permissions, generics, status, viewsets, filters
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from web3 import Web3, HTTPProvider  # used for on-chain balance lookups and transfers
from callback_functions.transfer_callback import transfer_callback
from .serializer import *
class RegisterView(generics.CreateAPIView):
"""
Create User
"""
permission_classes = (permissions.AllowAny,)
queryset = User.objects.all()
serializer_class = UserSerializer
@transaction.atomic
def post(self, request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = UserSerializer
def get_queryset(self):
return User.objects.filter(pk=self.request.user.id)
class AccountViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = AccountSerializer
def get_queryset(self):
return Account.objects.filter(user=self.request.user)
class EthAccountViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = EthAccountSerializer
def get_queryset(self):
return EthAccount.objects.filter(user=self.request.user)
@detail_route()
def get_balance(self, request, pk=None):
eth_account = get_object_or_404(EthAccount, address=pk)
address = pk
# Get UTCoin balance
num_suffix = 1000
w3 = Web3(HTTPProvider(settings.WEB3_PROVIDER))
eth_balance = w3.fromWei(w3.eth.getBalance(address), 'ether')
abi = self.load_abi(settings.ARTIFACT_PATH)
UTCoin = w3.eth.contract(abi=abi, address=settings.UTCOIN_ADDRESS)
balance_int = UTCoin.call().balanceOf(address)
balance = float(balance_int / num_suffix)
context = {
'address': address,
'eth_balance': eth_balance,
'balance': balance,
'balance_int': balance_int
}
return Response(context)
@detail_route()
def get_qrcode(self, request, pk=None):
eth_account = get_object_or_404(EthAccount, address=pk)
address = pk
eth_qrcode = eth_account.qrcode
if not eth_qrcode:
# Generate QR code
img = qrcode.make(address)
file_name = address + '.png'
file_path = '/images/qrcode/' + file_name
img.save(settings.MEDIA_ROOT + file_path)
eth_account.qrcode = file_path
eth_account.save()
eth_qrcode = eth_account.qrcode
context = {
'address': address,
'qrcode_url': eth_qrcode.url
}
return Response(context)
@staticmethod
def load_abi(file_path):
"""
:param str file_path:
:return dict: abi
"""
artifact = open(file_path, 'r')
json_dict = json.load(artifact)
abi = json_dict['abi']
return abi
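# Note (editorial): load_abi only reads the "abi" key, so any artifact JSON at
# settings.ARTIFACT_PATH shaped like {"abi": [...], ...} (for example a Truffle
# build artifact) will work; all other keys are ignored by this helper.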
class TransactionViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = TransactionSerializer
filter_backends = (DjangoFilterBackend, filters.OrderingFilter)
filter_fields = ('from_address', 'to_address', 'amount', 'is_active', 'created_at')
ordering_fields = ('id', 'amount', 'created_at')
def get_queryset(self):
address = self.request.user.account.address
return Transaction.objects.filter(Q(from_address=address) | Q(to_address=address))
@list_route(methods=['post'])
@transaction.atomic
def transfer(self, request):
from_account = request.user.account
# Receive params
body = json.loads(request.body)
to_address = body['address']
amount = body['amount']
if not (to_address and amount):
error_msg = 'アドレスまたは金額が入力されていません。'
print('Error:', error_msg)
context = {
'success': False,
'detail': error_msg
}
return Response(context)
# Validate address
if not self.is_ut_address(to_address):
error_msg = '無効なアドレスです。'
print('Error:', error_msg)
context = {
'success': False,
'detail': error_msg
}
return Response(context)
amount = Decimal(amount)
to_account = Account.objects.get(address=to_address)
# Validate amount
if from_account.balance < amount:
error_msg = '送金可能額を超えています。'
print('Error:', error_msg)
context = {
'success': False,
'detail': error_msg
}
return Response(context)
# Transfer UTCoin (update local account balances atomically)
with transaction.atomic():
from_account.balance -= amount
to_account.balance += amount
from_account.save()
to_account.save()
# Create Transaction
tx = Transaction.objects.create(
from_address=from_account.address,
to_address=to_address,
amount=amount
)
# TODO: execute the corresponding on-chain contract call
# try:
# transfer_callback(tx_hash, from_address, to_address, amount_int, amount)
# except Exception as e:
# print(e)
# error_msg = 'コールバック処理に失敗しました。'
# print('Error:', error_msg)
#
# else:
# error_msg = 'アカウントのアンロックに失敗しました。'
# print('Error:', error_msg)
# context = {
# 'success': False,
# 'detail': error_msg
# }
# return Response(context)
context = {
'success': True,
'transaction': TransactionSerializer(tx).data
}
return Response(context, status=status.HTTP_201_CREATED)
@staticmethod
def is_ut_address(address):
"""
:param str address:
:return bool:
"""
if address[0:2] == 'UT' and len(address) == 42:
if Account.objects.filter(address=address).exists():
return True
return False
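# Example request/response for the transfer endpoint above (illustrative values;
# the URL prefix depends on how this ViewSet is registered in the router):
#
#     POST /api/transactions/transfer/
#     {"address": "UT0123456789abcdef0123456789abcdef01234567", "amount": "1.5"}
#
# On success the view responds with {"success": true, "transaction": {...}} and
# HTTP 201; validation failures return {"success": false, "detail": "..."}.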
class EthTransactionViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = EthTransactionSerializer
filter_backends = (DjangoFilterBackend, filters.OrderingFilter)
filter_fields = (
'tx_hash', 'from_address', 'to_address', 'amount', 'gas', 'gas_price', 'value', 'network_id', 'is_active',
'created_at')
ordering_fields = ('id', 'amount', 'gas', 'gas_price', 'value', 'created_at')
def get_queryset(self):
eth_account = get_object_or_404(EthAccount, user=self.request.user)
address = eth_account.address
return EthTransaction.objects.filter(Q(from_address=address) | Q(to_address=address))
@list_route(methods=['post'])
@transaction.atomic
def transfer(self, request):
eth_account = get_object_or_404(EthAccount, user=request.user)
from_address = eth_account.address
num_suffix = 1000
amount_min = 1 / num_suffix
fee = 0.001
# Receive params
body = json.loads(request.body)
to_address = body['address']
amount = body['amount']
if not (to_address and amount):
error_msg = 'アドレスまたは金額が入力されていません。'
print('Error:', error_msg)
context = {
'success': False,
'detail': error_msg
}
return Response(context)
amount = float(amount)
amount_int = int(amount * num_suffix)
# Validate address
w3 = Web3(HTTPProvider(settings.WEB3_PROVIDER))
if not w3.isAddress(to_address):
error_msg = '無効なアドレスです。'
print('Error:', error_msg)
context = {
'success': False,
'detail': error_msg
}
return Response(context)
# Validate amount
if amount < amount_min:
error_msg = '金額が不正です。'
print('Error:', error_msg)
context = {
'success': False,
'detail': error_msg
}
return Response(context)
# Get UTCoin balance
abi = self.load_abi(settings.ARTIFACT_PATH)
UTCoin = w3.eth.contract(abi=abi, address=settings.UTCOIN_ADDRESS)
balance = UTCoin.call().balanceOf(from_address)
if balance < amount + fee:
error_msg = '残高が不足しています。'
print('Error:', error_msg)
context = {
'success': False,
'detail': error_msg
}
return Response(context)
# Transfer UTCoin
if w3.personal.unlockAccount(from_address, eth_account.password, duration=hex(300)):
try:
tx_hash = UTCoin.transact({'from': from_address}).transfer(to_address, amount_int)
# Create Transaction
transaction_info = w3.eth.getTransaction(tx_hash)
tx = EthTransaction.objects.create(  # the fields below belong to the EthTransaction model
tx_hash=tx_hash,
from_address=from_address,
to_address=to_address,
amount=amount_int,
gas=transaction_info['gas'],
gas_price=transaction_info['gasPrice'],
value=transaction_info['value'],
network_id=transaction_info['networkId']
)
except Exception as e:
print(e)
error_msg = 'トランザクションに失敗しました。'
print('Error:', error_msg)
context = {
'success': False,
'detail': error_msg
}
return Response(context)
# Execute callback function
try:
transfer_callback(tx_hash, from_address, to_address, amount_int, amount)
except Exception as e:
print(e)
error_msg = 'コールバック処理に失敗しました。'
print('Error:', error_msg)
else:
error_msg = 'アカウントのアンロックに失敗しました。'
print('Error:', error_msg)
context = {
'success': False,
'detail': error_msg
}
return Response(context)
context = {
'success': True,
'address': to_address,
'amount': amount,
'fee': fee,
'transaction': EthTransactionSerializer(tx).data
}
return Response(context, status=status.HTTP_201_CREATED)
@staticmethod
def load_abi(file_path):
"""
:param str file_path:
:return dict: abi
"""
artifact = open(file_path, 'r')
json_dict = json.load(artifact)
abi = json_dict['abi']
return abi
class ContractViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = ContractSerializer
queryset = Contract.objects.all()
def list(self, request):
queryset = Contract.objects.filter(user=self.request.user)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
def retrieve(self, request, pk=None):
queryset = Contract.objects.all()
contract = get_object_or_404(queryset, address=pk)
serializer = ContractSerializer(contract)
return Response(serializer.data)
|
the-stack_0_27890
|
"""A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations when
dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seamlessly with standard file IO operations and the os module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only gzip
and bz2 are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> ds = datasource.DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/index.html')
>>>
>>> # Use the file as you normally would
>>> fp.read()
>>> fp.close()
"""
__docformat__ = "restructuredtext en"
import os
from shutil import rmtree, copyfile, copyfileobj
_open = open
# Using a class instead of a module-level dictionary
# to reduce the initial 'import numpy' overhead by
# deferring the import of bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners(object):
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way that
an instance of `_FileOpeners` itself can be indexed with the keys of that
dictionary. Currently uncompressed files as well as files
compressed with ``gzip`` or ``bz2`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
def _load(self):
if self._loaded:
return
try:
import bz2
self._file_openers[".bz2"] = bz2.BZ2File
except ImportError:
pass
try:
import gzip
self._file_openers[".gz"] = gzip.open
except ImportError:
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression
methods.
"""
self._load()
return list(self._file_openers.keys())
def __getitem__(self, key):
self._load()
return self._file_openers[key]
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir):
"""
Open `path` with `mode` and return the file object.
If ``path`` is an URL, it will be downloaded, stored in the `DataSource`
`destpath` directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open.
mode : str, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
append. Available modes depend on the type of object specified by path.
Default is 'r'.
destpath : str, optional
Path to the directory where the source file gets downloaded to for use.
If `destpath` is None, a temporary directory will be created. The
default path is the current directory.
Returns
-------
out : file object
The opened file.
Notes
-----
This is a convenience function that instantiates a `DataSource` and
returns the file object from ``DataSource.open(path)``.
"""
ds = DataSource(destpath)
return ds.open(path, mode)
class DataSource (object):
"""
DataSource(destpath='.')
A generic data source file (file, http, ftp, ...).
DataSources can be local files or remote files/URLs. The files may
also be compressed or uncompressed. DataSource hides some of the low-level
details of downloading the file, allowing you to simply pass in a valid
file path (or URL) and obtain a file object.
Parameters
----------
destpath : str or None, optional
Path to the directory where the source file gets downloaded to for use.
If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Notes
-----
URLs require a scheme string (``http://``) to be used, without it they
will fail::
>>> repos = DataSource()
>>> repos.exists('www.google.com/index.html')
False
>>> repos.exists('http://www.google.com/index.html')
True
Temporary directories are deleted when the DataSource is deleted.
Examples
--------
::
>>> ds = DataSource('/home/guido')
>>> urlname = 'http://www.google.com/index.html'
>>> gfile = ds.open('http://www.google.com/index.html') # remote file
>>> ds.abspath(urlname)
'/home/guido/www.google.com/site/index.html'
>>> ds = DataSource(None) # use with temporary file
>>> ds.open('/home/guido/foobar.txt')
<open file '/home/guido/foobar.txt', mode 'r' at 0x91d4430>
>>> ds.abspath('/home/guido/foobar.txt')
'/tmp/tmpy4pgsP/home/guido/foobar.txt'
"""
def __init__(self, destpath=os.curdir):
"""Create a DataSource with a local path at destpath."""
if destpath:
self._destpath = os.path.abspath(destpath)
self._istmpdest = False
else:
import tempfile # deferring import to improve startup time
self._destpath = tempfile.mkdtemp()
self._istmpdest = True
def __del__(self):
# Remove temp directories
if self._istmpdest:
rmtree(self._destpath)
def _iszip(self, filename):
"""Test if the filename is a zip file by looking at the file extension.
"""
fname, ext = os.path.splitext(filename)
return ext in list(_file_openers.keys())
def _iswritemode(self, mode):
"""Test if the given mode will open a file for writing."""
# Currently only used to test the bz2 files.
_writemodes = ("w", "+")
for c in mode:
if c in _writemodes:
return True
return False
def _splitzipext(self, filename):
"""Split zip extension from filename and return filename.
*Returns*:
base, zip_ext : {tuple}
"""
if self._iszip(filename):
return os.path.splitext(filename)
else:
return filename, None
def _possible_names(self, filename):
"""Return a tuple containing compressed filename variations."""
names = [filename]
if not self._iszip(filename):
for zipext in list(_file_openers.keys()):
if zipext:
names.append(filename+zipext)
return names
def _isurl(self, path):
"""Test if path is a net location. Tests the scheme and netloc."""
# We do this here to reduce the 'import numpy' initial import time.
from urllib.parse import urlparse
# BUG : URLs require a scheme string ('http://') to be used.
# www.google.com will fail.
# Should we prepend the scheme for those that don't have it and
# test that also? Similar to the way we append .gz and test for
# compressed versions of files.
scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
return bool(scheme and netloc)
def _cache(self, path):
"""Cache the file specified by path.
Creates a copy of the file in the datasource cache.
"""
# We import these here because importing urllib2 is slow and
# a significant fraction of numpy's total import time.
from urllib.request import urlopen
from urllib.error import URLError
upath = self.abspath(path)
# ensure directory exists
if not os.path.exists(os.path.dirname(upath)):
os.makedirs(os.path.dirname(upath))
# TODO: Doesn't handle compressed files!
if self._isurl(path):
try:
openedurl = urlopen(path)
f = _open(upath, 'wb')
try:
copyfileobj(openedurl, f)
finally:
f.close()
except URLError:
raise URLError("URL not found: %s" % path)
else:
copyfile(path, upath)  # copyfile is imported from shutil at module level
return upath
def _findfile(self, path):
"""Searches for ``path`` and returns full path if found.
If path is an URL, _findfile will cache a local copy and return
the path to the cached file.
If path is a local file, _findfile will return a path to that local
file.
The search will include possible compressed versions of the file and
return the first occurrence found.
"""
# Build list of possible local file paths
if not self._isurl(path):
# Valid local paths
filelist = self._possible_names(path)
# Paths in self._destpath
filelist += self._possible_names(self.abspath(path))
else:
# Cached URLs in self._destpath
filelist = self._possible_names(self.abspath(path))
# Remote URLs
filelist = filelist + self._possible_names(path)
for name in filelist:
if self.exists(name):
if self._isurl(name):
name = self._cache(name)
return name
return None
def abspath(self, path):
"""
Return absolute path of file in the DataSource directory.
If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
using the `open` method.
Parameters
----------
path : str
Can be a local file or a remote URL.
Returns
-------
out : str
Complete path, including the `DataSource` destination directory.
Notes
-----
The functionality is based on `os.path.abspath`.
"""
# We do this here to reduce the 'import numpy' initial import time.
from urllib.parse import urlparse
# TODO: This should be more robust. Handles case where path includes
# the destpath, but not other sub-paths. Failing case:
# path = /home/guido/datafile.txt
# destpath = /home/alex/
# upath = self.abspath(path)
# upath == '/home/alex/home/guido/datafile.txt'
# handle case where path includes self._destpath
splitpath = path.split(self._destpath, 2)
if len(splitpath) > 1:
path = splitpath[1]
scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
netloc = self._sanitize_relative_path(netloc)
upath = self._sanitize_relative_path(upath)
return os.path.join(self._destpath, netloc, upath)
def _sanitize_relative_path(self, path):
"""Return a sanitised relative path for which
os.path.abspath(os.path.join(base, path)).startswith(base)
"""
last = None
path = os.path.normpath(path)
while path != last:
last = path
# Note: os.path.join treats '/' as os.sep on Windows
path = path.lstrip(os.sep).lstrip('/')
path = path.lstrip(os.pardir).lstrip('..')
drive, path = os.path.splitdrive(path) # for Windows
return path
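# Worked example (editorial): on a POSIX system the loop above maps
# '../../etc/passwd' -> 'etc/passwd' and '/var//log/../data.txt' -> 'var/data.txt',
# so the value joined onto self._destpath in abspath() cannot escape it.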
def exists(self, path):
"""
Test if path exists.
Test if `path` exists as (and in this order):
- a local file.
- a remote URL that has been downloaded and stored locally in the
`DataSource` directory.
- a remote URL that has not been downloaded, but is valid and accessible.
Parameters
----------
path : str
Can be a local file or a remote URL.
Returns
-------
out : bool
True if `path` exists.
Notes
-----
When `path` is an URL, `exists` will return True if it's either stored
locally in the `DataSource` directory, or is a valid remote URL.
`DataSource` does not discriminate between the two, the file is accessible
if it exists in either location.
"""
# We import this here because importing urllib2 is slow and
# a significant fraction of numpy's total import time.
from urllib.request import urlopen
from urllib.error import URLError
# Test local path
if os.path.exists(path):
return True
# Test cached url
upath = self.abspath(path)
if os.path.exists(upath):
return True
# Test remote url
if self._isurl(path):
try:
netfile = urlopen(path)
del(netfile)
return True
except URLError:
return False
return False
def open(self, path, mode='r'):
"""
Open and return file-like object.
If `path` is an URL, it will be downloaded, stored in the `DataSource`
directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
append. Available modes depend on the type of object specified by
`path`. Default is 'r'.
Returns
-------
out : file object
File object.
"""
# TODO: There is no support for opening a file for writing which
# doesn't exist yet (creating a file). Should there be?
# TODO: Add a ``subdir`` parameter for specifying the subdirectory
# used to store URLs in self._destpath.
if self._isurl(path) and self._iswritemode(mode):
raise ValueError("URLs are not writeable")
# NOTE: _findfile will fail on a new file opened for writing.
found = self._findfile(path)
if found:
_fname, ext = self._splitzipext(found)
if ext == '.bz2':
mode = mode.replace("+", "")  # the bz2 opener does not accept '+' modes
return _file_openers[ext](found, mode=mode)
else:
raise IOError("%s not found." % path)
class Repository (DataSource):
"""
Repository(baseurl, destpath='.')
A data repository where multiple DataSource's share a base URL/directory.
`Repository` extends `DataSource` by prepending a base URL (or directory)
to all the files it handles. Use `Repository` when you will be working
with multiple files from one base URL. Initialize `Repository` with the
base URL, then refer to each file by its filename only.
Parameters
----------
baseurl : str
Path to the local directory or remote location that contains the
data files.
destpath : str or None, optional
Path to the directory where the source file gets downloaded to for use.
If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Examples
--------
To analyze all files in the repository, do something like this
(note: this is not self-contained code)::
>>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
>>> for filename in filelist:
... fp = repos.open(filename)
... fp.analyze()
... fp.close()
Similarly you could use a URL for a repository::
>>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
"""
def __init__(self, baseurl, destpath=os.curdir):
"""Create a Repository with a shared url or directory of baseurl."""
DataSource.__init__(self, destpath=destpath)
self._baseurl = baseurl
def __del__(self):
DataSource.__del__(self)
def _fullpath(self, path):
"""Return complete path for path. Prepends baseurl if necessary."""
splitpath = path.split(self._baseurl, 2)
if len(splitpath) == 1:
result = os.path.join(self._baseurl, path)
else:
result = path # path contains baseurl already
return result
def _findfile(self, path):
"""Extend DataSource method to prepend baseurl to ``path``."""
return DataSource._findfile(self, self._fullpath(path))
def abspath(self, path):
"""
Return absolute path of file in the Repository directory.
If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
using the `open` method.
Parameters
----------
path : str
Can be a local file or a remote URL. This may, but does not have
to, include the `baseurl` with which the `Repository` was initialized.
Returns
-------
out : str
Complete path, including the `DataSource` destination directory.
"""
return DataSource.abspath(self, self._fullpath(path))
def exists(self, path):
"""
Test if path exists prepending Repository base URL to path.
Test if `path` exists as (and in this order):
- a local file.
- a remote URL that has been downloaded and stored locally in the
`DataSource` directory.
- a remote URL that has not been downloaded, but is valid and
accessible.
Parameters
----------
path : str
Can be a local file or a remote URL. This may, but does not have
to, include the `baseurl` with which the `Repository` was initialized.
Returns
-------
out : bool
True if `path` exists.
Notes
-----
When `path` is an URL, `exists` will return True if it's either stored
locally in the `DataSource` directory, or is a valid remote URL.
`DataSource` does not discriminate between the two, the file is accessible
if it exists in either location.
"""
return DataSource.exists(self, self._fullpath(path))
def open(self, path, mode='r'):
"""
Open and return file-like object prepending Repository base URL.
If `path` is an URL, it will be downloaded, stored in the DataSource
directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open. This may, but does not have to,
include the `baseurl` with which the `Repository` was initialized.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
append. Available modes depend on the type of object specified by
`path`. Default is 'r'.
Returns
-------
out : file object
File object.
"""
return DataSource.open(self, self._fullpath(path), mode)
def listdir(self):
"""
List files in the source Repository.
Returns
-------
files : list of str
List of file names (not containing a directory part).
Notes
-----
Does not currently work for remote repositories.
"""
if self._isurl(self._baseurl):
raise NotImplementedError("Directory listing of URLs, not supported yet.")
else:
return os.listdir(self._baseurl)
|
the-stack_0_27891
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Lw4O6(A10BaseClass):
"""Class Description::
Configure LW-4over6 interface.
Class lw-4o6 supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param outside: {"default": 0, "optional": true, "type": "number", "description": "Configure LW-4over6 outside interface", "format": "flag"}
:param inside: {"default": 0, "optional": true, "type": "number", "description": "Configure LW-4over6 inside interface", "format": "flag"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/interface/ethernet/{ifnum}/lw-4o6`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "lw-4o6"
self.a10_url="/axapi/v3/interface/ethernet/{ifnum}/lw-4o6"
self.DeviceProxy = ""
self.outside = ""
self.inside = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
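# Illustrative sketch (editorial; the surrounding SDK session handling is assumed
# and not shown in this module): constructing the object for ethernet interface 1.
#
#     lw = Lw4O6(ifnum="1", inside=1, DeviceProxy=device_proxy)
#     # requests built from this object target /axapi/v3/interface/ethernet/1/lw-4o6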
|
the-stack_0_27892
|
# -*- coding: utf-8 -*-
import random
import time
import pytest
from sqlalchemy.exc import IntegrityError
from sqlalchemy_mate.crud.inserting import smart_insert
from sqlalchemy_mate.crud.selecting import count_row
from sqlalchemy_mate.crud.deleting import delete_all
from sqlalchemy_mate.tests import (
IS_WINDOWS,
engine_sqlite, engine_psql, t_smart_insert, BaseTest
)
class InsertingApiBaseTest(BaseTest):
def teardown_method(self, method):
"""
Make sure data in all tables is cleared after each test case.
"""
self.delete_all_data_in_core_table()
def test_smart_insert(self):
"""
Test performance of smart insert.
Verify the basic behaviour of smart_insert and compare its performance
against a plain row-by-row insert.
"""
# Use Smart Insert Method
# ------ Before State ------
scale = 10  # scale of the test data; the total row count is this value cubed, 5 ~ 10 recommended
n_exist = scale
n_all = scale ** 3
exist_id_list = [random.randint(1, n_all) for _ in range(n_exist)]
exist_id_list = list(set(exist_id_list))
exist_id_list.sort()
n_exist = len(exist_id_list)
# Smart Insert
exist_data = [{"id": id} for id in exist_id_list]
all_data = [{"id": i} for i in range(1, 1 + n_all)]
op_count, ins_count = smart_insert(self.engine, t_smart_insert, exist_data, 5)
assert op_count == 1
assert ins_count == n_exist
assert count_row(self.engine, t_smart_insert) == n_exist
# ------ Invoke ------
st = time.process_time()
op_count, ins_count = smart_insert(self.engine, t_smart_insert, all_data, 5)
assert op_count <= (0.5 * n_all)
assert ins_count == (n_all - n_exist)
elapse1 = time.process_time() - st
# ------ After State ------
assert count_row(self.engine, t_smart_insert) == n_all
# Use Regular Insert Method
# ------ Before State ------
with self.engine.connect() as connection:
connection.execute(t_smart_insert.delete())
ins = t_smart_insert.insert()
with self.engine.connect() as connection:
connection.execute(ins, exist_data)
assert count_row(self.engine, t_smart_insert) == n_exist
# ------ Invoke ------
st = time.process_time()
with self.engine.connect() as connection:
for row in all_data:
try:
connection.execute(ins, row)
except IntegrityError:
pass
elapse2 = time.process_time() - st
assert count_row(self.engine, t_smart_insert) == n_all
# ------ After State ------
assert elapse1 < elapse2
def test_smart_insert_single_row(self):
assert count_row(self.engine, t_smart_insert) == 0
data = {"id": 1}
op_count, ins_count = smart_insert(self.engine, t_smart_insert, data)
assert op_count == 1
assert ins_count == 1
assert count_row(self.engine, t_smart_insert) == 1
op_count, ins_count = smart_insert(self.engine, t_smart_insert, data)
assert op_count == 0
assert ins_count == 0
assert count_row(self.engine, t_smart_insert) == 1
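# Editorial note, based on the behaviour asserted above: smart_insert attempts a
# single bulk INSERT and, when an IntegrityError reveals duplicates, recursively
# splits the batch and retries the halves. op_count therefore stays well below
# one statement per row, while ins_count reports how many rows were actually new.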
class TestInsertingApiSqlite(InsertingApiBaseTest):
engine = engine_sqlite
@pytest.mark.skipif(
IS_WINDOWS,
reason="no psql service container for windows",
)
class TestInsertingApiPostgres(InsertingApiBaseTest):
engine = engine_psql
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
|
the-stack_0_27894
|
# -*- coding: utf-8 -*-
"""
Find the N largest or smallest elements in a list
using a heap (max-heap / min-heap) via heapq.
"""
import heapq
list1 = [34, 25, 12, 99, 87, 63, 58, 78, 88, 92]
list2 = [
{'name': 'IBM', 'shares': 100, 'price': 91.1},
{'name': 'AAPL', 'shares': 50, 'price': 543.22},
{'name': 'FB', 'shares': 200, 'price': 21.09},
{'name': 'HPQ', 'shares': 35, 'price': 31.75},
{'name': 'YHOO', 'shares': 45, 'price': 16.35},
{'name': 'ACME', 'shares': 75, 'price': 115.65}
]
print(heapq.nlargest(3, list1))
print(heapq.nsmallest(3, list1))
print(heapq.nlargest(2, list2, key=lambda x: x['price']))
print(heapq.nlargest(2, list2, key=lambda x: x['shares']))
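# A small follow-up sketch (not part of the original snippet): when the numbers
# arrive as a stream too large to sort in memory, a bounded min-heap of size N
# keeps the N largest values seen so far.
top3 = []
for x in list1:
    if len(top3) < 3:
        heapq.heappush(top3, x)
    else:
        heapq.heappushpop(top3, x)  # push x, then discard the smallest of the four
print(sorted(top3, reverse=True))  # [99, 92, 88]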
|
the-stack_0_27896
|
import asyncio
from utilities import utilities
from Db.dev_sql import addDevUser, getDevsUsers, getDevUser, remDevUser
from telethon import utils, errors
import re
async def addDev_user(message, from_id):
try:
if getDevUser(from_id):
return await message.reply("User already added as Dev.")
addDevUser(from_id)
utilities.devs.append(from_id)
return await message.reply("❏︙تم رفع مطور")
except Exception as e:
utilities.prRed(str(type(e)) + " Error : " + str(e))
return await message.reply(str(e))
async def remDev_user(message, from_id):
try:
if not getDevUser(from_id):
return await message.reply("User already not dev.")
remDevUser(from_id)
utilities.devs.remove(from_id)
return await message.reply("❏︙تم تنزيل مطور")
except Exception as e:
utilities.prRed(str(type(e)) + " Error : " + str(e))
return await message.reply(str(e))
async def run(message, matches, chat_id, step, crons=None):
response = []
if message.sender_id not in utilities.config["sudo_members"]:
return []
if matches == "المطورين":
devlist = getDevsUsers()
res = ""
i = 1
for user in devlist:
userId = int("%.0f" % user.user_id)
try:
_user = await utilities.client.get_entity(userId)
strin = (
str(i)
+ " - [%s](tg://user?id=%s)"
% (_user.first_name, int("%.0f" % userId))
+ "\n"
)
except Exception as e:
strin = (
str(i)
+ " - [%s](tg://user?id=%s)"
% (("dev" + str(i)), int("%.0f" % userId))
+ "\n"
)
i += 1
res = res + strin
return [message.reply(res if (len(res) != 0) else "❏︙لا يوجد مطورين")]
if matches[0] == "رفع مطور":
if re.match(r"@[a-zA-Z][\w\d]{3,30}[a-zA-Z\d]", matches[1]):
user = await utilities.client.get_entity(matches[1])
return [addDev_user(message, user.id)]
elif re.match(r"(\d)", matches[1]):
return [addDev_user(message, matches[1])]
else:
return [message.reply("please, use by reply or use valid username and id")]
elif matches[0] == "rdev":
if re.match(r"@[a-zA-Z][\w\d]{3,30}[a-zA-Z\d]", matches[1]):
user = await utilities.client.get_entity(matches[1])
name = user.first_name
return [remDev_user(message, user.id)]
elif re.match(r"(\d)", matches[1]):
return [remDev_user(message, matches[1])]
else:
return [message.reply("please, use by reply or use valid username and id")]
elif matches == "رفع مطور":
if message.is_reply:
msg = await message.get_reply_message()
fromId = msg.from_id
chat_id = msg.chat_id
name = (await msg.get_sender()).first_name
return [addDev_user(message, fromId)]
elif matches == "rdev":
if message.is_reply:
msg = await message.get_reply_message()
fromId = msg.from_id
chat_id = msg.chat_id
return [remDev_user(message, fromId)]
elif matches == "مسح المطورين":
devlist = getDevsUsers()
for user in devlist:
remDevUser(user.user_id)
utilities.devs.remove(user.user_id)
return [message.reply("❏︙تم تنزيل جميع المطورين")]
return response
plugin = {
"name": "",
"desc": "Make someone dev",
"usage": [
"/مسح المطورين",
"/المطورين",
"/تنزيل مطور + برد",
"/رفع مطور + برد",
],
"run": run,
"sudo": True,
"patterns": [
"^[!/#](مسح المطورين)$",
"^[!/#](المطورين)",
"^[!/#](dev)$",
"^[!/#](تنزيل مطور)$",
"^[!/#](رفع مطور) (.+)$",
"^[!/#](rdev) (.+)$",
],
}
|
the-stack_0_27898
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Simplified chat demo for websockets.
Authentication, error handling, etc are left as an exercise for the reader :)
"""
import logging
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import os.path
import uuid
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
class Application(tornado.web.Application):
def __init__(self):
handlers = [(r"/", MainHandler), (r"/chatsocket", ChatSocketHandler)]
settings = dict(
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
)
super().__init__(handlers, **settings)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("index.html", messages=ChatSocketHandler.cache)
class ChatSocketHandler(tornado.websocket.WebSocketHandler):
waiters = set()
cache = []
cache_size = 200
def get_compression_options(self):
# Non-None enables compression with default options.
return {}
def open(self):
ChatSocketHandler.waiters.add(self)
def on_close(self):
ChatSocketHandler.waiters.remove(self)
@classmethod
def update_cache(cls, chat):
cls.cache.append(chat)
if len(cls.cache) > cls.cache_size:
cls.cache = cls.cache[-cls.cache_size :]
@classmethod
def send_updates(cls, chat):
logging.info("sending message to %d waiters", len(cls.waiters))
for waiter in cls.waiters:
try:
waiter.write_message(chat)
except:
logging.error("Error sending message", exc_info=True)
def on_message(self, message):
logging.info("got message %r", message)
parsed = tornado.escape.json_decode(message)
chat = {"id": str(uuid.uuid4()), "body": parsed["body"]}
chat["html"] = tornado.escape.to_basestring(
self.render_string("message.html", message=chat)
)
ChatSocketHandler.update_cache(chat)
ChatSocketHandler.send_updates(chat)
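# Hedged client sketch (editor's addition): a minimal coroutine that talks to the
# /chatsocket endpoint above via Tornado's public websocket_connect API. The
# localhost URL and message body are illustrative assumptions; run it from a
# second process, e.g. tornado.ioloop.IOLoop.current().run_sync(demo_client),
# while the chat server is up.
async def demo_client(port=8888):
    url = "ws://localhost:%d/chatsocket" % port
    conn = await tornado.websocket.websocket_connect(url)
    await conn.write_message(tornado.escape.json_encode({"body": "hello from demo_client"}))
    reply = await conn.read_message()
    logging.info("broadcast received: %r", reply)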
def main():
tornado.options.parse_command_line()
app = Application()
app.listen(options.port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
|
the-stack_0_27899
|
"""
Statistics-related constants.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
# The smallest representable positive number such that 1.0 + _EPS != 1.0.
_EPS = np.finfo(float).eps
# The largest [in magnitude] usable floating value.
_XMAX = np.finfo(float).machar.xmax
# The smallest [in magnitude] usable floating value.
_XMIN = np.finfo(float).machar.xmin
# -special.psi(1)
_EULER = 0.577215664901532860606512090082402431042
# special.zeta(3, 1) Apery's constant
_ZETA3 = 1.202056903159594285399738161511449990765
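# Hedged usage example (editor's addition): a quick, purely illustrative sanity
# check of the constants above (executed only when the module is run directly).
if __name__ == "__main__":
    assert 1.0 + _EPS != 1.0 and 1.0 + _EPS / 2 == 1.0
    print("eps=%g  xmin=%g  xmax=%g  euler=%.12f  zeta(3)=%.12f"
          % (_EPS, _XMIN, _XMAX, _EULER, _ZETA3))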
|
the-stack_0_27900
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Cloud TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves.urllib.request import Request
from six.moves.urllib.request import urlopen
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import ClusterResolver
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import format_master_url
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
_GKE_ENV_VARIABLE = 'KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'
_ENDPOINTS_SEPARATOR = ','
_DEFAULT_ENV_VARIABLE = 'TPU_NAME'
_DISCOVERY_SERVICE_URL_ENV_VARIABLE = 'TPU_API_DISCOVERY_URL'
class TPUClusterResolver(ClusterResolver):
"""Cluster Resolver for Google Cloud TPUs.
This is an implementation of cluster resolvers for the Google Cloud TPU
  service. As Cloud TPUs are in alpha, you will need to specify an API definition
file for this to consume, in addition to a list of Cloud TPUs in your Google
Cloud Platform project.
"""
def _tpuService(self):
"""Creates a new Cloud TPU API object.
This works around an issue where the underlying HTTP connection sometimes
times out when the script has been running for too long. Other methods in
    this object call this method to get a new API object whenever they need
to communicate with the Cloud API.
Returns:
A Google Cloud TPU API object.
"""
if self._service:
return self._service
credentials = self._credentials
if credentials is None or credentials == 'default':
credentials = GoogleCredentials.get_application_default()
if self._discovery_url:
return discovery.build(
'tpu', 'v1alpha1',
credentials=credentials,
discoveryServiceUrl=self._discovery_url)
else:
return discovery.build(
'tpu', 'v1alpha1',
credentials=credentials)
def _requestComputeMetadata(self, path):
req = Request('http://metadata/computeMetadata/v1/%s' % path,
headers={'Metadata-Flavor': 'Google'})
resp = urlopen(req)
return compat.as_bytes(resp.read())
def _shouldResolve(self):
if isinstance(self._should_resolve_override, bool):
return self._should_resolve_override
if (self._tpu == compat.as_bytes('') or
self._tpu == compat.as_bytes('local') or
self._tpu.startswith(compat.as_bytes('/bns')) or
self._tpu.startswith(compat.as_bytes('localhost:')) or
self._tpu.startswith(compat.as_bytes('grpc://'))):
return False
return True
@staticmethod
def _inGke():
"""When running in GKE, the environment variable will be set."""
return _GKE_ENV_VARIABLE in os.environ
@staticmethod
def _gkeEndpoints():
return os.environ[_GKE_ENV_VARIABLE]
@staticmethod
def _envVarFallback():
if _DEFAULT_ENV_VARIABLE in os.environ:
return os.environ[_DEFAULT_ENV_VARIABLE]
return None
@staticmethod
def _environmentDiscoveryUrl():
return os.environ.get(_DISCOVERY_SERVICE_URL_ENV_VARIABLE)
def __init__(self,
tpu=None,
zone=None,
project=None,
job_name='worker',
coordinator_name=None,
coordinator_address=None,
credentials='default',
service=None,
discovery_url=None):
"""Creates a new TPUClusterResolver object.
The ClusterResolver will then use the parameters to query the Cloud TPU APIs
for the IP addresses and ports of each Cloud TPU listed.
Args:
tpu: Either a string, or a list of strings corresponding to the TPUs to
use. If the single string is the empty string, the string 'local', or a
string that begins with 'grpc://' or '/bns', then it is assumed to not
correspond with a Cloud TPU and will instead be passed as the session
master and no ClusterSpec propagation will be done.
zone: Zone where the TPUs are located. If omitted or empty, we will assume
that the zone of the TPU is the same as the zone of the GCE VM, which we
will try to discover from the GCE metadata service.
project: Name of the GCP project containing Cloud TPUs. If omitted or
empty, we will try to discover the project name of the GCE VM from the
GCE metadata service.
job_name: Name of the TensorFlow job the TPUs belong to.
coordinator_name: The name to use for the coordinator. Set to None if the
coordinator should not be included in the computed ClusterSpec.
coordinator_address: The address of the coordinator (typically an ip:port
pair). If set to None, a TF server will be started. If coordinator_name
is None, a TF server will not be started even if coordinator_address is
None.
credentials: GCE Credentials. If None, then we use default credentials
        from the oauth2client library.
service: The GCE API object returned by the googleapiclient.discovery
function. If you specify a custom service object, then the credentials
parameter will be ignored.
discovery_url: A URL template that points to the location of
the discovery service. It should have two parameters {api} and
{apiVersion} that when filled in produce an absolute URL to the
discovery document for that service. The environment variable
'TPU_API_DISCOVERY_URL' will override this.
Raises:
ImportError: If the googleapiclient is not installed.
ValueError: If no TPUs are specified.
"""
if isinstance(tpu, list):
if not tpu:
raise ValueError('At least one TPU must be specified.')
if len(tpu) != 1:
raise NotImplementedError(
'Using multiple TPUs in a single session is not yet implemented')
tpu = tpu[0]
in_gke = self._inGke()
# When using GKE with Cloud TPUs, the env variable will be set.
if tpu is None:
if in_gke:
tpu = self._gkeEndpoints()
else:
tpu = self._envVarFallback()
if tpu is None:
raise ValueError('Please provide a TPU Name to connect to.')
self._tpu = compat.as_bytes(tpu) # self._tpu is always bytes
    # By default the task_type is 'worker' and the task_index is 0 (which is the
# first worker in the task).
self.task_type = job_name
self.task_index = 0
if tpu.startswith('grpc://'):
# Cloud environment, where we are using GRPC to communicate to TPUs.
self.environment = ''
elif tpu == 'local' or not tpu:
# Google environment, where the TPU is attached to the host.
self.environment = 'google'
elif tpu.startswith('/bns'):
# Google environment, where we reach the TPU through BNS.
self.environment = 'google'
# If TPU is in the Google environment or exists locally, we don't use any
# RPC layer.
if tpu.startswith('/bns') or tpu == 'local' or not tpu:
self.rpc_layer = None
else:
self.rpc_layer = 'grpc'
# Setting this overrides the return value of self._shouldResolve()
self._should_resolve_override = None
# We strip out the protocol if it is included, and override the
# shouldResolve function to never resolve. We are adding the protocol back
# in later in self.master().
if self.rpc_layer is not None and tpu.startswith(self.rpc_layer + '://'):
tpu = tpu[len(self.rpc_layer + '://'):]
self._tpu = tpu
self._should_resolve_override = False
# Whether we should actually attempt to contact Cloud APIs
should_resolve = self._shouldResolve()
# We error out if we are in a non-Cloud environment which cannot talk to the
# Cloud APIs using the standard class and a special object is not passed in.
self._service = service
if (self._service is None and should_resolve and
not _GOOGLE_API_CLIENT_INSTALLED):
raise ImportError('googleapiclient and oauth2client must be installed '
'before using the TPU cluster resolver. Execute: '
'`pip install --upgrade google-api-python-client` '
'and `pip install --upgrade oauth2client` to '
'install with pip.')
# We save user-passed credentials, unless the user didn't pass in anything.
self._credentials = credentials
if (credentials == 'default' and should_resolve and
_GOOGLE_API_CLIENT_INSTALLED):
self._credentials = None
# Automatically detect project and zone if unspecified.
if not project and should_resolve:
project = compat.as_str(
self._requestComputeMetadata('project/project-id'))
if not zone and should_resolve:
zone_path = compat.as_str(self._requestComputeMetadata('instance/zone'))
zone = zone_path.split('/')[-1]
self._project = project
self._zone = zone
self._discovery_url = self._environmentDiscoveryUrl() or discovery_url
self._coordinator_name = coordinator_name
if (coordinator_name and not coordinator_address and
(should_resolve or in_gke)):
self._start_local_server()
else:
self._coordinator_address = coordinator_address
def master(self, task_type=None, task_index=None, rpc_layer=None):
"""Get the Master string to be used for the session.
In the normal case, this returns the grpc path (grpc://1.2.3.4:8470) of
first instance in the ClusterSpec returned by the cluster_spec function.
If a non-TPU name is used when constructing a TPUClusterResolver, that will
be returned instead (e.g. If the tpus argument's value when constructing
this TPUClusterResolver was 'grpc://10.240.1.2:8470',
'grpc://10.240.1.2:8470' will be returned).
Args:
task_type: (Optional, string) The type of the TensorFlow task of the
master.
task_index: (Optional, integer) The index of the TensorFlow task of the
master.
rpc_layer: (Optional, string) The RPC protocol TensorFlow should use to
communicate with TPUs.
Returns:
string, the connection string to use when creating a session.
Raises:
ValueError: If none of the TPUs specified exists.
"""
if self._shouldResolve():
# We are going to communicate with the Cloud TPU APIs to get a Cluster.
cluster_spec = self.cluster_spec()
if task_type is not None and task_index is not None:
# task_type and task_index is from the function parameter
master = cluster_spec.task_address(task_type, task_index)
elif self.task_type is not None and self.task_index is not None:
# task_type and task_index is from the object
master = cluster_spec.task_address(self.task_type, self.task_index)
else:
# by default we take the first item in the cluster with the right name
job_tasks = cluster_spec.job_tasks(self.task_type)
if not job_tasks:
raise ValueError('No TPUs with the specified names exist.')
master = job_tasks[0]
else:
if isinstance(self._tpu, (bytes, bytearray)):
master = self._tpu.split(compat.as_bytes(_ENDPOINTS_SEPARATOR))[0]
else:
master = self._tpu.split(_ENDPOINTS_SEPARATOR)[0]
return format_master_url(master, rpc_layer or self.rpc_layer)
def get_master(self):
return self.master()
def get_job_name(self):
if self._shouldResolve():
return self.task_type
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest TPU information.
We retrieve the information from the GCE APIs every time this method is
called.
Returns:
A ClusterSpec containing host information returned from Cloud TPUs.
Raises:
RuntimeError: If the provided TPU is not healthy.
"""
############################################################################
# There are 5 potential cases this code must handle:
# 1. [Normal case.] We should resolve the TPU name to a set of tasks, and
# a. Create a ClusterSpec that includes the coordinator job
# b. Create a ClusterSpec without the coordinator job.
# 2. [GKE / No API Access.] We should not resolve the TPU name to a set of
# tasks and
# a. Create a ClusterSpec with the coordinator
# b. Create a ClusterSpec without the coordinator
# 3. [Other (legacy non-gRPC).] We should return an empty ClusterSpec.
############################################################################
if self._shouldResolve():
# Case 1.
full_name = 'projects/%s/locations/%s/nodes/%s' % (
self._project, self._zone, compat.as_text(self._tpu))
service = self._tpuService()
request = service.projects().locations().nodes().get(name=full_name)
response = request.execute()
if 'state' in response and response['state'] != 'READY':
raise RuntimeError('TPU "%s" is not yet ready; state: "%s"' %
(compat.as_text(self._tpu), response['state']))
if 'health' in response and response['health'] != 'HEALTHY':
raise RuntimeError('TPU "%s" is unhealthy: "%s"' %
(compat.as_text(self._tpu), response['health']))
if 'networkEndpoints' in response:
worker_list = [
'%s:%s' % (endpoint['ipAddress'], endpoint['port'])
for endpoint in response['networkEndpoints']
]
else:
# Fall back to the deprecated response format
instance_url = '%s:%s' % (response['ipAddress'], response['port'])
worker_list = [instance_url]
cluster_spec = {self.task_type: worker_list}
else:
if self.rpc_layer is None:
# Case 3.
return None
# Case 2.
tpus = []
for tpu in self._tpu.split(_ENDPOINTS_SEPARATOR):
# We are working around the fact that GKE environment variable that is
# supplied to us has the protocol string embedded in it, but we want
# to strip it out for the ClusterSpec.
if (self.rpc_layer is not None and
tpu.startswith(self.rpc_layer + '://')):
tpus.append(tpu[len(self.rpc_layer + '://'):])
else:
tpus.append(tpu)
cluster_spec = {self.task_type: tpus}
if self._coordinator_address:
# {1, 2}.a
cluster_spec[self._coordinator_name] = [self._coordinator_address]
return server_lib.ClusterSpec(cluster_spec)
def num_accelerators_per_worker(self, session_config=None):
"""Returns the number of TPU cores per worker.
This defaults to 8 for all current TPU configurations, and we do not need
to query any remote systems for this.
Args:
session_config: Unused. Not currently necessary to query anything as this
number is 8 for all TPU configurations.
"""
del session_config # Unused. Not necessary to query anything.
return 8
def _start_local_server(self):
address = self._requestComputeMetadata('instance/network-interfaces/0/ip')
self._server = server_lib.Server(
{
'local': ['0.0.0.0:0']
}, protocol='grpc', config=None, start=True)
# self._server.target is of the form: grpc://ipaddress:port
target = compat.as_bytes(self._server.target)
splits = target.split(compat.as_bytes(':'))
assert len(splits) == 3, self._server.target
assert splits[0] == compat.as_bytes('grpc'), self._server.target
self._coordinator_port = compat.as_text(splits[2])
    self._coordinator_address = '%s:%s' % (
        compat.as_text(address), compat.as_text(self._coordinator_port))
def __deepcopy__(self, memo):
# TODO(b/73668574): Remove this once RunConfig avoids performing deepcopy.
return self
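# Hedged usage example (editor's addition): passing an explicit grpc:// endpoint
# makes the resolver skip the Cloud TPU API entirely, so this sketch needs no
# GCP credentials; the address below is a made-up placeholder.
if __name__ == '__main__':
  _resolver = TPUClusterResolver(tpu='grpc://10.240.1.2:8470')
  print(_resolver.master())                  # grpc://10.240.1.2:8470
  print(_resolver.cluster_spec().as_dict())  # {'worker': ['10.240.1.2:8470']}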
|
the-stack_0_27904
|
#!/usr/bin/env python3
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from utils import display_num, convert_num, download_image
from tweet import twitter_post, twitter_post_image
module = "Spotify"
def login():
"""Logs in to Spotify
Client credential authorization flow
The following API keys are needed to be set as environment variables:
* SPOTIPY_CLIENT_ID
* SPOTIPY_CLIENT_SECRET
You can request API keys on the `Spotify Developer Dashboard <https://developer.spotify.com/dashboard/>`_
See https://spotipy.readthedocs.io/en/2.16.1/#authorization-code-flow for more details
"""
print("[{}] Logging in...".format(module))
auth_manager = SpotifyClientCredentials()
spotify = spotipy.Spotify(auth_manager=auth_manager)
return spotify
def get_artist(spotify, artist, hashtags):
"""Gets details about an artist
It tweets if the artist reaches a new goal of followers on Spotify
Args:
- spotify: The Spotify instance
- artist: dictionary that contains all the data about the single artist
- hashtags: hashtags to append to the Tweet
Returns:
an artist dictionary with updated profile details
"""
# Generate URI
artist["uri"] = 'spotify:artist:' + artist["id"]
artist_details = spotify.artist(artist["uri"])
artist["name"] = artist_details["name"]
print("[{}] ({}) Getting details... (ID: {})".format(module, artist["name"], artist["id"]))
artist["popularity"] = artist_details["popularity"]
artist["genres"] = artist_details["genres"]
    try:
        artist["image"] = artist_details["images"][0]["url"]
    except Exception:
        # Some artists have no profile image at all; fall back to None
        artist["image"] = None
        print("WARNING: Cannot fetch image of {}".format(artist["name"]))
if convert_num("100K", artist_details["followers"]["total"]) > convert_num("100K", artist["followers"]):
artist["followers"] = artist_details["followers"]["total"]
twitter_post_image(
"{} reached {} followers on #Spotify\n{}\n{}".format(artist["name"], display_num(artist["followers"], decimal=True), link_artist(artist["id"]), hashtags),
download_image(artist["image"]),
display_num(artist["followers"], short=True, decimal=True),
text_size=125
)
return artist
def get_discography(spotify, artist):
"""Gets all the releases of an artist
    A release is a single, EP, mini-album or album: Spotify simply calls them all "albums"
Example:
* `DDU-DU-DDU-DU <https://open.spotify.com/album/2811CkGSYR9SUtIoFWWiTk>`_ of BLACKPINK is a **single**
* `SQUARE UP <https://open.spotify.com/album/0wOiWrujRbxlKEGWRQpKYc>`_ of BLACKPINK is a **mini-album**
* `THE ALBUM <https://open.spotify.com/album/71O60S5gIJSIAhdnrDIh3N>`_ of BLACKPINK is (really) an **album**
It also gets releases where the artist is **featured**.
Example:
* `Sour Candy <https://open.spotify.com/album/6y6lP1WRfqEhv8RLy4ufZB>`_ is a song of Lady Gaga, but BLACKPINK are featured
Spotify also makes many "clones" of the same album: there could be extended albums or albums that later added tracks.
Each one of this makes a duplicate of the same album.
So this function also tries to clean up the discography by removing duplicates.
Args:
- spotify: The Spotify instance
- artist: dictionary that contains all the data about the single artist
Returns:
        a dictionary with updated discography details
"""
print("[{}] ({}) Fetching discography...".format(module, artist["name"]))
# ALBUM DETAILS
albumResults = spotify.artist_albums(artist["uri"], limit=50)
albumResults = albumResults['items']
z = 0
# Loop over album
collection = []
for album in albumResults:
# TRACK DETAILS
trackResults = spotify.album_tracks(album['id'])
trackResults = trackResults['items']
## Loop over tracks
tracks = []
for track in trackResults:
            artists_names = []
artists_ids = []
for artist_track in track['artists']:
artists_names.append(artist_track['name'])
artists_ids.append(artist_track['id'])
## Extract track data and fill database
if artist["id"] in artists_ids:
                z += 1
tracks.append({'name':track['name'],
'id': track['id']}
)
if album['album_group'] != 'appears_on':
collection.append({'name':album['name'],
'id': album['id'],
'release_date':album['release_date'],
'total_tracks':album['total_tracks'],
'type':album['album_group'],
'image':album['images'][0]['url'],
'tracks': tracks}
)
else:
collection.append({'name':album['name'],
'id': album['id'],
'release_date':album['release_date'],
'total_tracks':album['total_tracks'],
'type':album['album_group'],
'image':album['images'][0]['url'],
'artist_collab':album['artists'][0]['name'],
'tracks': tracks}
)
print("[{}] ({}) Fetched {} songs".format(module, artist["name"], z))
# Remove duplicates
seen = set()
result = []
z = 0
for album in collection:
key = album['name']
if key in seen:
continue
result.append(album)
z += 1
seen.add(key)
print("[{}] ({}) After removing duplicates we have {} releases (singles/EPs/albums)".format(module, artist["name"], z))
# Uncomment for debug: it prints all the albums and singles fetched
#
# for album in result:
# print(album["name"].upper())
# for track in album["tracks"]:
# print(track["name"])
# print()
return result
def check_new_songs(artist, collection, hashtags):
"""Checks if there is any new song
It compares the old discography of the artist with the new (already fetched) discography.
It tweets if there is a new release or featuring of the artist.
Args:
- artist: dictionary that contains all the data about the single artist
- collection: dictionary that contains all the updated discography of the artist
- hashtags: hashtags to append to the Tweet
Returns:
an artist dictionary with updated discography details
"""
print("[{}] ({}) Checking new songs...".format(module, artist["name"]))
# Skip check if discography is empty
if "discography" in artist:
old = artist["discography"]
for album in collection:
found = False
for old_album in old:
if album["name"] == old_album["name"]:
found = True
break
if not found:
if album["type"] != 'appears_on':
twitter_post_image(
"{} released a new {} on #Spotify: {}\n{}\n{}".format(artist["name"], album["type"], album["name"], link_album(album["id"]), hashtags),
download_image(album["image"]),
None
)
else:
twitter_post("{} appeared on {} by {} with the song {}\n{}\n{} #spotify".format(artist["name"], album["name"], album["artist_collab"], album["tracks"][0]["name"], link_album(album["id"]), hashtags))
artist["discography"] = collection
return artist
def link_album(album_id):
"""Generates a link to an album
Args:
album_id: ID of the album
Returns:
The link to that album on Spotify
"""
return "https://open.spotify.com/album/" + album_id
def link_artist(artist_id):
"""Generates a link to an artist
Args:
artist_id: ID of the artist
Returns:
The link to that artist on Spotify
"""
return "https://open.spotify.com/artist/" + artist_id
def spotify_data(group):
"""Runs all the Spotify related tasks
It scrapes data from Spotify for the whole group and the single artists
Args:
group: dictionary with the data of the group to scrape
Returns:
the same group dictionary with updated data
"""
print("[{}] Starting tasks...".format(module))
spotify = login()
group["spotify"] = get_artist(spotify, group["spotify"], group["hashtags"])
collection = get_discography(spotify, group["spotify"])
group["spotify"] = check_new_songs(group["spotify"], collection, group["hashtags"])
for member in group["members"]:
if "spotify" in member:
member["spotify"] = get_artist(spotify, member["spotify"], member["hashtags"])
collection = get_discography(spotify, member["spotify"])
member["spotify"] = check_new_songs(member["spotify"], collection, member["hashtags"])
print()
return group
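# Hedged usage sketch (editor's addition): spotify_data() expects a nested dict
# shaped like the one below (keys inferred from the accesses above). The artist
# id is a placeholder, and a real run performs Spotify and Twitter API calls, so
# the call is only made under the __main__ guard.
if __name__ == "__main__":
    group = {
        "hashtags": "#example",
        "spotify": {"id": "<spotify-artist-id>", "followers": 0},
        "members": [],
    }
    group = spotify_data(group)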
|
the-stack_0_27905
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 4 11:42:49 2021
@author: Easin
"""
# Reads an integer n and prints two numbers x, y with x + y == n, using
# y = 9 when n is odd and y = 4 when n is even. For large enough n both
# summands are composite, which appears to be the goal of this snippet.
in1 = int(input())
if in1 % 2 != 0:
    x = in1 - 9
    y = 9
else:
    x = in1 - 4
    y = 4
print(x, y)
|
the-stack_0_27906
|
"""
Define a set of classes that function like forecasters, akin
to the R forecast package.
"""
import copy
import itertools
from typing import List, Tuple, Callable
import pandas as pd
import numpy as np
import scipy.linalg as spla
import tensorly as tl
from scipy.fftpack import rfft, irfft, dct
from tensorly.decomposition import parafac, tucker
from .utils import mad, multifold, rmse
TENSOR_MAX_ITER = 500
class ForecasterResult:
def __init__(
self,
inputs: pd.Series,
forecaster: "SeasonalForecaster",
in_sample_approx: pd.Series,
forecast: pd.Series,
in_errors: List[float],
out_errors: List[float],
nr_total_params: int,
):
self.inputs = inputs
self.forecaster = forecaster
self.in_sample_approx = in_sample_approx
self.forecast = forecast
self.in_errors = in_errors
self.out_errors = out_errors
self.nr_total_params = nr_total_params
class SeasonalForecaster:
def __init__(
self,
nr_params: int,
folds: Tuple[int],
error_callbacks: Tuple[Callable] = (rmse, mad),
):
"""
:param nr_params: number of parameters for the forecaster. specific definitions change
by forecaster.
:param folds: a tuple representing the folds of the seasonality. faster period comes first.
i.e., (24, 7) not (7, 24)
:param error_callbacks:
"""
self.nr_params = nr_params
self.folds = folds
self.error_callbacks = error_callbacks
self.nr_total_params = self.nr_params
def run_forecast(self, vals: pd.Series, nr_in_cycles: int) -> Tuple[np.ndarray, np.ndarray]:
        raise NotImplementedError()
def __call__(self, vals: pd.Series, nr_in_cycles: int, **kwargs):
assert nr_in_cycles > 1, "number of cycles in sample must be > 1"
assert nr_in_cycles * np.prod(self.folds) > len(vals) / 2, (
"provide more data in sample then out of sample"
)
in_sample_approx, forecast = self.run_forecast(vals, nr_in_cycles)
nr_in_steps = int(nr_in_cycles * np.prod(self.folds))
data_in, data_out = (
vals.values[:nr_in_steps], vals.values[nr_in_steps:]
)
in_errors = [
callback(data_in, in_sample_approx) for callback in self.error_callbacks
]
out_errors = [
callback(data_out, forecast) for callback in self.error_callbacks
]
return ForecasterResult(
inputs=vals,
forecaster=self,
in_sample_approx=pd.Series(in_sample_approx, index=vals.index[:nr_in_steps]),
forecast=pd.Series(forecast, index=vals.index[nr_in_steps:]),
in_errors=in_errors,
out_errors=out_errors,
nr_total_params=self.nr_total_params,
)
class DCTForecaster(SeasonalForecaster):
def __init__(
self,
nr_params: int,
folds: Tuple[int],
error_callbacks: Tuple[Callable] = (rmse, mad),
):
"""
:param nr_params: number of DCT components to forecast with.
"""
super().__init__(nr_params, folds, error_callbacks)
def run_forecast(self, vals: pd.Series, nr_in_cycles: int):
nr_in_steps = int(nr_in_cycles * np.prod(self.folds))
data_in, data_out = (
vals.values[:nr_in_steps], vals.values[nr_in_steps:]
)
z = dct(data_in) # take the DCT
# get the frequencies with most magnitude
top_n = np.argsort(np.abs(z))[-self.nr_params:]
mask = np.zeros(len(z), dtype=bool)
mask[top_n] = True
# zero out the other frequencies
z_masked = np.array(z)
z_masked[~mask] = 0
# reconstruct
y = dct(z_masked, type=3) / len(z) / 2
return (
y, # in-sample reconstruction
y[:len(data_out)], # forecasts
)
class DFTForecaster(SeasonalForecaster):
def __init__(
self,
nr_params: int,
folds: Tuple[int],
error_callbacks: Tuple[Callable] = (rmse, mad),
):
"""
:param nr_params: number of DFT components to forecast with.
"""
super().__init__(nr_params, folds, error_callbacks)
def run_forecast(self, vals: pd.Series, nr_in_cycles: int):
nr_steps = len(vals)
nr_in_steps = int(nr_in_cycles * np.prod(self.folds))
data_in, data_out = (
vals.values[:nr_in_steps], vals.values[nr_in_steps:]
)
z = rfft(data_in) # take the DCT
# get the frequencies with most magnitude
top_n = np.argsort(np.abs(z))[-self.nr_params:]
mask = np.zeros(len(z), dtype=bool)
mask[top_n] = True
# zero out the other frequencies
z_masked = np.array(z)
z_masked[~mask] = 0
# reconstruct
y = irfft(z_masked)
return (
y, # in-sample reconstruction
y[:(nr_steps - nr_in_steps)], # forecasts
)
class TensorForecaster(SeasonalForecaster):
def tensor_reconstruction(self, data_in: np.ndarray) -> np.ndarray:
        raise NotImplementedError()
def run_forecast(self, vals: pd.Series, nr_in_cycles: int) -> Tuple[np.ndarray, np.ndarray]:
nr_in_steps = int(nr_in_cycles * np.prod(self.folds))
nr_out_steps = len(vals) - nr_in_steps
nr_steps_per_cycle = np.prod(self.folds)
nr_out_cycles = int(np.ceil(nr_out_steps / nr_steps_per_cycle))
data_in, _ = (
vals.values[:nr_in_steps], vals.values[nr_in_steps:]
)
in_sample_approx = self.tensor_reconstruction(data_in)
cycle_approx = in_sample_approx[-nr_steps_per_cycle:]
forecast = np.tile(cycle_approx, nr_out_cycles)[:nr_out_steps]
return in_sample_approx, forecast
class CPForecaster(TensorForecaster):
def __init__(
self,
nr_params: int,
folds: Tuple[int],
error_callbacks: Tuple[Callable] = (rmse, mad),
alpha: float = 1.0,
):
"""
Parameters
----------
nr_params
rank of tensor
alpha
smoothing parameter for the time factor
"""
super().__init__(nr_params, folds, error_callbacks)
self.alpha = alpha
self.nr_total_params = int(nr_params * np.sum(folds))
def tensor_reconstruction(self, data_in: np.ndarray) -> np.ndarray:
tensor = multifold(data_in, list(self.folds))
tensor = copy.deepcopy(tensor)
fac = parafac(tensor, rank=self.nr_params, n_iter_max=TENSOR_MAX_ITER, tol=1.0e-13, linesearch=True)
if self.alpha < 1:
time_factor = fac.factors[0]
for i in range(time_factor.shape[0]):
time_factor[i] = time_factor[i] * self.alpha + time_factor[i-1] * (1 - self.alpha)
fac.factors = [time_factor] + fac.factors[1:]
return tl.cp_to_tensor(fac).ravel()
class TuckerForecaster(TensorForecaster):
def __init__(
self,
nr_params: int,
folds: Tuple[int],
error_callbacks: Tuple[Callable] = (rmse, mad),
alpha: float = 1.0,
):
"""
Parameters
----------
nr_params
rank of tensor
alpha
smoothing parameter for the time factor
"""
super().__init__(nr_params, folds, error_callbacks)
ranks_ = self._get_tucker_ranks()
self.alpha = alpha
self.nr_total_params = int(
np.sum(np.array(self.folds) * np.array(ranks_[1:])) + np.prod(ranks_[1:])
)
def _get_tucker_ranks(self):
ranks = np.minimum(
list(self.folds), [self.nr_params for _ in range(len(self.folds))]
)
return np.r_[1, ranks].astype(int).tolist()
def tensor_reconstruction(self, data_in: np.ndarray) -> np.ndarray:
tensor = multifold(data_in, list(self.folds))
tensor = copy.deepcopy(tensor)
assert isinstance(self.nr_params, int)
ranks = self._get_tucker_ranks()
core, factors = tucker(tensor, ranks=ranks, n_iter_max=TENSOR_MAX_ITER, tol=1.0e-13)
if self.alpha < 1:
time_factor = factors[0]
for i in range(time_factor.shape[0]):
time_factor[i] = time_factor[i] * self.alpha + time_factor[i-1] * (1 - self.alpha)
factors = [time_factor] + factors[1:]
return tl.tucker_to_tensor((core, factors)).ravel()
class SmoothingCPForecaster(CPForecaster):
def __init__(self, alpha=0.5, **kwargs):
super().__init__(alpha=alpha, **kwargs)
class SmoothingTuckerForecaster(TuckerForecaster):
def __init__(self, alpha=0.5, **kwargs):
super().__init__(alpha=alpha, **kwargs)
class HoltWintersForecaster(TensorForecaster):
def __init__(
self,
folds: Tuple[int],
error_callbacks: Tuple[Callable] = (rmse, mad),
nr_params: int = 1,
alpha: float = 0.5,
):
"""
"""
super().__init__(nr_params, folds, error_callbacks)
self.alpha = alpha
self.nr_total_params = int(np.prod(folds))
def tensor_reconstruction(self, data_in: np.ndarray) -> np.ndarray:
tensor = multifold(data_in, list(self.folds))
tensor = copy.deepcopy(tensor)
for i in range(1, tensor.shape[0]):
tensor[i] = tensor[i] * self.alpha + tensor[i-1] * (1 - self.alpha)
return tensor.ravel()
class FourierBasisRegressionForecaster(SeasonalForecaster):
def __init__(
self,
folds: Tuple[int],
error_callbacks: Tuple[Callable] = (rmse, mad),
nr_params: int = 1,
):
"""
Parameters
----------
nr_params:
number of Fourier basis components to generate for each fold
"""
super().__init__(nr_params, folds, error_callbacks)
self.nr_total_params = int(len(folds) * nr_params * 2 + 1)
def run_forecast(self, vals: pd.Series, nr_in_cycles: int) -> Tuple[np.ndarray, np.ndarray]:
nr_in_steps = int(nr_in_cycles * np.prod(self.folds))
t = np.arange(len(vals), dtype=float)
periods = [np.prod(self.folds[:n]) for n in range(1, len(self.folds) + 1)]
basis_tuples = [
(np.sin(2 * np.pi * t * n / P), np.cos(2 * np.pi * t * n / P)) for n, P in
itertools.product(range(1, self.nr_params + 1), periods)
]
basis = np.concatenate([np.array(x) for x in basis_tuples])
X = np.concatenate([np.ones(basis.shape[-1])[np.newaxis, :], basis]).T
y = vals.values
y_in = y[:nr_in_steps]
X_in, X_out = X[:nr_in_steps], X[nr_in_steps:]
beta, _, _, _ = spla.lstsq(X_in, y_in)
return X_in.dot(beta), X_out.dot(beta)
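# Hedged usage example (editor's addition): a small synthetic run of the DCT
# forecaster above. The fold sizes (hourly data with a daily/weekly pattern),
# series length and component count are arbitrary illustration choices; since
# this module uses package-relative imports, run it via "python -m" from inside
# the package rather than as a standalone script.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t = np.arange(4 * 24 * 7)
    y = 10 + np.sin(2 * np.pi * t / 24) + 0.1 * rng.standard_normal(len(t))
    series = pd.Series(y, index=pd.date_range("2021-01-01", periods=len(t), freq="H"))
    result = DCTForecaster(nr_params=8, folds=(24, 7))(series, nr_in_cycles=3)
    print("in-sample  (rmse, mad):", result.in_errors)
    print("out-sample (rmse, mad):", result.out_errors)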
|
the-stack_0_27907
|
import json
import praw
with open('.secrets/tokens.json') as f:
secrets = json.load(f)
reddit = praw.Reddit(
user_agent=secrets['user_agent'],
client_id=secrets['client_id'],
client_secret=secrets['client_secret'],
)
thread_details = (('2019', 'cib77j'), ('2021', 'm20rd1'))
for year, thread_id in thread_details:
submission = reddit.submission(id=thread_id)
submission.comments.replace_more(limit=None)
op_file = f'top_comments_{year}.txt'
with open(op_file, 'w') as f:
for top_level_comment in submission.comments:
f.write(top_level_comment.body + '\n')
|
the-stack_0_27908
|
import pytest
from typing import Callable
from _pytest.pytester import RunResult
@pytest.mark.repeat(3)
def test_cli_start(run: Callable[..., RunResult]):
"""
Startup of cli should not take longer than n seconds
"""
import time
start = time.time()
run("--help")
end = time.time()
duration = end - start
# When run in parallel, it takes a little longer
assert duration <= 5
def test_data_convert_help(run: Callable[..., RunResult]):
output = run("--help")
help_text = """usage: rasa [-h] [--version]
{init,run,shell,train,interactive,test,visualize,data,x} ..."""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
|
the-stack_0_27909
|
import fractions
import logging
import math
from itertools import tee
from struct import pack, unpack_from
import av
from ..mediastreams import VIDEO_TIME_BASE, convert_timebase
logger = logging.getLogger('codec.h264')
MAX_FRAME_RATE = 30
PACKET_MAX = 1300
NAL_TYPE_FU_A = 28
NAL_TYPE_STAP_A = 24
NAL_HEADER_SIZE = 1
FU_A_HEADER_SIZE = 2
LENGTH_FIELD_SIZE = 2
STAP_A_HEADER_SIZE = NAL_HEADER_SIZE + LENGTH_FIELD_SIZE
def pairwise(iterable):
a, b = tee(iterable)
next(b, None)
return zip(a, b)
class H264PayloadDescriptor:
def __init__(self, first_fragment):
self.first_fragment = first_fragment
def __repr__(self):
return 'H264PayloadDescriptor(FF={})'.format(self.first_fragment)
@classmethod
def parse(cls, data):
output = bytes()
# NAL unit header
if len(data) < 2:
raise ValueError('NAL unit is too short')
nal_type = data[0] & 0x1f
f_nri = data[0] & (0x80 | 0x60)
pos = NAL_HEADER_SIZE
if nal_type in range(1, 24):
# single NAL unit
output = bytes([0, 0, 0, 1]) + data
obj = cls(first_fragment=True)
elif nal_type == NAL_TYPE_FU_A:
# fragmentation unit
original_nal_type = data[pos] & 0x1f
first_fragment = bool(data[pos] & 0x80)
pos += 1
if first_fragment:
original_nal_header = bytes([f_nri | original_nal_type])
output += bytes([0, 0, 0, 1])
output += original_nal_header
output += data[pos:]
obj = cls(first_fragment=first_fragment)
elif nal_type == NAL_TYPE_STAP_A:
# single time aggregation packet
offsets = []
while pos < len(data):
if len(data) < pos + LENGTH_FIELD_SIZE:
raise ValueError('STAP-A length field is truncated')
nulu_size = unpack_from('!H', data, pos)[0]
pos += LENGTH_FIELD_SIZE
offsets.append(pos)
pos += nulu_size
if len(data) < pos:
raise ValueError('STAP-A data is truncated')
offsets.append(len(data) + LENGTH_FIELD_SIZE)
for start, end in pairwise(offsets):
end -= LENGTH_FIELD_SIZE
output += bytes([0, 0, 0, 1])
output += data[start:end]
obj = cls(first_fragment=True)
else:
raise ValueError('NAL unit type %d is not supported' % nal_type)
return obj, output
class H264Decoder:
def __init__(self):
self.codec = av.CodecContext.create('h264', 'r')
def decode(self, encoded_frame):
try:
packet = av.Packet(encoded_frame.data)
packet.pts = encoded_frame.timestamp
packet.time_base = VIDEO_TIME_BASE
frames = self.codec.decode(packet)
except av.AVError as e:
logger.warning('failed to decode, skipping package: ' + str(e))
return []
return frames
class H264Encoder:
def __init__(self):
self.codec = None
self.frame_idx = 0
@staticmethod
def _packetize_fu_a(data):
available_size = PACKET_MAX - FU_A_HEADER_SIZE
payload_size = len(data) - NAL_HEADER_SIZE
num_packets = math.ceil(payload_size / available_size)
num_larger_packets = payload_size % num_packets
package_size = payload_size // num_packets
f_nri = data[0] & (0x80 | 0x60) # fni of original header
nal = data[0] & 0x1f
fu_indicator = f_nri | NAL_TYPE_FU_A
fu_header_end = bytes([fu_indicator, nal | 0x40])
fu_header_middle = bytes([fu_indicator, nal])
fu_header_start = bytes([fu_indicator, nal | 0x80])
fu_header = fu_header_start
packages = []
offset = NAL_HEADER_SIZE
while offset < len(data):
if num_larger_packets > 0:
num_larger_packets -= 1
payload = data[offset:offset+package_size+1]
offset += package_size+1
else:
payload = data[offset:offset+package_size]
offset += package_size
if offset == len(data):
fu_header = fu_header_end
packages.append(fu_header + payload)
fu_header = fu_header_middle
assert offset == len(data), 'incorrect fragment data'
return packages
@staticmethod
def _packetize_stap_a(data, packages_iterator):
counter = 0
available_size = PACKET_MAX - STAP_A_HEADER_SIZE
stap_header = NAL_TYPE_STAP_A | (data[0] & 0xe0)
payload = bytes()
try:
nalu = data # with header
while len(nalu) <= available_size:
stap_header |= nalu[0] & 0x80
nri = nalu[0] & 0x60
if stap_header & 0x60 < nri:
stap_header = (stap_header & 0x9f | nri)
available_size -= LENGTH_FIELD_SIZE + len(nalu)
counter += 1
payload += pack('!H', len(nalu)) + nalu
nalu = next(packages_iterator)
if counter == 0:
nalu = next(packages_iterator)
except StopIteration:
nalu = None
if counter <= 1:
return data, nalu
else:
return bytes([stap_header]) + payload, nalu
@staticmethod
def _split_bitstream(buf):
        # TODO: write in a more pythonic way,
# translate from: https://github.com/aizvorski/h264bitstream/blob/master/h264_nal.c#L134
i = 0
while True:
while ((buf[i] != 0 or buf[i+1] != 0 or buf[i+2] != 0x01)
and (buf[i] != 0 or buf[i+1] != 0 or buf[i+2] != 0 or buf[i+3] != 0x01)):
i += 1 # skip leading zero
if i+4 >= len(buf):
return
if buf[i] != 0 or buf[i+1] != 0 or buf[i+2] != 0x01:
i += 1
i += 3
nal_start = i
while ((buf[i] != 0 or buf[i+1] != 0 or buf[i+2] != 0)
and (buf[i] != 0 or buf[i+1] != 0 or buf[i+2] != 0x01)):
i += 1
# FIXME: the next line fails when reading a nal that ends
# exactly at the end of the data
if i+3 >= len(buf):
nal_end = len(buf)
yield buf[nal_start:nal_end]
return # did not find nal end, stream ended first
nal_end = i
yield buf[nal_start:nal_end]
@classmethod
def _packetize(cls, packages):
packetized_packages = []
packages_iterator = iter(packages)
package = next(packages_iterator, None)
while package is not None:
if len(package) > PACKET_MAX:
packetized_packages.extend(cls._packetize_fu_a(package))
package = next(packages_iterator, None)
else:
packetized, package = cls._packetize_stap_a(package, packages_iterator)
packetized_packages.append(packetized)
return packetized_packages
def init_codec(self):
self.codec = av.CodecContext.create('libx264', 'w')
self.codec.width = 1280
self.codec.height = 720
self.codec.pix_fmt = 'yuv420p'
self.codec.time_base = fractions.Fraction(1, MAX_FRAME_RATE)
def __pack(self, frame):
        yield from self._split_bitstream(b''.join(p.to_bytes() for p in frame))
def encode(self, frame, force_keyframe=False):
if not self.codec:
self.init_codec()
#packages = self._encode_frame(frame, force_keyframe)
self.frame_idx += 1
timestamp = convert_timebase(self.frame_idx, self.codec.time_base, VIDEO_TIME_BASE)
pc = self.__pack(frame)
return self._packetize(pc), timestamp
"""
def _encode_frame(self, frame, force_keyframe):
if self.codec and (frame.width != self.codec.width or frame.height != self.codec.height):
self.codec = None
if self.codec is None:
self.codec = av.CodecContext.create('libx264', 'w')
self.codec.width = frame.width
self.codec.height = frame.height
self.codec.pix_fmt = 'yuv420p'
self.codec.time_base = fractions.Fraction(1, MAX_FRAME_RATE)
self.codec.options = {
'profile': 'baseline',
'level': '31',
'tune': 'zerolatency'
}
packages = self.codec.encode(frame)
yield from self._split_bitstream(b''.join(p.to_bytes() for p in packages))
def encode(self, frame, force_keyframe=False):
packages = self._encode_frame(frame, force_keyframe)
timestamp = convert_timebase(frame.pts, frame.time_base, VIDEO_TIME_BASE)
return self._packetize(packages), timestamp
"""
def h264_depayload(payload):
descriptor, data = H264PayloadDescriptor.parse(payload)
return data
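# Hedged usage example (editor's addition): parsing a single-NAL-unit payload
# (type 5, an IDR slice) just prepends the Annex B start code. The payload bytes
# are made up for illustration, and like the rest of this module the guard below
# relies on the package-relative imports resolving.
if __name__ == '__main__':
    payload = bytes([0x65]) + b'\x88\x84\x00\x10'
    descriptor, data = H264PayloadDescriptor.parse(payload)
    print(descriptor)                                        # H264PayloadDescriptor(FF=True)
    print(data.startswith(b'\x00\x00\x00\x01'), len(data))   # True 9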
|
the-stack_0_27910
|
from collections import defaultdict, namedtuple
from queue import Queue
from collections import deque
from copy import deepcopy
import inspect
from threading import Event
Parameter = namedtuple("Parameter", ["address", "value"])
def init_queue(initial_values):
queue = Queue()
for x in initial_values:
queue.put(x)
return queue
class Halt:
PARAMETERS = 0
def execute(self, intcode, parameters):
intcode.halted = True
return -1
class Add:
PARAMETERS = 3
def execute(self, intcode, parameters):
intcode[parameters[2].address] = parameters[0].value + parameters[1].value
return intcode.advance_pointer(self.PARAMETERS)
class Mul:
PARAMETERS = 3
def execute(self, intcode, parameters):
intcode[parameters[2].address] = parameters[0].value * parameters[1].value
return intcode.advance_pointer(self.PARAMETERS)
class Input:
PARAMETERS = 1
def execute(self, intcode, parameters):
intcode[parameters[0].address] = intcode.input_provider()
return intcode.advance_pointer(self.PARAMETERS)
class Output:
PARAMETERS = 1
def execute(self, intcode, parameters):
intcode.output(parameters[0].value)
return intcode.advance_pointer(self.PARAMETERS)
class JumpIfNotZero:
PARAMETERS = 2
def execute(self, intcode, parameters):
if parameters[0].value != 0:
return parameters[1].value
else:
return intcode.advance_pointer(self.PARAMETERS)
class JumpIfZero:
PARAMETERS = 2
def execute(self, intcode, parameters):
if parameters[0].value == 0:
return parameters[1].value
else:
return intcode.advance_pointer(self.PARAMETERS)
class LessThen:
PARAMETERS = 3
def execute(self, intcode, parameters):
intcode[parameters[2].address] = int(parameters[0].value < parameters[1].value)
return intcode.advance_pointer(self.PARAMETERS)
class Equal:
PARAMETERS = 3
def execute(self, intcode, parameters):
intcode[parameters[2].address] = int(parameters[0].value == parameters[1].value)
return intcode.advance_pointer(self.PARAMETERS)
class SetRelativeBaseOffset:
PARAMETERS = 1
def execute(self, intcode, parameters):
intcode.relative_base += parameters[0].value
return intcode.advance_pointer(self.PARAMETERS)
class Intcode:
INSTRUCTIONS = {
99: Halt,
1: Add,
2: Mul,
3: Input,
4: Output,
5: JumpIfNotZero,
6: JumpIfZero,
7: LessThen,
8: Equal,
9: SetRelativeBaseOffset,
}
def __init__(self, code, inputs=None, outputs=None, initial_memory=None):
self.code = code
self.memory = defaultdict(int)
self.inputs = inputs if inputs is not None else []
self.outputs = outputs if outputs is not None else []
self.pointer = 0
self.relative_base = 0
self.halted = False
self.shutdown_event = Event()
# initialize with given memory
if initial_memory:
for address, value in initial_memory.items():
self[address] = value
if callable(self.inputs):
self.input_provider = inputs
elif isinstance(self.inputs, Queue):
self.input_provider = self.inputs.get
elif isinstance(self.inputs, deque):
self.input_provider = self.inputs.popleft
elif isinstance(self.inputs, list):
self.input_provider = lambda: self.inputs.pop(0)
elif inspect.isgenerator(self.inputs):
self.input_provider = lambda: next(self.inputs)
else:
assert False, f"Unknown Input {type(self.inputs)}"
if isinstance(self.outputs, Queue):
self.output = self.outputs.put
self.output_ready = lambda: not self.outputs.empty()
self.output_consume = self.outputs.get
elif isinstance(self.outputs, deque):
self.output = self.outputs.append
self.output_ready = lambda: len(self.outputs) > 0
self.output_consume = self.outputs.popleft
elif isinstance(self.outputs, list):
self.output = self.outputs.append
self.output_ready = lambda: len(self.outputs) > 0
self.output_consume = lambda: self.outputs.pop(0)
else:
assert False, f"Unknown Output {type(self.outputs)}"
def __deepcopy__(self, memo):
copy = Intcode(
deepcopy(self.code),
inputs=deepcopy(self.inputs),
outputs=deepcopy(self.outputs),
initial_memory=deepcopy(self.memory),
)
copy.pointer = self.pointer
copy.relative_base = self.relative_base
copy.halted = self.halted
return copy
def run(self):
self.shutdown_event.clear()
while not self.halted and not self.shutdown_event.is_set():
self.step()
return self.outputs
def step(self):
self.pointer = self._execute_instruction_at(self.pointer)
def step_to_next_output(self):
while not self.halted and not self.output_ready():
self.step()
if self.halted:
return None
return self.output_consume()
def _execute_instruction_at(self, pointer):
instruction_code = str(self.code[pointer]).zfill(5)
opcode = int(instruction_code[-2:])
parameter_modes = instruction_code[:-2][::-1]
instruction = self.INSTRUCTIONS[opcode]()
# parse parameter w.r.t. the parameter mode
parameters = []
for i in range(instruction.PARAMETERS):
value = self.code[pointer + 1 + i] + (self.relative_base if parameter_modes[i] == "2" else 0)
if parameter_modes[i] == "1":
parameters.append(Parameter(value, value))
else:
parameters.append(Parameter(value, self[value]))
return instruction.execute(self, parameters)
def __getitem__(self, address):
return self.code[address] if address < len(self.code) else self.memory[address]
def __setitem__(self, address, value):
if address < len(self.code):
self.code[address] = value
else:
self.memory[address] = value
def advance_pointer(self, parameters):
return self.pointer + 1 + parameters
def shutdown(self):
self.shutdown_event.set()
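# Hedged usage example (editor's addition): the classic "echo" program reads one
# input value, writes it back out and halts; the program and input values here
# are illustrative.
if __name__ == "__main__":
    computer = Intcode([3, 0, 4, 0, 99], inputs=[42])
    assert computer.run() == [42]
    print("echo output:", computer.outputs)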
|
the-stack_0_27911
|
"""Handle connection information interface with non-secrets storage."""
import json
from enum import Enum
from typing import Any, Union
from marshmallow import fields, validate
from ...config.injection_context import InjectionContext
from ...messaging.models.base_record import BaseRecord, BaseRecordSchema
from ...messaging.valid import INDY_DID, INDY_RAW_PUBLIC_KEY, UUIDFour
from ...protocols.connections.v1_0.message_types import CONNECTION_INVITATION
from ...protocols.connections.v1_0.messages.connection_invitation import (
ConnectionInvitation,
)
from ...protocols.connections.v1_0.messages.connection_request import ConnectionRequest
from ...protocols.didcomm_prefix import DIDCommPrefix
from ...protocols.out_of_band.v1_0.messages.invitation import (
Invitation as OOBInvitation,
)
from ...storage.base import BaseStorage
from ...storage.record import StorageRecord
class ConnRecord(BaseRecord):
"""Represents a single pairwise connection."""
class Meta:
"""ConnRecord metadata."""
schema_class = "ConnRecordSchema"
class Role(Enum):
"""RFC 160 (inviter, invitee) = RFC 23 (responder, requester)."""
REQUESTER = ("invitee", "requester") # == RFC 23 initiator, RFC 434 receiver
RESPONDER = ("inviter", "responder") # == RFC 160 initiator(!), RFC 434 sender
@property
def rfc160(self):
"""Return RFC 160 (connection protocol) nomenclature."""
return self.value[0]
@property
def rfc23(self):
"""Return RFC 23 (DID exchange protocol) nomenclature."""
return self.value[1]
@classmethod
def get(cls, label: Union[str, "ConnRecord.Role"]):
"""Get role enum for label."""
if isinstance(label, str):
for role in ConnRecord.Role:
if label in role.value:
return role
elif isinstance(label, ConnRecord.Role):
return label
return None
def flip(self):
"""Return interlocutor role."""
return (
ConnRecord.Role.REQUESTER
if self is ConnRecord.Role.RESPONDER
else ConnRecord.Role.RESPONDER
)
def __eq__(self, other: Union[str, "ConnRecord.Role"]) -> bool:
"""Comparison between roles."""
return self is ConnRecord.Role.get(other)
class State(Enum):
"""Collator for equivalent states between RFC 160 and RFC 23."""
INIT = ("init", "start")
INVITATION = ("invitation", "invitation")
REQUEST = ("request", "request")
RESPONSE = ("response", "response")
COMPLETED = ("active", "completed")
ABANDONED = ("error", "abandoned")
@property
def rfc160(self):
"""Return RFC 160 (connection protocol) nomenclature."""
return self.value[0]
@property
def rfc23(self):
"""Return RFC 23 (DID exchange protocol) nomenclature."""
return self.value[1]
@classmethod
def get(cls, label: Union[str, "ConnRecord.State"]):
"""Get state enum for label."""
if isinstance(label, str):
for state in ConnRecord.State:
if label in state.value:
return state
elif isinstance(label, ConnRecord.State):
return label
return None
def __eq__(self, other: Union[str, "ConnRecord.State"]) -> bool:
"""Comparison between states."""
return self is ConnRecord.State.get(other)
RECORD_ID_NAME = "connection_id"
WEBHOOK_TOPIC = "connections"
LOG_STATE_FLAG = "debug.connections"
CACHE_ENABLED = True
TAG_NAMES = {"my_did", "their_did", "request_id", "invitation_key"}
RECORD_TYPE = "connection"
RECORD_TYPE_INVITATION = "connection_invitation"
RECORD_TYPE_REQUEST = "connection_request"
INVITATION_MODE_ONCE = "once"
INVITATION_MODE_MULTI = "multi"
INVITATION_MODE_STATIC = "static"
ROUTING_STATE_NONE = "none"
ROUTING_STATE_REQUEST = "request"
ROUTING_STATE_ACTIVE = "active"
ROUTING_STATE_ERROR = "error"
ACCEPT_MANUAL = "manual"
ACCEPT_AUTO = "auto"
def __init__(
self,
*,
connection_id: str = None,
my_did: str = None,
their_did: str = None,
their_label: str = None,
their_role: Union[str, "ConnRecord.Role"] = None,
invitation_key: str = None,
request_id: str = None,
state: Union[str, "ConnRecord.State"] = None,
inbound_connection_id: str = None,
error_msg: str = None,
routing_state: str = None,
accept: str = None,
invitation_mode: str = None,
alias: str = None,
**kwargs,
):
"""Initialize a new ConnRecord."""
super().__init__(
connection_id,
state=(ConnRecord.State.get(state) or ConnRecord.State.INIT).rfc23,
**kwargs,
)
self.my_did = my_did
self.their_did = their_did
self.their_label = their_label
self.their_role = (
ConnRecord.Role.get(their_role).rfc23
if isinstance(their_role, str)
else None
if their_role is None
else their_role.rfc23
)
self.invitation_key = invitation_key
self.request_id = request_id
self.error_msg = error_msg
self.inbound_connection_id = inbound_connection_id
self.routing_state = routing_state or self.ROUTING_STATE_NONE
self.accept = accept or self.ACCEPT_MANUAL
self.invitation_mode = invitation_mode or self.INVITATION_MODE_ONCE
self.alias = alias
@property
def connection_id(self) -> str:
"""Accessor for the ID associated with this connection."""
return self._id
@property
def record_value(self) -> dict:
"""Accessor to for the JSON record value properties for this connection."""
return {
prop: getattr(self, prop)
for prop in (
"their_role",
"inbound_connection_id",
"routing_state",
"accept",
"invitation_mode",
"alias",
"error_msg",
"their_label",
"state",
)
}
@classmethod
async def retrieve_by_did(
cls,
context: InjectionContext,
their_did: str = None,
my_did: str = None,
their_role: str = None,
) -> "ConnRecord":
"""Retrieve a connection record by target DID.
Args:
context: The injection context to use
their_did: The target DID to filter by
my_did: One of our DIDs to filter by
            their_role: Filter connections by their role
"""
tag_filter = {}
if their_did:
tag_filter["their_did"] = their_did
if my_did:
tag_filter["my_did"] = my_did
post_filter = {}
if their_role:
post_filter["their_role"] = cls.Role.get(their_role).rfc23
return await cls.retrieve_by_tag_filter(context, tag_filter, post_filter)
@classmethod
async def retrieve_by_invitation_key(
cls, context: InjectionContext, invitation_key: str, their_role: str = None
) -> "ConnRecord":
"""Retrieve a connection record by invitation key.
Args:
context: The injection context to use
invitation_key: The key on the originating invitation
            their_role: Filter by their role in the connection
"""
tag_filter = {"invitation_key": invitation_key}
post_filter = {"state": cls.State.INVITATION.rfc23}
if their_role:
post_filter["their_role"] = cls.Role.get(their_role).rfc23
return await cls.retrieve_by_tag_filter(context, tag_filter, post_filter)
@classmethod
async def retrieve_by_request_id(
cls, context: InjectionContext, request_id: str
) -> "ConnRecord":
"""Retrieve a connection record from our previous request ID.
Args:
context: The injection context to use
request_id: The ID of the originating connection request
"""
tag_filter = {"request_id": request_id}
return await cls.retrieve_by_tag_filter(context, tag_filter)
async def attach_invitation(
self,
context: InjectionContext,
invitation: Union[ConnectionInvitation, OOBInvitation],
):
"""Persist the related connection invitation to storage.
Args:
context: The injection context to use
invitation: The invitation to relate to this connection record
"""
assert self.connection_id
record = StorageRecord(
self.RECORD_TYPE_INVITATION,
invitation.to_json(),
{"connection_id": self.connection_id},
)
storage: BaseStorage = await context.inject(BaseStorage)
await storage.add_record(record)
async def retrieve_invitation(
self, context: InjectionContext
) -> Union[ConnectionInvitation, OOBInvitation]:
"""Retrieve the related connection invitation.
Args:
context: The injection context to use
"""
assert self.connection_id
storage: BaseStorage = await context.inject(BaseStorage)
result = await storage.find_record(
self.RECORD_TYPE_INVITATION, {"connection_id": self.connection_id}
)
ser = json.loads(result.value)
return (
ConnectionInvitation
if DIDCommPrefix.unqualify(ser["@type"]) == CONNECTION_INVITATION
else OOBInvitation
).deserialize(ser)
async def attach_request(
self,
context: InjectionContext,
request: ConnectionRequest, # will be Union[ConnectionRequest, DIDEx Request]
):
"""Persist the related connection request to storage.
Args:
context: The injection context to use
request: The request to relate to this connection record
"""
assert self.connection_id
record = StorageRecord(
self.RECORD_TYPE_REQUEST,
request.to_json(),
{"connection_id": self.connection_id},
)
storage: BaseStorage = await context.inject(BaseStorage)
await storage.add_record(record)
async def retrieve_request(
self,
context: InjectionContext,
) -> ConnectionRequest: # will be Union[ConnectionRequest, DIDEx Request]
"""Retrieve the related connection invitation.
Args:
context: The injection context to use
"""
assert self.connection_id
storage: BaseStorage = await context.inject(BaseStorage)
result = await storage.find_record(
self.RECORD_TYPE_REQUEST, {"connection_id": self.connection_id}
)
return ConnectionRequest.from_json(result.value)
@property
def is_ready(self) -> str:
"""Accessor for connection readiness."""
return ConnRecord.State.get(self.state) in (
ConnRecord.State.COMPLETED,
ConnRecord.State.RESPONSE,
)
@property
def is_multiuse_invitation(self) -> bool:
"""Accessor for multi use invitation mode."""
return self.invitation_mode == self.INVITATION_MODE_MULTI
async def post_save(self, context: InjectionContext, *args, **kwargs):
"""Perform post-save actions.
Args:
context: The injection context to use
"""
await super().post_save(context, *args, **kwargs)
# clear cache key set by connection manager
cache_key = self.cache_key(self.connection_id, "connection_target")
await self.clear_cached_key(context, cache_key)
def __eq__(self, other: Any) -> bool:
"""Comparison between records."""
return super().__eq__(other)
class ConnRecordSchema(BaseRecordSchema):
"""Schema to allow serialization/deserialization of connection records."""
class Meta:
"""ConnRecordSchema metadata."""
model_class = ConnRecord
connection_id = fields.Str(
required=False, description="Connection identifier", example=UUIDFour.EXAMPLE
)
my_did = fields.Str(
required=False, description="Our DID for connection", **INDY_DID
)
their_did = fields.Str(
required=False, description="Their DID for connection", **INDY_DID
)
their_label = fields.Str(
required=False, description="Their label for connection", example="Bob"
)
their_role = fields.Str(
required=False,
description="Their role in the connection protocol",
validate=validate.OneOf(
[label for role in ConnRecord.Role for label in role.value]
),
example=ConnRecord.Role.REQUESTER.rfc23,
)
inbound_connection_id = fields.Str(
required=False,
description="Inbound routing connection id to use",
example=UUIDFour.EXAMPLE,
)
invitation_key = fields.Str(
required=False, description="Public key for connection", **INDY_RAW_PUBLIC_KEY
)
request_id = fields.Str(
required=False,
description="Connection request identifier",
example=UUIDFour.EXAMPLE,
)
routing_state = fields.Str(
required=False,
description="Routing state of connection",
validate=validate.OneOf(
[
getattr(ConnRecord, m)
for m in vars(ConnRecord)
if m.startswith("ROUTING_STATE_")
]
),
example=ConnRecord.ROUTING_STATE_ACTIVE,
)
accept = fields.Str(
required=False,
description="Connection acceptance: manual or auto",
example=ConnRecord.ACCEPT_AUTO,
validate=validate.OneOf(
[
getattr(ConnRecord, a)
for a in vars(ConnRecord)
if a.startswith("ACCEPT_")
]
),
)
error_msg = fields.Str(
required=False,
description="Error message",
example="No DIDDoc provided; cannot connect to public DID",
)
invitation_mode = fields.Str(
required=False,
description="Invitation mode",
example=ConnRecord.INVITATION_MODE_ONCE,
validate=validate.OneOf(
[
getattr(ConnRecord, i)
for i in vars(ConnRecord)
if i.startswith("INVITATION_MODE_")
]
),
)
alias = fields.Str(
required=False,
description="Optional alias to apply to connection for later use",
example="Bob, providing quotes",
)
|
the-stack_0_27914
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains widget implementation
"""
from __future__ import print_function, division, absolute_import
from tpDcc.libs.qt.core import base
from tpDcc.libs.qt.widgets import layouts, label
class WelcomeWidget(base.BaseWidget, object):
def __init__(self, project, parent=None):
self._project = project
super(WelcomeWidget, self).__init__(parent=parent)
def get_main_layout(self):
return layouts.HorizontalLayout(spacing=2, margins=(2, 2, 2, 2))
def ui(self):
super(WelcomeWidget, self).ui()
self.main_layout.addStretch()
lbl = label.BaseLabel('Welcome to {}!'.format(self._project.name.title()), parent=self)
lbl.setStyleSheet('font-size: 35px; font-family: "Montserrat";')
self.main_layout.addWidget(lbl)
self.main_layout.addStretch()
|
the-stack_0_27916
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import json
import time
from collections import defaultdict, OrderedDict
import requests
import demjson
LINUX_PRICING_URLS = [
# Deprecated instances (JSON format)
'https://aws.amazon.com/ec2/pricing/json/linux-od.json',
# Previous generation instances (JavaScript file)
'https://a0.awsstatic.com/pricing/1/ec2/previous-generation/linux-od.min.js',
# New generation instances (JavaScript file)
'https://a0.awsstatic.com/pricing/1/ec2/linux-od.min.js'
]
EC2_REGIONS = [
'us-east-1',
'us-west-1',
'us-west-2',
'eu-west-1',
'eu-central-1',
'ap-southeast-1',
'ap-southeast-2',
'ap-northeast-1',
'ap-northeast-2',
'sa-east-1'
]
EC2_INSTANCE_TYPES = [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'cg1.4xlarge',
'g2.2xlarge',
'g2.8xlarge',
'cr1.8xlarge',
'hs1.4xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
# Maps EC2 region name to region name used in the pricing file
REGION_NAME_MAP = {
'us-east': 'ec2_us_east',
'us-east-1': 'ec2_us_east',
'us-west': 'ec2_us_west',
'us-west-1': 'ec2_us_west',
'us-west-2': 'ec2_us_west_oregon',
'eu-west-1': 'ec2_eu_west',
'eu-ireland': 'ec2_eu_west',
'eu-central-1': 'ec2_eu_central',
'apac-sin': 'ec2_ap_southeast',
'ap-southeast-1': 'ec2_ap_southeast',
'apac-syd': 'ec2_ap_southeast_2',
'ap-southeast-2': 'ec2_ap_southeast_2',
'apac-tokyo': 'ec2_ap_northeast',
'ap-northeast-1': 'ec2_ap_northeast',
'ap-northeast-2': 'ec2_ap_northeast',
'sa-east-1': 'ec2_sa_east',
'us-gov-west-1': 'ec2_us_govwest'
}
INSTANCE_SIZES = [
'micro',
'small',
'medium',
'large',
'xlarge',
'x-large',
'extra-large'
]
RE_NUMERIC_OTHER = re.compile(r'(?:([0-9]+)|([-A-Z_a-z]+)|([^-0-9A-Z_a-z]+))')
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json')
PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH)
def scrape_ec2_pricing():
result = defaultdict(OrderedDict)
for url in LINUX_PRICING_URLS:
response = requests.get(url)
        if re.match(r'.*?\.json$', url):
            data = response.json()
        elif re.match(r'.*?\.js$', url):
            # Use response.text (not .content) so the regex below matches a str
            data = response.text
            match = re.match(r'^.*callback\((.*?)\);?$', data,
                             re.MULTILINE | re.DOTALL)
data = match.group(1)
# demjson supports non-strict mode and can parse unquoted objects
data = demjson.decode(data)
regions = data['config']['regions']
for region_data in regions:
region_name = region_data['region']
libcloud_region_name = REGION_NAME_MAP[region_name]
instance_types = region_data['instanceTypes']
for instance_type in instance_types:
sizes = instance_type['sizes']
for size in sizes:
price = size['valueColumns'][0]['prices']['USD']
if str(price).lower() == 'n/a':
# Price not available
continue
result[libcloud_region_name][size['size']] = float(price)
return result
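# Illustrative sketch of the structure returned by scrape_ec2_pricing() (region keys
# come from REGION_NAME_MAP; the prices shown here are placeholders, not real data):
#
#   {
#       'ec2_us_east': {'t2.micro': 0.013, 'm3.medium': 0.067, ...},
#       'ec2_eu_west': {'t2.micro': 0.014, ...},
#       ...
#   }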
def update_pricing_file(pricing_file_path, pricing_data):
with open(pricing_file_path, 'r') as fp:
content = fp.read()
data = json.loads(content)
data['updated'] = int(time.time())
data['compute'].update(pricing_data)
# Always sort the pricing info
data = sort_nested_dict(data)
content = json.dumps(data, indent=4)
lines = content.splitlines()
lines = [line.rstrip() for line in lines]
content = '\n'.join(lines)
with open(pricing_file_path, 'w') as fp:
fp.write(content)
def sort_nested_dict(value):
"""
Recursively sort a nested dict.
"""
result = OrderedDict()
for key, value in sorted(value.items(), key=sort_key_by_numeric_other):
if isinstance(value, (dict, OrderedDict)):
result[key] = sort_nested_dict(value)
else:
result[key] = value
return result
def sort_key_by_numeric_other(key_value):
"""
Split key into numeric, alpha and other part and sort accordingly.
"""
return tuple((
int(numeric) if numeric else None,
INSTANCE_SIZES.index(alpha) if alpha in INSTANCE_SIZES else alpha,
other
) for (numeric, alpha, other) in RE_NUMERIC_OTHER.findall(key_value[0]))
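# Illustrative sketch (not part of the original script): sort_key_by_numeric_other is
# meant as the `key` argument when sorting (name, price) pairs, so instance names sort
# numerically and by the INSTANCE_SIZES order rather than plain string order, e.g.
#
#   >>> prices = {'m3.2xlarge': 0.0, 'c3.large': 0.0, 'm1.small': 0.0}
#   >>> [k for k, _ in sorted(prices.items(), key=sort_key_by_numeric_other)]
#   ['c3.large', 'm1.small', 'm3.2xlarge']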
def main():
print('Scraping EC2 pricing data')
pricing_data = scrape_ec2_pricing()
update_pricing_file(pricing_file_path=PRICING_FILE_PATH,
pricing_data=pricing_data)
print('Pricing data updated')
if __name__ == '__main__':
main()
|
the-stack_0_27918
|
# Custom imports below
import json
import requests
import validators
from icon_trendmicro_apex.util.util import get_expiration_utc_date_string
from requests.exceptions import RequestException
import komand
from komand.exceptions import PluginException
from .schema import BlacklistInput, BlacklistOutput, Input, Output, Component
class Blacklist(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="blacklist",
description=Component.DESCRIPTION,
input=BlacklistInput(),
output=BlacklistOutput(),
)
self.api_path = "/WebApp/api/SuspiciousObjects/UserDefinedSO"
self.MAX_NOTES_LENGTH = 256
self.MAX_SHA_LENGTH = 40
self.MAX_URL_LENGTH = 2046
def run(self, params={}):
payload = self.generate_payload(params)
json_payload = json.dumps(payload)
blacklist_state = params.get(Input.BLACKLIST_STATE, True)
if blacklist_state is False:
method = "DELETE"
payload_type = payload.get("param", {}).get("type")
content = payload["param"]["content"]
self.api_path = f"{self.api_path}?type={payload_type}&content={content}"
else:
method = "PUT"
self.connection.create_jwt_token(self.api_path, method, json_payload)
request_url = self.connection.url + self.api_path
response = None
try:
response = requests.request(
method.lower(),
request_url,
headers=self.connection.header_dict,
data=json_payload,
verify=False,
)
response.raise_for_status()
return {Output.SUCCESS: response is not None}
except RequestException as rex:
if response:
self.logger.error(f"Received status code: {response.status_code}")
self.logger.error(f"Response was: {response.text}")
raise PluginException(
assistance="Please verify the connection details and input data.",
cause=f"Error processing the Apex request: {rex}",
)
@staticmethod
def get_data_type(indicator):
if validators.ipv4(indicator) or validators.ipv6(indicator):
return "IP"
elif validators.url(indicator):
return "URL"
elif validators.domain(indicator):
return "DOMAIN"
elif validators.sha1(indicator):
return "FILE_SHA1"
raise PluginException(
cause="Invalid indicator input provided.",
assistance="Supported indicators are IP, URL, domain and SHA1 hash.",
)
def generate_payload(self, params):
payload_notes = ""
user_notes = params.get(Input.DESCRIPTION)
if user_notes:
if len(user_notes) > self.MAX_NOTES_LENGTH:
self.logger.warning(f"Note: exceeds maximum length, truncated to {self.MAX_NOTES_LENGTH} characters")
payload_notes = user_notes[: self.MAX_NOTES_LENGTH]
indicator = params.get(Input.INDICATOR).lower()
payload_type = self.get_data_type(indicator)
payload_scan_action = params.get(Input.SCAN_ACTION, "BLOCK")
num_days = params.get(Input.EXPIRY_DATE, 30)
payload_expiry_date = get_expiration_utc_date_string(int(num_days))
return {
"param": {
"content": indicator,
"expiration_utc_date": payload_expiry_date,
"notes": payload_notes,
"scan_action": payload_scan_action.lower(),
"type": payload_type.lower(),
}
}
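# Illustrative sketch (placeholder values): for an IP indicator the payload produced
# by generate_payload() looks roughly like
#
#   {
#       "param": {
#           "content": "198.51.100.7",
#           "expiration_utc_date": "<UTC date string>",
#           "notes": "",
#           "scan_action": "block",
#           "type": "ip",
#       }
#   }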
|
the-stack_0_27919
|
# -*- coding: utf-8 -*-
'''
Mounting of filesystems
=======================
Mount any type of mountable filesystem with the mounted function:
.. code-block:: yaml
/mnt/sdb:
mount.mounted:
- device: /dev/sdb1
- fstype: ext4
- mkmnt: True
- opts:
- defaults
/srv/bigdata:
mount.mounted:
- device: UUID=066e0200-2867-4ebe-b9e6-f30026ca2314
- fstype: xfs
- opts: nobootwait,noatime,nodiratime,nobarrier,logbufs=8
- dump: 0
- pass_num: 2
- persist: True
- mkmnt: True
/var/lib/bigdata:
mount.mounted:
- device: /srv/bigdata
- fstype: none
- opts: bind
- dump: 0
- pass_num: 0
- persist: True
- mkmnt: True
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os.path
import re
# Import salt libs
from salt.ext.six import string_types
import logging
from salt.ext import six
log = logging.getLogger(__name__)
def _size_convert(_re_size):
converted_size = int(_re_size.group('size_value'))
if _re_size.group('size_unit') == 'm':
converted_size = int(converted_size) * 1024
if _re_size.group('size_unit') == 'g':
converted_size = int(converted_size) * 1024 * 1024
return converted_size
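# Illustrative sketch (not part of the original state module): _size_convert expects
# the match object produced by the size regex used in mounted() below and normalizes
# the value to kibibytes, e.g.
#
#   >>> _size_convert(re.match(r'size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)', 'size=2g'))
#   2097152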
def mounted(name,
device,
fstype,
mkmnt=False,
opts='defaults',
dump=0,
pass_num=0,
config='/etc/fstab',
persist=True,
mount=True,
user=None,
match_on='auto',
device_name_regex=None,
extra_mount_invisible_options=None,
extra_mount_invisible_keys=None,
extra_mount_ignore_fs_keys=None,
extra_mount_translate_options=None,
hidden_opts=None,
**kwargs):
'''
Verify that a device is mounted
name
The path to the location where the device is to be mounted
device
The device name, typically the device node, such as ``/dev/sdb1``
or ``UUID=066e0200-2867-4ebe-b9e6-f30026ca2314`` or ``LABEL=DATA``
fstype
The filesystem type, this will be ``xfs``, ``ext2/3/4`` in the case of classic
filesystems, ``fuse`` in the case of fuse mounts, and ``nfs`` in the case of nfs mounts
mkmnt
If the mount point is not present then the state will fail, set ``mkmnt: True``
to create the mount point if it is otherwise not present
opts
A list object of options or a comma delimited list
dump
The dump value to be passed into the fstab, Default is ``0``
pass_num
The pass value to be passed into the fstab, Default is ``0``
config
Set an alternative location for the fstab, Default is ``/etc/fstab``
persist
Set if the mount should be saved in the fstab, Default is ``True``
mount
Set if the mount should be mounted immediately, Default is ``True``
user
The account used to execute the mount; this defaults to the user salt is
running as on the minion
match_on
A name or list of fstab properties on which this state should be applied.
Default is ``auto``, a special value indicating to guess based on fstype.
In general, ``auto`` matches on name for recognized special devices and
device otherwise.
device_name_regex
A list of device exact names or regular expressions which should
not force a remount. For example, glusterfs may be mounted with a
comma-separated list of servers in fstab, but the /proc/self/mountinfo
will show only the first available server.
.. code-block:: jinja
{% set glusterfs_ip_list = ['10.0.0.1', '10.0.0.2', '10.0.0.3'] %}
mount glusterfs volume:
mount.mounted:
- name: /mnt/glusterfs_mount_point
- device: {{ glusterfs_ip_list|join(',') }}:/volume_name
- fstype: glusterfs
- opts: _netdev,rw,defaults,direct-io-mode=disable
- mkmnt: True
- persist: True
- dump: 0
- pass_num: 0
- device_name_regex:
- ({{ glusterfs_ip_list|join('|') }}):/volume_name
.. versionadded:: 2016.11.0
extra_mount_invisible_options
A list of extra options that are not visible through the
``/proc/self/mountinfo`` interface.
If a option is not visible through this interface it will always remount
the device. This option extends the builtin ``mount_invisible_options``
list.
extra_mount_invisible_keys
A list of extra key options that are not visible through the
``/proc/self/mountinfo`` interface.
If a key option is not visible through this interface it will always
remount the device. This option extends the builtin
``mount_invisible_keys`` list.
A good example for a key option is the password option::
password=badsecret
extra_mount_ignore_fs_keys
A dict of filesystem options which should not force a remount. This will update
the internal dictionary. The dict should look like this::
{
'ramfs': ['size']
}
extra_mount_translate_options
A dict of mount options that gets translated when mounted. To prevent a remount
add additional options to the default dictionary. This will update the internal
dictionary. The dictionary should look like this::
{
'tcp': 'proto=tcp',
'udp': 'proto=udp'
}
hidden_opts
A list of mount options that will be ignored when considering a remount
as part of the state application
.. versionadded:: 2015.8.2
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
update_mount_cache = False
if not name:
ret['result'] = False
ret['comment'] = 'Must provide name to mount.mounted'
return ret
if not device:
ret['result'] = False
ret['comment'] = 'Must provide device to mount.mounted'
return ret
if not fstype:
ret['result'] = False
ret['comment'] = 'Must provide fstype to mount.mounted'
return ret
if device_name_regex is None:
device_name_regex = []
# Defaults is not a valid option on Mac OS
if __grains__['os'] in ['MacOS', 'Darwin'] and opts == 'defaults':
opts = 'noowners'
# Defaults is not a valid option on AIX
if __grains__['os'] in ['AIX']:
if opts == 'defaults':
opts = ''
# Make sure that opts is correct, it can be a list or a comma delimited
# string
if isinstance(opts, string_types):
opts = opts.split(',')
if isinstance(hidden_opts, string_types):
hidden_opts = hidden_opts.split(',')
# remove possible trailing slash
if not name == '/':
name = name.rstrip('/')
device_list = []
# Get the active data
active = __salt__['mount.active'](extended=True)
real_name = os.path.realpath(name)
if device.startswith('/'):
if 'bind' in opts and real_name in active:
_device = device
if active[real_name]['device'].startswith('/'):
# Find the device that the bind really points at.
while True:
if _device in active:
_real_device = active[_device]['device']
opts = list(set(opts + active[_device]['opts'] + active[_device]['superopts']))
active[real_name]['opts'].append('bind')
break
_device = os.path.dirname(_device)
real_device = _real_device
else:
# Remote file systems act differently.
if _device in active:
opts = list(set(opts + active[_device]['opts'] + active[_device]['superopts']))
active[real_name]['opts'].append('bind')
real_device = active[real_name]['device']
else:
real_device = os.path.realpath(device)
elif device.upper().startswith('UUID='):
real_device = device.split('=')[1].strip('"').lower()
elif device.upper().startswith('LABEL='):
_label = device.split('=')[1]
cmd = 'blkid -t LABEL={0}'.format(_label)
res = __salt__['cmd.run_all']('{0}'.format(cmd))
if res['retcode'] > 0:
ret['comment'] = 'Unable to find device with label {0}.'.format(_label)
ret['result'] = False
return ret
else:
# output is a list of entries like this:
# /dev/sda: LABEL="<label>" UUID="<uuid>" UUID_SUB="<uuid>" TYPE="btrfs"
# exact list of properties varies between filesystems, but we're
# only interested in the device in the first column
            for line in res['stdout'].splitlines():
dev_with_label = line.split(':')[0]
device_list.append(dev_with_label)
real_device = device_list[0]
else:
real_device = device
# LVS devices have 2 names under /dev:
# /dev/mapper/vg--name-lv--name and /dev/vg-name/lv-name
# No matter what name is used for mounting,
# mount always displays the device as /dev/mapper/vg--name-lv--name
# Note the double-dash escaping.
# So, let's call that the canonical device name
# We should normalize names of the /dev/vg-name/lv-name type to the canonical name
lvs_match = re.match(r'^/dev/(?P<vg_name>[^/]+)/(?P<lv_name>[^/]+$)', device)
if lvs_match:
double_dash_escaped = dict((k, re.sub(r'-', '--', v)) for k, v in six.iteritems(lvs_match.groupdict()))
mapper_device = '/dev/mapper/{vg_name}-{lv_name}'.format(**double_dash_escaped)
if os.path.exists(mapper_device):
real_device = mapper_device
# When included in a Salt state file, FUSE devices are prefaced by the
# filesystem type and a hash, e.g. sshfs. In the mount list only the
# hostname is included. So if we detect that the device is a FUSE device
# then we remove the prefaced string so that the device in state matches
# the device in the mount list.
fuse_match = re.match(r'^\w+\#(?P<device_name>.+)', device)
if fuse_match:
if 'device_name' in fuse_match.groupdict():
real_device = fuse_match.group('device_name')
if real_name in active:
if 'superopts' not in active[real_name]:
active[real_name]['superopts'] = []
if mount:
device_list.append(active[real_name]['device'])
device_list.append(os.path.realpath(device_list[0]))
alt_device = active[real_name]['alt_device'] if 'alt_device' in active[real_name] else None
uuid_device = active[real_name]['device_uuid'] if 'device_uuid' in active[real_name] else None
label_device = active[real_name]['device_label'] if 'device_label' in active[real_name] else None
if alt_device and alt_device not in device_list:
device_list.append(alt_device)
if uuid_device and uuid_device not in device_list:
device_list.append(uuid_device)
if label_device and label_device not in device_list:
device_list.append(label_device)
if opts:
opts.sort()
mount_invisible_options = [
'_netdev',
'actimeo',
'bg',
'comment',
'defaults',
'delay_connect',
'direct-io-mode',
'intr',
'loop',
'nointr',
'nobootwait',
'nofail',
'password',
'reconnect',
'retry',
'soft',
'auto',
'users',
'bind',
'nonempty',
'transform_symlinks',
'port',
'backup-volfile-servers',
]
if extra_mount_invisible_options:
mount_invisible_options.extend(extra_mount_invisible_options)
if hidden_opts:
mount_invisible_options = list(set(mount_invisible_options) | set(hidden_opts))
# options which are provided as key=value (e.g. password=Zohp5ohb)
mount_invisible_keys = [
'actimeo',
'comment',
'credentials',
'direct-io-mode',
'password',
'port',
'retry',
'secretfile',
]
if extra_mount_invisible_keys:
mount_invisible_keys.extend(extra_mount_invisible_keys)
# Some filesystems have options which should not force a remount.
mount_ignore_fs_keys = {
'ramfs': ['size']
}
if extra_mount_ignore_fs_keys:
mount_ignore_fs_keys.update(extra_mount_ignore_fs_keys)
# Some options are translated once mounted
mount_translate_options = {
'tcp': 'proto=tcp',
'udp': 'proto=udp',
}
if extra_mount_translate_options:
mount_translate_options.update(extra_mount_translate_options)
for opt in opts:
if opt in mount_translate_options:
opt = mount_translate_options[opt]
keyval_option = opt.split('=')[0]
if keyval_option in mount_invisible_keys:
opt = keyval_option
size_match = re.match(r'size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)', opt)
if size_match:
converted_size = _size_convert(size_match)
opt = "size={0}k".format(converted_size)
# make cifs option user synonym for option username which is reported by /proc/mounts
if fstype in ['cifs'] and opt.split('=')[0] == 'user':
opt = "username={0}".format(opt.split('=')[1])
if opt.split('=')[0] in mount_ignore_fs_keys.get(fstype, []):
opt = opt.split('=')[0]
# convert uid/gid to numeric value from user/group name
name_id_opts = {'uid': 'user.info',
'gid': 'group.info'}
if opt.split('=')[0] in name_id_opts and len(opt.split('=')) > 1:
_givenid = opt.split('=')[1]
_param = opt.split('=')[0]
_id = _givenid
if not re.match('[0-9]+$', _givenid):
_info = __salt__[name_id_opts[_param]](_givenid)
if _info and _param in _info:
_id = _info[_param]
opt = _param + '=' + six.text_type(_id)
_active_superopts = active[real_name].get('superopts', [])
for _active_opt in _active_superopts:
size_match = re.match(r'size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)', _active_opt)
if size_match:
converted_size = _size_convert(size_match)
opt = "size={0}k".format(converted_size)
_active_superopts.remove(_active_opt)
_active_opt = "size={0}k".format(converted_size)
_active_superopts.append(_active_opt)
if opt not in active[real_name]['opts'] \
and opt not in _active_superopts \
and opt not in mount_invisible_options \
and opt not in mount_ignore_fs_keys.get(fstype, []) \
and opt not in mount_invisible_keys:
if __opts__['test']:
ret['result'] = None
ret['comment'] = "Remount would be forced because options ({0}) changed".format(opt)
return ret
else:
# Some file systems require umounting and mounting if options change
                            # add others to the list that require similar functionality
if fstype in ['nfs', 'cvfs'] or fstype.startswith('fuse'):
ret['changes']['umount'] = "Forced unmount and mount because " \
+ "options ({0}) changed".format(opt)
unmount_result = __salt__['mount.umount'](real_name)
if unmount_result is True:
mount_result = __salt__['mount.mount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = mount_result
else:
ret['result'] = False
ret['comment'] = 'Unable to unmount {0}: {1}.'.format(real_name, unmount_result)
return ret
else:
ret['changes']['umount'] = "Forced remount because " \
+ "options ({0}) changed".format(opt)
remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = remount_result
# Cleanup after the remount, so we
# don't write remount into fstab
if 'remount' in opts:
opts.remove('remount')
# Update the cache
update_mount_cache = True
mount_cache = __salt__['mount.read_mount_cache'](real_name)
if 'opts' in mount_cache:
_missing = [opt for opt in mount_cache['opts']
if opt not in opts]
if _missing:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Remount would be forced because'
' options ({0})'
                                              ' changed'.format(','.join(_missing)))
return ret
else:
# Some file systems require umounting and mounting if options change
                            # add others to the list that require similar functionality
if fstype in ['nfs', 'cvfs'] or fstype.startswith('fuse'):
ret['changes']['umount'] = "Forced unmount and mount because " \
+ "options ({0}) changed".format(opt)
unmount_result = __salt__['mount.umount'](real_name)
if unmount_result is True:
mount_result = __salt__['mount.mount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = mount_result
else:
ret['result'] = False
ret['comment'] = 'Unable to unmount {0}: {1}.'.format(real_name, unmount_result)
return ret
else:
ret['changes']['umount'] = "Forced remount because " \
+ "options ({0}) changed".format(opt)
remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = remount_result
# Cleanup after the remount, so we
# don't write remount into fstab
if 'remount' in opts:
opts.remove('remount')
update_mount_cache = True
else:
update_mount_cache = True
if real_device not in device_list:
# name matches but device doesn't - need to umount
_device_mismatch_is_ignored = None
for regex in list(device_name_regex):
for _device in device_list:
if re.match(regex, _device):
_device_mismatch_is_ignored = _device
break
if _device_mismatch_is_ignored:
ret['result'] = True
ret['comment'] = "An umount will not be forced " \
+ "because device matched device_name_regex: " \
+ _device_mismatch_is_ignored
elif __opts__['test']:
ret['result'] = None
ret['comment'] = "An umount would have been forced " \
+ "because devices do not match. Watched: " \
+ device
else:
ret['changes']['umount'] = "Forced unmount because devices " \
+ "don't match. Wanted: " + device
if real_device != device:
ret['changes']['umount'] += " (" + real_device + ")"
ret['changes']['umount'] += ", current: " + ', '.join(device_list)
out = __salt__['mount.umount'](real_name, user=user)
active = __salt__['mount.active'](extended=True)
if real_name in active:
ret['comment'] = "Unable to unmount"
ret['result'] = None
return ret
update_mount_cache = True
else:
ret['comment'] = 'Target was already mounted'
# using a duplicate check so I can catch the results of a umount
if real_name not in active:
if mount:
# The mount is not present! Mount it
if __opts__['test']:
ret['result'] = None
if os.path.exists(name):
ret['comment'] = '{0} would be mounted'.format(name)
elif mkmnt:
ret['comment'] = '{0} would be created and mounted'.format(name)
else:
ret['comment'] = '{0} does not exist and would not be created'.format(name)
return ret
if not os.path.exists(name) and not mkmnt:
ret['result'] = False
ret['comment'] = 'Mount directory is not present'
return ret
out = __salt__['mount.mount'](name, device, mkmnt, fstype, opts, user=user)
active = __salt__['mount.active'](extended=True)
update_mount_cache = True
if isinstance(out, string_types):
# Failed to (re)mount, the state has failed!
ret['comment'] = out
ret['result'] = False
return ret
elif real_name in active:
# (Re)mount worked!
ret['comment'] = 'Target was successfully mounted'
ret['changes']['mount'] = True
elif not os.path.exists(name):
if __opts__['test']:
ret['result'] = None
if mkmnt:
ret['comment'] = '{0} would be created, but not mounted'.format(name)
else:
ret['comment'] = '{0} does not exist and would neither be created nor mounted'.format(name)
elif mkmnt:
__salt__['file.mkdir'](name, user=user)
ret['comment'] = '{0} was created, not mounted'.format(name)
else:
ret['comment'] = '{0} not present and not mounted'.format(name)
else:
if __opts__['test']:
ret['result'] = None
ret['comment'] = '{0} would not be mounted'.format(name)
else:
ret['comment'] = '{0} not mounted'.format(name)
if persist:
if '/etc/fstab' == config:
# Override default for Mac OS
if __grains__['os'] in ['MacOS', 'Darwin']:
config = "/etc/auto_salt"
# Override default for AIX
elif 'AIX' in __grains__['os']:
config = "/etc/filesystems"
if __opts__['test']:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name,
device,
fstype,
opts,
config,
test=True)
elif __grains__['os'] in ['AIX']:
out = __salt__['mount.set_filesystems'](name,
device,
fstype,
opts,
mount,
config,
test=True,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name,
device,
fstype,
opts,
dump,
pass_num,
config,
test=True,
match_on=match_on)
if out != 'present':
ret['result'] = None
if out == 'new':
if mount:
comment = ('{0} is mounted, but needs to be '
'written to the fstab in order to be '
'made persistent.').format(name)
else:
comment = ('{0} needs to be '
'written to the fstab in order to be '
'made persistent.').format(name)
elif out == 'change':
if mount:
comment = ('{0} is mounted, but its fstab entry '
'must be updated.').format(name)
else:
comment = ('The {0} fstab entry '
'must be updated.').format(name)
else:
ret['result'] = False
comment = ('Unable to detect fstab status for '
'mount point {0} due to unexpected '
'output \'{1}\' from call to '
'mount.set_fstab. This is most likely '
'a bug.').format(name, out)
if 'comment' in ret:
ret['comment'] = '{0}. {1}'.format(ret['comment'], comment)
else:
ret['comment'] = comment
return ret
else:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name,
device,
fstype,
opts,
config)
elif __grains__['os'] in ['AIX']:
out = __salt__['mount.set_filesystems'](name,
device,
fstype,
opts,
mount,
config,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name,
device,
fstype,
opts,
dump,
pass_num,
config,
match_on=match_on)
if update_mount_cache:
cache_result = __salt__['mount.write_mount_cache'](real_name,
device,
mkmnt=mkmnt,
fstype=fstype,
mount_opts=opts)
if out == 'present':
ret['comment'] += '. Entry already exists in the fstab.'
return ret
if out == 'new':
ret['changes']['persist'] = 'new'
ret['comment'] += '. Added new entry to the fstab.'
return ret
if out == 'change':
ret['changes']['persist'] = 'update'
ret['comment'] += '. Updated the entry in the fstab.'
return ret
if out == 'bad config':
ret['result'] = False
ret['comment'] += '. However, the fstab was not found.'
return ret
return ret
def swap(name, persist=True, config='/etc/fstab'):
'''
Activates a swap device
.. code-block:: yaml
/root/swapfile:
mount.swap
.. note::
``swap`` does not currently support LABEL
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
on_ = __salt__['mount.swaps']()
if __salt__['file.is_link'](name):
real_swap_device = __salt__['file.readlink'](name)
if not real_swap_device.startswith('/'):
real_swap_device = '/dev/{0}'.format(os.path.basename(real_swap_device))
else:
real_swap_device = name
if real_swap_device in on_:
ret['comment'] = 'Swap {0} already active'.format(name)
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Swap {0} is set to be activated'.format(name)
else:
__salt__['mount.swapon'](real_swap_device)
on_ = __salt__['mount.swaps']()
if real_swap_device in on_:
ret['comment'] = 'Swap {0} activated'.format(name)
ret['changes'] = on_[real_swap_device]
else:
ret['comment'] = 'Swap {0} failed to activate'.format(name)
ret['result'] = False
if persist:
device_key_name = 'device'
if 'AIX' in __grains__['os']:
device_key_name = 'dev'
if '/etc/fstab' == config:
# Override default for AIX
config = "/etc/filesystems"
fstab_data = __salt__['mount.filesystems'](config)
else:
fstab_data = __salt__['mount.fstab'](config)
if __opts__['test']:
if name not in fstab_data and name not in [fstab_data[item]['device'] for item in fstab_data]:
ret['result'] = None
if name in on_:
ret['comment'] = ('Swap {0} is set to be added to the '
'fstab and to be activated').format(name)
return ret
if 'none' in fstab_data:
if fstab_data['none'][device_key_name] == name and \
fstab_data['none']['fstype'] != 'swap':
return ret
if 'AIX' in __grains__['os']:
out = None
ret['result'] = False
ret['comment'] += '. swap not present in /etc/filesystems on AIX.'
return ret
else:
# present, new, change, bad config
# Make sure the entry is in the fstab
out = __salt__['mount.set_fstab']('none',
name,
'swap',
['defaults'],
0,
0,
config)
if out == 'present':
return ret
if out == 'new':
ret['changes']['persist'] = 'new'
ret['comment'] += '. Added new entry to the fstab.'
return ret
if out == 'change':
ret['changes']['persist'] = 'update'
ret['comment'] += '. Updated the entry in the fstab.'
return ret
if out == 'bad config':
ret['result'] = False
ret['comment'] += '. However, the fstab was not found.'
return ret
return ret
def unmounted(name,
device=None,
config='/etc/fstab',
persist=False,
user=None,
**kwargs):
'''
.. versionadded:: 0.17.0
Verify that a device is not mounted
name
The path to the location where the device is to be unmounted from
device
The device to be unmounted. This is optional because the device could
be mounted in multiple places.
.. versionadded:: 2015.5.0
config
Set an alternative location for the fstab, Default is ``/etc/fstab``
persist
Set if the mount should be purged from the fstab, Default is ``False``
user
The user to own the mount; this defaults to the user salt is
running as on the minion
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
update_mount_cache = False
if not name:
ret['result'] = False
ret['comment'] = 'Must provide name to mount.unmounted'
return ret
# Get the active data
active = __salt__['mount.active'](extended=True)
if name not in active:
# Nothing to unmount
ret['comment'] = 'Target was already unmounted'
if name in active:
# The mount is present! Unmount it
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Mount point {0} is mounted but should not '
'be').format(name)
return ret
if device:
out = __salt__['mount.umount'](name, device, user=user)
update_mount_cache = True
else:
out = __salt__['mount.umount'](name, user=user)
update_mount_cache = True
if isinstance(out, string_types):
# Failed to umount, the state has failed!
ret['comment'] = out
ret['result'] = False
elif out is True:
# umount worked!
ret['comment'] = 'Target was successfully unmounted'
ret['changes']['umount'] = True
else:
ret['comment'] = 'Execute set to False, Target was not unmounted'
ret['result'] = True
if update_mount_cache:
cache_result = __salt__['mount.delete_mount_cache'](name)
if persist:
device_key_name = 'device'
# Override default for Mac OS
if __grains__['os'] in ['MacOS', 'Darwin'] and config == '/etc/fstab':
config = "/etc/auto_salt"
fstab_data = __salt__['mount.automaster'](config)
elif 'AIX' in __grains__['os']:
device_key_name = 'dev'
if config == '/etc/fstab':
config = "/etc/filesystems"
fstab_data = __salt__['mount.filesystems'](config)
else:
fstab_data = __salt__['mount.fstab'](config)
if name not in fstab_data:
ret['comment'] += '. fstab entry not found'
else:
if device:
if fstab_data[name][device_key_name] != device:
ret['comment'] += '. fstab entry for device {0} not found'.format(device)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Mount point {0} is unmounted but needs to '
'be purged from {1} to be made '
'persistent').format(name, config)
return ret
else:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.rm_automaster'](name, device, config)
elif 'AIX' in __grains__['os']:
out = __salt__['mount.rm_filesystems'](name, device, config)
else:
out = __salt__['mount.rm_fstab'](name, device, config)
if out is not True:
ret['result'] = False
ret['comment'] += '. Failed to persist purge'
else:
ret['comment'] += '. Removed target from fstab'
ret['changes']['persist'] = 'purged'
return ret
def mod_watch(name, user=None, **kwargs):
'''
The mounted watcher, called to invoke the watch command.
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered.
name
The name of the mount point
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if kwargs['sfun'] == 'mounted':
out = __salt__['mount.remount'](name, kwargs['device'], False, kwargs['fstype'], kwargs['opts'], user=user)
if out:
ret['comment'] = '{0} remounted'.format(name)
else:
ret['result'] = False
ret['comment'] = '{0} failed to remount: {1}'.format(name, out)
else:
ret['comment'] = 'Watch not supported in {0} at this time'.format(kwargs['sfun'])
return ret
def _convert_to(maybe_device, convert_to):
'''
Convert a device name, UUID or LABEL to a device name, UUID or
LABEL.
Return the fs_spec required for fstab.
'''
# Fast path. If we already have the information required, we can
# save one blkid call
if not convert_to or \
(convert_to == 'device' and maybe_device.startswith('/')) or \
maybe_device.startswith('{}='.format(convert_to.upper())):
return maybe_device
# Get the device information
if maybe_device.startswith('/'):
blkid = __salt__['disk.blkid'](maybe_device)
else:
blkid = __salt__['disk.blkid'](token=maybe_device)
result = None
if len(blkid) == 1:
if convert_to == 'device':
result = list(blkid.keys())[0]
else:
key = convert_to.upper()
result = '{}={}'.format(key, list(blkid.values())[0][key])
return result
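# Illustrative sketch (hypothetical values): for a device that blkid reports a UUID
# for, _convert_to('/dev/sdb1', 'uuid') would return something like
# 'UUID=066e0200-2867-4ebe-b9e6-f30026ca2314', while _convert_to('/dev/sdb1', None),
# _convert_to('/dev/sdb1', 'device') and _convert_to('UUID=...', 'uuid') all return
# the input unchanged via the fast path above.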
def fstab_present(name, fs_file, fs_vfstype, fs_mntops='defaults',
fs_freq=0, fs_passno=0, mount_by=None,
config='/etc/fstab', mount=True, match_on='auto',
not_change=False):
    '''Makes sure that an fstab mount point is present.
name
The name of block device. Can be any valid fs_spec value.
fs_file
Mount point (target) for the filesystem.
fs_vfstype
The type of the filesystem (e.g. ext4, xfs, btrfs, ...)
fs_mntops
The mount options associated with the filesystem. Default is
``defaults``.
fs_freq
Field is used by dump to determine which fs need to be
dumped. Default is ``0``
fs_passno
Field is used by fsck to determine the order in which
filesystem checks are done at boot time. Default is ``0``
mount_by
Select the final value for fs_spec. Can be [``None``,
``device``, ``label``, ``uuid``, ``partlabel``,
        ``partuuid``]. If ``None``, the value for fs_spec will be the
        parameter ``name``; otherwise the correct value is looked up
        based on the device name. For example, for ``uuid``, the
value for fs_spec will be of type 'UUID=xxx' instead of the
device name set in ``name``.
config
Place where the fstab file lives. Default is ``/etc/fstab``
mount
Set if the mount should be mounted immediately. Default is
``True``
match_on
A name or list of fstab properties on which this state should
be applied. Default is ``auto``, a special value indicating
to guess based on fstype. In general, ``auto`` matches on
name for recognized special devices and device otherwise.
not_change
By default, if the entry is found in the fstab file but is
different from the expected content (like different options),
the entry will be replaced with the correct content. If this
parameter is set to ``True`` and the line is found, the
original content will be preserved.
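    Example (illustrative sketch; the state ID, device and mount point are
    placeholders rather than values taken from this module):
    .. code-block:: yaml
        add data volume to fstab:
          mount.fstab_present:
            - name: /dev/sdb1
            - fs_file: /mnt/data
            - fs_vfstype: ext4
            - fs_mntops: defaults,noatime
            - mount_by: uuid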
'''
ret = {
'name': name,
'result': False,
'changes': {},
'comment': [],
}
# Adjust fs_mntops based on the OS
if fs_mntops == 'defaults':
if __grains__['os'] in ['MacOS', 'Darwin']:
fs_mntops = 'noowners'
elif __grains__['os'] == 'AIX':
fs_mntops = ''
# Adjust the config file based on the OS
if config == '/etc/fstab':
if __grains__['os'] in ['MacOS', 'Darwin']:
config = '/etc/auto_salt'
elif __grains__['os'] == 'AIX':
config = '/etc/filesystems'
if not fs_file == '/':
fs_file = fs_file.rstrip('/')
fs_spec = _convert_to(name, mount_by)
# Validate that the device is valid after the conversion
if not fs_spec:
msg = 'Device {} cannot be converted to {}'
ret['comment'].append(msg.format(name, mount_by))
return ret
if __opts__['test']:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
config=config,
test=True,
not_change=not_change)
elif __grains__['os'] == 'AIX':
out = __salt__['mount.set_filesystems'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
mount=mount,
config=config,
test=True,
match_on=match_on,
not_change=not_change)
else:
out = __salt__['mount.set_fstab'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
dump=fs_freq,
pass_num=fs_passno,
config=config,
test=True,
match_on=match_on,
not_change=not_change)
ret['result'] = None
if out == 'present':
msg = '{} entry is already in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'new':
msg = '{} entry will be written in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'change':
msg = '{} entry will be updated in {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
ret['result'] = False
msg = '{} entry cannot be created in {}: {}.'
ret['comment'].append(msg.format(fs_file, config, out))
return ret
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
config=config,
not_change=not_change)
elif __grains__['os'] == 'AIX':
out = __salt__['mount.set_filesystems'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
mount=mount,
config=config,
match_on=match_on,
not_change=not_change)
else:
out = __salt__['mount.set_fstab'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
dump=fs_freq,
pass_num=fs_passno,
config=config,
match_on=match_on,
not_change=not_change)
ret['result'] = True
if out == 'present':
msg = '{} entry was already in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'new':
ret['changes']['persist'] = out
msg = '{} entry added in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'change':
ret['changes']['persist'] = out
msg = '{} entry updated in {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
ret['result'] = False
msg = '{} entry cannot be changed in {}: {}.'
ret['comment'].append(msg.format(fs_file, config, out))
return ret
def fstab_absent(name, fs_file, mount_by=None, config='/etc/fstab'):
'''
    Makes sure that an fstab mount point is absent.
name
The name of block device. Can be any valid fs_spec value.
fs_file
Mount point (target) for the filesystem.
mount_by
Select the final value for fs_spec. Can be [``None``,
``device``, ``label``, ``uuid``, ``partlabel``,
        ``partuuid``]. If ``None``, the value for fs_spec will be the
        parameter ``name``; otherwise the correct value is looked up
        based on the device name. For example, for ``uuid``, the
value for fs_spec will be of type 'UUID=xxx' instead of the
device name set in ``name``.
config
Place where the fstab file lives
'''
ret = {
'name': name,
'result': False,
'changes': {},
'comment': [],
}
# Adjust the config file based on the OS
if config == '/etc/fstab':
if __grains__['os'] in ['MacOS', 'Darwin']:
config = '/etc/auto_salt'
elif __grains__['os'] == 'AIX':
config = '/etc/filesystems'
if not fs_file == '/':
fs_file = fs_file.rstrip('/')
fs_spec = _convert_to(name, mount_by)
if __grains__['os'] in ['MacOS', 'Darwin']:
fstab_data = __salt__['mount.automaster'](config)
elif __grains__['os'] == 'AIX':
fstab_data = __salt__['mount.filesystems'](config)
else:
fstab_data = __salt__['mount.fstab'](config)
if __opts__['test']:
ret['result'] = None
if fs_file not in fstab_data:
msg = '{} entry is already missing in {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
msg = '{} entry will be removed from {}.'
ret['comment'].append(msg.format(fs_file, config))
return ret
if fs_file in fstab_data:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.rm_automaster'](name=fs_file,
device=fs_spec,
config=config)
elif __grains__['os'] == 'AIX':
out = __salt__['mount.rm_filesystems'](name=fs_file,
device=fs_spec,
config=config)
else:
out = __salt__['mount.rm_fstab'](name=fs_file,
device=fs_spec,
config=config)
if out is not True:
ret['result'] = False
msg = '{} entry failed when removing from {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
ret['result'] = True
ret['changes']['persist'] = 'removed'
msg = '{} entry removed from {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
ret['result'] = True
msg = '{} entry is already missing in {}.'
ret['comment'].append(msg.format(fs_file, config))
return ret
|
the-stack_0_27920
|
# alias to keep the 'bytecode' variable free
import sys
from _pydevd_frame_eval.vendored import bytecode as _bytecode
from _pydevd_frame_eval.vendored.bytecode.instr import UNSET, Label, SetLineno, Instr
from _pydevd_frame_eval.vendored.bytecode.flags import infer_flags
class BaseBytecode:
def __init__(self):
self.argcount = 0
if sys.version_info > (3, 8):
self.posonlyargcount = 0
self.kwonlyargcount = 0
self.first_lineno = 1
self.name = "<module>"
self.filename = "<string>"
self.docstring = UNSET
self.cellvars = []
# we cannot recreate freevars from instructions because of super()
# special-case
self.freevars = []
self._flags = _bytecode.CompilerFlags(0)
def _copy_attr_from(self, bytecode):
self.argcount = bytecode.argcount
if sys.version_info > (3, 8):
self.posonlyargcount = bytecode.posonlyargcount
self.kwonlyargcount = bytecode.kwonlyargcount
self.flags = bytecode.flags
self.first_lineno = bytecode.first_lineno
self.name = bytecode.name
self.filename = bytecode.filename
self.docstring = bytecode.docstring
self.cellvars = list(bytecode.cellvars)
self.freevars = list(bytecode.freevars)
def __eq__(self, other):
if type(self) != type(other):
return False
if self.argcount != other.argcount:
return False
if sys.version_info > (3, 8):
if self.posonlyargcount != other.posonlyargcount:
return False
if self.kwonlyargcount != other.kwonlyargcount:
return False
if self.flags != other.flags:
return False
if self.first_lineno != other.first_lineno:
return False
if self.filename != other.filename:
return False
if self.name != other.name:
return False
if self.docstring != other.docstring:
return False
if self.cellvars != other.cellvars:
return False
if self.freevars != other.freevars:
return False
if self.compute_stacksize() != other.compute_stacksize():
return False
return True
@property
def flags(self):
return self._flags
@flags.setter
def flags(self, value):
if not isinstance(value, _bytecode.CompilerFlags):
value = _bytecode.CompilerFlags(value)
self._flags = value
def update_flags(self, *, is_async=None):
self.flags = infer_flags(self, is_async)
class _BaseBytecodeList(BaseBytecode, list):
"""List subclass providing type stable slicing and copying.
"""
def __getitem__(self, index):
value = super().__getitem__(index)
if isinstance(index, slice):
value = type(self)(value)
value._copy_attr_from(self)
return value
def copy(self):
new = type(self)(super().copy())
new._copy_attr_from(self)
return new
def legalize(self):
"""Check that all the element of the list are valid and remove SetLineno.
"""
lineno_pos = []
set_lineno = None
current_lineno = self.first_lineno
for pos, instr in enumerate(self):
if isinstance(instr, SetLineno):
set_lineno = instr.lineno
lineno_pos.append(pos)
continue
# Filter out Labels
if not isinstance(instr, Instr):
continue
if set_lineno is not None:
instr.lineno = set_lineno
elif instr.lineno is None:
instr.lineno = current_lineno
else:
current_lineno = instr.lineno
for i in reversed(lineno_pos):
del self[i]
def __iter__(self):
instructions = super().__iter__()
for instr in instructions:
self._check_instr(instr)
yield instr
def _check_instr(self, instr):
raise NotImplementedError()
class _InstrList(list):
def _flat(self):
instructions = []
labels = {}
jumps = []
offset = 0
for index, instr in enumerate(self):
if isinstance(instr, Label):
instructions.append("label_instr%s" % index)
labels[instr] = offset
else:
if isinstance(instr, Instr) and isinstance(instr.arg, Label):
target_label = instr.arg
instr = _bytecode.ConcreteInstr(instr.name, 0, lineno=instr.lineno)
jumps.append((target_label, instr))
instructions.append(instr)
offset += 1
for target_label, instr in jumps:
instr.arg = labels[target_label]
return instructions
def __eq__(self, other):
if not isinstance(other, _InstrList):
other = _InstrList(other)
return self._flat() == other._flat()
class Bytecode(_InstrList, _BaseBytecodeList):
def __init__(self, instructions=()):
BaseBytecode.__init__(self)
self.argnames = []
for instr in instructions:
self._check_instr(instr)
self.extend(instructions)
def __iter__(self):
instructions = super().__iter__()
for instr in instructions:
self._check_instr(instr)
yield instr
def _check_instr(self, instr):
if not isinstance(instr, (Label, SetLineno, Instr)):
raise ValueError(
"Bytecode must only contain Label, "
"SetLineno, and Instr objects, "
"but %s was found" % type(instr).__name__
)
def _copy_attr_from(self, bytecode):
super()._copy_attr_from(bytecode)
if isinstance(bytecode, Bytecode):
self.argnames = bytecode.argnames
@staticmethod
def from_code(code):
concrete = _bytecode.ConcreteBytecode.from_code(code)
return concrete.to_bytecode()
def compute_stacksize(self):
cfg = _bytecode.ControlFlowGraph.from_bytecode(self)
return cfg.compute_stacksize()
def to_code(self, compute_jumps_passes=None, stacksize=None):
bc = self.to_concrete_bytecode(compute_jumps_passes=compute_jumps_passes)
return bc.to_code(stacksize=stacksize)
def to_concrete_bytecode(self, compute_jumps_passes=None):
converter = _bytecode._ConvertBytecodeToConcrete(self)
return converter.to_concrete_bytecode(compute_jumps_passes=compute_jumps_passes)
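# Illustrative sketch (not part of the vendored module): round-tripping a code object
# through Bytecode, where `f` is any plain Python function defined by the caller.
#
#   bc = Bytecode.from_code(f.__code__)
#   f.__code__ = bc.to_code()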
|
the-stack_0_27921
|
# Copyright The IETF Trust 2013-2020, All Rights Reserved
# -*- coding: utf-8 -*-
from collections import defaultdict
import datetime
import io
import os
import re
from tempfile import mkstemp
from django.http import Http404
from django.db.models import F, Prefetch
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.urls import reverse
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
import debug # pyflakes:ignore
from ietf.doc.models import Document
from ietf.group.models import Group
from ietf.group.utils import can_manage_some_groups, can_manage_group
from ietf.ietfauth.utils import has_role, user_is_person
from ietf.liaisons.utils import get_person_for_user
from ietf.mailtrigger.utils import gather_address_lists
from ietf.person.models import Person
from ietf.meeting.models import Meeting, Schedule, TimeSlot, SchedTimeSessAssignment, ImportantDate, SchedulingEvent, Session
from ietf.meeting.utils import session_requested_by, add_event_info_to_session_qs
from ietf.name.models import ImportantDateName, SessionPurposeName
from ietf.utils import log
from ietf.utils.history import find_history_replacements_active_at
from ietf.utils.mail import send_mail
from ietf.utils.pipe import pipe
from ietf.utils.text import xslugify
def get_meeting(num=None,type_in=['ietf',],days=28):
meetings = Meeting.objects
if type_in:
meetings = meetings.filter(type__in=type_in)
    if num is None:
meetings = meetings.filter(date__gte=datetime.datetime.today()-datetime.timedelta(days=days)).order_by('date')
else:
meetings = meetings.filter(number=num)
if meetings.exists():
return meetings.first()
else:
raise Http404("No such meeting found: %s" % num)
def get_current_ietf_meeting():
meetings = Meeting.objects.filter(type='ietf',date__gte=datetime.datetime.today()-datetime.timedelta(days=31)).order_by('date')
return meetings.first()
def get_current_ietf_meeting_num():
return get_current_ietf_meeting().number
def get_ietf_meeting(num=None):
if num:
meeting = Meeting.objects.filter(number=num).first()
else:
meeting = get_current_ietf_meeting()
return meeting
def get_schedule(meeting, name=None):
if name is None:
schedule = meeting.schedule
else:
schedule = get_object_or_404(meeting.schedule_set, name=name)
return schedule
# seems this belongs in ietf/person/utils.py?
def get_person_by_email(email):
# email == None may actually match people who haven't set an email!
if email is None:
return None
return Person.objects.filter(email__address=email).distinct().first()
def get_schedule_by_name(meeting, owner, name):
if owner is not None:
return meeting.schedule_set.filter(owner = owner, name = name).first()
else:
return meeting.schedule_set.filter(name = name).first()
def preprocess_assignments_for_agenda(assignments_queryset, meeting, extra_prefetches=()):
"""Add computed properties to assignments
For each assignment a, adds
a.start_timestamp
a.end_timestamp
a.session.historic_group
a.session.historic_parent
a.session.rescheduled_to (if rescheduled)
a.session.prefetched_active_materials
"""
assignments_queryset = assignments_queryset.prefetch_related(
'timeslot', 'timeslot__type', 'timeslot__meeting',
'timeslot__location', 'timeslot__location__floorplan', 'timeslot__location__urlresource_set',
Prefetch(
"session",
queryset=add_event_info_to_session_qs(Session.objects.all().prefetch_related(
'group', 'group__charter', 'group__charter__group',
Prefetch('materials',
queryset=Document.objects.exclude(states__type=F("type"), states__slug='deleted').order_by('sessionpresentation__order').prefetch_related('states'),
to_attr='prefetched_active_materials'
)
))
),
*extra_prefetches
)
# removed list(); it was consuming a very large amount of processor time
# assignments = list(assignments_queryset) # make sure we're set in stone
assignments = assignments_queryset
meeting_time = datetime.datetime.combine(meeting.date, datetime.time())
# replace groups with historic counterparts
groups = [ ]
for a in assignments:
if a.session:
a.session.historic_group = None
a.session.order_number = None
if a.session.group and a.session.group not in groups:
groups.append(a.session.group)
sessions_for_groups = defaultdict(list)
for a in assignments:
if a.session and a.session.group:
sessions_for_groups[(a.session.group, a.session.type_id)].append(a)
group_replacements = find_history_replacements_active_at(groups, meeting_time)
parent_id_set = set()
for a in assignments:
if a.session and a.session.group:
a.session.historic_group = group_replacements.get(a.session.group_id)
if a.session.historic_group:
a.session.historic_group.historic_parent = None
if a.session.historic_group.parent_id:
parent_id_set.add(a.session.historic_group.parent_id)
l = sessions_for_groups.get((a.session.group, a.session.type_id), [])
a.session.order_number = l.index(a) + 1 if a in l else 0
parents = Group.objects.filter(pk__in=parent_id_set)
parent_replacements = find_history_replacements_active_at(parents, meeting_time)
timeslot_by_session_pk = {a.session_id: a.timeslot for a in assignments}
for a in assignments:
if a.session and a.session.historic_group and a.session.historic_group.parent_id:
a.session.historic_group.historic_parent = parent_replacements.get(a.session.historic_group.parent_id)
if a.session.current_status == 'resched':
a.session.rescheduled_to = timeslot_by_session_pk.get(a.session.tombstone_for_id)
for d in a.session.prefetched_active_materials:
# make sure these are precomputed with the meeting instead
# of having to look it up
d.get_href(meeting=meeting)
d.get_versionless_href(meeting=meeting)
a.start_timestamp = int(a.timeslot.utc_start_time().timestamp())
a.end_timestamp = int(a.timeslot.utc_end_time().timestamp())
return assignments
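# Illustrative sketch (not taken from the original views): callers typically feed this
# helper the assignments of a meeting's official schedule, roughly like
#
#   assignments = preprocess_assignments_for_agenda(
#       SchedTimeSessAssignment.objects.filter(schedule=meeting.schedule),
#       meeting,
#   )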
class AgendaKeywordTool:
"""Base class for agenda keyword-related organizers
The purpose of this class is to hold utility methods and data needed by keyword generation
helper classes. It ensures consistency of, e.g., definitions of when to use legacy keywords or what
timeslot types should be used to define filters.
"""
def __init__(self, *, assignments=None, sessions=None):
# n.b., single star argument means only keyword parameters are allowed when calling constructor
if assignments is not None and sessions is None:
self.assignments = assignments
self.sessions = [a.session for a in self.assignments if a.session]
elif sessions is not None and assignments is None:
self.assignments = None
self.sessions = sessions
else:
raise RuntimeError('Exactly one of assignments or sessions must be specified')
self.meeting = self.sessions[0].meeting if len(self.sessions) > 0 else None
def _use_legacy_keywords(self):
"""Should legacy keyword handling be used for this meeting?"""
# Only IETF meetings need legacy handling. These are identified
# by having a purely numeric meeting.number.
return (self.meeting is not None
and self.meeting.number.isdigit()
and int(self.meeting.number) <= settings.MEETING_LEGACY_OFFICE_HOURS_END)
# Helper methods
@staticmethod
def _get_group(s):
"""Get group of a session, handling historic groups"""
return getattr(s, 'historic_group', s.group)
def _get_group_parent(self, s):
"""Get parent of a group or parent of a session's group, handling historic groups"""
g = self._get_group(s) if isinstance(s, Session) else s # accept a group or a session arg
return getattr(g, 'historic_parent', g.parent)
def _purpose_keyword(self, purpose):
"""Get the keyword corresponding to a session purpose"""
return purpose.slug.lower()
def _group_keyword(self, group):
"""Get the keyword corresponding to a session group"""
return group.acronym.lower()
def _session_name_keyword(self, session):
"""Get the keyword identifying a session by name"""
return xslugify(session.name) if session.name else None
@property
def filterable_purposes(self):
return SessionPurposeName.objects.exclude(slug='none').order_by('name')
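# Constructor contract sketch (illustrative comments only; `assignments` and
# `sessions` are assumed to be supplied by the caller):
#
#     AgendaKeywordTool(assignments=assignments)       # ok
#     AgendaKeywordTool(sessions=sessions)             # ok
#     AgendaKeywordTool(assignments, sessions)         # TypeError: keyword-only arguments
#     AgendaKeywordTool(assignments=a, sessions=s)     # RuntimeError: exactly one allowed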
class AgendaFilterOrganizer(AgendaKeywordTool):
"""Helper class to organize agenda filters given a list of assignments or sessions
Either assignments or sessions must be specified (but not both). Keywords should be applied
to these items before calling either of the 'get_' methods, otherwise some special filters
may not be included (e.g., 'BoF' or 'Plenary'). If historic_group and/or historic_parent
attributes are present, these will be used instead of group/parent.
The organizer will process its inputs once, when one of its get_ methods is first called.
Terminology:
* column: group of related buttons, usually with a heading button.
* heading: button at the top of a column, e.g. an area. Has a keyword that applies to all in its column.
* category: a set of columns displayed as separate from other categories
* group filters: filters whose keywords derive from the group owning the session, such as for working groups
* non-group filters: filters whose keywords come from something other than a session's group
* special filters: group filters of type "special" that have no heading, end up in the catch-all column
* extra filters: ad hoc filters created based on the extra_labels list, go in the catch-all column
* catch-all column: column with no heading where extra filters and special filters are gathered
"""
# group acronyms in this list will never be used as filter buttons
exclude_acronyms = ('iesg', 'ietf', 'secretariat')
# extra keywords to include in the no-heading column if they apply to any sessions
extra_labels = ('BoF',)
# group types whose acronyms should be word-capitalized
capitalized_group_types = ('team',)
# group types whose acronyms should be all-caps
uppercased_group_types = ('area', 'ietf', 'irtf')
# check that the group labeling sets are disjoint
assert(set(capitalized_group_types).isdisjoint(uppercased_group_types))
# group acronyms that need special handling
special_group_labels = dict(edu='EDU', iepg='IEPG')
def __init__(self, *, single_category=False, **kwargs):
super(AgendaFilterOrganizer, self).__init__(**kwargs)
self.single_category = single_category
# filled in when _organize_filters() is called
self.filter_categories = None
self.special_filters = None
if self._use_legacy_keywords():
self.extra_labels += ('Plenary',) # need this when not using session purpose
def get_non_area_keywords(self):
"""Get list of any 'non-area' (aka 'special') keywords
These are the keywords corresponding to the right-most, headingless button column.
"""
if self.special_filters is None:
self._organize_filters()
return [sf['keyword'] for sf in self.special_filters['children']]
def get_filter_categories(self):
"""Get a list of filter categories
If single_category is True, this will be a list with one element. Otherwise it
may have multiple elements. Each element is a list of filter columns.
"""
if self.filter_categories is None:
self._organize_filters()
return self.filter_categories
def _organize_filters(self):
"""Process inputs to construct and categorize filter lists"""
headings, special = self._group_filter_headings()
self.filter_categories = self._categorize_group_filters(headings)
# Create an additional category with non-group filters and special/extra filters
non_group_category = self._non_group_filters()
# special filters include self.extra_labels and any 'special' group filters
self.special_filters = self._extra_filters()
for g in special:
self.special_filters['children'].append(self._group_filter_entry(g))
if len(self.special_filters['children']) > 0:
self.special_filters['children'].sort(key=self._group_sort_key)
non_group_category.append(self.special_filters)
# if we have any additional filters, add them
if len(non_group_category) > 0:
if self.single_category:
# if a single category is requested, just add them to that category
self.filter_categories[0].extend(non_group_category)
else:
# otherwise add these as a separate category
self.filter_categories.append(non_group_category)
def _group_filter_headings(self):
"""Collect group-based filters
Output is a tuple (dict(group->set), set). The dict keys are groups to be used as headings
with sets of child groups as associated values. The set is 'special' groups that have no
heading group.
"""
        # distinct groups of the sessions in the schedule (historic groups when available)
groups = set(self._get_group(s) for s in self.sessions
if s
and self._get_group(s))
        log.assertion('len(groups) == len(set(g.acronym for g in groups))')  # no repeated acronyms
group_parents = set(self._get_group_parent(g) for g in groups if self._get_group_parent(g))
        log.assertion('len(group_parents) == len(set(gp.acronym for gp in group_parents))')  # no repeated acronyms
all_groups = groups.union(group_parents)
all_groups.difference_update([g for g in all_groups if g.acronym in self.exclude_acronyms])
headings = {g: set()
for g in all_groups
if g.features.agenda_filter_type_id == 'heading'}
special = set(g for g in all_groups
if g.features.agenda_filter_type_id == 'special')
for g in groups:
if g.features.agenda_filter_type_id == 'normal':
# normal filter group with a heading parent goes in that column
p = self._get_group_parent(g)
if p in headings:
headings[p].add(g)
else:
# normal filter group with no heading parent is 'special'
special.add(g)
return headings, special
def _categorize_group_filters(self, headings):
"""Categorize the group-based filters
Returns a list of one or more categories of filter columns. When single_category is True,
it will always be only one category.
"""
area_category = [] # headings are area groups
non_area_category = [] # headings are non-area groups
for h in headings:
if h.type_id == 'area' or self.single_category:
area_category.append(self._group_filter_column(h, headings[h]))
else:
non_area_category.append(self._group_filter_column(h, headings[h]))
area_category.sort(key=self._group_sort_key)
if self.single_category:
return [area_category]
non_area_category.sort(key=self._group_sort_key)
return [area_category, non_area_category]
def _non_group_filters(self):
"""Get list of non-group filter columns
Empty columns will be omitted.
"""
if self.sessions is None:
sessions = [a.session for a in self.assignments]
else:
sessions = self.sessions
# Call legacy version for older meetings
if self._use_legacy_keywords():
return self._legacy_non_group_filters(sessions)
# Not using legacy version
filter_cols = []
for purpose in self.filterable_purposes:
if purpose.slug == 'regular':
continue
# Map label to its keyword, discarding duplicate labels.
# This does what we want as long as sessions with the same
# name and purpose belong to the same group.
sessions_by_name = {
session.name: session
for session in sessions if session.purpose == purpose
}
if len(sessions_by_name) > 0:
# keyword needs to match what's tagged in filter_keywords_for_session()
heading_kw = self._purpose_keyword(purpose)
children = []
for name, session in sessions_by_name.items():
children.append(self._filter_entry(
label=name,
keyword=self._session_name_keyword(session),
toggled_by=[self._group_keyword(session.group)] if session.group else None,
is_bof=False,
))
column = self._filter_column(
label=purpose.name,
keyword=heading_kw,
children=children,
)
filter_cols.append(column)
return filter_cols
def _legacy_non_group_filters(self, sessions):
"""Get list of non-group filters for older meetings
Returns a list of filter columns
"""
office_hours_items = set()
suffix = ' office hours'
for s in sessions:
if s.name.lower().endswith(suffix):
office_hours_items.add((s.name[:-len(suffix)].strip(), s.group))
headings = []
# currently we only do office hours
if len(office_hours_items) > 0:
column = self._filter_column(
label='Office Hours',
keyword='officehours',
children=[
self._filter_entry(
label=label,
keyword=f'{label.lower().replace(" ", "")}-officehours',
toggled_by=[self._group_keyword(group)] if group else None,
is_bof=False,
)
for label, group in sorted(office_hours_items, key=lambda item: item[0].upper())
])
headings.append(column)
return headings
def _extra_filters(self):
"""Get list of filters corresponding to self.extra_labels"""
item_source = self.assignments or self.sessions or []
candidates = set(self.extra_labels)
return self._filter_column(
label=None,
keyword=None,
children=[
self._filter_entry(label=label, keyword=xslugify(label), toggled_by=[], is_bof=False)
for label in candidates if any(
# Keep only those that will affect at least one session
[label.lower() in item.filter_keywords for item in item_source]
)]
)
@staticmethod
def _filter_entry(label, keyword, is_bof, toggled_by=None):
"""Construct a filter entry representation"""
# get our own copy of the list for toggled_by
if toggled_by is None:
toggled_by = []
if is_bof:
toggled_by = ['bof'] + toggled_by
return dict(
label=label,
keyword=keyword,
toggled_by=toggled_by,
is_bof=is_bof,
)
def _filter_column(self, label, keyword, children):
"""Construct a filter column given a label, keyword, and list of child entries"""
entry = self._filter_entry(label, keyword, False) # heading
entry['children'] = children
# all children should be controlled by the heading keyword
if keyword:
for child in children:
if keyword not in child['toggled_by']:
child['toggled_by'] = [keyword] + child['toggled_by']
return entry
def _group_label(self, group):
"""Generate a label for a group filter button"""
label = group.acronym
if label in self.special_group_labels:
return self.special_group_labels[label]
elif group.type_id in self.capitalized_group_types:
return label.capitalize()
elif group.type_id in self.uppercased_group_types:
return label.upper()
return label
def _group_filter_entry(self, group):
"""Construct a filter_entry for a group filter button"""
return self._filter_entry(
label=self._group_label(group),
keyword=self._group_keyword(group),
toggled_by=[self._group_keyword(group.parent)] if group.parent else None,
is_bof=group.is_bof(),
)
def _group_filter_column(self, heading, children):
"""Construct a filter column given a heading group and a list of its child groups"""
return self._filter_column(
label=None if heading is None else self._group_label(heading),
keyword=self._group_keyword(heading),
children=sorted([self._group_filter_entry(g) for g in children], key=self._group_sort_key),
)
@staticmethod
def _group_sort_key(g):
return 'zzzzzzzz' if g is None else g['label'].upper() # sort blank to the end
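# Shape of the structures built above, sketched with hypothetical values:
# get_filter_categories() returns a list of categories, each category is a list
# of columns, and each column is a _filter_entry dict carrying its children, e.g.
#
#     [  # one category
#         [  # one column, headed by an area group
#             {'label': 'ART', 'keyword': 'art', 'toggled_by': [], 'is_bof': False,
#              'children': [
#                  {'label': 'httpbis', 'keyword': 'httpbis',
#                   'toggled_by': ['art'], 'is_bof': False},
#              ]},
#         ],
#     ]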
class AgendaKeywordTagger(AgendaKeywordTool):
"""Class for applying keywords to agenda timeslot assignments.
This is the other side of the agenda filtering: AgendaFilterOrganizer generates the
filter buttons, this applies keywords to the entries being filtered.
"""
def apply(self):
"""Apply tags to sessions / assignments"""
if self.assignments is not None:
self._tag_assignments_with_filter_keywords()
else:
self._tag_sessions_with_filter_keywords()
def apply_session_keywords(self):
"""Tag each item with its session-specific keyword"""
if self.assignments is not None:
for a in self.assignments:
a.session_keyword = self.filter_keyword_for_specific_session(a.session)
else:
for s in self.sessions:
s.session_keyword = self.filter_keyword_for_specific_session(s)
def _is_regular_agenda_filter_group(self, group):
"""Should this group appear in the 'regular' agenda filter button lists?"""
parent = self._get_group_parent(group)
return (
group.features.agenda_filter_type_id == 'normal'
and parent
and parent.features.agenda_filter_type_id == 'heading'
)
def _tag_assignments_with_filter_keywords(self):
"""Add keywords for agenda filtering
Keywords are all lower case.
"""
for a in self.assignments:
a.filter_keywords = self._filter_keywords_for_assignment(a)
a.filter_keywords = sorted(list(a.filter_keywords))
def _tag_sessions_with_filter_keywords(self):
for s in self.sessions:
s.filter_keywords = self._filter_keywords_for_session(s)
s.filter_keywords = sorted(list(s.filter_keywords))
@staticmethod
def _legacy_extra_session_keywords(session):
"""Get extra keywords for a session at a legacy meeting"""
extra = []
if session.type_id == 'plenary':
extra.append('plenary')
office_hours_match = re.match(r'^ *\w+(?: +\w+)* +office hours *$', session.name, re.IGNORECASE)
if office_hours_match is not None:
suffix = 'officehours'
extra.extend([
'officehours',
session.name.lower().replace(' ', '')[:-len(suffix)] + '-officehours',
])
return extra
def _filter_keywords_for_session(self, session):
keywords = set()
if session.purpose in self.filterable_purposes:
keywords.add(self._purpose_keyword(session.purpose))
group = self._get_group(session)
if group is not None:
if group.state_id == 'bof':
keywords.add('bof')
keywords.add(self._group_keyword(group))
specific_kw = self.filter_keyword_for_specific_session(session)
if specific_kw is not None:
keywords.add(specific_kw)
kw = self._session_name_keyword(session)
if kw:
keywords.add(kw)
# Only sessions belonging to "regular" groups should respond to the
# parent group filter keyword (often the 'area'). This must match
# the test used by the agenda() view to decide whether a group
# gets an area or non-area filter button.
if self._is_regular_agenda_filter_group(group):
area = self._get_group_parent(group)
if area is not None:
keywords.add(self._group_keyword(area))
if self._use_legacy_keywords():
keywords.update(self._legacy_extra_session_keywords(session))
return sorted(keywords)
def _filter_keywords_for_assignment(self, assignment):
keywords = self._filter_keywords_for_session(assignment.session)
return sorted(keywords)
def filter_keyword_for_specific_session(self, session):
"""Get keyword that identifies a specific session
Returns None if the session cannot be selected individually.
"""
group = self._get_group(session)
if group is None:
return None
kw = self._group_keyword(group) # start with this
token = session.docname_token_only_for_multiple()
return kw if token is None else '{}-{}'.format(kw, token)
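# Typical flow tying the tagger and organizer together, sketched below
# (illustrative only; `assignments` is assumed to be the schedule's assignment
# list with sessions and groups preprocessed as above):
#
#     AgendaKeywordTagger(assignments=assignments).apply()      # sets a.filter_keywords
#     organizer = AgendaFilterOrganizer(assignments=assignments)
#     filter_categories = organizer.get_filter_categories()
#     non_area_keywords = organizer.get_non_area_keywords()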
def read_session_file(type, num, doc):
# XXXX FIXME: the path fragment in the code below should be moved to
# settings.py. The *_PATH settings should be generalized to format()
# style python format, something like this:
# DOC_PATH_FORMAT = { "agenda": "/foo/bar/agenda-{meeting.number}/agenda-{meeting-number}-{doc.group}*", }
#
# FIXME: uploaded_filename should be replaced with a function call that computes names that are fixed
path = os.path.join(settings.AGENDA_PATH, "%s/%s/%s" % (num, type, doc.uploaded_filename))
if doc.uploaded_filename and os.path.exists(path):
with io.open(path, 'rb') as f:
return f.read(), path
else:
return None, path
def read_agenda_file(num, doc):
return read_session_file('agenda', num, doc)
def convert_draft_to_pdf(doc_name):
inpath = os.path.join(settings.IDSUBMIT_REPOSITORY_PATH, doc_name + ".txt")
outpath = os.path.join(settings.INTERNET_DRAFT_PDF_PATH, doc_name + ".pdf")
try:
infile = io.open(inpath, "r")
except IOError:
return
t,tempname = mkstemp()
os.close(t)
tempfile = io.open(tempname, "w")
    pageend = 0
    newpage = 0
    formfeed = 0
for line in infile:
line = re.sub("\r","",line)
line = re.sub("[ \t]+$","",line)
if re.search(r"\[?[Pp]age [0-9ivx]+\]?[ \t]*$",line):
pageend=1
tempfile.write(line)
continue
if re.search("^[ \t]*\f",line):
formfeed=1
tempfile.write(line)
continue
if re.search("^ *INTERNET.DRAFT.+[0-9]+ *$",line) or re.search("^ *Internet.Draft.+[0-9]+ *$",line) or re.search("^draft-[-a-z0-9_.]+.*[0-9][0-9][0-9][0-9]$",line) or re.search("^RFC.+[0-9]+$",line):
newpage=1
if re.search("^[ \t]*$",line) and pageend and not newpage:
continue
if pageend and newpage and not formfeed:
tempfile.write("\f")
pageend=0
formfeed=0
newpage=0
tempfile.write(line)
infile.close()
tempfile.close()
t,psname = mkstemp()
os.close(t)
pipe("enscript --margins 76::76: -B -q -p "+psname + " " +tempname)
os.unlink(tempname)
pipe("ps2pdf "+psname+" "+outpath)
os.unlink(psname)
def schedule_permissions(meeting, schedule, user):
# do this in positive logic.
cansee = False
canedit = False
secretariat = False
if has_role(user, 'Secretariat'):
cansee = True
secretariat = True
# NOTE: secretariat is not superuser for edit!
elif schedule.public:
cansee = True
elif schedule.visible and has_role(user, ['Area Director', 'IAB Chair', 'IRTF Chair']):
cansee = True
if user_is_person(user, schedule.owner):
cansee = True
canedit = not schedule.is_official_record
return cansee, canedit, secretariat
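# Usage sketch (hypothetical view code; `permission_denied` stands in for
# whatever error handling the caller actually uses):
#
#     cansee, canedit, secretariat = schedule_permissions(meeting, schedule, request.user)
#     if not cansee:
#         permission_denied(request, "You may not view this schedule.")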
# -------------------------------------------------
# Interim Meeting Helpers
# -------------------------------------------------
def can_approve_interim_request(meeting, user):
'''Returns True if the user has permissions to approve an interim meeting request'''
if not user or isinstance(user,AnonymousUser):
return False
if meeting.type.slug != 'interim':
return False
if has_role(user, 'Secretariat'):
return True
person = get_person_for_user(user)
session = meeting.session_set.first()
if not session:
return False
group = session.group
if group.type.slug in ['wg','ag']:
if group.parent.role_set.filter(name='ad', person=person) or group.role_set.filter(name='ad', person=person):
return True
if group.type.slug in ['rg','rag'] and group.parent.role_set.filter(name='chair', person=person):
return True
if group.type.slug == 'program':
if person.role_set.filter(group__acronym='iab', name='member'):
return True
return False
def can_edit_interim_request(meeting, user):
'''Returns True if the user can edit the interim meeting request'''
if meeting.type.slug != 'interim':
return False
if has_role(user, 'Secretariat'): # Consider removing - can_manage_group should handle this
return True
session = meeting.session_set.first()
if not session:
return False
group = session.group
if can_manage_group(user, group):
return True
elif can_approve_interim_request(meeting, user):
return True
else:
return False
def can_request_interim_meeting(user):
return can_manage_some_groups(user)
def can_view_interim_request(meeting, user):
'''Returns True if the user can see the pending interim request in the pending interim view'''
if meeting.type.slug != 'interim':
return False
session = meeting.session_set.first()
if not session:
return False
group = session.group
return can_manage_group(user, group)
def create_interim_meeting(group, date, city='', country='', timezone='UTC',
person=None):
"""Helper function to create interim meeting and associated schedule"""
if not person:
person = Person.objects.get(name='(System)')
number = get_next_interim_number(group.acronym, date)
meeting = Meeting.objects.create(
number=number,
type_id='interim',
date=date,
days=1,
city=city,
country=country,
time_zone=timezone)
schedule = Schedule.objects.create(
meeting=meeting,
owner=person,
visible=True,
public=True)
meeting.schedule = schedule
meeting.save()
return meeting
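# Example call (sketch; `group` is assumed to be a Group instance and the date
# is illustrative). The meeting number comes from get_next_interim_number():
#
#     meeting = create_interim_meeting(group, datetime.date(2024, 5, 14), timezone='UTC')
#     # meeting.number -> e.g. 'interim-2024-<acronym>-01' for the first such meeting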
def get_announcement_initial(meeting, is_change=False):
'''Returns a dictionary suitable to initialize an InterimAnnouncementForm
(Message ModelForm)'''
group = meeting.session_set.first().group
in_person = bool(meeting.city)
initial = {}
addrs = gather_address_lists('interim_announced',group=group).as_strings()
initial['to'] = addrs.to
initial['cc'] = addrs.cc
initial['frm'] = settings.INTERIM_ANNOUNCE_FROM_EMAIL_PROGRAM if group.type_id=='program' else settings.INTERIM_ANNOUNCE_FROM_EMAIL_DEFAULT
if in_person:
desc = 'Interim'
else:
desc = 'Virtual'
if is_change:
change = ' CHANGED'
else:
change = ''
type = group.type.slug.upper()
if group.type.slug == 'wg' and group.state.slug == 'bof':
type = 'BOF'
assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[meeting.schedule, meeting.schedule.base if meeting.schedule else None],
session__in=meeting.session_set.not_canceled()
).order_by('timeslot__time')
initial['subject'] = '{name} ({acronym}) {type} {desc} Meeting: {date}{change}'.format(
name=group.name,
acronym=group.acronym,
type=type,
desc=desc,
date=meeting.date,
change=change)
body = render_to_string('meeting/interim_announcement.txt', locals())
initial['body'] = body
return initial
def get_earliest_session_date(formset):
'''Return earliest date from InterimSession Formset'''
return sorted([f.cleaned_data['date'] for f in formset.forms if f.cleaned_data.get('date')])[0]
def is_interim_meeting_approved(meeting):
return add_event_info_to_session_qs(meeting.session_set.all()).first().current_status == 'apprw'
def get_next_interim_number(acronym,date):
'''
This function takes a group acronym and date object and returns the next number
to use for an interim meeting. The format is interim-[year]-[acronym]-[01-99]
'''
base = 'interim-%s-%s-' % (date.year, acronym)
# can't use count() to calculate the next number in case one was deleted
meetings = Meeting.objects.filter(type='interim', number__startswith=base)
if meetings:
serial = sorted([ int(x.number.split('-')[-1]) for x in meetings ])[-1]
else:
serial = 0
return "%s%02d" % (base, serial+1)
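# Worked example of the numbering rule above (hypothetical existing meetings):
# if 'interim-2024-foo-01' and 'interim-2024-foo-03' exist (02 was deleted),
# the highest serial is 3 and get_next_interim_number('foo', date) returns
# 'interim-2024-foo-04' -- hence max()+1 rather than count()+1.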
def get_next_agenda_name(meeting):
"""Returns the next name to use for an agenda document for *meeting*"""
group = meeting.session_set.first().group
documents = Document.objects.filter(type='agenda', session__meeting=meeting)
if documents:
sequences = [int(d.name.split('-')[-1]) for d in documents]
last_sequence = sorted(sequences)[-1]
else:
last_sequence = 0
return 'agenda-{meeting}-{group}-{sequence}'.format(
meeting=meeting.number,
group=group.acronym,
sequence=str(last_sequence + 1).zfill(2))
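# Example (hypothetical): if the meeting already has agenda documents whose
# names end in '-01' and '-02', last_sequence is 2 and the returned name ends
# in '-03', e.g. 'agenda-123-foo-03' for group 'foo' at meeting 123.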
def make_materials_directories(meeting):
'''
This function takes a meeting object and creates the appropriate materials directories
'''
path = meeting.get_materials_path()
    # Default umask is 0o022, meaning strip write permission for group and others.
    # Change this temporarily to 0, to keep write permission for group and others.
# (WHY??) (Note: this code is old -- was present already when the secretariat code
# was merged with the regular datatracker code; then in secr/proceedings/views.py
# in make_directories())
saved_umask = os.umask(0)
for leaf in ('slides','agenda','minutes','id','rfc','bluesheets'):
target = os.path.join(path,leaf)
if not os.path.exists(target):
os.makedirs(target)
os.umask(saved_umask)
def send_interim_approval_request(meetings):
"""Sends an email to the secretariat, group chairs, and responsible area
director or the IRTF chair noting that approval has been requested for a
new interim meeting. Takes a list of one or more meetings."""
first_session = meetings[0].session_set.first()
group = first_session.group
requester = session_requested_by(first_session)
(to_email, cc_list) = gather_address_lists('session_requested',group=group,person=requester)
from_email = (settings.SESSION_REQUEST_FROM_EMAIL)
subject = '{group} - New Interim Meeting Request'.format(group=group.acronym)
template = 'meeting/interim_approval_request.txt'
approval_urls = []
for meeting in meetings:
url = settings.IDTRACKER_BASE_URL + reverse('ietf.meeting.views.interim_request_details', kwargs={'number': meeting.number})
approval_urls.append(url)
if len(meetings) > 1:
is_series = True
else:
is_series = False
approver_set = set()
for authrole in group.features.groupman_authroles: # NOTE: This makes an assumption that the authroles are exactly the set of approvers
approver_set.add(authrole)
approvers = list(approver_set)
context = {
'group': group,
'is_series': is_series,
'meetings': meetings,
'approvers': approvers,
'requester': requester,
'approval_urls': approval_urls,
}
send_mail(None,
to_email,
from_email,
subject,
template,
context,
cc=cc_list)
def send_interim_approval(user, meeting):
"""Send an email to chairs and whoever initiated the action that resulted in approval that an interim is approved"""
first_session = meeting.session_set.first()
(to_email,cc_list) = gather_address_lists('interim_approved',group=first_session.group,person=user.person)
from_email = (settings.SESSION_REQUEST_FROM_EMAIL)
subject = f'{meeting.number} interim approved'
template = 'meeting/interim_approval.txt'
context = {
'meeting': meeting,
}
send_mail(None,
to_email,
from_email,
subject,
template,
context,
cc=cc_list)
def send_interim_announcement_request(meeting):
"""Sends an email to the secretariat that an interim meeting is ready for
announcement, includes the link to send the official announcement"""
first_session = meeting.session_set.first()
group = first_session.group
requester = session_requested_by(first_session)
(to_email, cc_list) = gather_address_lists('interim_announce_requested')
from_email = (settings.SESSION_REQUEST_FROM_EMAIL)
subject = '{group} - interim meeting ready for announcement'.format(group=group.acronym)
template = 'meeting/interim_announcement_request.txt'
announce_url = settings.IDTRACKER_BASE_URL + reverse('ietf.meeting.views.interim_request_details', kwargs={'number': meeting.number})
context = locals()
send_mail(None,
to_email,
from_email,
subject,
template,
context,
              cc=cc_list)
def send_interim_meeting_cancellation_notice(meeting):
"""Sends an email that a scheduled interim meeting has been cancelled."""
session = meeting.session_set.first()
group = session.group
(to_email, cc_list) = gather_address_lists('interim_cancelled',group=group)
from_email = settings.INTERIM_ANNOUNCE_FROM_EMAIL_PROGRAM if group.type_id=='program' else settings.INTERIM_ANNOUNCE_FROM_EMAIL_DEFAULT
subject = '{group} ({acronym}) {type} Interim Meeting Cancelled (was {date})'.format(
group=group.name,
acronym=group.acronym,
type=group.type.slug.upper(),
date=meeting.date.strftime('%Y-%m-%d'))
start_time = session.official_timeslotassignment().timeslot.time
end_time = start_time + session.requested_duration
is_multi_day = session.meeting.session_set.with_current_status().filter(current_status='sched').count() > 1
template = 'meeting/interim_meeting_cancellation_notice.txt'
context = locals()
send_mail(None,
to_email,
from_email,
subject,
template,
context,
cc=cc_list)
def send_interim_session_cancellation_notice(session):
"""Sends an email that one session of a scheduled interim meeting has been cancelled."""
group = session.group
start_time = session.official_timeslotassignment().timeslot.time
end_time = start_time + session.requested_duration
(to_email, cc_list) = gather_address_lists('interim_cancelled',group=group)
from_email = settings.INTERIM_ANNOUNCE_FROM_EMAIL_PROGRAM if group.type_id=='program' else settings.INTERIM_ANNOUNCE_FROM_EMAIL_DEFAULT
if session.name:
description = '"%s" session' % session.name
else:
description = 'interim meeting session'
subject = '{group} ({acronym}) {type} {description} cancelled (was {date})'.format(
group=group.name,
acronym=group.acronym,
type=group.type.slug.upper(),
description=description,
date=start_time.date().strftime('%Y-%m-%d'))
is_multi_day = session.meeting.session_set.with_current_status().filter(current_status='sched').count() > 1
template = 'meeting/interim_session_cancellation_notice.txt'
context = locals()
send_mail(None,
to_email,
from_email,
subject,
template,
context,
cc=cc_list)
def send_interim_minutes_reminder(meeting):
"""Sends an email reminding chairs to submit minutes of interim *meeting*"""
session = meeting.session_set.first()
group = session.group
(to_email, cc_list) = gather_address_lists('session_minutes_reminder',group=group)
from_email = '[email protected]'
subject = 'Action Required: Minutes from {group} ({acronym}) {type} Interim Meeting on {date}'.format(
group=group.name,
acronym=group.acronym,
type=group.type.slug.upper(),
date=meeting.date.strftime('%Y-%m-%d'))
template = 'meeting/interim_minutes_reminder.txt'
context = locals()
send_mail(None,
to_email,
from_email,
subject,
template,
context,
cc=cc_list)
def sessions_post_save(request, forms):
"""Helper function to perform various post save operations on each form of a
InterimSessionModelForm formset"""
for form in forms:
if not form.has_changed():
continue
if form.instance.pk is not None and not SchedulingEvent.objects.filter(session=form.instance).exists():
if not form.requires_approval:
status_id = 'scheda'
else:
status_id = 'apprw'
SchedulingEvent.objects.create(
session=form.instance,
status_id=status_id,
by=request.user.person,
)
if ('date' in form.changed_data) or ('time' in form.changed_data):
update_interim_session_assignment(form)
if 'agenda' in form.changed_data:
form.save_agenda()
def update_interim_session_assignment(form):
"""Helper function to create / update timeslot assigned to interim session"""
time = datetime.datetime.combine(
form.cleaned_data['date'],
form.cleaned_data['time'])
session = form.instance
if session.official_timeslotassignment():
slot = session.official_timeslotassignment().timeslot
slot.time = time
slot.duration = session.requested_duration
slot.save()
else:
slot = TimeSlot.objects.create(
meeting=session.meeting,
type_id='regular',
duration=session.requested_duration,
time=time)
SchedTimeSessAssignment.objects.create(
timeslot=slot,
session=session,
schedule=session.meeting.schedule)
def populate_important_dates(meeting):
    assert not ImportantDate.objects.filter(meeting=meeting).exists()
assert meeting.type_id=='ietf'
for datename in ImportantDateName.objects.filter(used=True):
ImportantDate.objects.create(meeting=meeting,name=datename,date=meeting.date+datetime.timedelta(days=datename.default_offset_days))
def update_important_dates(meeting):
assert meeting.type_id=='ietf'
for datename in ImportantDateName.objects.filter(used=True):
date = meeting.date+datetime.timedelta(days=datename.default_offset_days)
d = ImportantDate.objects.filter(meeting=meeting, name=datename).first()
if d:
d.date = date
d.save()
else:
ImportantDate.objects.create(meeting=meeting, name=datename, date=date)
|
the-stack_0_27922
|
#*****************************************************#
# This file is part of GRIDOPT. #
# #
# Copyright (c) 2015, Tomas Tinoco De Rubira. #
# #
# GRIDOPT is released under the BSD 2-clause license. #
#*****************************************************#
from __future__ import print_function
import time
import numpy as np
from .method_error import *
from .method import PFmethod
from numpy.linalg import norm
class ACPF(PFmethod):
"""
AC power flow method.
"""
CONTROL_MODE_LOCKED = 'locked'
CONTROL_MODE_FREE = 'free'
CONTROL_MODE_REG = 'regulating'
name = 'ACPF'
_parameters = {'weight_vmag': 1e0, # weight for voltage magnitude regularization
'weight_vang': 1e-3, # weight for angle difference regularization
'weight_powers': 1e-3, # weight for gen powers regularization
'weight_controls': 1e0, # weight for control deviation penalty
'weight_var': 1e-5, # weight for general variable regularization
'v_min_clip': 0.5, # lower v threshold for clipping
'v_max_clip': 1.5, # upper v threshold for clipping
'v_limits': False, # voltage magnitude limits
'Q_limits': True, # flag for enforcing generator, VSC and FACTS reactive power limits
'Q_mode': 'regulating', # reactive power mode: free, regulating
'shunt_limits': True, # flag for enforcing switched shunt susceptance limits
'shunt_mode': 'locked', # switched shunts mode: locked, free, regulating
'tap_limits': True, # flag for enforcing transformer tap ratio limits
'tap_mode': 'locked', # transformer tap ratio mode: locked, free, regulating
'lock_vsc_P_dc': True, # flag for locking vsc P dc
'lock_csc_P_dc': True, # flag for locking csc P dc
'lock_csc_i_dc': True, # flag for locking csc i dc
'vdep_loads': False, # flag for modeling voltage dependent loads
'pvpq_start_k': 0, # start iteration number for PVPQ switching heuristics
'vmin_thresh': 0.1, # minimum voltage magnitude threshold
'gens_redispatch': False, # flag for allowing active power redispatch
'shunts_round': True, # flag for rounding discrete switched shunt susceptances (not supported yet)
'taps_round': True, # flag for rounding discrete transformer tap ratios (not supported yet)
'v_mag_warm_ref': False, # flag for using current v_mag as reference in v_mag regularization
'solver': 'nr', # OPTALG optimization solver: augl, ipopt, nr, inlp
'tap_step': 0.5, # tap ratio acceleration factor (NR only)
'shunt_step': 0.5, # susceptance acceleration factor (NR only)
'dtap': 1e-4, # tap ratio perturbation (NR only)
'dsus': 1e-4} # susceptance perturbation (NR only)
_parameters_augl = {'feastol' : 1e-4,
'optol' : 1e0,
'kappa' : 1e-5,
'theta_max': 1e-6,
'sigma_init_max': 1e9}
_parameters_ipopt = {}
_parameters_inlp = {'feastol' : 1e-4,
'optol' : 1e0}
_parameters_nr = {}
def __init__(self):
from optalg.opt_solver import OptSolverAugL, OptSolverIpopt, OptSolverNR, OptSolverINLP
# Parent init
PFmethod.__init__(self)
# Solver params
augl_params = OptSolverAugL.parameters.copy()
augl_params.update(self._parameters_augl) # overwrite defaults
ipopt_params = OptSolverIpopt.parameters.copy()
ipopt_params.update(self._parameters_ipopt) # overwrite defaults
inlp_params = OptSolverINLP.parameters.copy()
inlp_params.update(self._parameters_inlp) # overwrite defaults
nr_params = OptSolverNR.parameters.copy()
nr_params.update(self._parameters_nr) # overwrite defaults
self._parameters = ACPF._parameters.copy()
self._parameters['solver_parameters'] = {'augl': augl_params,
'ipopt': ipopt_params,
'nr': nr_params,
'inlp': inlp_params}
def create_problem(self, net):
solver_name = self._parameters['solver']
if solver_name == 'nr':
return self.create_problem_nr(net)
else:
return self.create_problem_opt(net)
def create_problem_nr(self, net):
import pfnet
# Parameters
params = self._parameters
Q_mode = params['Q_mode']
Q_limits = params['Q_limits']
shunt_mode = params['shunt_mode']
shunt_limits = params['shunt_limits']
tap_mode = params['tap_mode']
tap_limits = params['tap_limits']
lock_vsc_P_dc = params['lock_vsc_P_dc']
lock_csc_P_dc = params['lock_csc_P_dc']
lock_csc_i_dc = params['lock_csc_i_dc']
vdep_loads = params['vdep_loads']
gens_redispatch = params['gens_redispatch']
# Check shunt options
if shunt_mode not in [self.CONTROL_MODE_LOCKED,
self.CONTROL_MODE_REG]:
raise ValueError('invalid shunts mode')
if shunt_mode == self.CONTROL_MODE_REG and not shunt_limits:
raise ValueError('unsupported shunts configuration')
# Check tap options
if tap_mode not in [self.CONTROL_MODE_LOCKED,
self.CONTROL_MODE_REG]:
raise ValueError('invalid taps mode')
if tap_mode == self.CONTROL_MODE_REG and not tap_limits:
raise ValueError('unsupported taps configuration')
# Check Q options
if Q_mode != self.CONTROL_MODE_REG:
raise ValueError('invalid reactive power mode')
# Check other options
if gens_redispatch:
raise ValueError('generation redispatch not supported')
if not lock_vsc_P_dc:
raise ValueError('VSC P DC must be locked')
if not lock_csc_P_dc:
raise ValueError('CSC P DC must be locked')
if not lock_csc_i_dc:
raise ValueError('CSC i DC must be locked')
# Clear flags
net.clear_flags()
# Buses
net.set_flags('bus',
'variable',
'not slack',
'voltage angle')
net.set_flags('bus',
'variable',
'any',
'voltage magnitude')
# Generators
net.set_flags('generator',
'variable',
'slack',
'active power')
net.set_flags('generator',
'variable',
'regulator',
'reactive power')
# VSC HVDC
net.set_flags('vsc converter',
'variable',
'any',
['dc power', 'active power', 'reactive power'])
# CSC HVDC
net.set_flags('csc converter',
'variable',
'any',
['dc power', 'active power', 'reactive power'])
# DC buses
net.set_flags('dc bus',
'variable',
'any',
'voltage')
# FACTS
net.set_flags('facts',
'variable',
'any',
'all')
# Loads
if vdep_loads:
for load in net.loads:
if load.is_voltage_dependent() and load.is_in_service():
net.set_flags_of_component(load,
'variable',
['active power', 'reactive power'])
# Tap changers
if tap_mode != self.CONTROL_MODE_LOCKED:
net.set_flags('branch',
['variable', 'fixed'],
'tap changer - v',
'tap ratio')
# Switched shunts
if shunt_mode != self.CONTROL_MODE_LOCKED:
net.set_flags('shunt',
['variable', 'fixed'],
'switching - v',
'susceptance')
# Set up problem
problem = pfnet.Problem(net)
problem.add_constraint(pfnet.Constraint('AC power balance', net))
problem.add_constraint(pfnet.Constraint('HVDC power balance', net))
problem.add_constraint(pfnet.Constraint('generator active power participation', net))
problem.add_constraint(pfnet.Constraint('variable fixing', net))
problem.add_constraint(pfnet.Constraint('VSC converter equations', net))
problem.add_constraint(pfnet.Constraint('CSC converter equations', net))
problem.add_constraint(pfnet.Constraint('FACTS equations', net))
problem.add_constraint(pfnet.Constraint('VSC DC voltage control', net))
problem.add_constraint(pfnet.Constraint('CSC DC voltage control', net))
problem.add_constraint(pfnet.Constraint('VSC DC power control', net))
problem.add_constraint(pfnet.Constraint('CSC DC power control', net))
problem.add_constraint(pfnet.Constraint('CSC DC current control', net))
problem.add_constraint(pfnet.Constraint('PVPQ switching', net))
problem.add_constraint(pfnet.Constraint('switching power factor regulation', net))
problem.add_constraint(pfnet.Constraint('switching FACTS active power control', net))
problem.add_constraint(pfnet.Constraint('switching FACTS reactive power control', net))
if vdep_loads:
problem.add_constraint(pfnet.Constraint('load voltage dependence', net))
if Q_limits:
problem.add_heuristic(pfnet.Heuristic('PVPQ switching', net))
problem.add_heuristic(pfnet.Heuristic('switching power factor regulation', net))
problem.analyze()
# Check
if (problem.J.shape[0] + problem.A.shape[0] != problem.get_num_primal_variables()):
raise PFmethodError_BadProblem()
# Return
return problem
def create_problem_opt(self, net):
import pfnet
# Parameters
params = self._parameters
wm = params['weight_vmag']
wa = params['weight_vang']
wp = params['weight_powers']
wc = params['weight_controls']
wv = params['weight_var']
v_limits = params['v_limits']
Q_mode = params['Q_mode']
Q_limits = params['Q_limits']
shunt_mode = params['shunt_mode']
shunt_limits = params['shunt_limits']
tap_mode = params['tap_mode']
tap_limits = params['tap_limits']
lock_vsc_P_dc = params['lock_vsc_P_dc']
lock_csc_P_dc = params['lock_csc_P_dc']
lock_csc_i_dc = params['lock_csc_i_dc']
vdep_loads = params['vdep_loads']
v_mag_warm_ref = params['v_mag_warm_ref']
gens_redispatch = params['gens_redispatch']
# Check shunt options
if shunt_mode not in [self.CONTROL_MODE_LOCKED,
self.CONTROL_MODE_FREE,
self.CONTROL_MODE_REG]:
raise ValueError('invalid shunts mode')
if shunt_mode == self.CONTROL_MODE_REG and not shunt_limits:
raise ValueError('unsupported shunts configuration')
# Check tap options
if tap_mode not in [self.CONTROL_MODE_LOCKED,
self.CONTROL_MODE_FREE,
self.CONTROL_MODE_REG]:
raise ValueError('invalid taps mode')
if tap_mode == self.CONTROL_MODE_REG and not tap_limits:
raise ValueError('unsupported taps configuration')
# Check Q options
if Q_mode not in [self.CONTROL_MODE_REG,
self.CONTROL_MODE_FREE]:
raise ValueError('invalid reactive power mode')
# Clear flags
net.clear_flags()
# Buses
net.set_flags('bus',
'variable',
'not slack',
'voltage angle')
net.set_flags('bus',
'variable',
'any',
'voltage magnitude')
if Q_mode == self.CONTROL_MODE_REG and not Q_limits:
net.set_flags('bus',
'fixed',
'v set regulated',
'voltage magnitude')
if v_limits:
net.set_flags('bus',
'bounded',
'any',
'voltage magnitude')
        # Generators
if gens_redispatch:
net.set_flags('generator',
['variable', 'bounded'],
'any',
'active power')
else:
net.set_flags('generator',
'variable',
'slack',
'active power')
net.set_flags('generator',
'variable',
'regulator',
'reactive power')
if Q_mode == self.CONTROL_MODE_FREE and Q_limits:
net.set_flags('generator',
'bounded',
'regulator',
'reactive power')
# Loads
if vdep_loads:
for load in net.loads:
if load.is_voltage_dependent() and load.is_in_service():
net.set_flags_of_component(load,
'variable',
['active power', 'reactive power'])
# VSC HVDC
net.set_flags('vsc converter',
'variable',
'any',
['dc power', 'active power', 'reactive power'])
if Q_mode == self.CONTROL_MODE_FREE and Q_limits:
net.set_flags('vsc converter',
'bounded',
'any',
'reactive power')
# CSC HVDC
net.set_flags('csc converter',
'variable',
'any',
['dc power', 'active power', 'reactive power'])
# DC buses
net.set_flags('dc bus',
'variable',
'any',
'voltage')
# FACTS
net.set_flags('facts',
'variable',
'any',
'all')
if Q_mode == self.CONTROL_MODE_FREE and Q_limits:
net.set_flags('facts',
'bounded',
'any',
'reactive power')
# Tap changers
if tap_mode != self.CONTROL_MODE_LOCKED:
net.set_flags('branch',
'variable',
'tap changer - v',
'tap ratio')
if tap_mode == self.CONTROL_MODE_FREE and tap_limits:
net.set_flags('branch',
'bounded',
'tap changer - v',
'tap ratio')
        # Switched shunts
if shunt_mode != self.CONTROL_MODE_LOCKED:
net.set_flags('shunt',
'variable',
'switching - v',
'susceptance')
if shunt_mode == self.CONTROL_MODE_FREE and shunt_limits:
net.set_flags('shunt',
'bounded',
'switching - v',
'susceptance')
# Set up problem
problem = pfnet.Problem(net)
problem.add_constraint(pfnet.Constraint('AC power balance', net))
problem.add_constraint(pfnet.Constraint('HVDC power balance', net))
problem.add_constraint(pfnet.Constraint('generator active power participation', net))
problem.add_constraint(pfnet.Constraint('VSC converter equations', net))
problem.add_constraint(pfnet.Constraint('CSC converter equations', net))
problem.add_constraint(pfnet.Constraint('FACTS equations', net))
problem.add_constraint(pfnet.Constraint('VSC DC voltage control', net))
problem.add_constraint(pfnet.Constraint('CSC DC voltage control', net))
problem.add_constraint(pfnet.Constraint('power factor regulation', net))
if lock_vsc_P_dc:
problem.add_constraint(pfnet.Constraint('VSC DC power control', net))
if lock_csc_P_dc:
problem.add_constraint(pfnet.Constraint('CSC DC power control', net))
if lock_csc_i_dc:
problem.add_constraint(pfnet.Constraint('CSC DC current control', net))
func = pfnet.Function('voltage magnitude regularization', wm/(net.get_num_buses(True)+1.), net)
func.set_parameter('v_set_reference', not v_mag_warm_ref)
problem.add_function(func)
problem.add_function(pfnet.Function('variable regularization', wv/(net.num_vars+1.), net))
problem.add_function(pfnet.Function('voltage angle regularization', wa/(net.get_num_buses(True)+1.), net))
problem.add_function(pfnet.Function('generator powers regularization', wp/(net.get_num_generators(True)+1.), net))
problem.add_function(pfnet.Function('VSC DC power control', wc/(net.get_num_vsc_converters(True)+1.), net))
problem.add_function(pfnet.Function('CSC DC power control', wc/(net.get_num_csc_converters(True)+1.), net))
problem.add_function(pfnet.Function('CSC DC current control', wc/(net.get_num_csc_converters(True)+1.), net))
problem.add_function(pfnet.Function('FACTS active power control', wc/(net.get_num_facts(True)+1.), net))
problem.add_function(pfnet.Function('FACTS reactive power control', wc/(net.get_num_facts(True)+1.), net))
if gens_redispatch:
problem.add_function(pfnet.Function('generation redispatch penalty', wc/(net.get_num_generators(True)+1.), net))
if Q_mode == self.CONTROL_MODE_REG and Q_limits:
problem.add_constraint(pfnet.Constraint('voltage set point regulation', net))
if net.num_fixed > 0:
problem.add_constraint(pfnet.Constraint('variable fixing', net))
if tap_mode != self.CONTROL_MODE_LOCKED:
problem.add_function(pfnet.Function('tap ratio regularization', wc/(net.get_num_tap_changers_v(True)+1.), net))
if tap_mode == self.CONTROL_MODE_REG and tap_limits:
problem.add_constraint(pfnet.Constraint('voltage regulation by transformers', net))
if shunt_mode != self.CONTROL_MODE_LOCKED:
problem.add_function(pfnet.Function('susceptance regularization', wc/(net.get_num_switched_v_shunts(True)+1.), net))
if shunt_mode == self.CONTROL_MODE_REG and shunt_limits:
problem.add_constraint(pfnet.Constraint('voltage regulation by shunts', net))
if vdep_loads:
problem.add_constraint(pfnet.Constraint('load voltage dependence', net))
if net.num_bounded > 0:
problem.add_constraint(pfnet.Constraint('variable bounds', net))
# Analyze
problem.analyze()
# Return
return problem
def solve(self, net, save_problem=False):
from optalg.opt_solver import OptSolverError, OptTermination, OptCallback
from optalg.opt_solver import OptSolverAugL, OptSolverIpopt, OptSolverNR, OptSolverINLP
# Parameters
params = self._parameters
Q_mode = params['Q_mode']
shunt_mode = params['shunt_mode']
shunts_round = params['shunts_round']
tap_mode = params['tap_mode']
taps_round = params['taps_round']
vmin_thresh = params['vmin_thresh']
solver_name = params['solver']
solver_params = params['solver_parameters']
v_min_clip = params['v_min_clip']
v_max_clip = params['v_max_clip']
# Opt solver
if solver_name == 'augl':
solver = OptSolverAugL()
elif solver_name == 'ipopt':
solver = OptSolverIpopt()
elif solver_name == 'inlp':
solver = OptSolverINLP()
elif solver_name == 'nr':
solver = OptSolverNR()
else:
raise PFmethodError_BadOptSolver()
solver.set_parameters(solver_params[solver_name])
# Copy network
net = net.get_copy(merge_buses=True)
self.set_network_snapshot(net)
# Clipping
for bus in net.buses:
bus.v_mag = np.minimum(np.maximum(bus.v_mag, v_min_clip), v_max_clip)
# Problem
t0 = time.time()
problem = self.create_problem(net)
problem_time = time.time()-t0
# Callbacks
def c1(s):
if (s.k != 0 and params['tap_limits'] and tap_mode == self.CONTROL_MODE_REG and
norm(s.problem.f, np.inf) < 100.*solver_params['nr']['feastol']):
try:
self.apply_tran_v_regulation(s)
except Exception as e:
raise PFmethodError_TranVReg(e)
def c2(s):
if (s.k != 0 and params['shunt_limits'] and shunt_mode == self.CONTROL_MODE_REG and
norm(s.problem.f, np.inf) < 100.*solver_params['nr']['feastol']):
try:
self.apply_shunt_v_regulation(s)
except Exception as e:
raise PFmethodError_ShuntVReg(e)
def c3(s):
if (s.k >= params['pvpq_start_k'] and params['Q_limits'] and Q_mode == self.CONTROL_MODE_REG):
prob = s.problem.wrapped_problem
prob.apply_heuristics(s.x)
s.problem.A = prob.A
s.problem.b = prob.b
if solver_name == 'nr':
solver.add_callback(OptCallback(c1))
solver.add_callback(OptCallback(c2))
solver.add_callback(OptCallback(c3))
# Termination
def t1(s):
if np.min(s.problem.wrapped_problem.network.bus_v_min) < vmin_thresh:
return True
else:
return False
solver.add_termination(OptTermination(t1, 'low voltage'))
# Info printer
info_printer = self.get_info_printer()
solver.set_info_printer(info_printer)
# Solve
update = True
t0 = time.time()
try:
solver.solve(problem)
except OptSolverError as e:
raise PFmethodError_SolverError(e)
except Exception as e:
update = False
raise e
finally:
# Update network
if update:
net.set_var_values(solver.get_primal_variables()[:net.num_vars])
net.update_properties()
net.clear_sensitivities()
if solver_name != 'nr':
problem.store_sensitivities(*solver.get_dual_variables())
# Save results
self.set_solver_name(solver_name)
self.set_solver_status(solver.get_status())
self.set_solver_message(solver.get_error_msg())
self.set_solver_iterations(solver.get_iterations())
self.set_solver_time(time.time()-t0)
self.set_solver_primal_variables(solver.get_primal_variables())
self.set_solver_dual_variables(solver.get_dual_variables())
self.set_problem(problem if save_problem else None)
self.set_problem_time(problem_time)
self.set_network_snapshot(net)
def get_info_printer(self):
# Parameters
solver_name = self._parameters['solver']
# Define
def info_printer(solver,header):
net = solver.problem.wrapped_problem.network
if header:
print('{0:^5}'.format('vmax'), end=' ')
print('{0:^5}'.format('vmin'), end=' ')
print('{0:^8}'.format('gvdev'), end=' ')
print('{0:^8}'.format('gQvio'))
else:
print('{0:^5.2f}'.format(np.average(net.bus_v_max)), end=' ')
print('{0:^5.2f}'.format(np.average(net.bus_v_min)), end=' ')
print('{0:^8.1e}'.format(np.average(net.gen_v_dev)), end=' ')
print('{0:^8.1e}'.format(np.average(net.gen_Q_vio)))
# Return
return info_printer
def apply_shunt_v_regulation(self,solver):
# Local variables
dsus = self._parameters['dsus']
step = self._parameters['shunt_step']
p = solver.problem.wrapped_problem
net = p.network
x = solver.x
eps = 1e-8
# Fix constraints
c = p.find_constraint('variable fixing')
A = c.A
b = c.b
# Rhs
rhs = np.hstack((np.zeros(p.f.size),np.zeros(p.b.size)))
# Offset
offset = 0
for c in p.constraints:
if c.name == 'variable fixing':
break
else:
offset += c.A.shape[0]
# Violation check
for i in range(net.num_buses):
bus = net.get_bus(i)
if bus.is_regulated_by_shunt(True) and not bus.is_slack():
assert(bus.has_flags('variable','voltage magnitude'))
for t in range(net.num_periods):
v = x[bus.index_v_mag[t]]
vmax = bus.v_max_reg
vmin = bus.v_min_reg
assert(len(bus.reg_shunts) > 0)
assert(vmax >= vmin)
# Violation
if v > vmax or v < vmin:
for reg_shunt in bus.reg_shunts:
if not reg_shunt.is_in_service():
continue
assert(reg_shunt.has_flags('variable','susceptance'))
s = x[reg_shunt.index_b[t]]
smax = reg_shunt.b_max
smin = reg_shunt.b_min
assert(smin <= smax)
# Fix constr index
k = int(np.where(A.col == reg_shunt.index_b[t])[0])
i = A.row[k]
assert(np.abs(b[i]-x[reg_shunt.index_b[t]]) < eps)
assert(A.data[k] == 1.)
# Sensitivity
assert(rhs[p.f.size+offset+i] == 0.)
rhs[p.f.size+offset+i] = dsus
dx = solver.linsolver.solve(rhs)
dv = dx[bus.index_v_mag[t]]
dvds = dv/dsus
rhs[p.f.size+offset+i] = 0.
# Adjustment
dv = (vmax+vmin)/2.-v
ds = step*dv/dvds if dvds != 0. else 0.
snew = np.maximum(np.minimum(s+ds,smax),smin)
x[reg_shunt.index_b[t]] = snew
b[i] = snew
if np.abs(snew-s) > eps:
break
# Update
solver.func(x)
p.update_lin()
solver.problem.A = p.A
solver.problem.b = p.b
def apply_tran_v_regulation(self,solver):
# Local variables
dtap = self._parameters['dtap']
step = self._parameters['tap_step']
p = solver.problem.wrapped_problem
net = p.network
x = solver.x
eps = 1e-8
# Fix constraints
c = p.find_constraint('variable fixing')
A = c.A
b = c.b
# Rhs
rhs = np.hstack((np.zeros(p.f.size),np.zeros(p.b.size)))
# Offset
offset = 0
for c in p.constraints:
if c.name == 'variable fixing':
break
else:
offset += c.A.shape[0]
# Violation check
for i in range(net.num_buses):
bus = net.get_bus(i)
if bus.is_regulated_by_tran(True) and not bus.is_slack():
assert(bus.has_flags('variable','voltage magnitude'))
for tau in range(net.num_periods):
v = x[bus.index_v_mag[tau]]
vmax = bus.v_max_reg
vmin = bus.v_min_reg
assert(len(bus.reg_trans) > 0)
assert(vmax > vmin)
# Violation
if v > vmax or v < vmin:
for reg_tran in bus.reg_trans:
if not reg_tran.is_in_service():
continue
assert(reg_tran.has_flags('variable','tap ratio'))
t = x[reg_tran.index_ratio[tau]]
tmax = reg_tran.ratio_max
tmin = reg_tran.ratio_min
assert(tmax >= tmin)
# Fix constr index
k = int(np.where(A.col == reg_tran.index_ratio[tau])[0])
i = A.row[k]
assert(np.abs(b[i]-x[reg_tran.index_ratio[tau]]) < eps)
assert(A.data[k] == 1.)
# Sensitivity
assert(rhs[p.f.size+offset+i] == 0.)
rhs[p.f.size+offset+i] = dtap
dx = solver.linsolver.solve(rhs)
dv = dx[bus.index_v_mag[tau]]
dvdt = dv/dtap
rhs[p.f.size+offset+i] = 0.
# Adjustment
dv = (vmax+vmin)/2.-v
dt = step*dv/dvdt if dvdt != 0. else 0.
tnew = np.maximum(np.minimum(t+dt,tmax),tmin)
x[reg_tran.index_ratio[tau]] = tnew
b[i] = tnew
if np.abs(tnew-t) > eps:
break
# Update
solver.func(x)
p.update_lin()
solver.problem.A = p.A
solver.problem.b = p.b
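# ------------------------------------------------------------------------
# Illustrative usage sketch (not part of the method implementation). `net`
# is assumed to be an already-parsed PFNET Network; set_parameters() and
# get_results() are assumed to come from the PFmethod base class.
#
#     method = ACPF()
#     method.set_parameters({'solver': 'nr', 'Q_limits': True})
#     method.solve(net)
#     results = method.get_results()   # solver status, iterations, network snapshot
# ------------------------------------------------------------------------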
|
the-stack_0_27925
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Argument processors for Game Servers surface arguments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import properties
DEFAULT_LOCATION = 'global'
PARENT_TEMPLATE = 'projects/{}/locations/{}'
PARENT_DEPLOYMENT_TEMPLATE = 'projects/{}/locations/{}/gameServerDeployments/{}'
PARENT_REALM_TEMPLATE = 'projects/{}/locations/{}/realms/{}'
DEPLOYMENT_WILDCARD = '-'
LOCATION_WILDCARD = '-'
REALM_WILDCARD = '-'
def AddDefaultLocationToListRequest(ref, args, req):
"""Python hook for yaml commands to wildcard the location in list requests."""
del ref # Unused
project = properties.VALUES.core.project.Get(required=True)
location = args.location or LOCATION_WILDCARD
req.parent = PARENT_TEMPLATE.format(project, location)
return req
def AddDefaultLocationAndRealmToListRequest(ref, args, req):
"""Python hook for yaml commands to wildcard the realm and location in list requests."""
del ref
project = properties.VALUES.core.project.Get(required=True)
location = args.location or LOCATION_WILDCARD
# If realm is specified but location is not, we fall back to global, which is
# the default location for realms.
if args.realm and not args.location:
location = DEFAULT_LOCATION
realm = args.realm or REALM_WILDCARD
req.parent = PARENT_REALM_TEMPLATE.format(project, location, realm)
return req
def AddDefaultLocationAndDeploymentToListRequest(ref, args, req):
"""Python hook for yaml commands to wildcard the deployment and location in list requests."""
del ref
project = properties.VALUES.core.project.Get(required=True)
location = args.location or LOCATION_WILDCARD
# If deployment is specified but location is not, we fall back to global
# which is the default location for realms.
if args.deployment and not args.location:
location = DEFAULT_LOCATION
deployment = args.deployment or DEPLOYMENT_WILDCARD
req.parent = PARENT_DEPLOYMENT_TEMPLATE.format(project, location, deployment)
return req
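# Example of the wildcarding above (hypothetical project/realm names): with
# --realm=my-realm and no --location, the realm falls back to the default
# 'global' location, so req.parent becomes
#     'projects/my-project/locations/global/realms/my-realm'
# With neither flag given, both components collapse to wildcards:
#     'projects/my-project/locations/-/realms/-'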
|
the-stack_0_27926
|
import os
import re
import socket
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
from netmiko.cisco_base_connection import CiscoFileTransfer
from netmiko.ssh_exception import NetmikoTimeoutException
LINUX_PROMPT_PRI = os.getenv("NETMIKO_LINUX_PROMPT_PRI", "$")
LINUX_PROMPT_ALT = os.getenv("NETMIKO_LINUX_PROMPT_ALT", "#")
LINUX_PROMPT_ROOT = os.getenv("NETMIKO_LINUX_PROMPT_ROOT", "#")
class LinuxSSH(CiscoSSHConnection):
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self.ansi_escape_codes = True
return super().session_preparation()
def _enter_shell(self):
"""Already in shell."""
return ""
def _return_cli(self):
"""The shell is the CLI."""
return ""
def disable_paging(self, *args, **kwargs):
"""Linux doesn't have paging by default."""
return ""
def set_base_prompt(
self,
pri_prompt_terminator=LINUX_PROMPT_PRI,
alt_prompt_terminator=LINUX_PROMPT_ALT,
delay_factor=1,
):
"""Determine base prompt."""
return super().set_base_prompt(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor,
)
def send_config_set(self, config_commands=None, exit_config_mode=True, **kwargs):
"""Can't exit from root (if root)"""
if self.username == "root":
exit_config_mode = False
return super().send_config_set(
config_commands=config_commands, exit_config_mode=exit_config_mode, **kwargs
)
def check_config_mode(self, check_string=LINUX_PROMPT_ROOT, pattern=""):
"""Verify root"""
return self.check_enable_mode(check_string=check_string)
def config_mode(
self,
config_command: str = "sudo -s",
pattern: str = "ssword",
re_flags: int = re.IGNORECASE,
) -> str:
"""Attempt to become root."""
return self.enable(cmd=config_command, pattern=pattern, re_flags=re_flags)
def exit_config_mode(self, exit_config="exit"):
return self.exit_enable_mode(exit_command=exit_config)
def check_enable_mode(self, check_string=LINUX_PROMPT_ROOT):
"""Verify root"""
return super().check_enable_mode(check_string=check_string)
def exit_enable_mode(self, exit_command="exit"):
"""Exit enable mode."""
delay_factor = self.select_delay_factor(delay_factor=0)
output = ""
if self.check_enable_mode():
self.write_channel(self.normalize_cmd(exit_command))
time.sleep(0.3 * delay_factor)
self.set_base_prompt()
if self.check_enable_mode():
raise ValueError("Failed to exit enable mode.")
return output
def enable(self, cmd="sudo -s", pattern="ssword", re_flags=re.IGNORECASE):
"""Attempt to become root."""
delay_factor = self.select_delay_factor(delay_factor=0)
output = ""
if not self.check_enable_mode():
self.write_channel(self.normalize_cmd(cmd))
time.sleep(0.3 * delay_factor)
try:
output += self.read_channel()
if re.search(pattern, output, flags=re_flags):
self.write_channel(self.normalize_cmd(self.secret))
self.set_base_prompt()
except socket.timeout:
raise NetmikoTimeoutException(
"Timed-out reading channel, data not available."
)
if not self.check_enable_mode():
msg = (
"Failed to enter enable mode. Please ensure you pass "
"the 'secret' argument to ConnectHandler."
)
raise ValueError(msg)
return output
def cleanup(self, command="exit"):
"""Try to Gracefully exit the SSH session."""
return super().cleanup(command=command)
def save_config(self, *args, **kwargs):
"""Not Implemented"""
raise NotImplementedError
class LinuxFileTransfer(CiscoFileTransfer):
"""
Linux SCP File Transfer driver.
Mostly for testing purposes.
"""
def __init__(
self,
ssh_conn,
source_file,
dest_file,
file_system="/var/tmp",
direction="put",
**kwargs,
):
return super().__init__(
ssh_conn=ssh_conn,
source_file=source_file,
dest_file=dest_file,
file_system=file_system,
direction=direction,
**kwargs,
)
def remote_space_available(self, search_pattern=""):
"""Return space available on remote device."""
return self._remote_space_available_unix(search_pattern=search_pattern)
def check_file_exists(self, remote_cmd=""):
"""Check if the dest_file already exists on the file system (return boolean)."""
return self._check_file_exists_unix(remote_cmd=remote_cmd)
def remote_file_size(self, remote_cmd="", remote_file=None):
"""Get the file size of the remote file."""
return self._remote_file_size_unix(
remote_cmd=remote_cmd, remote_file=remote_file
)
def remote_md5(self, base_cmd="md5sum", remote_file=None):
if remote_file is None:
if self.direction == "put":
remote_file = self.dest_file
elif self.direction == "get":
remote_file = self.source_file
remote_md5_cmd = f"{base_cmd} {self.file_system}/{remote_file}"
dest_md5 = self.ssh_ctl_chan.send_command(remote_md5_cmd, read_timeout=300)
dest_md5 = self.process_md5(dest_md5)
return dest_md5
@staticmethod
def process_md5(md5_output, pattern=r"^(\S+)\s+"):
return super(LinuxFileTransfer, LinuxFileTransfer).process_md5(
md5_output, pattern=pattern
)
def enable_scp(self, cmd=None):
raise NotImplementedError
def disable_scp(self, cmd=None):
raise NotImplementedError
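# Illustrative usage sketch (not part of the driver). Host, credentials and the
# command below are placeholders; adapt them to a reachable machine before running.
if __name__ == "__main__":
    conn = LinuxSSH(
        host="192.0.2.10",            # placeholder address
        username="admin",
        password="admin-password",
        secret="sudo-password",       # consumed by enable()/config_mode() via 'sudo -s'
    )
    print(conn.send_command("uname -a"))
    if not conn.check_enable_mode():
        conn.enable()                 # become root
        conn.exit_enable_mode()
    conn.disconnect()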
|
the-stack_0_27927
|
import nbgrader, csv, codecs, sys, os, shutil
from nbgrader.apps import NbGraderAPI
import zipfile
import pandas as pd
import logging
logger = logging.getLogger('moodle_nbgrader')
logger.setLevel(logging.INFO)
def zip(out, root):
shutil.make_archive(out, 'zip', root)
def add_feedback_to_zip(archive, unique_id, ident, fullname, assignment):
fbk_path = os.path.join("feedback", str(unique_id), assignment)
try:
files = [os.path.join(fbk_path, f) for f in os.listdir(fbk_path) if f.endswith('.html')]
        # strip the 'Participant ' label to leave the numeric Moodle identifier
        assign_id = ident.strip('Participant ')
        # write each HTML feedback file into the Moodle-style per-student folder inside the zip
        for f in files:
            archive.write(f, arcname=os.path.join(f"{fullname}_{assign_id}_assignsubmission_file_", os.path.basename(f)))
except FileNotFoundError:
logger.error(f"HTML feedback file for {fullname} {unique_id} {assignment} is missing")
# no feedback to generate
def update_grade(out_df, index, unique_id, fullname, submission):
out_df.loc[index, 'Grade'] = submission.score
# warn about dubious scores
if submission.score <= 0 or submission.score > submission.max_score:
logger.warning(f"Warning: {unique_id} {fullname} has a score of {submission.score}")
# correct the maximum grade
out_df.loc[index, 'Maximum Grade'] = submission.max_score
def moodle_gradesheet(assignment, with_feedback=True):
api = NbGraderAPI()
gradebook = api.gradebook
csvfile = os.path.join("imports", assignment + ".csv")
grading_df = pd.read_csv(csvfile)
fname = os.path.join("exports", assignment + ".csv")
if with_feedback:
archive = zipfile.ZipFile(os.path.join("exports", "feedback_"+assignment+".zip"), 'w', zipfile.ZIP_DEFLATED)
out_df = grading_df.copy()
    # derive an 'actual_group' key: real Moodle groups keep their group name, while
    # ungrouped students (Group equals Department, or 'Default group') are treated as their own group
grading_df['actual_group'] = grading_df['Group']
individuals = (grading_df['Department'] == grading_df['Group']) | (grading_df['Group'] == 'Default group')
grading_df.loc[individuals, 'actual_group'] = grading_df.loc[individuals, 'Identifier']
n_groups = len(grading_df['actual_group'].unique())
for index, line in grading_df.drop_duplicates(subset='actual_group').iterrows():
if line['actual_group'] == 'Default Group':
continue
try:
submission = gradebook.find_submission(assignment, line['ID number'])
except:
if "Submitted" in line['Status']:
logger.warning(f"No submission for {line['ID number']} in assignment {assignment}")
else:
logger.info(f"No submission for {line['ID number']} in assignment {assignment}, as expected")
else:
logger.info(f"Processing submission for {line['ID number']} in assignment {assignment}")
# add feedback and grading to all students with the same submission in the same group
for group_index, group_line in grading_df[grading_df['actual_group'] == line['actual_group']].iterrows():
if with_feedback:
add_feedback_to_zip(archive, line['ID number'], group_line['Identifier'], group_line['Full name'], assignment)
update_grade(out_df, group_index, group_line['ID number'], group_line['Full name'], submission)
out_df.to_csv(fname, index=False)
logger.info(f"Wrote to {fname}")
# tidy up the feedback file
if with_feedback:
archive.close()
import argparse
parser = argparse.ArgumentParser(description='''
Updates a CSV file gradesheet (which must have been downloaded from
Moodle with "offline gradesheets" enabled in the assignment settings) with
the results from grading the assignment <assign>.
The input will be imports/<assign>.csv
The output will be in exports/<assign>.csv
Feedback will be zipped up into the file exports/feedback_<assign>.zip and this
can be uploaded to Moodle if "Feedback files" is enabled. This uploads all student
feedback in one go.
''')
parser.add_argument('assignment', type=str, help='Name of the assignment csv file downloaded from moodle')
args = parser.parse_args()
moodle_gradesheet(args.assignment)
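# Example invocation (assignment name and script filename are illustrative):
#   python moodle_nbgrader.py lab1
# reads imports/lab1.csv and writes exports/lab1.csv plus exports/feedback_lab1.zip.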
|
the-stack_0_27929
|
import os
from unittest import SkipTest
import param
import holoviews
from pyviz_comms import nb_mime_js
from IPython import version_info
from param import ipython as param_ext
from IPython.display import HTML, publish_display_data
from ..core.dimension import LabelledData
from ..core.tree import AttrTree
from ..core.options import Store
from ..core.util import mimebundle_to_html
from ..element.comparison import ComparisonTestCase
from ..util import extension
from ..plotting.renderer import Renderer, MIME_TYPES
from .magics import load_magics
from .display_hooks import display # noqa (API import)
from .display_hooks import pprint_display, png_display, svg_display
AttrTree._disabled_prefixes = ['_repr_','_ipython_canary_method_should_not_exist']
def show_traceback():
"""
Display the full traceback after an abbreviated traceback has occurred.
"""
from .display_hooks import FULL_TRACEBACK
print(FULL_TRACEBACK)
class IPTestCase(ComparisonTestCase):
"""
This class extends ComparisonTestCase to handle IPython specific
objects and support the execution of cells and magic.
"""
def setUp(self):
super(IPTestCase, self).setUp()
try:
import IPython
from IPython.display import HTML, SVG
self.ip = IPython.InteractiveShell()
if self.ip is None:
raise TypeError()
except Exception:
raise SkipTest("IPython could not be started")
self.addTypeEqualityFunc(HTML, self.skip_comparison)
self.addTypeEqualityFunc(SVG, self.skip_comparison)
def skip_comparison(self, obj1, obj2, msg): pass
def get_object(self, name):
obj = self.ip._object_find(name).obj
if obj is None:
raise self.failureException("Could not find object %s" % name)
return obj
def cell(self, line):
"Run an IPython cell"
self.ip.run_cell(line, silent=True)
def cell_magic(self, *args, **kwargs):
"Run an IPython cell magic"
self.ip.run_cell_magic(*args, **kwargs)
def line_magic(self, *args, **kwargs):
"Run an IPython line magic"
self.ip.run_line_magic(*args, **kwargs)
class notebook_extension(extension):
"""
Notebook specific extension to hv.extension that offers options for
controlling the notebook environment.
"""
css = param.String(default='', doc="Optional CSS rule set to apply to the notebook.")
logo = param.Boolean(default=True, doc="Toggles display of HoloViews logo")
inline = param.Boolean(default=True, doc="""
Whether to inline JS and CSS resources.
If disabled, resources are loaded from CDN if one is available.""")
width = param.Number(default=None, bounds=(0, 100), doc="""
Width of the notebook as a percentage of the browser screen window width.""")
display_formats = param.List(default=['html'], doc="""
A list of formats that are rendered to the notebook where
multiple formats may be selected at once (although only one
format will be displayed).
Although the 'html' format is supported across backends, other
formats supported by the current backend (e.g 'png' and 'svg'
using the matplotlib backend) may be used. This may be useful to
export figures to other formats such as PDF with nbconvert. """)
case_sensitive_completion = param.Boolean(default=False, doc="""
Whether to monkey patch IPython to use the correct tab-completion
behavior. """)
_loaded = False
def __call__(self, *args, **params):
super(notebook_extension, self).__call__(*args, **params)
# Abort if IPython not found
try:
ip = params.pop('ip', None) or get_ipython() # noqa (get_ipython)
except:
return
# Notebook archive relies on display hooks being set to work.
try:
if version_info[0] >= 4:
import nbformat # noqa (ensures availability)
else:
from IPython import nbformat # noqa (ensures availability)
from .archive import notebook_archive
holoviews.archive = notebook_archive
except ImportError:
pass
# Not quite right, should be set when switching backends
if 'matplotlib' in Store.renderers and not notebook_extension._loaded:
svg_exporter = Store.renderers['matplotlib'].instance(holomap=None,fig='svg')
holoviews.archive.exporters = [svg_exporter] + holoviews.archive.exporters
p = param.ParamOverrides(self, {k:v for k,v in params.items() if k!='config'})
if p.case_sensitive_completion:
from IPython.core import completer
completer.completions_sorting_key = self.completions_sorting_key
resources = self._get_resources(args, params)
Store.display_formats = p.display_formats
if 'html' not in p.display_formats and len(p.display_formats) > 1:
msg = ('Output magic unable to control displayed format '
'as IPython notebook uses fixed precedence '
'between %r' % p.display_formats)
display(HTML('<b>Warning</b>: %s' % msg))
loaded = notebook_extension._loaded
if loaded == False:
param_ext.load_ipython_extension(ip, verbose=False)
load_magics(ip)
Store.output_settings.initialize(list(Store.renderers.keys()))
Store.set_display_hook('html+js', LabelledData, pprint_display)
Store.set_display_hook('png', LabelledData, png_display)
Store.set_display_hook('svg', LabelledData, svg_display)
notebook_extension._loaded = True
css = ''
if p.width is not None:
css += '<style>div.container { width: %s%% }</style>' % p.width
if p.css:
css += '<style>%s</style>' % p.css
if css:
display(HTML(css))
resources = list(resources)
if len(resources) == 0: return
Renderer.load_nb()
for r in [r for r in resources if r != 'holoviews']:
Store.renderers[r].load_nb(inline=p.inline)
if hasattr(ip, 'kernel') and not loaded:
Renderer.comm_manager.get_client_comm(notebook_extension._process_comm_msg,
"hv-extension-comm")
# Create a message for the logo (if shown)
self.load_hvjs(logo=p.logo,
bokeh_logo= p.logo and ('bokeh' in resources),
mpl_logo= p.logo and (('matplotlib' in resources)
or resources==['holoviews']),
plotly_logo= p.logo and ('plotly' in resources),
JS=('holoviews' in resources))
@classmethod
def completions_sorting_key(cls, word):
"Fixed version of IPyton.completer.completions_sorting_key"
prio1, prio2 = 0, 0
if word.startswith('__'): prio1 = 2
elif word.startswith('_'): prio1 = 1
if word.endswith('='): prio1 = -1
if word.startswith('%%'):
if not "%" in word[2:]:
word = word[2:]; prio2 = 2
elif word.startswith('%'):
if not "%" in word[1:]:
word = word[1:]; prio2 = 1
return prio1, word, prio2
def _get_resources(self, args, params):
"""
Finds the list of resources from the keyword parameters and pops
them out of the params dictionary.
"""
resources = []
disabled = []
for resource in ['holoviews'] + list(Store.renderers.keys()):
if resource in args:
resources.append(resource)
if resource in params:
setting = params.pop(resource)
if setting is True and resource != 'matplotlib':
if resource not in resources:
resources.append(resource)
if setting is False:
disabled.append(resource)
unmatched_args = set(args) - set(resources)
if unmatched_args:
display(HTML('<b>Warning:</b> Unrecognized resources %s'
% ', '.join(unmatched_args)))
resources = [r for r in resources if r not in disabled]
if ('holoviews' not in disabled) and ('holoviews' not in resources):
resources = ['holoviews'] + resources
return resources
@classmethod
def load_hvjs(cls, logo=False, bokeh_logo=False, mpl_logo=False, plotly_logo=False,
JS=True, message='HoloViewsJS successfully loaded.'):
"""
Displays javascript and CSS to initialize HoloViews widgets.
"""
import jinja2
# Evaluate load_notebook.html template with widgetjs code
if JS:
widgetjs, widgetcss = Renderer.html_assets(extras=False, backends=[], script=True)
else:
widgetjs, widgetcss = '', ''
# Add classic notebook MIME renderer
widgetjs += nb_mime_js
templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
jinjaEnv = jinja2.Environment(loader=templateLoader)
template = jinjaEnv.get_template('load_notebook.html')
html = template.render({'widgetcss': widgetcss,
'logo': logo,
'bokeh_logo': bokeh_logo,
'mpl_logo': mpl_logo,
'plotly_logo': plotly_logo,
'message': message})
publish_display_data(data={'text/html': html})
# Vanilla JS mime type is only consumed by classic notebook
# Custom mime type is only consumed by JupyterLab
if JS:
mimebundle = {
MIME_TYPES['js'] : widgetjs,
MIME_TYPES['jlab-hv-load'] : widgetjs
}
if os.environ.get('HV_DOC_HTML', False):
mimebundle = {'text/html': mimebundle_to_html(mimebundle)}
publish_display_data(data=mimebundle)
@param.parameterized.bothmethod
def tab_completion_docstring(self_or_cls):
"""
Generates a docstring that can be used to enable tab-completion
of resources.
"""
elements = ['%s=Boolean' %k for k in list(Store.renderers.keys())]
for name, p in self_or_cls.params().items():
param_type = p.__class__.__name__
elements.append("%s=%s" % (name, param_type))
return "params(%s)" % ', '.join(['holoviews=Boolean'] + elements)
notebook_extension.__doc__ = notebook_extension.tab_completion_docstring()
notebook_extension.add_delete_action(Renderer._delete_plot)
def load_ipython_extension(ip):
notebook_extension(ip=ip)
def unload_ipython_extension(ip):
notebook_extension._loaded = False
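# Typical notebook usage of the extension defined above (illustrative; meant to be
# run inside a Jupyter session, not at import time of this module):
#   import holoviews as hv
#   hv.notebook_extension('bokeh', width=90, logo=False)
# which loads the bokeh backend resources, widens output cells and hides the logo.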
|
the-stack_0_27930
|
from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
name = models.CharField(max_length=45)
photo = models.ImageField(upload_to='admin/')
gender_select = (
('male', 'Male'),
('female', 'Female')
)
gender = models.CharField(choices=gender_select, max_length=6)
employee_select = (
('admin', 'Admin'),
('professor', 'Professor'),
('teacher', 'Teacher'),
('register', 'Register'),
('student', 'Student'),
)
employee_type = models.CharField(choices=employee_select, max_length=15)
def __str__(self):
return self.name
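# Illustrative usage (assumes a configured Django project; values are placeholders):
#   user = User.objects.create_user(username='jdoe', password='secret')
#   UserProfile.objects.create(user=user, name='Jane Doe', photo='admin/jane.jpg',
#                              gender='female', employee_type='teacher')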
|
the-stack_0_27931
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__all__ = [
"ImproperlyConfigured",
"ElasticsearchException",
"SerializationError",
"TransportError",
"NotFoundError",
"ConflictError",
"RequestError",
"ConnectionError",
"SSLError",
"ConnectionTimeout",
"AuthenticationException",
"AuthorizationException",
]
class ImproperlyConfigured(Exception):
"""
Exception raised when the config passed to the client is inconsistent or invalid.
"""
class ElasticsearchException(Exception):
"""
Base class for all exceptions raised by this package's operations (doesn't
apply to :class:`~elasticsearch.ImproperlyConfigured`).
"""
class SerializationError(ElasticsearchException):
"""
Data passed in failed to serialize properly in the ``Serializer`` being
used.
"""
class TransportError(ElasticsearchException):
"""
Exception raised when ES returns a non-OK (>=400) HTTP status code. Or when
an actual connection error happens; in that case the ``status_code`` will
be set to ``'N/A'``.
"""
@property
def status_code(self):
"""
The HTTP status code of the response that precipitated the error or
``'N/A'`` if not applicable.
"""
return self.args[0]
@property
def error(self):
"""A string error message."""
return self.args[1]
@property
def info(self):
"""
Dict of returned error info from ES, where available, underlying
exception when not.
"""
return self.args[2]
def __str__(self):
cause = ""
try:
if self.info and "error" in self.info:
if isinstance(self.info["error"], dict):
root_cause = self.info["error"]["root_cause"][0]
cause = ", ".join(
filter(
None,
[
repr(root_cause["reason"]),
root_cause.get("resource.id"),
root_cause.get("resource.type"),
],
)
)
else:
cause = repr(self.info["error"])
except LookupError:
pass
msg = ", ".join(filter(None, [str(self.status_code), repr(self.error), cause]))
return "%s(%s)" % (self.__class__.__name__, msg)
class ConnectionError(TransportError):
"""
Error raised when there was an exception while talking to ES. Original
exception from the underlying :class:`~elasticsearch.Connection`
implementation is available as ``.info``.
"""
def __str__(self):
return "ConnectionError(%s) caused by: %s(%s)" % (
self.error,
self.info.__class__.__name__,
self.info,
)
class SSLError(ConnectionError):
"""Error raised when encountering SSL errors."""
class ConnectionTimeout(ConnectionError):
"""A network timeout. Doesn't cause a node retry by default."""
def __str__(self):
return "ConnectionTimeout caused by - %s(%s)" % (
self.info.__class__.__name__,
self.info,
)
class NotFoundError(TransportError):
"""Exception representing a 404 status code."""
class ConflictError(TransportError):
"""Exception representing a 409 status code."""
class RequestError(TransportError):
"""Exception representing a 400 status code."""
class AuthenticationException(TransportError):
"""Exception representing a 401 status code."""
class AuthorizationException(TransportError):
"""Exception representing a 403 status code."""
class ElasticsearchWarning(Warning):
"""Warning that is raised when a deprecated option
or incorrect usage is flagged via the 'Warning' HTTP header.
"""
# Alias of 'ElasticsearchWarning' for backwards compatibility.
# Additional functionality was added to the 'Warning' HTTP header
# not related to deprecations.
ElasticsearchDeprecationWarning = ElasticsearchWarning
# more generic mappings from status_code to python exceptions
HTTP_EXCEPTIONS = {
400: RequestError,
401: AuthenticationException,
403: AuthorizationException,
404: NotFoundError,
409: ConflictError,
}
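# Illustrative sketch (not part of this module): how a caller might turn an HTTP
# status code into one of the exception classes defined above, falling back to
# the generic TransportError for unmapped codes.
def _raise_for_status(status_code, error="error", info=None):
    exc_class = HTTP_EXCEPTIONS.get(status_code, TransportError)
    raise exc_class(status_code, error, info)
# e.g. _raise_for_status(404, "index_not_found_exception") raises NotFoundError.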
|
the-stack_0_27932
|
from ..css_matcher import scan, split_value, TokenType
from .utils import push_range, SelectItemModel
class CSSSection:
__slots__ = ('start', 'end', 'body_start', 'body_end', 'properties')
def __init__(self, start: int, end: int, body_start: int, body_end: int, properties: list=None):
self.start = start
self.end = end
self.body_start = body_start
self.body_end = body_end
self.properties = properties
def to_json(self):
result = {
'start': self.start,
'end': self.end,
'body_start': self.body_start,
'body_end': self.body_end
}
if self.properties:
result['properties'] = [prop.to_json() for prop in self.properties]
return result
class CSSProperty:
__slots__ = ('name', 'value', 'value_tokens', 'before', 'after')
def __init__(self, code: str, name: list, before: int, start: int, end: int, delimiter: int, offset=0):
self.name = (offset + name[0], offset + name[1])
self.value = (offset + start, offset + end)
self.value_tokens = split_value(code[start:end], offset + start)
self.before = before
self.after = offset + delimiter + 1
def to_json(self):
return {
'name': self.name,
'value': self.value,
'value_tokens': self.value_tokens,
'before': self.before,
'after': self.after
}
class ParseState:
__slots__ = ('type', 'start', 'end', 'value_start', 'value_end', 'value_delimiter')
def __init__(self):
self.type = None
self.start = -1
self.end = -1
self.value_start = -1
self.value_end = -1
self.value_delimiter = -1
def get_css_section(code: str, pos: int, properties=False) -> CSSSection:
"""
Returns context CSS section for given location in source code
    :param properties: Parse inner properties
"""
stack = []
pool = []
result = []
result.append(None) # Skip pylint warnings
def scan_callback(token_type: str, start: int, end: int, delimiter: int):
if start > pos and not stack:
return False
if token_type == TokenType.Selector:
stack.append(alloc_range(pool, start, end, delimiter))
elif token_type == TokenType.BlockEnd:
sel = stack and stack.pop()
if sel and sel[0] <= pos <= end:
result[0] = CSSSection(sel[0], end, sel[2] + 1, start)
return False
release_range(pool, sel)
scan(code, scan_callback)
section = result[0]
if section and properties:
section.properties = parse_properties(code, section.body_start, section.body_end)
return section
def select_item_css(code: str, pos: int, is_prev=False) -> SelectItemModel:
"Returns list of ranges for Select Next/Previous CSS Item action"
if is_prev:
return select_previous_item(code, pos)
return select_next_item(code, pos)
def select_next_item(code: str, pos: int) -> SelectItemModel:
"Returns regions for selecting next item in CSS"
result = []
result.append(None)
pending_property = []
pending_property.append(None)
def scan_callback(token_type: str, start: int, end: int, delimiter: int):
if start < pos:
return
if token_type == TokenType.Selector:
result[0] = SelectItemModel(start, end, [(start, end)])
return False
elif token_type == TokenType.PropertyName:
pending_property[0] = (start, end, delimiter)
elif token_type == TokenType.PropertyValue:
section = SelectItemModel(start, delimiter + 1 if delimiter != -1 else end, [])
result[0] = section
if pending_property[0]:
# Full property range
prop = pending_property[0]
section.start = prop[0]
push_range(section.ranges, (prop[0], section.end))
# Full value range
push_range(section.ranges, (start, end))
# Value fragments
for r in split_value(code[start:end]):
push_range(section.ranges, (r[0] + start, r[1] + start))
return False
elif pending_property[0]:
prop = pending_property[0]
result[0] = SelectItemModel(prop[0], prop[1], [(prop[0], prop[1])])
return False
scan(code, scan_callback)
return result[0]
def select_previous_item(code: str, pos: int) -> SelectItemModel:
"Returns regions for selecting previous item in CSS"
state = ParseState()
def scan_callback(token_type, start, end, delimiter):
# Accumulate context until we reach given position
if start >= pos and token_type != TokenType.PropertyValue:
return False
if token_type in (TokenType.Selector, TokenType.PropertyName):
state.start = start
state.end = end
state.type = token_type
state.value_start = state.value_end = state.value_delimiter = -1
elif token_type == TokenType.PropertyValue:
state.value_start = start
state.value_end = end
state.value_delimiter = delimiter
scan(code, scan_callback)
if state.type == TokenType.Selector:
return SelectItemModel(state.start, state.end, [(state.start, state.end)])
if state.type == TokenType.PropertyName:
result = SelectItemModel(state.start, state.end, [])
if state.value_start != -1:
result.end = state.value_delimiter + 1 if state.value_delimiter != -1 else state.value_end
# Full property range
push_range(result.ranges, (state.start, result.end))
# Full value range
push_range(result.ranges, (state.value_start, state.value_end))
# Value fragments
for r in split_value(code[state.value_start:state.value_end]):
push_range(result.ranges, (r[0] + state.value_start, r[1] + state.value_start))
else:
push_range(result.ranges, (state.start, state.end))
return result
class ParsePropertiesState:
__slots__ = ('pending_name', 'nested', 'before')
def __init__(self, before: int):
self.pending_name = None
self.nested = 0
        self.before = before
def parse_properties(code: str, parse_from=0, parse_to=None) -> list:
"""
Parses properties in `from:to` fragment of `code`. Note that `from:to` must
point to CSS section content, e.g. *inside* `{` and `}` (or top-level code context),
all properties found in nested sections will be ignored
"""
if parse_to is None:
parse_to = len(code)
fragment = code[parse_from:parse_to]
result = []
pool = []
state = ParsePropertiesState(parse_from)
def scan_callback(token_type, start: int, end: int, delimiter: int):
if token_type == TokenType.Selector:
state.nested += 1
elif token_type == TokenType.BlockEnd:
state.nested -= 1
state.before = parse_from + end
elif not state.nested:
if token_type == TokenType.PropertyName:
if state.pending_name:
# Create property with empty value
value_pos = state.pending_name[2]
result.append(
CSSProperty(fragment, state.pending_name, state.before,
value_pos, value_pos, value_pos,
parse_from))
release_range(pool, state.pending_name)
state.before = parse_from + start
state.pending_name = alloc_range(pool, start, end, delimiter)
elif token_type == TokenType.PropertyValue:
if state.pending_name:
result.append(
CSSProperty(fragment, state.pending_name, state.before,
start, end, delimiter, parse_from))
release_range(pool, state.pending_name)
state.pending_name = None
state.before = parse_from + delimiter + 1
scan(fragment, scan_callback)
return result
def alloc_range(pool: list, start: int, end: int, delimiter: int) -> list:
"Allocates new token range from pool"
if pool:
rng = pool.pop()
rng[0] = start
rng[1] = end
rng[2] = delimiter
return rng
return [start, end, delimiter]
def release_range(pool: list, rng: list):
"Releases given token range and pushes it back into the pool"
if rng:
pool.append(rng)
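# Illustrative usage sketch (runs only as part of the package, since the relative
# imports above must resolve; offsets shown are indicative, not exact):
#   code = '.foo { color: red; padding: 10px 20px; }'
#   section = get_css_section(code, 10, properties=True)
#   # section.start/.end cover the whole rule, body_start/body_end the span between
#   # the braces, and section.properties lists the 'color' and 'padding' properties
#   # with their name/value ranges as offsets into `code`.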
|
the-stack_0_27934
|
# pylint: disable=missing-docstring, global-statement, unused-argument, broad-except
from __future__ import print_function
import sys
import os
import random
import uuid
import time
import datetime
import subprocess
import json
import traceback
import base64
import signal
try:
# for python 3
from http.client import HTTPConnection
except ImportError:
# for python 2
from httplib import HTTPConnection
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
ORIG_STDOUT = sys.stdout
ORIG_STDERR = sys.stderr
LOGS = ''
LOG_TAIL = False
STAY_OPEN = os.environ.get('DOCKER_LAMBDA_STAY_OPEN', '')
HANDLER = sys.argv[1] if len(sys.argv) > 1 else os.environ.get('AWS_LAMBDA_FUNCTION_HANDLER', \
os.environ.get('_HANDLER', 'lambda_function.lambda_handler'))
EVENT_BODY = sys.argv[2] if len(sys.argv) > 2 else os.environ.get('AWS_LAMBDA_EVENT_BODY', \
(sys.stdin.read() if os.environ.get('DOCKER_LAMBDA_USE_STDIN', False) else '{}'))
FUNCTION_NAME = os.environ.get('AWS_LAMBDA_FUNCTION_NAME', 'test')
FUNCTION_VERSION = os.environ.get('AWS_LAMBDA_FUNCTION_VERSION', '$LATEST')
MEM_SIZE = os.environ.get('AWS_LAMBDA_FUNCTION_MEMORY_SIZE', '1536')
DEADLINE_MS = int(time.time() * 1000) + int(os.environ.get('AWS_LAMBDA_FUNCTION_TIMEOUT', '300'))
REGION = os.environ.get('AWS_REGION', os.environ.get('AWS_DEFAULT_REGION', 'us-east-1'))
ACCOUNT_ID = os.environ.get('AWS_ACCOUNT_ID', random.randint(100000000000, 999999999999))
ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', 'SOME_ACCESS_KEY_ID')
SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', 'SOME_SECRET_ACCESS_KEY')
SESSION_TOKEN = os.environ.get('AWS_SESSION_TOKEN', None)
INVOKEID = str(uuid.uuid4())
INVOKE_MODE = 'event' # Either 'http' or 'event'
SUPPRESS_INIT = True # Forces calling _get_handlers_delayed()
DATA_SOCK = -1
CONTEXT_OBJS = {
'clientcontext': None,
'cognitoidentityid': None,
'cognitopoolid': None,
}
CREDENTIALS = {
'key': ACCESS_KEY_ID,
'secret': SECRET_ACCESS_KEY,
'session': SESSION_TOKEN
}
INVOKED_FUNCTION_ARN = os.environ.get('AWS_LAMBDA_FUNCTION_INVOKED_ARN', \
'arn:aws:lambda:%s:%s:function:%s' % (REGION, ACCOUNT_ID, FUNCTION_NAME))
XRAY_TRACE_ID = os.environ.get('_X_AMZN_TRACE_ID', None)
XRAY_PARENT_ID = None
XRAY_SAMPLED = None
TRACE_ID = None
INVOKED = False
ERRORED = False
TODAY = datetime.date.today()
# export needed stuff
os.environ['AWS_LAMBDA_LOG_GROUP_NAME'] = '/aws/lambda/%s' % FUNCTION_NAME
os.environ['AWS_LAMBDA_LOG_STREAM_NAME'] = "%s/%s/%s/[%s]%s" % (
TODAY.year,
TODAY.month,
TODAY.day,
FUNCTION_VERSION,
'%016x' % random.randrange(16**16)
)
os.environ["AWS_LAMBDA_FUNCTION_NAME"] = FUNCTION_NAME
os.environ['AWS_LAMBDA_FUNCTION_MEMORY_SIZE'] = MEM_SIZE
os.environ['AWS_LAMBDA_FUNCTION_VERSION'] = FUNCTION_VERSION
os.environ['AWS_REGION'] = REGION
os.environ['AWS_DEFAULT_REGION'] = REGION
os.environ['_HANDLER'] = HANDLER
MOCKSERVER_ENV = os.environ.copy()
MOCKSERVER_ENV['DOCKER_LAMBDA_NO_BOOTSTRAP'] = '1'
MOCKSERVER_ENV['DOCKER_LAMBDA_USE_STDIN'] = '1'
MOCKSERVER_PROCESS = subprocess.Popen(
'/var/runtime/mockserver', stdin=subprocess.PIPE, env=MOCKSERVER_ENV)
MOCKSERVER_PROCESS.stdin.write(EVENT_BODY.encode())
MOCKSERVER_PROCESS.stdin.close()
MOCKSERVER_CONN = HTTPConnection("127.0.0.1", 9001)
def eprint(*args, **kwargs):
print(*args, file=ORIG_STDERR, **kwargs)
def report_user_init_start():
return
def report_user_init_end():
return
def report_user_invoke_start():
return
def report_user_invoke_end():
return
def receive_start():
global MOCKSERVER_CONN
for retry in range(20):
try:
MOCKSERVER_CONN = HTTPConnection("127.0.0.1", 9001)
MOCKSERVER_CONN.request("GET", "/2018-06-01/ping")
resp = MOCKSERVER_CONN.getresponse()
if resp.status != 200:
raise Exception("Mock server returned %d" % resp.status)
resp.read()
break
except Exception:
if retry >= 19:
raise
else:
time.sleep(.005)
continue
return (
INVOKEID,
INVOKE_MODE,
HANDLER,
SUPPRESS_INIT,
CREDENTIALS
)
def report_running(invokeid):
return
def receive_invoke():
global INVOKED
global INVOKEID
global DEADLINE_MS
global INVOKED_FUNCTION_ARN
global XRAY_TRACE_ID
global EVENT_BODY
global CONTEXT_OBJS
global LOGS
global LOG_TAIL
try:
MOCKSERVER_CONN.request("GET", "/2018-06-01/runtime/invocation/next")
resp = MOCKSERVER_CONN.getresponse()
if resp.status != 200:
            raise Exception("/invocation/next returned status %d" % resp.status)
except Exception:
if INVOKED and not STAY_OPEN:
sys.exit(1 if ERRORED else 0)
return ()
raise
if INVOKED:
LOGS = ""
INVOKED = True
INVOKEID = resp.getheader('Lambda-Runtime-Aws-Request-Id')
DEADLINE_MS = resp.getheader('Lambda-Runtime-Deadline-Ms')
INVOKED_FUNCTION_ARN = resp.getheader(
'Lambda-Runtime-Invoked-Function-Arn')
XRAY_TRACE_ID = resp.getheader('Lambda-Runtime-Trace-Id')
cognito_identity = json.loads(resp.getheader(
'Lambda-Runtime-Cognito-Identity', '{}'))
CONTEXT_OBJS['cognitoidentityid'] = cognito_identity.get('identity_id')
CONTEXT_OBJS['cognitopoolid'] = cognito_identity.get('identity_pool_id')
CONTEXT_OBJS['clientcontext'] = resp.getheader(
'Lambda-Runtime-Client-Context')
LOG_TAIL = resp.getheader('docker-lambda-log-type') == 'Tail'
EVENT_BODY = resp.read()
return (
INVOKEID,
DATA_SOCK,
CREDENTIALS,
EVENT_BODY,
CONTEXT_OBJS,
INVOKED_FUNCTION_ARN,
XRAY_TRACE_ID,
)
def report_fault(invokeid, msg, except_value, trace):
global ERRORED
ERRORED = True
if msg and except_value:
eprint('%s: %s' % (msg, except_value))
if trace:
eprint('%s' % trace)
def report_done(invokeid, errortype, result, is_fatal):
global ERRORED
if not INVOKED:
return
if errortype is not None:
ERRORED = True
result_obj = json.loads(result)
stack_trace = result_obj.get('stackTrace')
if stack_trace is not None:
result_obj['stackTrace'] = traceback.format_list(stack_trace)
result = json.dumps(result_obj)
headers = {"Docker-Lambda-Log-Result": base64.b64encode(LOGS.encode())} if LOG_TAIL else {}
MOCKSERVER_CONN.request("POST", "/2018-06-01/runtime/invocation/%s/%s" % \
(invokeid, "response" if errortype is None else "error"), result, headers)
resp = MOCKSERVER_CONN.getresponse()
if resp.status != 202:
        raise Exception("/invocation/response returned status %d" % resp.status)
resp.read()
def report_xray_exception(xray_json):
return
def log_bytes(msg, fileno):
global LOGS
if STAY_OPEN:
if LOG_TAIL:
LOGS += msg
(ORIG_STDOUT if fileno == 1 else ORIG_STDERR).write(msg)
else:
ORIG_STDERR.write(msg)
def log_sb(msg):
return
def get_remaining_time():
return DEADLINE_MS - int(time.time() * 1000)
def send_console_message(msg, byte_length):
log_bytes(msg + '\n', 1)
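# Invocation sketch (illustrative; the script filename is an assumption): inside the
# docker-lambda image this bootstrap receives the handler and event either as argv
# or via environment variables, e.g.
#   python bootstrap.py lambda_function.lambda_handler '{"key": "value"}'
# or with AWS_LAMBDA_FUNCTION_HANDLER / AWS_LAMBDA_EVENT_BODY set; setting
# DOCKER_LAMBDA_STAY_OPEN keeps the mock runtime API listening for further invokes.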
|
the-stack_0_27938
|
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from mmdet.core import eval_recalls
from mmdet.utils import print_log
from .custom import CustomDataset
from .registry import DATASETS
@DATASETS.register_module
class CocoDataset(CustomDataset):
CLASSES = (
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic_light",
"fire_hydrant",
"stop_sign",
"parking_meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports_ball",
"kite",
"baseball_bat",
"baseball_glove",
"skateboard",
"surfboard",
"tennis_racket",
"bottle",
"wine_glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot_dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted_plant",
"bed",
"dining_table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell_phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy_bear",
"hair_drier",
"toothbrush",
)
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.cat_ids = self.coco.getCatIds()
self.cat2label = {cat_id: i + 1 for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.getImgIds()
img_infos = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info["filename"] = info["file_name"]
img_infos.append(info)
return img_infos
def get_ann_info(self, idx):
img_id = self.img_infos[idx]["id"]
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return self._parse_ann_info(self.img_infos[idx], ann_info)
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_["image_id"] for _ in self.coco.anns.values())
for i, img_info in enumerate(self.img_infos):
if self.filter_empty_gt and self.img_ids[i] not in ids_with_ann:
continue
if min(img_info["width"], img_info["height"]) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, seg_map. "masks" are raw annotations and not
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get("ignore", False):
continue
x1, y1, w, h = ann["bbox"]
if ann["area"] <= 0 or w < 1 or h < 1:
continue
bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
if ann.get("iscrowd", False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann["category_id"]])
gt_masks_ann.append(ann["segmentation"])
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info["filename"].replace("jpg", "png")
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map,
)
return ann
def xyxy2xywh(self, bbox):
_bbox = bbox.tolist()
return [_bbox[0], _bbox[1], _bbox[2] - _bbox[0] + 1, _bbox[3] - _bbox[1] + 1]
def _proposal2json(self, results):
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data["image_id"] = img_id
data["bbox"] = self.xyxy2xywh(bboxes[i])
data["score"] = float(bboxes[i][4])
data["category_id"] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data["image_id"] = img_id
data["bbox"] = self.xyxy2xywh(bboxes[i])
data["score"] = float(bboxes[i][4])
data["category_id"] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data["image_id"] = img_id
data["bbox"] = self.xyxy2xywh(bboxes[i])
data["score"] = float(bboxes[i][4])
data["category_id"] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data["image_id"] = img_id
data["bbox"] = self.xyxy2xywh(bboxes[i])
data["score"] = float(mask_score[i])
data["category_id"] = self.cat_ids[label]
if isinstance(segms[i]["counts"], bytes):
segms[i]["counts"] = segms[i]["counts"].decode()
data["segmentation"] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files["bbox"] = "{}.{}.json".format(outfile_prefix, "bbox")
result_files["proposal"] = "{}.{}.json".format(outfile_prefix, "bbox")
mmcv.dump(json_results, result_files["bbox"])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files["bbox"] = "{}.{}.json".format(outfile_prefix, "bbox")
result_files["proposal"] = "{}.{}.json".format(outfile_prefix, "bbox")
result_files["segm"] = "{}.{}.json".format(outfile_prefix, "segm")
mmcv.dump(json_results[0], result_files["bbox"])
mmcv.dump(json_results[1], result_files["segm"])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files["proposal"] = "{}.{}.json".format(outfile_prefix, "proposal")
mmcv.dump(json_results, result_files["proposal"])
else:
raise TypeError("invalid type of results")
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.getAnnIds(imgIds=self.img_ids[i])
ann_info = self.coco.loadAnns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get("ignore", False) or ann["iscrowd"]:
continue
x1, y1, w, h = ann["bbox"]
bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger
)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list): Testing results of the dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing
the json filepaths, tmp_dir is the temporal directory created
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), "results must be a list"
assert len(results) == len(
self
), "The length of results is not equal to the dataset len: {} != {}".format(
len(results), len(self)
)
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, "results")
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(
self,
results,
metric="bbox",
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=np.arange(0.5, 0.96, 0.05),
):
"""Evaluation in COCO protocol.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluating the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float]): IoU threshold used for evaluating
recalls. If set to a list, the average recall of all IoUs will
                also be computed. Default: np.arange(0.5, 0.96, 0.05).
Returns:
dict[str: float]
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ["bbox", "segm", "proposal", "proposal_fast"]
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError("metric {} is not supported".format(metric))
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
cocoGt = self.coco
for metric in metrics:
msg = "Evaluating {}...".format(metric)
if logger is None:
msg = "\n" + msg
print_log(msg, logger=logger)
if metric == "proposal_fast":
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger="silent"
)
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results["AR@{}".format(num)] = ar[i]
log_msg.append("\nAR@{}\t{:.4f}".format(num, ar[i]))
log_msg = "".join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError("{} is not in results".format(metric))
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
"The testing results of the whole dataset is empty.",
logger=logger,
level=logging.ERROR,
)
break
iou_type = "bbox" if metric == "proposal" else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.imgIds = self.img_ids
if metric == "proposal":
cocoEval.params.useCats = 0
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
metric_items = [
"AR@100",
"AR@300",
"AR@1000",
"AR_s@1000",
"AR_m@1000",
"AR_l@1000",
]
for i, item in enumerate(metric_items):
val = float("{:.3f}".format(cocoEval.stats[i + 6]))
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
pass # TODO
metric_items = ["mAP", "mAP_50", "mAP_75", "mAP_s", "mAP_m", "mAP_l"]
for i in range(len(metric_items)):
key = "{}_{}".format(metric, metric_items[i])
val = float("{:.3f}".format(cocoEval.stats[i]))
eval_results[key] = val
eval_results["{}_mAP_copypaste".format(metric)] = (
"{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} "
"{ap[4]:.3f} {ap[5]:.3f}"
).format(ap=cocoEval.stats[:6])
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
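# Illustrative construction sketch (paths are placeholders; in practice mmdetection
# builds the dataset from a config dict via build_dataset):
#   dataset = CocoDataset(ann_file='data/coco/annotations/instances_val2017.json',
#                         pipeline=[], img_prefix='data/coco/val2017/')
#   eval_results = dataset.evaluate(results, metric=['bbox', 'segm'])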
|
the-stack_0_27941
|
from typing import Callable
from rx3.core import Observable
from rx3.core.typing import Mapper
from rx3.disposable import SerialDisposable, CompositeDisposable, SingleAssignmentDisposable
from rx3.scheduler import ImmediateScheduler
def _expand(mapper: Mapper) -> Callable[[Observable], Observable]:
def expand(source: Observable) -> Observable:
"""Expands an observable sequence by recursively invoking
mapper.
Args:
            source: Source observable to expand.
Returns:
An observable sequence containing all the elements produced
by the recursive expansion.
"""
def subscribe(observer, scheduler=None):
scheduler = scheduler or ImmediateScheduler.singleton()
queue = []
m = SerialDisposable()
d = CompositeDisposable(m)
active_count = [0]
is_acquired = [False]
def ensure_active():
is_owner = False
if queue:
is_owner = not is_acquired[0]
is_acquired[0] = True
def action(scheduler, state):
if queue:
work = queue.pop(0)
else:
is_acquired[0] = False
return
sad = SingleAssignmentDisposable()
d.add(sad)
def on_next(value):
observer.on_next(value)
result = None
try:
result = mapper(value)
except Exception as ex:
observer.on_error(ex)
return
queue.append(result)
active_count[0] += 1
ensure_active()
def on_complete():
d.remove(sad)
active_count[0] -= 1
if active_count[0] == 0:
observer.on_completed()
sad.disposable = work.subscribe_(on_next, observer.on_error, on_complete, scheduler)
m.disposable = scheduler.schedule(action)
if is_owner:
m.disposable = scheduler.schedule(action)
queue.append(source)
active_count[0] += 1
ensure_active()
return d
return Observable(subscribe)
return expand
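# Illustrative usage sketch (assumes the rx3 package is importable; mirrors the
# docstring above): expand 1 -> 2 -> 4 -> ... and keep the first five values.
#   import rx3
#   from rx3 import operators as ops
#   rx3.of(1).pipe(
#       ops.expand(lambda x: rx3.of(2 * x)),
#       ops.take(5),
#   ).subscribe(print)   # prints 1, 2, 4, 8, 16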
|
the-stack_0_27942
|
"""
Lock Exchange Test case
=======================
Solves hydrostatic flow in a closed rectangular channel.
Dianeutral mixing depends on mesh Reynolds number [1]
Re_h = U dx / nu
U = 0.5 m/s characteristic velocity ~ 0.5*sqrt(g_h drho/rho_0)
dx = horizontal mesh size
nu = background viscosity
Smagorinsky factor should be C_s = 1/sqrt(Re_h)
Mesh resolutions:
- ilicak [1]: dx = 500 m, 20 layers
COMODO lock exchange benchmark [2]:
- coarse: dx = 2000 m, 10 layers
- coarse2 (*): dx = 1000 m, 20 layers
- medium: dx = 500 m, 40 layers
- medium2 (*): dx = 250 m, 80 layers
- fine: dx = 125 m, 160 layers
(*) not part of the original benchmark
[1] Ilicak et al. (2012). Spurious dianeutral mixing and the role of
momentum closure. Ocean Modelling, 45-46(0):37-58.
http://dx.doi.org/10.1016/j.ocemod.2011.10.003
[2] COMODO Lock Exchange test.
http://indi.imag.fr/wordpress/?page_id=446
[3] Petersen et al. (2015). Evaluation of the arbitrary Lagrangian-Eulerian
vertical coordinate method in the MPAS-Ocean model. Ocean Modelling,
86:93-113.
http://dx.doi.org/10.1016/j.ocemod.2014.12.004
"""
from thetis import *
from diagnostics import *
from plotting import *
def run_lockexchange(reso_str='coarse', poly_order=1, element_family='dg-dg',
reynolds_number=1.0, use_limiter=True, dt=None,
viscosity='const', laxfriedrichs_vel=0.0,
laxfriedrichs_trc=0.0,
elem_type='tri',
load_export_ix=None, iterate=True, **custom_options):
"""
Runs lock exchange problem with a bunch of user defined options.
"""
comm = COMM_WORLD
if laxfriedrichs_vel is None:
laxfriedrichs_vel = 0.0
if laxfriedrichs_trc is None:
laxfriedrichs_trc = 0.0
depth = 20.0
refinement = {'huge': 0.6, 'coarse': 1, 'coarse2': 2, 'medium': 4,
'medium2': 8, 'fine': 16, 'ilicak': 4}
# set mesh resolution
if '-' in reso_str:
words = reso_str.split('-')
delta_x, delta_z = [float(f) for f in words]
layers = int(np.ceil(depth/delta_z))
else:
delta_x = 2000.0/refinement[reso_str]
layers = int(round(10*refinement[reso_str]))
if reso_str == 'ilicak':
layers = 20
# generate unit mesh and transform its coords
x_max = 32.0e3
x_min = -32.0e3
n_x = (x_max - x_min)/delta_x
mesh2d = UnitSquareMesh(n_x, 2, quadrilateral=(elem_type == 'quad'))
coords = mesh2d.coordinates
# x in [x_min, x_max], y in [-dx, dx]
coords.dat.data[:, 0] = coords.dat.data[:, 0]*(x_max - x_min) + x_min
coords.dat.data[:, 1] = coords.dat.data[:, 1]*2*delta_x - delta_x
# temperature and salinity, for linear eq. of state (from Petersen, 2015)
temp_left = 5.0
temp_right = 30.0
salt_const = 35.0
rho_0 = 1000.0
physical_constants['rho0'].assign(rho_0)
# compute horizontal viscosity
uscale = 0.5
nu_scale = uscale * delta_x / reynolds_number
if reynolds_number < 0:
reynolds_number = float("inf")
nu_scale = 0.0
u_max = 1.0
w_max = 1.2e-2
t_end = 25 * 3600
t_export = 15*60.0
if os.getenv('THETIS_REGRESSION_TEST') is not None:
t_end = 5*t_export
lim_str = '_lim' if use_limiter else ''
options_str = '_'.join([reso_str,
element_family,
elem_type,
'p{:}'.format(poly_order),
'visc-{:}'.format(viscosity),
'Re{:}'.format(reynolds_number),
'lf-vel{:.1f}'.format(laxfriedrichs_vel),
'lf-trc{:.1f}'.format(laxfriedrichs_trc),
]) + lim_str
outputdir = 'outputs_' + options_str
# bathymetry
p1_2d = FunctionSpace(mesh2d, 'CG', 1)
bathymetry_2d = Function(p1_2d, name='Bathymetry')
bathymetry_2d.assign(depth)
# create solver
solver_obj = solver.FlowSolver(mesh2d, bathymetry_2d, layers)
options = solver_obj.options
options.polynomial_degree = poly_order
options.element_family = element_family
options.timestepper_type = 'SSPRK22'
options.solve_salinity = False
options.constant_salinity = Constant(salt_const)
options.solve_temperature = True
options.use_implicit_vertical_diffusion = False
options.use_bottom_friction = False
options.use_ale_moving_mesh = True
options.use_baroclinic_formulation = True
options.use_lax_friedrichs_velocity = laxfriedrichs_vel > 0.0
options.use_lax_friedrichs_tracer = laxfriedrichs_trc > 0.0
options.lax_friedrichs_velocity_scaling_factor = Constant(laxfriedrichs_vel)
options.lax_friedrichs_tracer_scaling_factor = Constant(laxfriedrichs_trc)
options.use_limiter_for_tracers = use_limiter
options.use_limiter_for_velocity = use_limiter
# To keep const grid Re_h, viscosity scales with grid: nu = U dx / Re_h
if viscosity == 'smag':
options.use_smagorinsky_viscosity = True
options.smagorinsky_coefficient = Constant(1.0/np.sqrt(reynolds_number))
elif viscosity == 'const':
options.horizontal_viscosity = Constant(nu_scale)
else:
        raise Exception('Unknown viscosity type {:}'.format(viscosity))
options.vertical_viscosity = Constant(1e-4)
options.horizontal_diffusivity = None
options.horizontal_viscosity_scale = Constant(nu_scale)
options.horizontal_velocity_scale = Constant(u_max)
options.vertical_velocity_scale = Constant(w_max)
if dt is not None:
options.timestepper_options.use_automatic_timestep = False
options.timestep = dt
options.simulation_export_time = t_export
options.simulation_end_time = t_end
options.output_directory = outputdir
options.check_volume_conservation_2d = True
options.check_volume_conservation_3d = True
options.check_temperature_conservation = True
options.check_temperature_overshoot = True
options.fields_to_export = ['uv_2d', 'elev_2d', 'uv_3d',
'w_3d', 'w_mesh_3d', 'temp_3d', 'density_3d',
'uv_dav_2d', 'uv_dav_3d', 'baroc_head_3d',
'smag_visc_3d']
options.fields_to_export_hdf5 = list(options.fields_to_export)
options.equation_of_state_type = 'linear'
options.equation_of_state_options.rho_ref = rho_0
options.equation_of_state_options.s_ref = 35.0
options.equation_of_state_options.th_ref = 5.0
options.equation_of_state_options.alpha = 0.2
options.equation_of_state_options.beta = 0.0
options.update(custom_options)
if comm.size == 1:
solver_obj.add_callback(RPECalculator(solver_obj))
solver_obj.add_callback(FrontLocationCalculator(solver_obj))
# solver_obj.add_callback(PlotCallback(solver_obj, append_to_log=False))
solver_obj.create_equations()
print_output('Running lock exchange problem with options:')
print_output('Resolution: {:}'.format(reso_str))
print_output('Reynolds number: {:}'.format(reynolds_number))
print_output('Use slope limiters: {:}'.format(use_limiter))
print_output('Horizontal viscosity: {:}'.format(nu_scale))
print_output('Lax-Friedrichs factor vel: {:}'.format(laxfriedrichs_vel))
print_output('Lax-Friedrichs factor trc: {:}'.format(laxfriedrichs_trc))
print_output('Exporting to {:}'.format(outputdir))
esize = solver_obj.fields.h_elem_size_2d
min_elem_size = comm.allreduce(np.min(esize.dat.data), op=MPI.MIN)
max_elem_size = comm.allreduce(np.max(esize.dat.data), op=MPI.MAX)
print_output('Elem size: {:} {:}'.format(min_elem_size, max_elem_size))
temp_init3d = Function(solver_obj.function_spaces.H, name='initial temperature')
x, y, z = SpatialCoordinate(solver_obj.mesh)
# vertical barrier
# temp_init3d.interpolate(conditional(x > 0.0, temp_right, temp_left))
# smooth condition
sigma = 10.0
temp_init3d.interpolate(
temp_left - (temp_left - temp_right)*0.5*(tanh(x/sigma) + 1.0)
)
if load_export_ix is None:
solver_obj.assign_initial_conditions(temp=temp_init3d)
else:
assert isinstance(load_export_ix, int)
solver_obj.load_state(load_export_ix)
if iterate:
solver_obj.iterate()
return solver_obj
def get_argparser():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--reso_str', type=str,
help='mesh resolution string. A named mesh or "dx-dz" string',
default='coarse')
parser.add_argument('--no-limiter', action='store_false', dest='use_limiter',
help='do not use slope limiter for tracers')
parser.add_argument('-p', '--poly_order', type=int, default=1,
help='order of finite element space')
parser.add_argument('-f', '--element-family', type=str,
help='finite element family', default='dg-dg')
parser.add_argument('-re', '--reynolds-number', type=float, default=1.0,
help='mesh Reynolds number for Smagorinsky scheme')
parser.add_argument('-dt', '--dt', type=float,
help='force value for 3D time step')
parser.add_argument('-visc', '--viscosity', type=str,
help='Type of horizontal viscosity',
default='const',
choices=['const', 'smag'])
parser.add_argument('-lf-trc', '--laxfriedrichs-trc', type=float,
help='Lax-Friedrichs flux factor for tracers',
default=0.0)
parser.add_argument('-lf-vel', '--laxfriedrichs-vel', type=float,
help='Lax-Friedrichs flux factor for velocity',
default=0.0)
parser.add_argument('-e', '--elem-type', type=str,
help='Type of 2D element, either "tri" or "quad"',
default='tri')
return parser
def parse_options():
parser = get_argparser()
args, unknown_args = parser.parse_known_args()
args_dict = vars(args)
run_lockexchange(**args_dict)
if __name__ == '__main__':
parse_options()
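# Example run (script filename is illustrative):
#   python lockExchange.py -r medium -re 2 -visc smag --no-limiter
# selects the 'medium' mesh, mesh Reynolds number 2, Smagorinsky viscosity and
# disables the tracer/velocity slope limiters.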
|
the-stack_0_27943
|
import cv2
import os
def get_file_name(path):
file_names = [x for x in os.walk(path)][0][2]
file_names = ['.'.join(x.split('.')[:-1]) for x in file_names]
return file_names
def load_data(data_path:str,label_path:str):
cap = cv2.VideoCapture(data_path)
data_list = []
while cap.isOpened():
ret,frame = cap.read()
if not ret:
break
data_list.append(cv2.resize(frame,(300,300)))
save_shape=[300,300]
with open(label_path, "r", encoding="utf-8") as f:
readlines = f.readlines()
boxs = []
for readline in readlines:
readline_split = readline.strip().split(",")
box = [
[int(float(readline_split[0]) * save_shape[1]), int(float(readline_split[1]) * save_shape[0])],
[int(float(readline_split[2]) * save_shape[1]), int(float(readline_split[3]) * save_shape[0])],
readline_split[4]]
boxs.append(box)
return data_list,boxs
if __name__ == '__main__':
import cv2
import numpy as np
org_data_dirs=[
# "D:\data\smoke_car\\rebuild_data_slim\\base_dataset",
# "D:\data\smoke_car\\rebuild_data_slim\\DeAn_dataset",
# "D:\data\smoke_car\\rebuild_data_slim\\GuRun_dataset",
# "D:\data\smoke_car\\rebuild_data_slim\\HeNeng_dataset",
# "D:\data\smoke_car\\rebuild_data_slim\\TongHua_dataset",
# "D:\data\smoke_car\\rebuild_data_slim\\WanZai_dataset",
# "D:\data\smoke_car\\rebuild_data_slim\\XinXiang_dataset",
# "D:\data\smoke_car\\rebuild_data_slim\\YunJing_dataset",
# "D:\data\smoke_car\\rebuild_data_slim\\ZhangYe_dataset",
"D:\data\smoke_car\\rebuild_data_slim\\test_dataset",
]
save_data_pardir="D:\data\smoke_car/smoke_classification_data"
for org_data_dir in org_data_dirs:
save_data_dir = save_data_pardir+"/%s"%os.path.basename(org_data_dir)
if not os.path.exists(save_data_dir):
os.makedirs(save_data_dir)
file_names = get_file_name(org_data_dir+"/data")
for file_name in file_names:
data, label = load_data(
"%s/data/%s.mp4"%(org_data_dir,file_name),
"%s/label/%s.txt"%(org_data_dir,file_name))
            have_smoke = False
            for x in label:
                if x[-1] == "smoke":
                    have_smoke = True
                    label = x
                    break
                elif "smoke" in x[-1]:
                    # Labels that merely contain "smoke" (compound labels) are ignored.
                    continue
            if not have_smoke:
                # Clips without an exact "smoke" box are skipped. To keep them as
                # negative samples, remove this `continue` and crop at a random spot:
                #     center_point = np.random.randint(0, 300, 2)
                #     max_wh = 50
                continue
else:
# continue
center_point = (np.array(label[0])+np.array(label[1]))//2
max_wh = np.max(np.array(label[1])-np.array(label[0]))//2*3
max_wh = min(80,max_wh)
center_point=np.clip(center_point,max_wh,300-max_wh)
video_data=[]
for d in data:
show_data = d[center_point[1]-max_wh:center_point[1]+max_wh,center_point[0]-max_wh:center_point[0]+max_wh,:]
show_data = cv2.resize(show_data,(100,100))
video_data.append(show_data)
video_data = np.concatenate(video_data,0)
save_path = "%s/data/%s.jpg"%(save_data_dir,file_name)
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
cv2.imencode(".jpg",video_data)[1].tofile(save_path)
save_path = "%s/label/%s.txt"%(save_data_dir,file_name)
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
with open(save_path,"w",encoding="utf-8") as f:
if have_smoke:
f.write("T")
else:
f.write("F")
|
the-stack_0_27947
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from operator import itemgetter
from functools import wraps
import re
from six import (iterkeys, binary_type, text_type, string_types, integer_types,
iteritems, PY3)
from six.moves import xrange
from six.moves.urllib.parse import urlparse, urlencode, quote_plus
import certifi
from elasticsearch.connection_pool import RandomSelector
from elasticsearch.exceptions import (ConnectionError, ConnectionTimeout,
TransportError, SerializationError)
from elasticsearch.transport import Transport
import simplejson as json # for use_decimal
from pyelasticsearch.exceptions import (ElasticHttpError,
ElasticHttpNotFoundError,
IndexAlreadyExistsError,
InvalidJsonResponseError,
BulkError)
def _add_es_kwarg_docs(params, method):
"""
Add stub documentation for any args in ``params`` that aren't already in
the docstring of ``method``.
The stubs may not tell much about each arg, but they serve the important
purpose of letting the user know that they're safe to use--we won't be
paving over them in the future for something pyelasticsearch-specific.
"""
def docs_for_kwarg(p):
return '\n :arg %s: See the ES docs.' % p
doc = method.__doc__
    if doc is not None:  # It's None under python -OO.
# Handle the case where there are no :arg declarations to key off:
if '\n :arg' not in doc and params:
first_param, params = params[0], params[1:]
doc = doc.replace('\n (Insert es_kwargs here.)',
docs_for_kwarg(first_param))
for p in params:
if ('\n :arg %s: ' % p) not in doc:
# Find the last documented arg so we can put our generated docs
# after it. No need to explicitly compile this; the regex cache
# should serve.
insertion_point = re.search(
r' :arg (.*?)(?=\n+ (?:$|[^: ]))',
doc,
re.MULTILINE | re.DOTALL).end()
doc = ''.join([doc[:insertion_point],
docs_for_kwarg(p),
doc[insertion_point:]])
method.__doc__ = doc
def es_kwargs(*args_to_convert):
"""
Mark which kwargs will become query string params in the eventual ES call.
Return a decorator that grabs the kwargs of the given names, plus any
beginning with "es_", subtracts them from the ordinary kwargs, and passes
them to the decorated function through the ``query_params`` kwarg. The
remaining kwargs and the args are passed through unscathed.
Also, if any of the given kwargs are undocumented in the decorated method's
docstring, add stub documentation for them.
"""
convertible_args = set(args_to_convert)
def decorator(func):
# Add docs for any missing query params:
_add_es_kwarg_docs(args_to_convert, func)
@wraps(func)
def decorate(*args, **kwargs):
# Make kwargs the map of normal kwargs and query_params the map of
# kwargs destined for query string params.
# Let one @es_kwargs-wrapped function call another:
query_params = kwargs.pop('query_params', {})
for k in list(iterkeys(kwargs)): # Make a copy; we mutate kwargs.
if k.startswith('es_'):
query_params[k[3:]] = kwargs.pop(k)
elif k in convertible_args:
query_params[k] = kwargs.pop(k)
return func(*args, query_params=query_params, **kwargs)
return decorate
return decorator
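# --- Illustrative sketch, not part of the original module --------------------
# Shows how @es_kwargs routes keyword arguments: names listed in the decorator,
# plus anything prefixed with "es_", are collected into ``query_params`` while
# the rest pass through untouched. The class and method below are made up for
# demonstration only.
def _example_es_kwargs_usage():
    class FakeClient(object):
        @es_kwargs('routing', 'refresh')
        def fake_call(self, index, query_params=None):
            return index, query_params

    client = FakeClient()
    # Returns ('library', {'routing': 'abc', 'refresh': True, 'pretty': True}):
    return client.fake_call('library', routing='abc', refresh=True,
                            es_pretty=True)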
class JsonEncoder(json.JSONEncoder):
def default(self, value):
"""Convert more Python data types to ES-understandable JSON."""
iso = _iso_datetime(value)
if iso:
return iso
if not PY3 and isinstance(value, str):
return unicode(value, errors='replace') # TODO: Be stricter.
if isinstance(value, set):
return list(value)
return super(JsonEncoder, self).default(value)
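# --- Illustrative sketch, not part of the original module --------------------
# The ElasticSearch class below exposes this encoder through its ``json_encoder``
# attribute, so additional Python types can be taught to serialize by
# subclassing. The UUID handling here is purely an example; assign the subclass
# with ``es.json_encoder = _ExampleJsonEncoder`` on an instance to use it.
class _ExampleJsonEncoder(JsonEncoder):
    def default(self, value):
        import uuid
        if isinstance(value, uuid.UUID):
            return str(value)  # store UUIDs as plain strings
        return super(_ExampleJsonEncoder, self).default(value)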
class ElasticSearch(object):
"""
An object which manages connections to elasticsearch and acts as a
go-between for API calls to it
This object is thread-safe. You can create one instance and share it
among all threads.
"""
#: You can set this attribute on an instance to customize JSON encoding.
#: The stock JsonEncoder class maps Python datetimes to ES-style datetimes
#: and Python sets to ES lists. You can subclass it to add more.
json_encoder = JsonEncoder
def __init__(self,
urls='http://localhost',
timeout=60,
max_retries=0,
port=9200,
username=None,
password=None,
ca_certs=certifi.where(),
client_cert=None):
"""
:arg urls: A URL or iterable of URLs of ES nodes. These can be full
URLs with port numbers, like
``http://elasticsearch.example.com:9200``, or you can pass the
port separately using the ``port`` kwarg. To do HTTP basic
authentication, you can use RFC-2617-style URLs like
``http://someuser:[email protected]:9200`` or the separate
``username`` and ``password`` kwargs below.
:arg timeout: Number of seconds to wait for each request before raising
Timeout
:arg max_retries: How many other servers to try, in series, after a
request times out or a connection fails
:arg username: Authentication username to send via HTTP basic auth
:arg password: Password to use in HTTP basic auth. If a username and
password are embedded in a URL, those are favored.
:arg port: The default port to connect on, for URLs that don't include
an explicit port
:arg ca_certs: A path to a bundle of CA certificates to trust. The
default is to use Mozilla's bundle, the same one used by Firefox.
:arg client_cert: A certificate to authenticate the client to the
server
"""
if isinstance(urls, string_types):
urls = [urls]
urls = [u.rstrip('/') for u in urls]
# Automatic node sniffing is off for now.
parsed_urls = (urlparse(url) for url in urls)
auth_default = None if username is None else (username, password)
self._transport = Transport(
[{'host': url.hostname,
'port': url.port or port,
'http_auth': (url.username, url.password) if
url.username or url.password else auth_default,
'use_ssl': url.scheme == 'https',
'verify_certs': True,
'ca_certs': ca_certs,
'cert_file': client_cert}
for url in parsed_urls],
max_retries=max_retries,
retry_on_timeout=True,
timeout=timeout,
selector_class=RandomSelector)
def _concat(self, items):
"""
Return a comma-delimited concatenation of the elements of ``items``.
If ``items`` is a string, promote it to a 1-item list.
"""
if items is None:
return ''
if isinstance(items, string_types):
items = [items]
return ','.join(items)
def _to_query(self, obj):
"""
Convert a native-Python object to a unicode or bytestring
representation suitable for a query string.
"""
# Quick and dirty thus far
if isinstance(obj, string_types):
return obj
if isinstance(obj, bool):
return 'true' if obj else 'false'
if isinstance(obj, integer_types):
return str(obj)
if isinstance(obj, float):
return repr(obj) # str loses precision.
if isinstance(obj, (list, tuple)):
return ','.join(self._to_query(o) for o in obj)
iso = _iso_datetime(obj)
if iso:
return iso
raise TypeError("_to_query() doesn't know how to represent %r in an ES"
' query string.' % obj)
def _utf8(self, thing):
"""Convert any arbitrary ``thing`` to a utf-8 bytestring."""
if isinstance(thing, binary_type):
return thing
if not isinstance(thing, text_type):
thing = text_type(thing)
return thing.encode('utf-8')
def _join_path(self, path_components):
"""
Smush together the path components, omitting '' and None ones.
Unicodes get encoded to strings via utf-8. Incoming strings are assumed
to be utf-8-encoded already.
"""
path = '/'.join(quote_plus(self._utf8(p), '') for p in path_components if
p is not None and p != '')
if not path.startswith('/'):
path = '/' + path
return path
def send_request(self,
method,
path_components,
body='',
query_params=None):
"""
Send an HTTP request to ES, and return the JSON-decoded response.
This is mostly an internal method, but it also comes in handy if you
need to use a brand new ES API that isn't yet explicitly supported by
pyelasticsearch, while still taking advantage of our connection pooling
and retrying.
Retry the request on different servers if the first one is down and
the ``max_retries`` constructor arg was > 0.
On failure, raise an
:class:`~pyelasticsearch.exceptions.ElasticHttpError`, a
:class:`~pyelasticsearch.exceptions.ConnectionError`, or a
:class:`~pyelasticsearch.exceptions.Timeout`.
:arg method: An HTTP method, like "GET"
:arg path_components: An iterable of path components, to be joined by
"/"
:arg body: A map of key/value pairs to be sent as the JSON request
body. Alternatively, a string to be sent verbatim, without further
JSON encoding.
:arg query_params: A map of querystring param names to values or
``None``
"""
if query_params is None:
query_params = {}
path = self._join_path(path_components)
# We wrap to use pyelasticsearch's exception hierarchy for backward
# compatibility:
try:
# This implicitly converts dicts to JSON. Strings are left alone:
_, prepped_response = self._transport.perform_request(
method,
path,
params=dict((k, self._utf8(self._to_query(v)))
for k, v in iteritems(query_params)),
body=body)
except SerializationError as exc:
raise InvalidJsonResponseError(exc.args[0])
except (ConnectionError, ConnectionTimeout) as exc:
# Pull the urllib3-native exception out, and raise it:
raise exc.info
except TransportError as exc:
status = exc.args[0]
error_message = exc.args[1]
self._raise_exception(status, error_message)
return prepped_response
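    # Illustrative usage sketch (not part of the original client): send_request()
    # can reach ES endpoints that have no dedicated wrapper yet. The endpoint and
    # host below are assumptions for demonstration only:
    #
    #     es = ElasticSearch('http://localhost:9200')
    #     es.send_request('GET', ['_cat', 'indices'],
    #                     query_params={'format': 'json'})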
def _raise_exception(self, status, error_message):
"""Raise an exception based on an error-indicating response from ES."""
error_class = ElasticHttpError
if status == 404:
error_class = ElasticHttpNotFoundError
elif (hasattr(error_message, 'startswith') and
(error_message.startswith('IndexAlreadyExistsException') or
'nested: IndexAlreadyExistsException' in error_message)):
error_class = IndexAlreadyExistsError
raise error_class(status, error_message)
def _encode_json(self, value):
"""
Convert a Python value to a form suitable for ElasticSearch's JSON DSL.
"""
return json.dumps(value, cls=self.json_encoder, use_decimal=True)
## REST API
@es_kwargs('routing', 'parent', 'timestamp', 'ttl', 'percolate',
'consistency', 'replication', 'refresh', 'timeout', 'fields')
def index(self, index, doc_type, doc, id=None, overwrite_existing=True,
query_params=None):
"""
Put a typed JSON document into a specific index to make it searchable.
:arg index: The name of the index to which to add the document
:arg doc_type: The type of the document
:arg doc: A Python mapping object, convertible to JSON, representing
the document
:arg id: The ID to give the document. Leave blank to make one up.
:arg overwrite_existing: Whether we should overwrite existing documents
of the same ID and doc type
:arg routing: A value hashed to determine which shard this indexing
request is routed to
:arg parent: The ID of a parent document, which leads this document to
be routed to the same shard as the parent, unless ``routing``
overrides it.
:arg timestamp: An explicit value for the (typically automatic)
timestamp associated with a document, for use with ``ttl`` and such
:arg ttl: The time until this document is automatically removed from
the index. Can be an integral number of milliseconds or a duration
like '1d'.
:arg percolate: An indication of which percolator queries, registered
against this index, should be checked against the new document: '*'
or a query string like 'color:green'
:arg consistency: An indication of how many active shards the contact
node should demand to see in order to let the index operation
succeed: 'one', 'quorum', or 'all'
:arg replication: Set to 'async' to return from ES before finishing
replication.
:arg refresh: Pass True to refresh the index after adding the document.
:arg timeout: A duration to wait for the relevant primary shard to
become available, in the event that it isn't: for example, "5m"
See `ES's index API`_ for more detail.
.. _`ES's index API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html
"""
# :arg query_params: A map of other querystring params to pass along to
# ES. This lets you use future ES features without waiting for an
# update to pyelasticsearch. If we just used **kwargs for this, ES
# could start using a querystring param that we already used as a
# kwarg, and we'd shadow it. Name these params according to the names
        # they have in ES's REST API, but prepend "es_": for example,
# ``es_version=2``.
# TODO: Support version along with associated "preference" and
# "version_type" params.
if not overwrite_existing:
query_params['op_type'] = 'create'
return self.send_request('POST' if id is None else 'PUT',
[index, doc_type, id],
doc,
query_params)
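    # Illustrative usage sketch (not part of the original client); the index,
    # type, and document contents are made up:
    #
    #     es = ElasticSearch('http://localhost:9200')
    #     es.index('library', 'book', {'title': 'All About Cats', 'pages': 20},
    #              id=1)
    #     # Create-only semantics (fail if the ID already exists):
    #     es.index('library', 'book', {'title': 'And Rats'}, id=2,
    #              overwrite_existing=False)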
@es_kwargs('consistency', 'refresh', 'replication', 'routing', 'timeout')
def bulk(self, actions, index=None, doc_type=None, query_params=None):
"""
Perform multiple index, delete, create, or update actions per request.
Used with helper routines :meth:`index_op()`, :meth:`delete_op()`, and
:meth:`update_op()`, this provides an efficient, readable way to do
large-scale changes. This contrived example illustrates the structure::
es.bulk([es.index_op({'title': 'All About Cats', 'pages': 20}),
es.index_op({'title': 'And Rats', 'pages': 47}),
es.index_op({'title': 'And Bats', 'pages': 23})],
doc_type='book',
index='library')
More often, you'll want to index (or delete or update) a larger number
of documents. In those cases, yield your documents from a generator,
and use :func:`~pyelasticsearch.bulk_chunks()` to divide them into
multiple requests::
from pyelasticsearch import bulk_chunks
def documents():
for book in books:
yield es.index_op({'title': book.title, 'pages': book.pages})
# index_op() also takes kwargs like index= and id= in case
# you want more control.
#
# You could also yield some delete_ops or update_ops here.
# bulk_chunks() breaks your documents into smaller requests for speed:
for chunk in bulk_chunks(documents(),
docs_per_chunk=500,
bytes_per_chunk=10000):
# We specify a default index and doc type here so we don't
# have to repeat them in every operation:
es.bulk(chunk, doc_type='book', index='library')
:arg actions: An iterable of bulk actions, generally the output of
:func:`~pyelasticsearch.bulk_chunks()` but sometimes a list
of calls to :meth:`index_op()`, :meth:`delete_op()`, and
:meth:`update_op()` directly. Specifically, an iterable of
JSON-encoded bytestrings that can be joined with newlines and
sent to ES.
:arg index: Default index to operate on
:arg doc_type: Default type of document to operate on. Cannot be
specified without ``index``.
Return the decoded JSON response on success.
Raise :class:`~pyelasticsearch.exceptions.BulkError` if any of the
individual actions fail. The exception provides enough about the
failed actions to identify them for retrying.
Sometimes there is an error with the request in general, not with
any individual actions. If there is a connection error, timeout,
or other transport error, a more general exception will be raised, as
with other methods; see :ref:`error-handling`.
See `ES's bulk API`_ for more detail.
.. _`ES's bulk API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
"""
# To summarize the flow: index_op() encodes a bytestring.
# bulk_chunks() groups.
# bulk() joins with \n.
if doc_type is not None and index is None:
raise ValueError(
'Please also pass `index` if you pass `doc_type`.')
def is_error(item):
for op, subdict in iteritems(item):
break
return not 200 <= subdict.get('status', 999) < 300
response = self.send_request('POST',
[index, doc_type, '_bulk'],
body='\n'.join(actions) + '\n',
query_params=query_params)
# Sometimes the request worked, but individual actions fail:
if response.get('errors', True): # Try a shortcut to avoid looking
# at every item on success.
errors, successes = [], []
for item in response['items']:
if is_error(item):
errors.append(item)
else:
successes.append(item)
if errors:
raise BulkError(errors, successes)
return response
def index_op(self, doc, doc_type=None, overwrite_existing=True, **meta):
"""
Return a document-indexing operation that can be passed to
:meth:`bulk()`. (See there for examples.)
Specifically, return a 2-line, JSON-encoded bytestring.
:arg doc: A mapping of property names to values.
:arg doc_type: The type of the document to index, if different from
the one you pass to :meth:`bulk()`
:arg overwrite_existing: Whether we should overwrite existing
documents of the same ID and doc type. (If False, this does a
`create` operation.)
:arg meta: Other args controlling how the document is indexed,
like ``id`` (most common), ``index`` (next most common),
``version``, and ``routing``. See `ES's bulk API`_ for details on
these.
.. _`ES's bulk API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
"""
operation = 'index' if overwrite_existing else 'create'
return self._bulk_op(operation, doc=doc, meta=meta, doc_type=doc_type)
def delete_op(self, doc_type=None, **meta):
"""
Return a document-deleting operation that can be passed to
:meth:`bulk()`. ::
def actions():
...
yield es.delete_op(id=7)
yield es.delete_op(id=9,
index='some-non-default-index',
doc_type='some-non-default-type')
...
es.bulk(actions(), ...)
Specifically, return a JSON-encoded bytestring.
:arg doc_type: The type of the document to delete, if different
from the one passed to :meth:`bulk()`
:arg meta: A description of what document to delete and how to do it.
Example: ``{"index": "library", "id": 2, "version": 4}``. See
`ES's bulk API`_ for a list of all the options.
.. _`ES's bulk API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
"""
return self._bulk_op('delete', meta=meta, doc_type=doc_type)
def update_op(self, doc=None, doc_type=None, upsert=None,
doc_as_upsert=None, script=None, params=None, lang=None,
**meta):
"""
Return a document-updating operation that can be passed to
:meth:`bulk()`. ::
def actions():
...
yield es.update_op(doc={'pages': 4},
id=7,
version=21)
...
es.bulk(actions(), ...)
Specifically, return a JSON-encoded bytestring.
:arg doc: A partial document to be merged into the existing document
:arg doc_type: The type of the document to update, if different
from the one passed to :meth:`bulk()`
:arg upsert: The content for the new document created if the
document does not exist
:arg script: The script to be used to update the document
:arg params: A dict of the params to be put in scope of the script
:arg lang: The language of the script. Omit to use the default,
specified by ``script.default_lang``.
:arg meta: Other args controlling what document to update and how
to do it, like ``id``, ``index``, and ``retry_on_conflict``,
destined for the action line itself rather than the payload. See
`ES's bulk API`_ for details on these.
.. _`ES's bulk API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
"""
payload = dict((k, v) for k, v in [('doc', doc), ('upsert', upsert),
('doc_as_upsert', doc_as_upsert),
('script', script), ('params', params),
('lang', lang)] if v is not None)
return self._bulk_op('update',
doc=payload,
meta=meta,
doc_type=doc_type)
def _bulk_op(self, operation, doc=None, meta=None, doc_type=None):
"""
Return an arbitrary bulk indexing operation as a bytestring.
:arg operation: One of 'index', 'delete', 'update', or 'create'
:arg doc: A mapping of fields
:arg meta: A mapping of underscore-prefixed fields with special
meaning to ES, like ``_id`` and ``_type``
:arg doc_type: The value that is to become the ``_type`` field of
the action line. We go to special trouble to keep the name
"doc_type" for consistency with other routines.
"""
def underscore_keys(d):
"""Return a dict with every key prefixed by an underscore."""
return dict(('_%s' % k, v) for k, v in iteritems(d))
if meta is None:
meta = {}
if doc_type is not None:
meta['type'] = doc_type
ret = self._encode_json({operation: underscore_keys(meta)})
if doc is not None:
ret += '\n' + self._encode_json(doc)
return ret
@es_kwargs('consistency', 'refresh', 'replication', 'routing', 'timeout')
def bulk_index(self, index, doc_type, docs, id_field='id',
parent_field='_parent', index_field='_index',
type_field='_type', query_params=None):
"""
Index a list of documents as efficiently as possible.
.. note::
This is deprecated in favor of :meth:`bulk()`, which supports all
types of bulk actions, not just indexing, is compatible with
:func:`~pyelasticsearch.bulk_chunks()` for batching, and has a
simpler, more flexible design.
:arg index: The name of the index to which to add the document. Pass
            None if you will specify indices individually in each doc.
:arg doc_type: The type of the document
:arg docs: An iterable of Python mapping objects, convertible to JSON,
representing documents to index
:arg id_field: The field of each document that holds its ID. Removed
from document before indexing.
:arg parent_field: The field of each document that holds its parent ID,
if any. Removed from document before indexing.
:arg index_field: The field of each document that holds the index to
put it into, if different from the ``index`` arg. Removed from
document before indexing.
:arg type_field: The field of each document that holds the doc type it
should become, if different from the ``doc_type`` arg. Removed from
the document before indexing.
Raise :class:`~pyelasticsearch.exceptions.BulkError` if the request as
a whole succeeded but some of the individual actions failed. You can
pull enough about the failed actions out of the exception to identify
them for retrying.
See `ES's bulk API`_ for more detail.
.. _`ES's bulk API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
"""
if not docs:
raise ValueError('No documents provided for bulk indexing!')
meta_fields = [(index_field, 'index'),
(id_field, 'id'),
(parent_field, 'parent')]
def encoded_docs():
for doc in docs:
action = {}
for doc_key, bulk_key in meta_fields:
if doc.get(doc_key) is not None:
action[bulk_key] = doc.pop(doc_key)
yield self.index_op(doc,
doc_type=doc.pop(type_field, None),
**action)
return self.bulk(encoded_docs(),
index=index,
doc_type=doc_type,
query_params=query_params)
@es_kwargs('routing', 'parent', 'replication', 'consistency', 'refresh')
def delete(self, index, doc_type, id, query_params=None):
"""
Delete a typed JSON document from a specific index based on its ID.
:arg index: The name of the index from which to delete
:arg doc_type: The type of the document to delete
:arg id: The (string or int) ID of the document to delete
See `ES's delete API`_ for more detail.
.. _`ES's delete API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html
"""
        # id should never be None. Passing one isn't particularly dangerous
        # (it would be equivalent to deleting a doc with the ID "None"), but
        # it's almost certainly not what the caller meant:
if id is None or id == '':
raise ValueError('No ID specified. To delete all documents in '
'an index, use delete_all().')
return self.send_request('DELETE', [index, doc_type, id],
query_params=query_params)
@es_kwargs('routing', 'parent', 'replication', 'consistency', 'refresh')
def delete_all(self, index, doc_type, query_params=None):
"""
Delete all documents of the given doc type from an index.
:arg index: The name of the index from which to delete. ES does not
support this being empty or "_all" or a comma-delimited list of
index names (in 0.19.9).
:arg doc_type: The name of a document type
See `ES's delete API`_ for more detail.
.. _`ES's delete API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html
"""
return self.send_request('DELETE', [index, doc_type],
query_params=query_params)
    @es_kwargs('q', 'df', 'analyzer', 'default_operator', 'source', 'routing',
'replication', 'consistency')
def delete_by_query(self, index, doc_type, query, query_params=None):
"""
Delete typed JSON documents from a specific index based on query.
:arg index: An index or iterable thereof from which to delete
:arg doc_type: The type of document or iterable thereof to delete
:arg query: A dictionary that will convert to ES's query DSL or a
string that will serve as a textual query to be passed as the ``q``
query string parameter. (Passing the ``q`` kwarg yourself is
deprecated.)
See `ES's delete-by-query API`_ for more detail.
.. _`ES's delete-by-query API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html
"""
if isinstance(query, string_types) and 'q' not in query_params:
query_params['q'] = query
body = ''
else:
body = {'query': query}
return self.send_request(
'DELETE',
[self._concat(index), self._concat(doc_type), '_query'],
body,
query_params=query_params)
@es_kwargs('realtime', 'fields', 'routing', 'preference', 'refresh')
def get(self, index, doc_type, id, query_params=None):
"""
Get a typed JSON document from an index by ID.
:arg index: The name of the index from which to retrieve
:arg doc_type: The type of document to get
:arg id: The ID of the document to retrieve
See `ES's get API`_ for more detail.
.. _`ES's get API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
"""
return self.send_request('GET', [index, doc_type, id],
query_params=query_params)
@es_kwargs()
def multi_get(self, ids, index=None, doc_type=None, fields=None,
query_params=None):
"""
Get multiple typed JSON documents from ES.
        :arg ids: An iterable, each element of which can be either a dict or
an id (int or string). IDs are taken to be document IDs. Dicts are
passed through the Multi Get API essentially verbatim, except that
any missing ``_type``, ``_index``, or ``fields`` keys are filled in
from the defaults given in the ``doc_type``, ``index``, and
``fields`` args.
:arg index: Default index name from which to retrieve
:arg doc_type: Default type of document to get
:arg fields: Default fields to return
See `ES's Multi Get API`_ for more detail.
.. _`ES's Multi Get API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html
"""
doc_template = dict(
filter(
itemgetter(1),
[('_index', index), ('_type', doc_type), ('fields', fields)]))
docs = []
for id in ids:
doc = doc_template.copy()
if isinstance(id, dict):
doc.update(id)
else:
doc['_id'] = id
docs.append(doc)
return self.send_request(
'GET', ['_mget'], {'docs': docs}, query_params=query_params)
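    # Illustrative usage sketch (not part of the original client): plain IDs pick
    # up the default index/type, while dict entries override them per document.
    # All names below are made up:
    #
    #     es.multi_get([1, 2, {'_id': 3, '_index': 'archive'}],
    #                  index='library', doc_type='book', fields=['title'])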
@es_kwargs('routing', 'parent', 'timeout', 'replication', 'consistency',
'percolate', 'refresh', 'retry_on_conflict', 'fields')
def update(self, index, doc_type, id, script=None, params=None, lang=None,
query_params=None, doc=None, upsert=None, doc_as_upsert=None):
"""
Update an existing document. Raise ``TypeError`` if ``script``, ``doc``
and ``upsert`` are all unspecified.
:arg index: The name of the index containing the document
:arg doc_type: The type of the document
:arg id: The ID of the document
:arg script: The script to be used to update the document
:arg params: A dict of the params to be put in scope of the script
:arg lang: The language of the script. Omit to use the default,
specified by ``script.default_lang``.
:arg doc: A partial document to be merged into the existing document
:arg upsert: The content for the new document created if the document
does not exist
:arg doc_as_upsert: The provided document will be inserted if the
document does not already exist
See `ES's Update API`_ for more detail.
.. _`ES's Update API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html
"""
if script is None and doc is None and upsert is None:
raise TypeError('At least one of the script, doc, or upsert '
'kwargs must be provided.')
body = {}
if script:
body['script'] = script
if lang and script:
body['lang'] = lang
if doc:
body['doc'] = doc
if upsert:
body['upsert'] = upsert
if params:
body['params'] = params
if doc_as_upsert:
body['doc_as_upsert'] = doc_as_upsert
return self.send_request(
'POST',
[index, doc_type, id, '_update'],
body=body,
query_params=query_params)
def _search_or_count(self, kind, query, index=None, doc_type=None,
query_params=None):
if isinstance(query, string_types):
query_params['q'] = query
body = ''
else:
body = query
return self.send_request(
'GET',
[self._concat(index), self._concat(doc_type), kind],
body,
query_params=query_params)
@es_kwargs('routing', 'size')
def search(self, query, **kwargs):
"""
Execute a search query against one or more indices and get back search
hits.
:arg query: A dictionary that will convert to ES's query DSL or a
string that will serve as a textual query to be passed as the ``q``
query string parameter
:arg index: An index or iterable of indexes to search. Omit to search
all.
:arg doc_type: A document type or iterable thereof to search. Omit to
search all.
:arg size: Limit the number of results to ``size``. Use with ``es_from`` to
implement paginated searching.
See `ES's search API`_ for more detail.
.. _`ES's search API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html
"""
return self._search_or_count('_search', query, **kwargs)
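    # Illustrative usage sketch (not part of the original client): a dict is sent
    # as the query DSL body, while a plain string becomes the ``q`` query-string
    # parameter. Index and field names are made up:
    #
    #     es.search({'query': {'match': {'title': 'cats'}}},
    #               index='library', size=10)
    #     es.search('title:cats', index='library')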
@es_kwargs('df', 'analyzer', 'default_operator', 'source', 'routing')
def count(self, query, **kwargs):
"""
Execute a query against one or more indices and get hit count.
:arg query: A dictionary that will convert to ES's query DSL or a
string that will serve as a textual query to be passed as the ``q``
query string parameter
:arg index: An index or iterable of indexes to search. Omit to search
all.
:arg doc_type: A document type or iterable thereof to search. Omit to
search all.
See `ES's count API`_ for more detail.
.. _`ES's count API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html
"""
return self._search_or_count('_count', query, **kwargs)
@es_kwargs()
def get_mapping(self, index=None, doc_type=None, query_params=None):
"""
Fetch the mapping definition for a specific index and type.
:arg index: An index or iterable thereof
:arg doc_type: A document type or iterable thereof
Omit both arguments to get mappings for all types and indexes.
See `ES's get-mapping API`_ for more detail.
.. _`ES's get-mapping API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html
"""
# TODO: Think about turning index=None into _all if doc_type is non-
# None, per the ES doc page.
return self.send_request(
'GET',
[self._concat(index), self._concat(doc_type), '_mapping'],
query_params=query_params)
@es_kwargs('ignore_conflicts')
def put_mapping(self, index, doc_type, mapping, query_params=None):
"""
Register specific mapping definition for a specific type against one or
more indices.
:arg index: An index or iterable thereof
:arg doc_type: The document type to set the mapping of
:arg mapping: A dict representing the mapping to install. For example,
this dict can have top-level keys that are the names of doc types.
See `ES's put-mapping API`_ for more detail.
.. _`ES's put-mapping API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html
"""
# TODO: Perhaps add a put_all_mappings() for consistency and so we
# don't need to expose the "_all" magic string. We haven't done it yet
        # since this routine is not dangerous: ES makes you explicitly pass
# "_all" to update all mappings.
return self.send_request(
'PUT',
[self._concat(index), doc_type, '_mapping'],
mapping,
query_params=query_params)
@es_kwargs('search_type', 'search_indices', 'search_types',
'search_scroll', 'search_size', 'search_from',
'like_text', 'percent_terms_to_match', 'min_term_freq',
'max_query_terms', 'stop_words', 'min_doc_freq', 'max_doc_freq',
'min_word_len', 'max_word_len', 'boost_terms', 'boost',
'analyzer')
def more_like_this(self, index, doc_type, id, mlt_fields, body='', query_params=None):
"""
Execute a "more like this" search query against one or more fields and
get back search hits.
:arg index: The index to search and where the document for comparison
lives
:arg doc_type: The type of document to find others like
:arg id: The ID of the document to find others like
:arg mlt_fields: The list of fields to compare on
:arg body: A dictionary that will convert to ES's query DSL and be
passed as the request body
See `ES's more-like-this API`_ for more detail.
.. _`ES's more-like-this API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/search-more-like-this.html
"""
query_params['mlt_fields'] = self._concat(mlt_fields)
return self.send_request('GET',
[index, doc_type, id, '_mlt'],
body=body,
query_params=query_params)
## Index Admin API
@es_kwargs('recovery', 'snapshot')
def status(self, index=None, query_params=None):
"""
Retrieve the status of one or more indices
:arg index: An index or iterable thereof
See `ES's index-status API`_ for more detail.
.. _`ES's index-status API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-status.html
"""
return self.send_request('GET', [self._concat(index), '_status'],
query_params=query_params)
@es_kwargs()
def update_aliases(self, actions, query_params=None):
"""
Atomically add, remove, or update aliases in bulk.
:arg actions: A list of the actions to perform
See `ES's indices-aliases API`_.
.. _`ES's indices-aliases API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html
"""
return self.send_request('POST', ['_aliases'],
body={'actions': actions},
query_params=query_params)
@es_kwargs('ignore_unavailable')
def get_aliases(self, index=None, alias='*', query_params=None):
"""
Retrieve a listing of aliases
:arg index: The name of an index or an iterable of indices from which
to fetch aliases. If omitted, look in all indices.
:arg alias: The name of the alias to return or an iterable of them.
Wildcard * is supported. If this arg is omitted, return all aliases.
See `ES's indices-aliases API`_.
.. _`ES's indices-aliases API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html
"""
return self.send_request(
'GET',
[self._concat(index), '_aliases', self._concat(alias)],
query_params=query_params)
def aliases(self, *args, **kwargs):
# Deprecated.
return self.get_aliases(*args, **kwargs)
@es_kwargs()
def create_index(self, index, settings=None, query_params=None):
"""
Create an index with optional settings.
:arg index: The name of the index to create
:arg settings: A dictionary of settings
If the index already exists, raise
:class:`~pyelasticsearch.exceptions.IndexAlreadyExistsError`.
See `ES's create-index API`_ for more detail.
.. _`ES's create-index API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
"""
return self.send_request('PUT', [index], body=settings or {},
query_params=query_params)
@es_kwargs()
def delete_index(self, index, query_params=None):
"""
Delete an index.
:arg index: An index or iterable thereof to delete
If the index is not found, raise
:class:`~pyelasticsearch.exceptions.ElasticHttpNotFoundError`.
See `ES's delete-index API`_ for more detail.
.. _`ES's delete-index API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
"""
if not index:
raise ValueError('No indexes specified. To delete all indexes, use'
' delete_all_indexes().')
return self.send_request('DELETE', [self._concat(index)],
query_params=query_params)
def delete_all_indexes(self, **kwargs):
"""Delete all indexes."""
return self.delete_index('_all', **kwargs)
@es_kwargs()
def close_index(self, index, query_params=None):
"""
Close an index.
:arg index: The index to close
See `ES's close-index API`_ for more detail.
.. _`ES's close-index API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html
"""
return self.send_request('POST', [index, '_close'],
query_params=query_params)
@es_kwargs()
def open_index(self, index, query_params=None):
"""
Open an index.
:arg index: The index to open
See `ES's open-index API`_ for more detail.
.. _`ES's open-index API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html
"""
return self.send_request('POST', [index, '_open'],
query_params=query_params)
@es_kwargs()
def get_settings(self, index, query_params=None):
"""
Get the settings of one or more indexes.
:arg index: An index or iterable of indexes
See `ES's get-settings API`_ for more detail.
.. _`ES's get-settings API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html
"""
return self.send_request('GET',
[self._concat(index), '_settings'],
query_params=query_params)
@es_kwargs()
def update_settings(self, index, settings, query_params=None):
"""
Change the settings of one or more indexes.
:arg index: An index or iterable of indexes
:arg settings: A dictionary of settings
See `ES's update-settings API`_ for more detail.
.. _`ES's update-settings API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html
"""
if not index:
raise ValueError('No indexes specified. To update all indexes, use'
' update_all_settings().')
# If we implement the "update cluster settings" API, call that
# update_cluster_settings().
return self.send_request('PUT',
[self._concat(index), '_settings'],
body=settings,
query_params=query_params)
@es_kwargs()
def update_all_settings(self, settings, query_params=None):
"""
Update the settings of all indexes.
:arg settings: A dictionary of settings
See `ES's update-settings API`_ for more detail.
.. _`ES's update-settings API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html
"""
return self.send_request('PUT', ['_settings'], body=settings,
query_params=query_params)
@es_kwargs('refresh')
def flush(self, index=None, query_params=None):
"""
Flush one or more indices (clear memory).
:arg index: An index or iterable of indexes
See `ES's flush API`_ for more detail.
.. _`ES's flush API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
"""
return self.send_request('POST',
[self._concat(index), '_flush'],
query_params=query_params)
@es_kwargs()
def refresh(self, index=None, query_params=None):
"""
Refresh one or more indices.
:arg index: An index or iterable of indexes
See `ES's refresh API`_ for more detail.
.. _`ES's refresh API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html
"""
return self.send_request('POST', [self._concat(index), '_refresh'],
query_params=query_params)
@es_kwargs()
def gateway_snapshot(self, index=None, query_params=None):
"""
Gateway snapshot one or more indices.
:arg index: An index or iterable of indexes
See `ES's gateway-snapshot API`_ for more detail.
.. _`ES's gateway-snapshot API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-gateway-snapshot.html
"""
return self.send_request(
'POST',
[self._concat(index), '_gateway', 'snapshot'],
query_params=query_params)
@es_kwargs('max_num_segments', 'only_expunge_deletes', 'refresh', 'flush',
'wait_for_merge')
def optimize(self, index=None, query_params=None):
"""
Optimize one or more indices.
:arg index: An index or iterable of indexes
See `ES's optimize API`_ for more detail.
.. _`ES's optimize API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-optimize.html
"""
return self.send_request('POST',
[self._concat(index), '_optimize'],
query_params=query_params)
@es_kwargs('level', 'wait_for_status', 'wait_for_relocating_shards',
'wait_for_nodes', 'timeout')
def health(self, index=None, query_params=None):
"""
Report on the health of the cluster or certain indices.
:arg index: The index or iterable of indexes to examine
See `ES's cluster-health API`_ for more detail.
.. _`ES's cluster-health API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
"""
return self.send_request(
'GET',
['_cluster', 'health', self._concat(index)],
query_params=query_params)
@es_kwargs('local')
def cluster_state(self, metric='_all', index='_all', query_params=None):
"""
Return state information about the cluster.
:arg metric: Which metric to return: one of "version", "master_node",
"nodes", "routing_table", "meatadata", or "blocks", an iterable
of them, or a comma-delimited string of them. Defaults to all
metrics.
:arg index: An index or iterable of indexes to return info about
See `ES's cluster-state API`_ for more detail.
.. _`ES's cluster-state API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html
"""
return self.send_request(
'GET',
['_cluster', 'state', self._concat(metric), self._concat(index)],
query_params=query_params)
@es_kwargs('routing', 'preference', 'ignore_unavailable',
'percolate_format')
def percolate(self, index, doc_type, doc, query_params=None):
"""
Run a JSON document through the registered percolator queries, and
return which ones match.
:arg index: The name of the index to which the document pretends to
belong
:arg doc_type: The type the document should be treated as if it has
:arg doc: A Python mapping object, convertible to JSON, representing
the document
Use :meth:`index()` to register percolators. See `ES's percolate API`_
for more detail.
.. _`ES's percolate API`:
http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html#_percolate_api
"""
return self.send_request('GET',
[index, doc_type, '_percolate'],
doc,
query_params=query_params)
def _iso_datetime(value):
"""
If value appears to be something datetime-like, return it in ISO format.
Otherwise, return None.
"""
if hasattr(value, 'strftime'):
if hasattr(value, 'hour'):
return value.isoformat()
else:
return '%sT00:00:00' % value.isoformat()
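# --- Illustrative sketch, not part of the original module --------------------
# _iso_datetime() treats anything with strftime() as datetime-like: objects that
# also have an ``hour`` attribute keep their time component, while plain dates
# get a midnight timestamp appended.
def _example_iso_datetime():
    import datetime
    # Returns '2015-06-01T12:30:00':
    with_time = _iso_datetime(datetime.datetime(2015, 6, 1, 12, 30))
    # Returns '2015-06-01T00:00:00':
    date_only = _iso_datetime(datetime.date(2015, 6, 1))
    return with_time, date_only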
|
the-stack_0_27948
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import django_sysadmin
import os
from setuptools import setup, find_packages
## dependencies
install_requires = [
'Django>=1.4.1',
'wsgiref==0.1.2',
]
packages = find_packages()
setup(
name='django-sysadmin',
version=django_sysadmin.__version__,
author='Bernhard Maeser',
author_email='[email protected]',
url='https://github.com/bmaeser/django-sysadmin',
license="MIT",
description="Django models to make a sysadmins life easier",
long_description=open('README.rst').read(),
packages = packages,
include_package_data=True,
install_requires = install_requires,
zip_safe=False,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: System Administrators',
'Topic :: System :: Systems Administration',
'Topic :: System :: Installation/Setup',
'Operating System :: POSIX',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
),
)
|
the-stack_0_27949
|
import os
import detect_and_align
from scipy import misc
import numpy as np
class ID_Data():
def __init__(self, name, image_path):
self.name = name
self.image_path = image_path
self.embedding = []
def get_id_data(id_folder, pnet, rnet, onet, sess, embeddings, images_placeholder, phase_train_placeholder):
id_dataset = []
ids = os.listdir(os.path.expanduser(id_folder))
ids.sort()
for id_name in ids:
id_dir = os.path.join(id_folder, id_name)
image_names = os.listdir(id_dir)
image_paths = [os.path.join(id_dir, img) for img in image_names]
for image_path in image_paths:
id_dataset.append(ID_Data(id_name, image_path))
aligned_images = align_id_dataset(id_dataset, pnet, rnet, onet)
feed_dict = {images_placeholder: aligned_images, phase_train_placeholder: False}
emb = sess.run(embeddings, feed_dict=feed_dict)
for i in range(len(id_dataset)):
id_dataset[i].embedding = emb[i, :]
return id_dataset
def align_id_dataset(id_dataset, pnet, rnet, onet):
aligned_images = []
for i in range(len(id_dataset)):
image = misc.imread(os.path.expanduser(id_dataset[i].image_path), mode='RGB')
face_patches, _, _ = detect_and_align.align_image(image, pnet, rnet, onet)
aligned_images = aligned_images + face_patches
aligned_images = np.stack(aligned_images)
return aligned_images
if __name__ == "__main__":
    # This module only provides helpers for loading and embedding ID images;
    # there is no standalone entry point.
    pass
|
the-stack_0_27950
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://serviceusage.googleapis.com/v1beta1/'
DOCS_URL = 'https://cloud.google.com/service-usage/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
OPERATIONS = (
'operations',
'{+name}',
{
'':
'operations/{operationsId}',
},
['name'],
True
)
SERVICES = (
'services',
'{+name}',
{
'':
'{v1beta1Id}/{v1beta1Id1}/services/{servicesId}',
},
['name'],
True
)
SERVICES_CONSUMERQUOTAMETRICS = (
'services.consumerQuotaMetrics',
'{+name}',
{
'':
'{v1beta1Id}/{v1beta1Id1}/services/{servicesId}/'
'consumerQuotaMetrics/{consumerQuotaMetricsId}',
},
['name'],
True
)
SERVICES_CONSUMERQUOTAMETRICS_LIMITS = (
'services.consumerQuotaMetrics.limits',
'{+name}',
{
'':
'{v1beta1Id}/{v1beta1Id1}/services/{servicesId}/'
'consumerQuotaMetrics/{consumerQuotaMetricsId}/limits/'
'{limitsId}',
},
['name'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
|
the-stack_0_27951
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import gettext
LOCALE_DIR = os.path.join(os.path.dirname(__file__), 'locale')
language = gettext.translation('electrum', LOCALE_DIR, fallback=True)
def _(x):
global language
dic = [('Bitcoin', 'Sugarchain'), ('bitcoin', 'sugarchain'), ('bitcoins', 'sugarchains'), ('mBTC/kB', 'mSUGAR/kB'), ('ビットコイン', 'シュガーチェーン')]
for b, m in dic:
x = x.replace(m, b)
t = language.gettext(x)
for b, m in dic:
t = t.replace(b, m)
return t
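# Illustrative sketch, not part of the original module: the replacement table in
# _() maps Sugarchain wording back to the Bitcoin wording that upstream Electrum
# translation catalogs contain, runs the gettext lookup, then restores the
# Sugarchain branding.
def _example_branding_roundtrip():
    # With only the fallback (identity) catalog installed, the text survives the
    # round trip unchanged, Sugarchain branding included:
    return _('Send sugarchains to this address')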
def set_language(x):
global language
if x:
language = gettext.translation('electrum', LOCALE_DIR, fallback=True, languages=[x])
languages = {
'': _('Default'),
'ar_SA': _('Arabic'),
'bg_BG': _('Bulgarian'),
'cs_CZ': _('Czech'),
'da_DK': _('Danish'),
'de_DE': _('German'),
'el_GR': _('Greek'),
'eo_UY': _('Esperanto'),
'en_UK': _('English'),
'es_ES': _('Spanish'),
'fa_IR': _('Persian'),
'fr_FR': _('French'),
'hu_HU': _('Hungarian'),
'hy_AM': _('Armenian'),
'id_ID': _('Indonesian'),
'it_IT': _('Italian'),
'ja_JP': _('Japanese'),
'ky_KG': _('Kyrgyz'),
'lv_LV': _('Latvian'),
'nb_NO': _('Norwegian Bokmal'),
'nl_NL': _('Dutch'),
'pl_PL': _('Polish'),
    'pt_BR': _('Brazilian'),
'pt_PT': _('Portuguese'),
'ro_RO': _('Romanian'),
'ru_RU': _('Russian'),
'sk_SK': _('Slovak'),
'sl_SI': _('Slovenian'),
'sv_SE': _('Swedish'),
'ta_IN': _('Tamil'),
'th_TH': _('Thai'),
'tr_TR': _('Turkish'),
'uk_UA': _('Ukrainian'),
'vi_VN': _('Vietnamese'),
'zh_CN': _('Chinese Simplified'),
'zh_TW': _('Chinese Traditional')
}
|
the-stack_0_27952
|
from __future__ import print_function
from giggle import Giggle
import os
import sys
HERE = os.path.dirname(__file__)
INDEX = os.path.join(HERE, "test-index")
def teardown():
import shutil
if os.path.exists(INDEX):
shutil.rmtree(INDEX)
def setup():
import shutil
if os.path.exists(INDEX):
shutil.rmtree(INDEX)
Giggle.create(INDEX, "lib/giggle/test/data/many/*.bed.gz")
def test_load():
g = Giggle(INDEX)
def test_query():
g = Giggle(INDEX)
res = g.query('chr1', 14135106, 16279607)
assert res.n_total_hits == 65, res.n_total_hits
tot = 0
for i in range(0, res.n_files):
tot += res.n_hits(i)
assert tot == res.n_total_hits
def test_result_iter():
g = Giggle(INDEX)
res = g.query('chr1', 14135106, 16279607)
lines = 0
for i in range(0, res.n_files):
for r in res[i]:
lines += 1
assert lines == res.n_total_hits, (lines, res.n_total_hits)
def test_files():
g = Giggle(INDEX)
assert len(g.files) != 0, g.files
for f in g.files:
assert f.endswith(".bed.gz")
def test_leak():
g = Giggle(INDEX)
res = g.query('chr1', 4135106, 116279607)
j = 0
while j < 1000000:
j += 1
tot = 0
for i in range(0, res.n_files):
tot += res.n_hits(i)
assert tot == res.n_total_hits
|
the-stack_0_27953
|
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=np.array([[50, 9, 4, 1, 0, 0, 40, 0]])
#Code starts here
data = np.genfromtxt(path, delimiter=",", skip_header=1)
census = np.array(np.concatenate((data, new_record)))
# --------------
#Code starts here
age = np.array(census[:,0])
max_age = np.max(age)
min_age = np.min(age)
age_mean = (np.sum(age)/(np.size(age)))
age_std = np.std(age)
# --------------
#Code starts here
#Race = (census[:,2])
#print(Race)
race_0 = (census[census[:,2]==0])
race_1 = (census[census[:,2]==1])
race_2 = (census[census[:,2]==2])
race_3 = (census[census[:,2]==3])
race_4 = (census[census[:,2]==4])
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
minority = min([len_0, len_1, len_2, len_3, len_4])
# The position of the smallest group doubles as its race code (0-4):
minority_race = [len_0, len_1, len_2, len_3, len_4].index(minority)
print(minority_race)
# --------------
#Code starts here
senior_citizens = (census[census[:,0]>60])
#print(senior_citizens)
working_hours= senior_citizens[:,6]
#print(working_hours)
working_hours_sum = sum(working_hours)
#print(working_hours_sum)
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
high = census[census[:,1]>10]
low = census[census[:,1]<=10]
avg_pay_high = np.mean(high[:,7])
avg_pay_low = np.mean(low[:,7])
print(avg_pay_high, avg_pay_low)
|
the-stack_0_27954
|
# PyDIP 3.0, Python bindings for DIPlib 3.0
#
# (c)2017-2019, Flagship Biosciences, Inc., written by Cris Luengo.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The portion of the PyDIP module that contains Python code.
"""
import PyDIP
import importlib.util
import os.path
hasMatPlotLib = True
if importlib.util.find_spec('matplotlib') is None:
print("""
PyDIP requires matplotlib for its display functionality. Matplotlib was not found
on your system. Image display (PyDIP.Show and PyDIP.Image.Show) will not do anything.
You can install matplotlib by typing on your Linux/MacOS command prompt:
pip3 install matplotlib
or under Windows:
python3 -m pip install matplotlib
""")
hasMatPlotLib = False
else:
import matplotlib
import matplotlib.pyplot as pp
import numpy as np
# Label color map from the function of the same name in DIPimage:
def _label_colormap():
if hasMatPlotLib:
cm = np.array([
[1.0000, 0.0000, 0.0000],
[0.0000, 1.0000, 0.0000],
[0.0000, 0.0000, 1.0000],
[1.0000, 1.0000, 0.0000],
[0.0000, 1.0000, 1.0000],
[1.0000, 0.0000, 1.0000],
[1.0000, 0.3333, 0.0000],
[0.6667, 1.0000, 0.0000],
[0.0000, 0.6667, 1.0000],
[0.3333, 0.0000, 1.0000],
[1.0000, 0.0000, 0.6667],
[1.0000, 0.6667, 0.0000],
[0.0000, 1.0000, 0.5000],
[0.0000, 0.3333, 1.0000],
[0.6667, 0.0000, 1.0000],
[1.0000, 0.0000, 0.3333],
])
n = len(cm)
index = list(i % n for i in range(0, 255))
cm = np.concatenate((np.array([[0, 0, 0]]), cm[index]))
return matplotlib.colors.ListedColormap(cm)
return None
def Show(img, range=(), complexMode='abs', projectionMode='mean', coordinates=(), dim1=0, dim2=1, colormap=''):
"""Show an image in the current pyplot window
Keyword arguments:
range -- a 2-tuple indicating the range of input values to map to the
output range, or a string indicating how to compute the range and
how to map. Valid strings are:
- `'unit'`: use the `(0,1)` range.
- `'8bit'` or `'normal'`: use the `(0,255)` range.
- `'12bit'`: use the `(0,2**12)` range.
- `'16bit'`: use the `(0,2**16)` range.
- `'s8bit'`: use the `(-128,127)` range.
        - `'s12bit'`: use the `(-2**11,2**11-1)` range.
        - `'s16bit'`: use the `(-2**15,2**15-1)` range.
- `'angle'`: use the `(0,2*pi)` range, with folding of out-of-
range values by modulo operation. Additionally, it sets the
color map such that 0 and 2*pi are shown in the same color.
- `'orientation'`: use the `(0,pi)` range, with folding of out-of-
range values by modulo operation. Additionally, it sets the
color map such that 0 and pi are shown in the same color.
- `'lin'` or `'all'`: use the range from lowest to highest value in
`img`. This is the default.
- `'percentile'`: use the range from 5th to 95th percentile value
in `img`.
- `'base'` or `'based'`: like 'lin', but setting the value of 0 to
the middle of the output range. Additionally, it sets the color
map to `'coolwarm'`, such that negative and positive values
have blue and red colors, respectively, and 0 is a neutral
gray.
- `'log'`: use a logarithmic mapping.
- `'modulo'` or `'labels'`: use the `(0,255)` range, with folding
of out-of-range values by modulo operation. Additionally, it
sets the color map such that nearby values get very different
colors. This mode is suitable for labeled images.
complexMode -- a string indicating how to convert complex values to
real values for display. One of `'abs'` or `'magnitude'`,
`'phase'`, `'real'`, `'imag'`. The default is `'abs'`.
projectionMode -- a string indicating how to extract a 2D slice from a
multi-dimensional image for display. One of `'slice'`, `'max'`,
`'mean'`. The default is `'mean'`.
coordinates -- Coordinates of a pixel to be shown, as a tuple with as
many elements as image dimensions. Determines which slice is shown
out of a multi-dimensional image.
dim1 -- Image dimension to be shown along x-axis of display.
dim2 -- Image dimension to be shown along y-axis of display.
colormap -- Name of a color map to use for display.
For images with more than 2 dimensions, a slice is extracted for
display. The direction of the slice is determined using the `dim1` and
`dim2` parameters, and the location using the `coordinates` parameter.
If `projectionMode` is `'slice'`, then the single slice is shown. If
`projectionMode` is `'max'` or `'mean'`, then a projection is computed
across the full image volume along the non-displayed dimensions.
For 1D images, or if `dim1==dim2`, a line is plotted. In this case, the
`colormap` is ignored. Note that, if `dim1==dim2`, a 2D image is also
projected as described above for higher-dimensional images.
"""
if hasMatPlotLib:
out = PyDIP.ImageDisplay(img, range, complexMode, projectionMode, coordinates, dim1, dim2)
if out.Dimensionality() == 1:
axes = pp.gca()
axes.clear()
axes.plot(out)
axes.set_ylim((0, 255))
axes.set_xlim((0, out.Size(0) - 1))
else:
if colormap == '':
if range == 'base' or range == 'based':
colormap = 'coolwarm'
elif range == 'modulo' or range == 'labels':
colormap = 'labels'
elif range == 'angle' or range == 'orientation':
colormap = 'hsv'
else:
colormap = 'gray'
if colormap == 'labels':
cmap = _label_colormap()
else:
cmap = pp.get_cmap(colormap)
pp.imshow(out, cmap=cmap, norm=matplotlib.colors.NoNorm(), interpolation='none')
pp.show(block=False)
PyDIP.Image.Show = Show
def ImageRead(filename, format=''):
"""Reads the image from the file called filename.
format can be one of:
- 'ics': The file is an ICS file, use PyDIP.ImageReadICS.
- 'tiff': The file is a TIFF file, use PyDIP.ImageReadTIFF. Reads only
the first image plane.
- 'jpeg': The file is a JPEG file, use PyDIP.ImageReadJPEG.
- 'bioformats': Use PyDIP.javaio.ImageReadJavaIO to read the file with
the Bio-Formats library.
- '': Select the format by looking at the file name extension. This is
the default.
Use the filetype-specific functions directly for more control over how
the image is read.
"""
if format == '':
base, ext = os.path.splitext(filename)
ext = ext.lower()
if ext == '.ics' or ext == '.ids':
format = 'ics'
elif ext == '.tif' or ext == '.tiff':
format = 'tiff'
elif ext == '.jpg' or ext == '.jpeg':
format = 'jpeg'
else:
format = 'bioformats'
if format == 'ics':
return PyDIP.ImageReadICS(filename)
if format == 'tiff':
return PyDIP.ImageReadTIFF(filename)
if format == 'jpeg':
return PyDIP.ImageReadJPEG(filename)
if format == 'bioformats':
if not PyDIP.hasDIPjavaio:
raise ValueError('Bio-Formats not available')
return PyDIP.javaio.ImageReadJavaIO(filename)
raise ValueError('Unknown format')
def ImageWrite(image, filename, format='', compression=''):
"""Writes image to a file called filename.
format can be one of:
- 'ics' or 'icsv2': Create an ICS version 2 file, use
PyDIP.ImageWriteICS.
- 'icsv1': Create an ICS version 1 file, use PyDIP.ImageWriteICS.
- 'tiff': Create a TIFF file, use PyDIP.ImageWriteTIFF.
- 'jpeg': Create a JPEG file, use PyDIP.ImageWriteJPEG.
- '': Select the format by looking at the file name extension.
If no extension is present, it defaults to ICS version 2.
This is the default.
The ICS format can store any image, with all its information, such that
reading the file using PyDIP.ImageRead or PyDIP.ImageReadICS yields an
image that is identical (except the strides might be different).
The TIFF format can store 2D images, as well as 3D images as a series
of 2D slides (not yet implemented). Most metadata will be lost. Complex
data is not supported, other data types are. But note that images other
than 8-bit or 16-bit unsigned integer lead to files that are not
recognized by most readers.
The JPEG format can store 2D images. Tensor images are always tagged as
RGB. Most metadata will be lost. Image data is converted to 8-bit
unsigned integer, without scaling.
compression determines the compression method used when writing
the pixel data. It can be one of the following strings:
- 'none': no compression.
- '': gzip compression (default). TIFF files with gzip compression are
not universally recognized.
- 'LZW', 'PackBits', 'JPEG': compression formats supported only by
the TIFF format.
For the JPEG format, compression is ignored.
Use the filetype-specific functions directly for more control over how
the image is written. See those functions for more information about
the file types.
"""
if format == '':
base, ext = os.path.splitext(filename)
ext = ext.lower()
if ext == '.ics' or ext == '.ids':
format = 'ics'
elif ext == '.tif' or ext == '.tiff':
format = 'tiff'
elif ext == '.jpg' or ext == '.jpeg':
format = 'jpeg'
        elif ext == '':
            format = 'ics'  # no extension: default to ICS, as documented above
        else:
            raise ValueError('File extension not recognized')
options = set()
if format == 'icsv2':
format = 'ics'
elif format == 'icsv1':
format = 'ics'
options.add('v1')
if format == 'ics':
if compression == '':
options.add('gzip')
elif compression == 'none':
options.add('uncompressed')
else:
raise ValueError('Compression flag not valid for ICS file')
return PyDIP.ImageWriteICS(image, filename, options=options)
if format == 'tiff':
return PyDIP.ImageWriteTIFF(image, filename, compression)
if format == 'jpeg':
return PyDIP.ImageWriteJPEG(image, filename)
raise ValueError('Unknown format')
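# Usage sketch for the helpers above (added here as an illustration, not part
# of the original module). The file names are placeholders; the calls only
# succeed if 'example.ics' exists and matplotlib is available for Show.
if __name__ == '__main__':
    example = ImageRead('example.ics')       # format picked from the extension
    example.Show('lin')                      # linear stretch, grayscale colormap
    ImageWrite(example, 'example_copy.ics')  # round-trip back to ICS (gzip)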
|
the-stack_0_27955
|
import fechbase
class Records(fechbase.RecordsBase):
def __init__(self):
fechbase.RecordsBase.__init__(self)
self.fields = [
{'name': 'FORM TYPE', 'number': '1'},
{'name': 'FILER FEC CMTE ID (PCC)', 'number': '2'},
{'name': 'COMMITTEE NAME (PCC)', 'number': '3'},
{'name': 'Coverage From', 'number': '4-'},
{'name': 'Coverage To', 'number': '5-'},
{'name': 'FEC COMMITTEE ID NUMBER (Auth)', 'number': '6'},
{'name': 'COMMITTEE NAME (Auth)', 'number': '7'},
{'name': 'individuals total', 'number': '8-(a) 11(a)iii'},
{'name': 'Political party committees', 'number': '9-(b) 11(b)'},
{'name': 'other pol. committees (PACs)', 'number': '10-(c) 11(c)'},
{'name': 'the candidate', 'number': '11-(d) 11(d)'},
{'name': 'total contributions', 'number': '12-(e) 11(e)'},
{'name': 'Transfers from other auth Committees', 'number': '13-(f) 12.'},
{'name': 'made or guarn. by candidate', 'number': '14-(g) 13(a)'},
{'name': 'all other loans', 'number': '15-(h) 13(b)'},
{'name': 'total loans', 'number': '16-(i) 13(c)'},
{'name': 'offsets to operating expend', 'number': '17-(j) 14.'},
{'name': 'Other receipts', 'number': '18-(k) 15.'},
{'name': 'total receipts', 'number': '19-(l) 16.'},
{'name': 'Operating Expenditures', 'number': '20-(m) 17.'},
{'name': 'Transfers to other auth Committees', 'number': '21-(n) 18.'},
{'name': 'made or guaranteed by cand', 'number': '22-(o) 19(a)'},
{'name': 'all other loans', 'number': '23-(p) 19(b)'},
{'name': 'total loan repayments', 'number': '24-(q) 19(c)'},
{'name': 'total refunds individuals', 'number': '25-(r) 20(a)'},
{'name': 'Refunds Political Party Committees', 'number': '26-(s) 20(b)'},
{'name': 'Refunds other Political Committees', 'number': '27-(t) 20(c)'},
{'name': 'total contribution refunds', 'number': '28-(u) 20(d)'},
{'name': 'Other disbursements', 'number': '29-(v) 21.'},
{'name': 'Total disbursements', 'number': '30-(w) 22.'},
{'name': 'COH beginning reporting period', 'number': '31-(x) 23.'},
{'name': 'COH at close of period', 'number': '32-(y) 27.'},
{'name': 'Debts to', 'number': '33-(z) 9.'},
{'name': 'Debts by', 'number': '34-(aa) 12.'},
{'name': 'net contributions', 'number': '35-(bb) 6(c)'},
{'name': 'net operating expenditures', 'number': '36-(cc) 7(c)'},
]
self.fields_names = self.hash_names(self.fields)
|
the-stack_0_27960
|
import sys
import os.path as osp
from vis_utils import *
sys.path.append(osp.dirname(osp.dirname(__file__)))
from demo_toolkit import *
import os
import numpy as np
from keypoints.get_keypoints import *
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
pdir_path = osp.dirname(osp.dirname(__file__))
os.environ['CUDA_VISIBLE_DEVICES'] = "1"
if __name__ == "__main__":
data_path = "/home/wj/ai/mldata/keypoints_data/test_imgs"
files = glob.glob(osp.join(data_path,"*.png"))
save_dir = "/home/wj/ai/mldata/0day/tmp/kps"
if not osp.exists(save_dir):
os.makedirs(save_dir)
model = KPDetection()
for file in files:
img = cv2.imread(file)
img = img[...,::-1]
ans = model(img)
img = show_keypoints(img,ans,threshold=0.2)
save_path = osp.join(save_dir,osp.basename(file))
cv2.imwrite(save_path,img)
|
the-stack_0_27961
|
from yamo import *
class E(EmbeddedDocument):
a = StringField()
b = StringField()
class Q(Document):
es = ListField(EmbeddedField(E))
Connection().register_all()
def test_embedded():
Q.drop()
q = Q({'es': [{'a': 'a', 'b': 'b'}]})
q.save()
if __name__ == '__main__':
test_embedded()
|
the-stack_0_27962
|
"""Example showing how to build an API on top of Gemini using Scooter."""
import base64
from io import BytesIO
import numpy as np
import tensorflow as tf
from keras.models import Model, load_model
from keras.preprocessing.image import img_to_array
from PIL import Image
from sklearn.neighbors import NearestNeighbors
from gemini.cards import read_card_list
from gemini.image import flatten
from scooter.model_server import start_model_server
from scooter.web_server import start_web_server
IMAGE_WIDTH = 320
IMAGE_HEIGHT = 448
IMAGE_CHANS = 3
IMAGE_DTYPE = "float32"
class Classifier(object):
"""Class for classifying Magic cards from photos."""
def __init__(self, cards, knn, encoder, graph):
"""Initialize a classifier."""
self._cards = cards
self._knn = knn
self._encoder = encoder
self._graph = graph
self._encoded_dim = np.prod(self._encoder.output.shape[1:]).value
def predict(self, batch):
# TODO: Clean this up
print("encoding user image")
encoded = np.zeros((1, self._encoded_dim))
with self._graph.as_default():
batch_encoded = self._encoder.predict(batch)
batch_encoded_flat = flatten(batch_encoded)
encoded[0, :] = batch_encoded_flat
print(encoded.shape)
print("finding most similar")
most_similar = self._knn.kneighbors(encoded, return_distance=True)
print("found most similar")
print("batch shape:", batch.shape)
print("most_similar length:", len(most_similar))
results = []
for i in range(batch.shape[0]):
scores = most_similar[0][i]
indices = most_similar[1][i]
k_val = scores.shape[0]
temp_results = []
for top_pos in range(k_val):
card_index = indices[top_pos]
temp = (top_pos + 1, self._cards[card_index].name + " / " + self._cards[card_index].edition,
scores[top_pos])
temp_results.append(temp)
results.append(temp_results)
print("results length", len(results))
print("returning results")
return results
def load_classifier(card_list_file, encodings_file, encoder_file):
print("loading cards")
cards = read_card_list(card_list_file)
print("loading encodings")
encoded_img = np.load(encodings_file)
print("encodings shape: %s", encoded_img.shape)
print("fitting knn")
knn = NearestNeighbors(n_neighbors=3, algorithm="brute", metric="cosine")
knn.fit(encoded_img)
print("loading encoder model")
model = load_model(encoder_file)
encoder = Model(inputs=model.input, outputs=model.get_layer('encoder_output').output)
graph = tf.get_default_graph()
print("classifier loaded")
return Classifier(cards, knn, encoder, graph)
def load_gemini_model():
print("Loading Gemini model...")
card_list_file = "/users/sorenlind/Data/mtg/cards_en.csv"
encodings_file = "/users/sorenlind/Data/mtg/encodings/encoded_master_v03_no_aug.npy"
encoder_file = "/users/sorenlind/Data/mtg/models/gcp/autoencoder_master_v03_no_aug/autoencoder_master_v03_no_aug"
model = load_classifier(card_list_file, encodings_file, encoder_file)
print("Gemini model loaded")
return model
def decode_sample(base64_image):
loaded_image_data = Image.open(BytesIO(base64.b64decode(base64_image)))
assert loaded_image_data.size == (320, 448)
image = img_to_array(loaded_image_data)
image = np.expand_dims(image, axis=0)
image = image.astype('float32') / 255.
return image
def decode_predictions(predictions):
#return imagenet_utils.decode_predictions(predictions)
return predictions
if __name__ == "__main__":
start_model_server(load_gemini_model, decode_sample, decode_predictions)
start_web_server(debug=None)
|
the-stack_0_27963
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import torch
from progress.bar import Bar
from models.data_parallel import DataParallel
from utils.utils import AverageMeter
class ModelWithLoss(torch.nn.Module):
def __init__(self, model, loss):
super(ModelWithLoss, self).__init__()
self.model = model
self.loss = loss
def forward(self, batch):
outputs = self.model(batch['input'])
loss, loss_stats = self.loss(outputs, batch)
return outputs[-1], loss, loss_stats
class BaseTrainer(object):
def __init__(
self, opt, model, optimizer=None):
self.opt = opt
self.optimizer = optimizer
self.loss_stats, self.loss = self._get_losses(opt)
self.model_with_loss = ModelWithLoss(model, self.loss)
def set_device(self, gpus, chunk_sizes, device):
if len(gpus) > 1:
self.model_with_loss = DataParallel(
self.model_with_loss, device_ids=gpus,
chunk_sizes=chunk_sizes).to(device)
else:
self.model_with_loss = self.model_with_loss.to(device)
for state in self.optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(device=device, non_blocking=True)
def run_epoch(self, phase, epoch, data_loader):
print('----------------->>> run_epoch step 1')
model_with_loss = self.model_with_loss
if phase == 'train':
model_with_loss.train()
else:
if len(self.opt.gpus) > 1:
model_with_loss = self.model_with_loss.module
model_with_loss.eval()
torch.cuda.empty_cache()
opt = self.opt
results = {}
data_time, batch_time = AverageMeter(), AverageMeter()
avg_loss_stats = {l: AverageMeter() for l in self.loss_stats}
# print('----------------->>> run_epoch step 2')
num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
# print('----------------->>> run_epoch step 3',num_iters)
bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
end = time.time()
# print('---------------------------------->>>> train data_loader')
for iter_id, batch in enumerate(data_loader):
if iter_id >= num_iters:
break
data_time.update(time.time() - end)
for k in batch:
if k != 'meta':
batch[k] = batch[k].to(device=opt.device, non_blocking=True)
output, loss, loss_stats = model_with_loss(batch)
loss = loss.mean()
if phase == 'train':
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(
epoch, iter_id, num_iters, phase=phase,
total=bar.elapsed_td, eta=bar.eta_td)
for l in avg_loss_stats:
avg_loss_stats[l].update(
loss_stats[l].mean().item(), batch['input'].size(0))
Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg)
if not opt.hide_data_time:
Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) ' \
'|Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)
if opt.print_iter > 0:
if iter_id % opt.print_iter == 0:
print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
else:
bar.next()
if opt.debug > 0:
self.debug(batch, output, iter_id)
if opt.test:
self.save_result(output, batch, results)
del output, loss, loss_stats
bar.finish()
ret = {k: v.avg for k, v in avg_loss_stats.items()}
ret['time'] = bar.elapsed_td.total_seconds() / 60.
return ret, results
def debug(self, batch, output, iter_id):
raise NotImplementedError
def save_result(self, output, batch, results):
raise NotImplementedError
def _get_losses(self, opt):
raise NotImplementedError
def val(self, epoch, data_loader):
return self.run_epoch('val', epoch, data_loader)
def train(self, epoch, data_loader):
return self.run_epoch('train', epoch, data_loader)
|
the-stack_0_27964
|
"""
sentry.utils.http
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sentry
import ipaddress
import six
import socket
import requests
import warnings
from sentry import options
from django.conf import settings
from requests.adapters import HTTPAdapter
from requests.exceptions import SSLError
from six.moves.urllib.parse import urlparse
from sentry.exceptions import RestrictedIPAddress
# In case SSL is unavailable (light builds) we can't import this here.
try:
from OpenSSL.SSL import ZeroReturnError
except ImportError:
class ZeroReturnError(Exception):
pass
USER_AGENT = 'sentry/{version} (https://sentry.io)'.format(
version=sentry.VERSION,
)
DISALLOWED_IPS = {
ipaddress.ip_network(six.text_type(i), strict=False)
for i in settings.SENTRY_DISALLOWED_IPS
}
def get_server_hostname():
return urlparse(options.get('system.url-prefix')).hostname
def is_valid_url(url):
"""
Tests a URL to ensure it doesn't appear to be a blacklisted IP range.
"""
# If we have no disallowed ips, we can skip any further validation
# and there's no point in doing a DNS lookup to validate against
# an empty list.
if not DISALLOWED_IPS:
return True
parsed = urlparse(url)
if not parsed.hostname:
return False
server_hostname = get_server_hostname()
if parsed.hostname == server_hostname:
return True
# NOTE: The use of `socket.gethostbyname` is slightly flawed.
# `gethostbyname` doesn't handle octal IP addresses correctly, nor
# does it fetch all of the IP addresses for the record.
# `getaddrinfo` does the correct thing with octals here, and also returns all
# ip addresses for the hostname.
#
# WARNING: This behavior is only correct on Linux. On OSX, `getaddrinfo` also
# returns the wrong IP.
#
# The following should all technically resolve to `127.0.0.1`:
# Python 2.7.11 Linux
# >>> socket.gethostbyname('0177.0000.0000.0001')
# '177.0.0.1'
# >>> socket.getaddrinfo('0177.0000.0000.0001', 0)[0]
# (2, 1, 6, '', ('127.0.0.1', 0))
# Python 2.7.11 macOS
# >>> socket.gethostbyname('0177.0000.0000.0001')
# '177.0.0.1'
# >>> socket.getaddrinfo('0177.0000.0000.0001', None)[0]
# (2, 2, 17, '', ('177.0.0.1', 0))
try:
ip_addresses = set(addr for _, _, _, _, addr in socket.getaddrinfo(parsed.hostname, 0))
except socket.gaierror:
return False
for addr in ip_addresses:
ip_address = addr[0]
if ip_address == server_hostname:
return True
ip_address = ipaddress.ip_address(six.text_type(ip_address))
for ip_network in DISALLOWED_IPS:
if ip_address in ip_network:
return False
return True
class BlacklistAdapter(HTTPAdapter):
def send(self, request, *args, **kwargs):
if not is_valid_url(request.url):
raise RestrictedIPAddress('%s matches the URL blacklist' % (request.url,))
return super(BlacklistAdapter, self).send(request, *args, **kwargs)
def build_session():
session = requests.Session()
session.headers.update({'User-Agent': USER_AGENT})
session.mount('https://', BlacklistAdapter())
session.mount('http://', BlacklistAdapter())
return session
def safe_urlopen(url, method=None, params=None, data=None, json=None,
headers=None, allow_redirects=False, timeout=30,
verify_ssl=True, user_agent=None):
"""
A slightly safer version of ``urlib2.urlopen`` which prevents redirection
and ensures the URL isn't attempting to hit a blacklisted IP range.
"""
if user_agent is not None:
warnings.warn('user_agent is no longer used with safe_urlopen')
session = build_session()
kwargs = {}
if json:
kwargs['json'] = json
if not headers:
headers = {}
headers.setdefault('Content-Type', 'application/json')
if data:
kwargs['data'] = data
if params:
kwargs['params'] = params
if headers:
kwargs['headers'] = headers
if method is None:
method = 'POST' if (data or json) else 'GET'
try:
response = session.request(
method=method,
url=url,
allow_redirects=allow_redirects,
timeout=timeout,
verify=verify_ssl,
**kwargs
)
# Our version of requests does not transform ZeroReturnError into an
# appropriately generically catchable exception
except ZeroReturnError as exc:
import sys
exc_tb = sys.exc_info()[2]
six.reraise(SSLError, exc, exc_tb)
del exc_tb
# requests' attempts to use chardet internally when no encoding is found
# and we want to avoid that slow behavior
if not response.encoding:
response.encoding = 'utf-8'
return response
def safe_urlread(response):
return response.content
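# Illustration added here (not part of Sentry): the blacklist test in
# is_valid_url ultimately reduces to ipaddress containment checks like the
# ones below. The network and addresses are made-up examples.
if __name__ == '__main__':
    _blocked = ipaddress.ip_network(u'10.0.0.0/8')
    for _candidate in (u'10.1.2.3', u'8.8.8.8'):
        _hit = ipaddress.ip_address(_candidate) in _blocked
        print('%s in %s: %s' % (_candidate, _blocked, _hit))
    # expected: 10.1.2.3 is inside the network, 8.8.8.8 is not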
|
the-stack_0_27965
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def getDecimalValue(self, head: ListNode) -> int:
'''
T: O(n) and S: O(1)
'''
def reverseLinkedList(head):
prev = None
tail = head
while tail:
temp = tail.next
tail.next = prev
prev = tail
tail = temp
return prev
head = reverseLinkedList(head)
i, decimal = 0, 0
tail = head
while tail:
if tail.val:
decimal += tail.val * 2 ** i
tail = tail.next
i += 1
return decimal
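# Local sanity check added for illustration; LeetCode normally supplies the
# ListNode class shown in the comment above, so a stand-in is defined here.
# (Running this file outside LeetCode also needs ListNode defined, or
# `from __future__ import annotations`, because of the type annotation.)
if __name__ == '__main__':
    class ListNode:
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next
    head = ListNode(1, ListNode(0, ListNode(1)))   # binary 101
    print(Solution().getDecimalValue(head))        # expected: 5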
|
the-stack_0_27967
|
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Reshape, Concatenate
from keras.layers import Input, Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers import SimpleRNN, LSTM, GRU
from keras.optimizers import SGD
import cv2, numpy as np
def get_symbol(input_shape1,input_shape2,weights_path=None):
input1 = Input(shape=input_shape1)
input2 = Input(shape=input_shape2)
# group 1
padd1_1=ZeroPadding2D((1,0))(input1)
conv1_1=Conv2D(64, (3, 1), activation='relu')(padd1_1)
padd1_2=ZeroPadding2D((0,1))(conv1_1)
conv1_2=Conv2D(64, (1, 3), activation='relu')(padd1_2)
pool1=MaxPooling2D((2,2), strides=(2,2))(conv1_2)
# group 2
padd2_1=ZeroPadding2D((1,0))(pool1)
conv2_1=Conv2D(128, (3, 1), activation='relu')(padd2_1)
padd2_2=ZeroPadding2D((0,1))(conv2_1)
conv2_2=Conv2D(128, (1, 3), activation='relu')(padd2_2)
padd2_3=ZeroPadding2D((1,0))(conv2_2)
conv2_3=Conv2D(128, (3, 1), activation='relu')(padd2_3)
padd2_4=ZeroPadding2D((0,1))(conv2_3)
conv2_4=Conv2D(128, (1, 3), activation='relu')(padd2_4)
pool2=MaxPooling2D((2,2), strides=(2,2))(conv2_4)
flat1=Flatten()(pool2)
dens1=Dense(1024, activation='relu')(flat1)
drop1=Dropout(0.5)(dens1)
rnn=SimpleRNN(units=128,dropout=0.2,return_sequences=True)(input2)
flatr=Flatten()(rnn)
densr=Dense(256,activation='relu')(flatr)
dropr=Dropout(0.5)(densr)
concat=Concatenate(axis=1)([drop1,dropr])
dens2=Dense(2048, activation='relu')(concat)
drop2=Dropout(0.5)(dens2)
out=Dense(2,activation='softmax')(drop2)
    model = Model([input1,input2],out)
    if weights_path:
        model.load_weights(weights_path)
    return model
def rc():
return "rc"
|
the-stack_0_27968
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doctype("Comment")
for comment in frappe.get_all('Communication', fields = ['*'],
filters = dict(communication_type = 'Comment')):
new_comment = frappe.new_doc('Comment')
new_comment.comment_type = comment.comment_type
new_comment.comment_email = comment.sender
new_comment.comment_by = comment.sender_full_name
new_comment.subject = comment.subject
new_comment.content = comment.content or comment.subject
new_comment.reference_doctype = comment.reference_doctype
new_comment.reference_name = comment.reference_name
new_comment.link_doctype = comment.link_doctype
new_comment.link_name = comment.link_name
new_comment.creation = comment.creation
new_comment.modified = comment.modified
new_comment.owner = comment.owner
new_comment.modified_by = comment.modified_by
new_comment.db_insert()
# clean up
frappe.db.sql('delete from tabCommunication where communication_type = "Comment"')
|
the-stack_0_27969
|
#!/usr/bin/env python
#
# Hi There!
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). This is a base85 encoding of a zip file, this zip file contains
# an entire copy of pip (version {installed_version}).
#
# Pip is a thing that installs packages, pip itself is a package that someone
# might want to install, especially if they're looking to run this get-pip.py
# script. Pip has a lot of code to deal with the security of installing
# packages, various edge cases on various platforms, and other such sort of
# "tribal knowledge" that has been encoded in its code base. Because of this
# we basically include an entire copy of pip inside this blob. We do this
# because the alternatives are attempt to implement a "minipip" that probably
# doesn't do things correctly and has weird edge cases, or compress pip itself
# down into a single file.
#
# If you're wondering how this is created, it is using an invoke task located
# in tasks/generate.py called "installer". It can be invoked by using
# ``invoke generate.installer``.
import os.path
import pkgutil
import shutil
import sys
import struct
import tempfile
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
iterbytes = iter
else:
def iterbytes(buf):
return (ord(byte) for byte in buf)
try:
from base64 import b85decode
except ImportError:
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{{|}}~")
def b85decode(b):
_b85dec = [None] * 256
for i, c in enumerate(iterbytes(_b85alphabet)):
_b85dec[c] = i
padding = (-len(b)) % 5
b = b + b'~' * padding
out = []
packI = struct.Struct('!I').pack
for i in range(0, len(b), 5):
chunk = b[i:i + 5]
acc = 0
try:
for c in iterbytes(chunk):
acc = acc * 85 + _b85dec[c]
except TypeError:
for j, c in enumerate(iterbytes(chunk)):
if _b85dec[c] is None:
raise ValueError(
'bad base85 character at position %d' % (i + j)
)
raise
try:
out.append(packI(acc))
except struct.error:
raise ValueError('base85 overflow in hunk starting at byte %d'
% i)
result = b''.join(out)
if padding:
result = result[:-padding]
return result
def bootstrap(tmpdir=None):
# Import pip so we can use it to install pip and maybe setuptools too
import pip
from pip.commands.install import InstallCommand
from pip.req import InstallRequirement
# Wrapper to provide default certificate with the lowest priority
class CertInstallCommand(InstallCommand):
def parse_args(self, args):
# If cert isn't specified in config or environment, we provide our
# own certificate through defaults.
# This allows user to specify custom cert anywhere one likes:
# config, environment variable or argv.
if not self.parser.get_default_values().cert:
self.parser.defaults["cert"] = cert_path # calculated below
return super(CertInstallCommand, self).parse_args(args)
pip.commands_dict["install"] = CertInstallCommand
implicit_pip = True
implicit_setuptools = True
implicit_wheel = True
# Check if the user has requested us not to install setuptools
if "--no-setuptools" in sys.argv or os.environ.get("PIP_NO_SETUPTOOLS"):
args = [x for x in sys.argv[1:] if x != "--no-setuptools"]
implicit_setuptools = False
else:
args = sys.argv[1:]
# Check if the user has requested us not to install wheel
if "--no-wheel" in args or os.environ.get("PIP_NO_WHEEL"):
args = [x for x in args if x != "--no-wheel"]
implicit_wheel = False
# We only want to implicitly install setuptools and wheel if they don't
# already exist on the target platform.
if implicit_setuptools:
try:
import setuptools # noqa
implicit_setuptools = False
except ImportError:
pass
if implicit_wheel:
try:
import wheel # noqa
implicit_wheel = False
except ImportError:
pass
# We want to support people passing things like 'pip<8' to get-pip.py which
# will let them install a specific version. However because of the dreaded
# DoubleRequirement error if any of the args look like they might be a
# specific for one of our packages, then we'll turn off the implicit
# install of them.
for arg in args:
try:
req = InstallRequirement.from_line(arg)
except Exception:
continue
if implicit_pip and req.name == "pip":
implicit_pip = False
elif implicit_setuptools and req.name == "setuptools":
implicit_setuptools = False
elif implicit_wheel and req.name == "wheel":
implicit_wheel = False
# Add any implicit installations to the end of our args
if implicit_pip:
args += ["pip{pip_version}"]
if implicit_setuptools:
args += ["setuptools{setuptools_version}"]
if implicit_wheel:
args += ["wheel{wheel_version}"]
delete_tmpdir = False
try:
# Create a temporary directory to act as a working directory if we were
# not given one.
if tmpdir is None:
tmpdir = tempfile.mkdtemp()
delete_tmpdir = True
# We need to extract the SSL certificates from requests so that they
# can be passed to --cert
cert_path = os.path.join(tmpdir, "cacert.pem")
with open(cert_path, "wb") as cert:
cert.write(pkgutil.get_data("pip._vendor.requests", "cacert.pem"))
# Execute the included pip and use it to install the latest pip and
# setuptools from PyPI
sys.exit(pip.main(["install", "--upgrade"] + args))
finally:
# Remove our temporary directory
if delete_tmpdir and tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
def main():
tmpdir = None
try:
# Create a temporary working directory
tmpdir = tempfile.mkdtemp()
# Unpack the zipfile into the temporary directory
pip_zip = os.path.join(tmpdir, "pip.zip")
with open(pip_zip, "wb") as fp:
fp.write(b85decode(DATA.replace(b"\n", b"")))
# Add the zipfile to sys.path so that we can import it
sys.path.insert(0, pip_zip)
# Run the bootstrap
bootstrap(tmpdir=tmpdir)
finally:
# Clean up our temporary working directory
if tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
DATA = b"""
{zipfile}
"""
if __name__ == "__main__":
main()
|
the-stack_0_27970
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
import datetime
from dateutil.relativedelta import *
from netforce import access
class ReportCurrency(Model):
_name = "report.currency"
_transient = True
_fields = {
"date_from": fields.Date("From Date"),
"date_to": fields.Date("To Date"),
}
def default_get(self, field_names=None, context={}, **kw):
defaults = context.get("defaults", {})
date_from = defaults.get("date_from")
date_to = defaults.get("date_to")
if not date_from and not date_to:
date_from = datetime.date.today().strftime("%Y-%m-01")
date_to = (datetime.date.today() + relativedelta(day=31)).strftime("%Y-%m-%d")
return {
"date_from": date_from,
"date_to": date_to,
}
def get_report_data(self, ids, context={}):
company_id = access.get_active_company()
comp = get_model("company").browse(company_id)
if ids:
params = self.read(ids, load_m2o=False)[0]
else:
params = self.default_get(load_m2o=False, context=context)
settings = get_model("settings").browse(1)
date_from = params.get("date_from")
date_to = params.get("date_to")
receivable_accounts = []
payable_accounts = []
bank_accounts = []
deposit_accounts = []
ctx = {
#"date_from": date_from, # TODO: double-check that don't need date from
"date_to": date_to,
"currency_date": date_to,
}
for acc in get_model("account.account").search_browse([["type", "=", "receivable"]], context=ctx):
bal_reval = get_model("currency").convert(
acc.balance_cur, acc.currency_id.id, acc.company_currency_id.id, date=date_to, rate_type="sell")
vals = {
"code": acc.code,
"name": acc.name,
"balance_cur": acc.balance_cur,
"account_currency_code": acc.currency_id.code,
"company_currency_code": acc.company_currency_id.code,
"balance": acc.balance,
"balance_reval": bal_reval,
"unreal_gain": get_model("currency").convert(bal_reval - acc.balance, acc.company_currency_id.id, settings.currency_id.id, date=date_to, rate_type="sell"),
}
receivable_accounts.append(vals)
for acc in get_model("account.account").search_browse([["type", "=", "payable"]], context=ctx):
bal_reval = get_model("currency").convert(
acc.balance_cur, acc.currency_id.id, acc.company_currency_id.id, date=date_to, rate_type="buy")
vals = {
"code": acc.code,
"name": acc.name,
"balance_cur": acc.balance_cur,
"account_currency_code": acc.currency_id.code,
"company_currency_code": acc.company_currency_id.code,
"balance": acc.balance,
"balance_reval": bal_reval,
"unreal_gain": get_model("currency").convert(bal_reval - acc.balance, acc.company_currency_id.id, settings.currency_id.id, date=date_to, rate_type="buy"),
}
payable_accounts.append(vals)
for acc in get_model("account.account").search_browse([["type", "=", "bank"]], context=ctx):
bal_reval = get_model("currency").convert(
acc.balance_cur, acc.currency_id.id, acc.company_currency_id.id, date=date_to, rate_type="sell")
vals = {
"code": acc.code,
"name": acc.name,
"balance_cur": acc.balance_cur,
"account_currency_code": acc.currency_id.code,
"company_currency_code": acc.company_currency_id.code,
"balance": acc.balance,
"balance_reval": bal_reval,
"unreal_gain": get_model("currency").convert(bal_reval - acc.balance, acc.company_currency_id.id, settings.currency_id.id, date=date_to, rate_type="sell"),
}
bank_accounts.append(vals)
for acc in get_model("account.account").search_browse([["type", "=", "cur_liability"]], context=ctx):
bal_reval = get_model("currency").convert(
acc.balance_cur, acc.currency_id.id, acc.company_currency_id.id, date=date_to, rate_type="sell")
vals = {
"code": acc.code,
"name": acc.name,
"balance_cur": acc.balance_cur,
"account_currency_code": acc.currency_id.code,
"company_currency_code": acc.company_currency_id.code,
"balance": acc.balance,
"balance_reval": bal_reval,
"unreal_gain": get_model("currency").convert(bal_reval - acc.balance, acc.company_currency_id.id, settings.currency_id.id, date=date_to, rate_type="sell"),
}
deposit_accounts.append(vals)
data = {
"date_from": date_from,
"date_to": date_to,
"company_name": comp.name,
"company_currency": settings.currency_id.code,
"receivable_accounts": receivable_accounts,
"payable_accounts": payable_accounts,
"bank_accounts": bank_accounts,
"deposit_accounts": deposit_accounts,
"totals_receivable": {
"unreal_gain": sum(a["unreal_gain"] for a in receivable_accounts),
},
"totals_payable": {
"unreal_gain": sum(a["unreal_gain"] for a in payable_accounts),
},
"totals_bank": {
"unreal_gain": sum(a["unreal_gain"] for a in bank_accounts),
},
"totals_deposit": {
"unreal_gain": sum(a["unreal_gain"] for a in deposit_accounts),
},
"total_exposure":
sum(a["unreal_gain"] for a in receivable_accounts) +
sum(a["unreal_gain"] for a in payable_accounts) +
sum(a["unreal_gain"] for a in bank_accounts) +
sum(a["unreal_gain"] for a in deposit_accounts),
}
return data
def get_fx_exposure(self, date_from, date_to, track_id=None, track2_id=None, context={}):
ctx = {
"defaults": {
"date_from": date_from,
"date_to": date_to,
"track_id": track_id,
"track2_id": track2_id,
}
}
data = self.get_report_data(None, ctx)
return data["total_exposure"]
ReportCurrency.register()
|
the-stack_0_27973
|
def massfractionvtime(datafile, end, datafile2 = 'None', min_mf = .00000001, time_spacing = 10, h_ratio = [3, 1], zz_wanted = 'None', zz_wanted2 = 'None', aa_wanted = 'None', aa_wanted2 = 'None', nuc_names_wanted = 'None', nuc_names_wanted2 = 'None'):
'''
Inputs: datafile = ts file
datafile2 = optional second ts file
end = k value at end of desired time
min_mf = cutoff point below which mass fraction does not appear on the graph, default to .00000001
               time_spacing = desired interval between x-axis ticks, default to 10
h_ratio = height ratio (if plotting multiple plots), default to 3:1
zz_wanted = list of atomic numbers of desired isotopes (if multiple isotopes of the same element are desired, their zz values must be added multiple times)
               zz_wanted2 = list of atomic numbers of desired isotopes from second data file (if multiple isotopes of the same element are desired, their zz values must be added multiple times)
aa_wanted = list of atomic masses of desired isotopes (used in conjunction with zz_wanted)
               aa_wanted2 = list of atomic masses of desired isotopes of second data file (used in conjunction with zz_wanted2)
nuc_names_wanted = list of desired species names, formatted as '$^{aa}$Symbol' (best option when plotting specific isotopes)
nuc_names_wanted2 = list of desired species names from second data file, formatted as '$^{aa}$Symbol' (best option when plotting specific isotopes)
Outputs: plot of mass fraction vs time
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import matplotlib.ticker as plticker
import matplotlib.gridspec as gridspec
import find_file_type as fft
import read_ts_file as rtf
file_type = fft.find_file_type(datafile)
if file_type != 'ts':
print("Data file must be ts file")
elif file_type == 'ts':
#create plot space
plt.figure(1)
#set up grid layout
total_height = h_ratio[0] + h_ratio[1]
gs = gridspec.GridSpec(total_height, 1)
#Read ts file, use variables.
zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx = rtf.read_ts_file(datafile)
print ("File read.")
element = rtf.build_element_symbol()
nuc_name = rtf.build_isotope_symbol(zz, aa)
num_species_total = np.shape(xmf)[1]
#Assign each species a random color.
colors_array = []
for counter in np.arange(1, num_species_total+1):
item1 = np.random.rand(counter)[0]
item2 = np.random.rand(counter)[0]
item3 = np.random.rand(counter)[0]
colors_array.append(item1)
colors_array.append(item2)
colors_array.append(item3)
colors_array = np.asarray(colors_array)
colors_array = colors_array.reshape((num_species_total,3))
print ("Colors assigned.")
ax1 = plt.subplot(1, 1, 1)
for counter in np.arange(0, num_species_total):
if zz_wanted != 'None':
if zz[counter] in zz_wanted and aa_wanted == 'None': #Plot all isotopes of an element.
if datafile2 == "None":
plt.plot(time, xmf[:, counter], label = nuc_name[counter])
else:
plt.plot(time, xmf[:, counter], label = datafile + ": " + nuc_name[counter])
print ("Data assigned to plot.")
elif zz[counter] in zz_wanted and aa[counter] in aa_wanted: #Plot by atomic number and mass number.
if datafile2 == "None":
plt.plot(time, xmf[:, counter], label = nuc_name[counter])
else:
plt.plot(time, xmf[:, counter], label = datafile + ": " + nuc_name[counter])
print ("Data assigned to plot.")
zz_wanted.remove(zz[counter])
elif nuc_names_wanted != 'None': #Listed nuclear names switch plotting mechanism.
break
elif np.amax(xmf[:, counter]) >= min_mf: #Plot all species over specified threshold.
plt.plot(time, xmf[:, counter])
if nuc_names_wanted != 'None': #Sort through list to find mass fraction of named species, and plot.
for counter in np.arange(0, len(nuc_names_wanted)):
species_number = nuc_name.index(nuc_names_wanted[counter])
species_number = int(species_number)
if datafile2 == "None":
plt.plot(time, xmf[:, species_number], color = colors_array[species_number], label = nuc_name[species_number])
else:
plt.plot(time, xmf[:, species_number], color = colors_array[species_number], label = datafile + ": " + nuc_name[species_number])
print ("Data assigned to plot.")
#Read and Plot optional second ts file
if datafile2 != 'None':
#Error check file type
file_type = fft.find_file_type(datafile2)
if file_type != 'ts':
print("Second data file must be ts file")
elif file_type == 'ts':
zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx = rtf.read_ts_file(datafile2)
element = rtf.build_element_symbol()
nuc_name = rtf.build_isotope_symbol(zz, aa)
num_species_total = np.shape(xmf)[1]
print("Second File Read")
for counter in np.arange(0, num_species_total):
if zz_wanted2 != 'None':
if zz[counter] in zz_wanted2 and aa_wanted2 == 'None': #Plot all isotopes of an element
plt.plot(time, xmf[:, counter], linestyle = 'dashed', label = datafile2 + ": " + nuc_name[counter])
print("Second File Data Assigned to Plot")
elif zz[counter] in zz_wanted2 and aa[counter] in aa_wanted2:#Sort through list to find mass fraction of named species, and plot.
plt.plot(time, xmf[:, counter], linestyle = 'dashed', label = datafile2 + ": " + nuc_name[counter])
zz_wanted2.remove(zz[counter])
print("Second File Data Assigned to Plot")
elif nuc_names_wanted2 != 'None':
break
elif np.amax(xmf[:, counter]) >= min_mf: #Plot all species over specified threshold.
plt.plot(time, xmf[:, counter], linestyle = 'dashed')
print("Second Data File Assigned to Plot")
if nuc_names_wanted2 != 'None':#Sort through list to find mass fraction of named species, and plot.
for counter in np.arange(0, len(nuc_names_wanted2)):
species_number = nuc_name.index(nuc_names_wanted2[counter])
species_number = int(species_number)
plt.plot(time, xmf[:, species_number], color = colors_array[species_number], linestyle = 'dashed', label = datafile2 + ": " + nuc_name[species_number])
print("Second File Data Assigned to Plot")
#Format axes
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.995, box.height])
#Create legend
        if any(arg != 'None' for arg in (nuc_names_wanted, nuc_names_wanted2, zz_wanted, zz_wanted2, aa_wanted, aa_wanted2)):
ax1.legend(loc = 'center left', bbox_to_anchor = (1, 0.5), fontsize = 10)
#Format and label axes
plt.yscale('log')
plt.ylim(min_mf, 1.5)
plt.ylabel("Mass Fraction")
plt.xscale('linear')
plt.xlim(time[0], time[end])
plt.xlabel ("Time (s)")
print ("Axes formatted.")
#Add x ticks at specified intervals
x_labels = []
tick = time[0]
while tick <= time[end]:
tick = float("{0:.1f}".format(tick))
x_labels.append(tick)
tick += time_spacing
loc = plticker.MultipleLocator(base=time_spacing)
ax1.xaxis.set_major_locator(loc)
plt.xticks(x_labels, x_labels)
print ("Axes labeled.")
#Remove superfluous ticks, show grid line instead
plt.tick_params(axis='both', which='both', bottom='on', top='on', labelbottom='on', left='off', right='off', labelleft='on')
plt.grid(True)
print ("Grid line.")
#Show graph
plt.show()
print ("Plot shown.")
|
the-stack_0_27974
|
from pypykatz.registry.offline_parser import OffineRegistry
import subprocess
import json
import os
class sam:
def __init__(self):
self.output='./output/'
self.system_hive_filename="system.hive"
self.sam_hive_filename="sam.hive"
self.security_hive_filename="security.hive"
self.result_filename="sam_result.json"
self.pypykatz=".\\pypykatz\\pypykatz"
def dump_hive(self):
system_hive_path=self.output+self.system_hive_filename
sam_hive_path=self.output+self.sam_hive_filename
security_hive_path=self.output+self.security_hive_filename
save_system_command="reg save hklm\\system {filename} -y".format(filename=system_hive_path)
run_save_system=subprocess.Popen(args=save_system_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
run_save_system.communicate()
save_sam_command="reg save hklm\\sam {filename} -y".format(filename=sam_hive_path)
run_save_sam=subprocess.Popen(args=save_sam_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
run_save_sam.communicate()
save_security_command="reg save hklm\\security {filename} -y".format(filename=security_hive_path)
run_save_security=subprocess.Popen(args=save_security_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
run_save_security.communicate()
if os.path.isfile(system_hive_path) and os.path.isfile(sam_hive_path) and os.path.isfile(security_hive_path):
system_size=os.path.getsize(system_hive_path)
sam_size=os.path.getsize(sam_hive_path)
security_size=os.path.getsize(security_hive_path)
if system_size==0 and sam_size==0 and security_size==0:
return False
else:
return True
else:
return False
def get_secret_data(self,secret_data_json):
'''
with open(secret_file, 'r') as json_file:
json_data=json.load(json_file)
'''
json_data=json.loads(secret_data_json)
secret_data=[]
for item in json_data['SAM']['local_users']:
secret_data.append(item)
return secret_data
def get_result(self,target_name,login_data):
desc=target_name+" Secret Data"
result={}
result['desc']=desc
data=[]
for item in login_data:
data_item={}
data_item['username']=item['username']
data_item['nt_hash']=item['nt_hash']
data_item['lm_hash']=item['lm_hash']
data_item['rid']=item['rid']
data.append(data_item)
result['data']=data
return result
def run(self,target_list):
result=[]
if 'sam' in target_list:
res=self.dump_hive()
if res:
try:
system_hive=self.output+self.system_hive_filename
sam_hive=self.output+self.sam_hive_filename
security_hive=self.output+self.security_hive_filename
or_result = OffineRegistry.from_files(system_hive,sam_hive,security_hive)
or_result_json=or_result.to_json()
secret_data=self.get_secret_data(or_result_json)
sam_result=self.get_result('SAM',secret_data)
result.append(sam_result)
except Exception as e:
pass
else:
print('[-]Insufficient permissions')
return result
|
the-stack_0_27975
|
import numpy as np
import sys
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.EVOL.ALIMITevol import ALIMITevol
from UTILS.Tools import Tools
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class ConvectiveTurnoverTimescaleEvolutionResolutionStudy(Calculus, ALIMITevol, Tools, object):
def __init__(self, filename, ig, data_prefix):
super(ConvectiveTurnoverTimescaleEvolutionResolutionStudy, self).__init__(ig)
# load data to a list of structured arrays
eht = []
for ffile in filename:
eht.append(self.customLoad(ffile))
# declare data lists
t_timec, t_tc = [], []
nx, ny, nz = [], [], []
tavg = []
for i in range(len(filename)):
# load temporal evolution
t_timec.append(self.getRAdata(eht[i],'t_timec'))
t_tc.append(self.getRAdata(eht[i],'t_tc'))
nx.append(self.getRAdata(eht[i],'nx'))
ny.append(self.getRAdata(eht[i],'ny'))
nz.append(self.getRAdata(eht[i],'nz'))
tavg.append(self.getRAdata(eht[i], 'tavg'))
# share data across the whole class
self.t_timec = t_timec
self.t_tc = t_tc
self.data_prefix = data_prefix
self.nx = nx
self.ny = ny
self.nz = nz
self.tavg = tavg
def plot_tconvturn_evolution(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
grd = self.t_timec
plt1 = self.t_tc
# plt2 = self.t_epsD
# load resolution
nx = self.nx
ny = self.ny
nz = self.nz
tavg = self.tavg
t_tc = self.t_tc
# find maximum resolution data
grd_maxres = self.maxresdata(grd)
plt1_maxres = self.maxresdata(plt1)
plt_interp = []
for i in range(len(grd)):
plt_interp.append(np.interp(grd_maxres, grd[i], plt1[i]))
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
if (LAXIS != 2):
print("ERROR(ConvectiveTurnoverTimescaleEvolutionStudy.py): Only LAXIS=2 is supported.")
sys.exit()
plt10_tmp = plt1[0]
plt11_tmp = plt1[0]
        plt1_foraxislimit = plt1[0]
        plt1max = np.max(plt1[0])
        for plt1i in plt1:
            if np.max(plt1i) > plt1max:
                plt1max = np.max(plt1i)
                plt1_foraxislimit = plt1i
# set plot boundaries
to_plot = [plt1_foraxislimit]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title('convective turnover timescale evolution')
for i in range(len(grd)):
plt.plot(grd[i], plt1[i], label=str(nx[i]) + ' x ' + str(ny[i]) + ' x ' + str(nz[i]) + ' '
+ '(tavg = ' + str(np.round(tavg[i],1)) + ' s = '
+ str(np.round(tavg[i]/np.mean(t_tc[i]),1)) + ' TOs)')
# plt.plot(grd1,plt2,color='g',label = r'$epsD$')
# define and show x/y LABELS
setxlabel = r"t (s)"
setylabel = r"timescale (s)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 12})
# display PLOT
plt.show(block=False)
# save PLOT
plt.savefig('RESULTS/' + self.data_prefix + 'tconvturnover_evol_res.png')
# find data with maximum resolution
def maxresdata(self, data):
        tmp = 0
        for idata in data:
            if idata.shape[0] > tmp:
                data_maxres = idata
                tmp = idata.shape[0]
        return data_maxres
|
the-stack_0_27976
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example script to train the DNC on a repeated copy task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sonnet as snt
from tensorflow.contrib.layers.python.layers import initializers
from dnc import dnc
import numpy as np
import cv2
from scipy import ndimage as nd
from PIL import Image
import os, sys
import time
from utility import alrc
experiment_number = 118
FLAGS = tf.flags.FLAGS
# Model parameters
tf.flags.DEFINE_integer("hidden_size", 128, "Size of LSTM hidden layer.")
tf.flags.DEFINE_integer("memory_size", 16, "The number of memory slots.")
tf.flags.DEFINE_integer("word_size", 64, "The width of each memory slot.")
tf.flags.DEFINE_integer("num_write_heads", 1, "Number of memory write heads.")
tf.flags.DEFINE_integer("num_read_heads", 4, "Number of memory read heads.")
tf.flags.DEFINE_integer("clip_value", 0, "Maximum absolute value of controller and dnc outputs.")
tf.flags.DEFINE_bool("use_batch_norm", True, "Use batch normalization in generator.")
tf.flags.DEFINE_string("model", "LSTM", "LSTM or DNC.")
tf.flags.DEFINE_integer("projection_size", 0, "Size of projection layer. Zero for no projection.")
tf.flags.DEFINE_bool("is_input_embedder", False, "Embed inputs before they are input.")
# Optimizer parameters.
tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.")
tf.flags.DEFINE_integer("replay_size", 25000, "Maximum examples in ring buffer.")
tf.flags.DEFINE_integer("avg_replays", 4, "Mean frequency each experience is used.")
tf.flags.DEFINE_float("max_grad_norm", 50, "Gradient clipping norm limit.")
tf.flags.DEFINE_float("learning_rate", 1e-4, "Optimizer learning rate.")
tf.flags.DEFINE_float("optimizer_epsilon", 1e-10, "Epsilon used for RMSProp optimizer.")
tf.flags.DEFINE_float("L2_norm", 1.e-5, "Decay rate for L2 regularization. 0 for no regularization.")
# Task parameters
tf.flags.DEFINE_integer("img_side", 96, "Number of image pixels for square image")
tf.flags.DEFINE_integer("num_steps", 20, "Number of image pixels for square image")
tf.flags.DEFINE_integer("step_size", 20, "Distance STEM probe moves at each step (in px).")
tf.flags.DEFINE_integer("num_actions", 2, "Number of parameters to describe actions.")
tf.flags.DEFINE_integer("shuffle_size", 2000, "Size of moving buffer to sample data from.")
tf.flags.DEFINE_integer("prefetch_size", 10, "Number of batches to prepare in advance.")
# Training options.
tf.flags.DEFINE_float("actor_lr", 0.0007, "Actor learning rate.")
tf.flags.DEFINE_float("critic_lr", 0.001, "Critic learning rate.")
tf.flags.DEFINE_float("generator_lr", 0.003, "Generator learning rate.")
tf.flags.DEFINE_float("gamma", 0.97, "Reward/loss decay.")
tf.flags.DEFINE_bool("is_advantage_actor_critic", False, "Use advantage rather than Q errors for actor.")
tf.flags.DEFINE_bool("is_cyclic_generator_learning_rate", True, "Use advantage rather than Q errors for actor.")
tf.flags.DEFINE_integer("supervision_iters", 100_000, "Starting value for supeversion.")
tf.flags.DEFINE_float("supervision_start", 1., "Starting value for supeversion.")
tf.flags.DEFINE_float("supervision_end", 0., "Starting value for supeversion.")
if FLAGS.supervision_iters:
#Flag will not be used
tf.flags.DEFINE_float("supervision", 0.5, "Weighting for known discounted future reward.")
else:
#Flag will be used
tf.flags.DEFINE_float("supervision", 0.0, "Weighting for known discounted future reward.")
tf.flags.DEFINE_bool("is_target_actor", True and FLAGS.supervision != 1, "True to use target actor.")
tf.flags.DEFINE_bool("is_target_critic", True and FLAGS.supervision != 1, "True to use target critic.")
tf.flags.DEFINE_bool("is_target_generator", False, "True to use target generator.")
tf.flags.DEFINE_integer("update_frequency", 0, "Frequency of hard target network updates. Zero for soft updates.")
tf.flags.DEFINE_float("target_decay", 0.9997, "Decay rate for target network soft updates.")
tf.flags.DEFINE_bool("is_generator_batch_norm_tracked", False, "True to track generator batch normalization.")
tf.flags.DEFINE_bool("is_positive_qs", True, "Whether to clip qs to be positive.")
tf.flags.DEFINE_bool("is_infilled", False, "True to use infilling rather than generator.")
tf.flags.DEFINE_bool("is_prev_position_input", True, "True to input previous positions.")
tf.flags.DEFINE_bool("is_ornstein_uhlenbeck", True, "True for O-U exploration noise.")
tf.flags.DEFINE_bool("is_noise_decay", True, "Decay noise if true.")
tf.flags.DEFINE_float("ou_theta", 0.1, "Drift back to mean.")
tf.flags.DEFINE_float("ou_sigma", 0.2, "Size of random process.")
tf.flags.DEFINE_bool("is_rel_to_truth", False, "True to normalize losses using expected losses.")
tf.flags.DEFINE_bool("is_clipped_reward", True, "True to clip rewards.")
tf.flags.DEFINE_bool("is_clipped_critic", False, "True to clip critic predictions for actor training.")
tf.flags.DEFINE_float("over_edge_penalty", 0.05, "Penalty for action going over edge of image.")
tf.flags.DEFINE_bool("is_prioritized_replay", False, "True to prioritize the replay of difficult experiences.")
tf.flags.DEFINE_bool("is_biased_prioritized_replay", False, "Priority sampling without bias correction.")
tf.flags.DEFINE_bool("is_relative_to_spirals", False, "True to compare generator losses against losses for spirals.")
tf.flags.DEFINE_bool("is_self_competition", False, "Oh it is on. True to compete against past versions of itself.")
tf.flags.DEFINE_float("norm_generator_losses_decay", 0.997, "Divide generator losses by their running mean. Zero for no normalization.")
tf.flags.DEFINE_bool("is_minmax_reward", False, "True to use highest losses for actor loss.")
tf.flags.DEFINE_integer("start_iter", 0, "Starting iteration")
tf.flags.DEFINE_integer("train_iters", 500_000, "Training iterations")
tf.flags.DEFINE_integer("val_examples", 20_000, "Number of validation examples")
tf.flags.DEFINE_integer("style_loss", 0, "Weighting of style loss. Zero for no style loss.")
tf.flags.DEFINE_string("model_dir",
f"//ads.warwick.ac.uk/shared/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/recurrent_conv-1/{experiment_number}/",
"Working directory.")
tf.flags.DEFINE_string("data_file",
"//Desktop-sa1evjv/h/96x96_stem_crops.npy",
"Datafile containing 19769 96x96 downsampled STEM crops.")
tf.flags.DEFINE_integer("report_freq", 10, "How often to print losses to the console.")
os.chdir(FLAGS.model_dir)
sys.path.insert(0, FLAGS.model_dir)
def norm_img(img, min=None, max=None, get_min_and_max=False):
if min == None:
min = np.min(img)
if max == None:
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.)
else:
a = 0.5*(min+max)
b = 0.5*(max-min)
img = (img-a) / b
if get_min_and_max:
return img.astype(np.float32), (min, max)
else:
return img.astype(np.float32)
def scale0to1(img):
"""Rescale image between 0 and 1"""
img = img.astype(np.float32)
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.5)
else:
img = (img - min)/(max - min)
return img.astype(np.float32)
def disp(img):
#if len(img.shape) == 3:
# img = np.sum(img, axis=2)
cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
cv2.imshow('CV_Window', scale0to1(img))
cv2.waitKey(0)
return
def run_model(input_sequence, output_size):
"""Runs model on input sequence."""
access_config = {
"memory_size": FLAGS.memory_size,
"word_size": FLAGS.word_size,
"num_reads": FLAGS.num_read_heads,
"num_writes": FLAGS.num_write_heads,
}
controller_config = {
"hidden_size": FLAGS.hidden_size,
}
clip_value = FLAGS.clip_value
dnc_core = dnc.DNC(access_config, controller_config, output_size, clip_value)
initial_state = dnc_core.initial_state(FLAGS.batch_size)
output_sequence, _ = tf.nn.dynamic_rnn(
cell=dnc_core,
inputs=input_sequence,
time_major=True,
initial_state=initial_state)
return output_sequence
class RingBuffer(object):
def __init__(
self,
action_shape,
observation_shape,
full_scan_shape,
batch_size,
buffer_size=1000,
num_past_losses=None,
):
self.buffer_size = buffer_size
self.actions = np.zeros([buffer_size]+list(action_shape)[1:])
self.observations = np.zeros([buffer_size]+list(observation_shape)[1:])
self.full_scans = np.zeros([buffer_size]+list(full_scan_shape)[1:])
self.position = 0
self._batch_size = batch_size
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
self.priorities = np.zeros([buffer_size])
self.indices = np.arange(buffer_size)
if FLAGS.is_self_competition:
self.past_losses = np.zeros([num_past_losses])
self.labels = np.zeros([buffer_size], np.int32)
def add(self, actions, observations, full_scans, labels=None):
i0 = self.position % self.buffer_size
num_before_cycle = min(self.buffer_size-i0, self._batch_size)
self.actions[i0:i0+num_before_cycle] = actions[:num_before_cycle]
self.observations[i0:i0+num_before_cycle] = observations[:num_before_cycle]
self.full_scans[i0:i0+num_before_cycle] = full_scans[:num_before_cycle]
num_remaining = self._batch_size - num_before_cycle
if num_remaining > 0:
self.actions[0:num_remaining] = actions[num_before_cycle:]
self.observations[:num_remaining] = observations[num_before_cycle:]
self.full_scans[:num_remaining] = full_scans[num_before_cycle:]
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
if self.position:
mean_priority = np.sum(self.priorities) / min(self.position, self.buffer_size)
else:
mean_priority = 0.3
self.priorities[i0:i0+num_before_cycle] = mean_priority*np.ones([num_before_cycle])
if num_before_cycle < self._batch_size:
self.priorities[0:num_remaining] = mean_priority*np.ones([self._batch_size - num_before_cycle])
if FLAGS.is_self_competition:
self.labels[i0:i0+num_before_cycle] = labels[:num_before_cycle]
if num_remaining > 0:
self.labels[0:num_remaining] = labels[num_before_cycle:]
self.position += self._batch_size
def get(self):
limit = min(self.position, self.buffer_size)
if FLAGS.is_prioritized_replay:
sample_idxs = np.random.choice(
self.indices,
size=self._batch_size,
replace=False,
p=self.priorities/np.sum(self.priorities)
) #alpha=1
beta = 0.5 + 0.5*(FLAGS.train_iters - self.position)/FLAGS.train_iters
sampled_priority_weights = self.priorities[sample_idxs]**( -beta )
sampled_priority_weights /= np.max(sampled_priority_weights)
elif FLAGS.is_biased_prioritized_replay:
alpha = (FLAGS.train_iters - self.position)/FLAGS.train_iters
            priorities = self.priorities**alpha
            sample_idxs = np.random.choice(
                self.indices,
                size=self._batch_size,
                replace=False,
                p=priorities/np.sum(priorities) #Use the alpha-tempered priorities
)
else:
sample_idxs = np.random.randint(0, limit, size=self._batch_size)
sampled_actions = np.stack([self.actions[i] for i in sample_idxs])
sampled_observations = np.stack([self.observations[i] for i in sample_idxs])
sampled_full_scans = np.stack([self.full_scans[i] for i in sample_idxs])
if FLAGS.is_prioritized_replay:
return sampled_actions, sampled_observations, sampled_full_scans, sample_idxs, sampled_priority_weights
elif FLAGS.is_biased_prioritized_replay:
return sampled_actions, sampled_observations, sampled_full_scans, sample_idxs
elif FLAGS.is_self_competition:
sampled_labels = np.stack([self.labels[i] for i in sample_idxs])
sampled_past_losses = np.stack([self.past_losses[i] for i in sampled_labels])
return sampled_actions, sampled_observations, sampled_full_scans, sampled_labels, sampled_past_losses
else:
return sampled_actions, sampled_observations, sampled_full_scans
def update_priorities(self, idxs, priorities):
"""For prioritized experience replay"""
self.priorities[idxs] = priorities
def update_past_losses(self, idxs, losses):
self.past_losses[idxs] = losses
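
#A minimal, illustrative sketch (not used by the training pipeline) of how the
#prioritized replay in RingBuffer.get() weights samples: transitions are drawn
#with probability proportional to their priorities and the sampling bias is
#corrected with importance weights priorities**(-beta), normalised by their
#maximum so gradient updates are only ever scaled down. The priority values
#below are made up for demonstration.
def _example_prioritized_replay_weights(beta=0.75):
    priorities = np.array([0.1, 0.4, 0.2, 0.3])
    sampling_probs = priorities / np.sum(priorities) #Passed as `p` to np.random.choice
    weights = priorities**(-beta) #Mirrors sampled_priority_weights in RingBuffer.get()
    weights /= np.max(weights) #Largest weight is 1
    return sampling_probs, weights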
class Agent(snt.AbstractModule):
def __init__(
self,
num_outputs,
name,
is_new=False,
noise_decay=None,
is_double_critic=False,
sampled_full_scans=None,
val_full_scans=None
):
super(Agent, self).__init__(name=name)
access_config = {
"memory_size": FLAGS.memory_size,
"word_size": FLAGS.word_size,
"num_reads": FLAGS.num_read_heads,
"num_writes": FLAGS.num_write_heads,
}
controller_config = {
"hidden_size": FLAGS.hidden_size,
"projection_size": FLAGS.projection_size or None,
}
clip_value = FLAGS.clip_value
with self._enter_variable_scope():
components = dnc.Components(access_config, controller_config, num_outputs)
self._dnc_core = dnc.DNC(components, num_outputs, clip_value, is_new=False, is_double_critic=is_double_critic)
if is_new:
self._dnc_core_new = dnc.DNC(
components,
num_outputs,
clip_value,
is_new=True,
noise_decay=noise_decay,
sampled_full_scans=sampled_full_scans,
is_noise=True
)
if not val_full_scans is None:
self._dnc_core_val = dnc.DNC(
components,
num_outputs,
clip_value,
is_new=True,
sampled_full_scans=val_full_scans
)
self._initial_state = self._dnc_core.initial_state(FLAGS.batch_size)
#self._action_embedder = snt.Linear(output_size=64)
#self._observation_embedder = snt.Linear(output_size=64)
def _build(self, observations, actions):
#Tiling here is a hack to make inputs the same size
num_tiles = 2 // (actions.get_shape().as_list()[-1] // FLAGS.num_actions)
tiled_actions = tf.tile(actions, [1, 1, num_tiles])
input_sequence = tf.concat([observations, tiled_actions], axis=-1)
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core,
inputs=input_sequence,
time_major=False,
initial_state=self._initial_state
)
return output_sequence
def get_new_experience(self):
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core_new,
inputs=tf.zeros([FLAGS.batch_size, FLAGS.num_steps, 1]),
time_major=False,
initial_state=self._initial_state
)
if hasattr(tf, 'ensure_shape'):
output_sequence = tf.ensure_shape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
else:
output_sequence = tf.reshape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
observations = output_sequence[:,:,:FLAGS.step_size]
actions = output_sequence[:,:,FLAGS.step_size:]
return observations, actions
def get_val_experience(self):
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core_val,
inputs=tf.zeros([FLAGS.batch_size, FLAGS.num_steps, 1]),
time_major=False,
initial_state=self._initial_state
)
if hasattr(tf, 'ensure_shape'):
output_sequence = tf.ensure_shape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
else:
output_sequence = tf.reshape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
observations = output_sequence[:,:,:FLAGS.step_size]
actions = output_sequence[:,:,FLAGS.step_size:]
return observations, actions
@property
def variables(self):
with self._enter_variable_scope():
return tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_variable_scope().name
)
@property
def trainable_variables(self):
with self._enter_variable_scope():
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=tf.get_variable_scope().name
)
def spectral_norm(w, iteration=1, in_place_updates=False):
"""Spectral normalization. It imposes Lipschitz continuity by constraining the
spectral norm (maximum singular value) of weight matrices.
Inputs:
w: Weight matrix to spectrally normalize.
iteration: Number of times to apply the power iteration method to
enforce spectral norm.
Returns:
Weight matrix with spectral normalization control dependencies.
"""
w0 = w
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable(auto_name("u"),
[1, w_shape[-1]],
initializer=tf.random_normal_initializer(mean=0.,stddev=0.03),
trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
if in_place_updates:
        #In-place control dependencies bottleneck training
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
else:
#Execute control dependency in parallel with other update ops
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u.assign(u_hat))
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
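
#Illustrative NumPy sketch (not used above) of the power iteration that
#spectral_norm performs in the TensorFlow graph: after a few iterations sigma
#approximates the largest singular value of w, so dividing w by sigma
#normalises its spectral norm to (approximately) 1. The matrix and vector
#sizes here are arbitrary.
def _example_power_iteration(iterations=30):
    rng = np.random.RandomState(0)
    w = rng.randn(64, 32).astype(np.float32) #Stand-in for a reshaped weight matrix
    u = rng.randn(1, 32).astype(np.float32) #Same role as the persistent `u` variable
    for _ in range(iterations):
        v = u @ w.T
        v /= np.linalg.norm(v)
        u = v @ w
        u /= np.linalg.norm(u)
    sigma = (v @ w @ u.T).item() #Approximates np.linalg.svd(w, compute_uv=False)[0]
    return sigma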
def spectral_norm_conv(
inputs,
num_outputs,
stride=1,
kernel_size=3,
padding='VALID',
biases_initializer=tf.zeros_initializer()
):
"""Convolutional layer with spectrally normalized weights."""
w = tf.get_variable(auto_name("kernel"), shape=[kernel_size, kernel_size, inputs.get_shape()[-1], num_outputs])
x = tf.nn.conv2d(input=inputs, filter=spectral_norm(w),
strides=[1, stride, stride, 1], padding=padding)
if biases_initializer != None:
b = tf.get_variable(auto_name("bias"), [num_outputs], initializer=biases_initializer)
x = tf.nn.bias_add(x, b)
return x
def conv(
inputs,
num_outputs,
kernel_size=3,
stride=1,
padding='SAME',
data_format="NHWC",
actv_fn=tf.nn.relu,
is_batch_norm=True,
is_spectral_norm=False,
is_depthwise_sep=False,
extra_batch_norm=False,
biases_initializer=tf.zeros_initializer,
weights_initializer=initializers.xavier_initializer,
transpose=False,
is_training=True
):
"""Convenience function for a strided convolutional or transpositional
convolutional layer.
Intro: https://towardsdatascience.com/intuitively-understanding-convolutions-for-deep-learning-1f6f42faee1.
The order is: Activation (Optional) -> Batch Normalization (optional) -> Convolutions.
Inputs:
inputs: Tensor of shape `[batch_size, height, width, channels]` to apply
convolutions to.
num_outputs: Number of feature channels to output.
        kernel_size: Side length of square convolutional kernels.
stride: Distance between convolutional kernel applications.
padding: 'SAME' for zero padding where kernels go over the edge.
'VALID' to discard features where kernels go over the edge.
        actv_fn: Non-linearity to apply after summing convolutions.
is_batch_norm: If True, add batch normalization after activation.
is_spectral_norm: If True, spectrally normalize weights.
is_depthwise_sep: If True, depthwise separate convolutions into depthwise
spatial convolutions, then 1x1 pointwise convolutions.
extra_batch_norm: If True and convolutions are depthwise separable, implement
batch normalization between depthwise and pointwise convolutions.
biases_initializer: Function to initialize biases with. None for no biases.
weights_initializer: Function to initialize weights with. None for no weights.
transpose: If True, apply convolutional layer transpositionally to the
described convolutional layer.
is_training: If True, use training specific operations e.g. batch normalization
update ops.
Returns:
Output of convolutional layer.
"""
x = inputs
num_spatial_dims = len(x.get_shape().as_list()) - 2
if biases_initializer == None:
biases_initializer = lambda: None
if weights_initializer == None:
weights_initializer = lambda: None
if not is_spectral_norm:
#Convolutional layer without spectral normalization
if transpose:
stride0 = 1
if type(stride) == list or is_depthwise_sep or stride % 1:
#Apparently there is no implementation of transpositional
#depthwise separable convolutions, so bilinearly upsample then
#depthwise separably convolute
if kernel_size != 1:
x = tf.image.resize_bilinear(
images=x,
size=stride if type(stride) == list else \
[int(stride*d) for d in x.get_shape().as_list()[1:3]],
align_corners=True
)
stride0 = stride
stride = 1
if type(stride0) == list and not is_depthwise_sep:
layer = tf.contrib.layers.conv2d
elif is_depthwise_sep:
layer = tf.contrib.layers.separable_conv2d
else:
layer = tf.contrib.layers.conv2d_transpose
x = layer(
inputs=x,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=None,
weights_initializer=weights_initializer(),
biases_initializer=biases_initializer())
if type(stride0) != list:
if (is_depthwise_sep or stride0 % 1) and kernel_size == 1:
x = tf.image.resize_bilinear(
images=x,
size=[int(stride0*d) for d in x.get_shape().as_list()[1:3]],
align_corners=True
)
else:
if num_spatial_dims == 1:
layer = tf.contrib.layers.conv1d
elif num_spatial_dims == 2:
if is_depthwise_sep:
layer = tf.contrib.layers.separable_conv2d
else:
layer = tf.contrib.layers.conv2d
x = layer(
inputs=x,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=None,
weights_initializer=weights_initializer(),
biases_initializer=biases_initializer())
else:
#Weights are spectrally normalized
x = spectral_norm_conv(
inputs=x,
num_outputs=num_outputs,
stride=stride,
kernel_size=kernel_size,
padding=padding,
biases_initializer=biases_initializer())
if actv_fn:
x = actv_fn(x)
if is_batch_norm and FLAGS.use_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
return x
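
#Illustrative usage of the `conv` wrapper above (not called by the training
#code): one strided encoder layer followed by a matching transposed decoder
#layer. The input shape is an assumption for demonstration.
def _example_conv_usage(is_training=True):
    x = tf.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 1])
    down = conv(x, num_outputs=32, stride=2, is_training=is_training) #Halves spatial size
    up = conv(down, num_outputs=16, stride=2, transpose=True, is_training=is_training) #Doubles it again
    return down, up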
def residual_block(inputs, skip=3, is_training=True):
"""Residual block whre the input is added to the signal after skipping some
layers. This architecture is good for learning purturbative transformations.
If no layer is provided, it defaults to a convolutional layer.
Deep residual learning: https://arxiv.org/abs/1512.03385.
Inputs:
inputs: Tensor to apply residual block to. Outputs of every layer will
have the same shape.
skip: Number of layers to skip before adding input to layer output.
layer: Layer to apply in residual block. Defaults to convolutional
layer. Custom layers must support `inputs`, `num_outputs` and `is_training`
arguments.
Returns:
Final output of residual block.
"""
x = x0 = inputs
    def layer(inputs, num_outputs, is_training, is_batch_norm, actv_fn):
        x = conv(
            inputs=inputs,
            num_outputs=num_outputs,
            is_training=is_training,
            is_batch_norm=is_batch_norm, #Disable batch norm on the final skipped layer
            actv_fn=actv_fn
        )
return x
for i in range(skip):
x = layer(
inputs=x,
num_outputs=x.get_shape()[-1],
is_training=is_training,
is_batch_norm=i < skip - 1,
actv_fn=tf.nn.relu
)
x += x0
if FLAGS.use_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
return x
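
#Illustrative sketch of stacking the residual blocks defined above on an
#assumed feature map; the channel count is unchanged inside a block, so the
#skip addition is always valid.
def _example_residual_stack(features, num_blocks=3, is_training=True):
    x = features
    for _ in range(num_blocks):
        x = residual_block(x, skip=3, is_training=is_training)
    return x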
class Generator(snt.AbstractModule):
def __init__(self,
name,
is_training
):
super(Generator, self).__init__(name=name)
self._is_training = is_training
def _build(self, inputs):
x = inputs
std_actv = tf.nn.relu#lambda x: tf.nn.leaky_relu(x, alpha=0.1)
is_training = self._is_training
is_depthwise_sep = False
base_size = 32
#x = tf.contrib.layers.batch_norm(x, is_training=is_training)
x = conv(
x,
num_outputs=32,
is_training=is_training,
actv_fn=std_actv
)
#Encoder
for i in range(1, 3):
x = conv(
x,
num_outputs=base_size*2**i,
stride=2,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
actv_fn=std_actv
)
if i == 2:
low_level = x
#Residual blocks
for _ in range(5): #Number of blocks
x = residual_block(
x,
skip=3,
is_training=is_training
)
#Decoder
for i in range(1, -1, -1):
x = conv(
x,
num_outputs=base_size*2**i,
stride=2,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
transpose=True,
actv_fn=std_actv
)
x = conv(
x,
num_outputs=base_size,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training
)
#Project features onto output image
x = conv(
x,
num_outputs=1,
biases_initializer=None,
actv_fn=None,
is_batch_norm=False,
is_training=is_training
)
return x
@property
def variables(self):
with self._enter_variable_scope():
return tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_variable_scope().name
)
@property
def trainable_variables(self):
with self._enter_variable_scope():
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=tf.get_variable_scope().name
)
def construct_partial_scans(actions, observations):
"""
actions: [batch_size, num_steps, 2]
observations: [batch_size, num_steps, 10]
"""
#Last action unused and the first action is always the same
actions = np.concatenate((np.ones([FLAGS.batch_size, 1, 2]), actions[:,:-1,:]), axis=1)
starts = 0.5*FLAGS.img_side + FLAGS.step_size*(np.cumsum(actions, axis=1) - actions)
#starts = np.zeros(actions.shape)
#starts[:,0,:] = actions[:,0,:]
#for i in range(1, FLAGS.num_steps):
# starts[:,i,:] = actions[:,i,:] + starts[:,i-1,:]
#starts -= actions
#starts *= FLAGS.step_size
#starts += 0.5*FLAGS.img_side
positions = np.stack([starts + i*actions for i in range(FLAGS.step_size)], axis=-2)
x = np.minimum(np.maximum(positions, 0), FLAGS.img_side-1)
indices = []
for j in range(FLAGS.batch_size):
for k in range(FLAGS.num_steps):
for i in range(FLAGS.step_size):
indices.append( [j, int(x[j,k,i,0]), int(x[j,k,i,1])] )
indices = np.array(indices)
indices = tuple([indices[:,i] for i in range(3)])
partial_scans = np.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side])
masks = np.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side])
partial_scans[indices] = observations.reshape([-1])
masks[indices] = 1
partial_scans /= np.maximum(masks, 1)
masks = np.minimum(masks, 1)
partial_scans = np.stack([partial_scans, masks], axis=-1)
return partial_scans
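
#Illustrative, batch-free sketch of the geometry in construct_partial_scans:
#each action is a unit step direction and each step places `step_size` probe
#positions along that direction, starting from the image centre. The grid
#size, step size and actions below are made up for demonstration.
def _example_scan_positions():
    img_side, step_size = 8, 4
    actions = np.array([[1., 0.], [0., 1.]]) #Move right, then down
    actions = np.concatenate((np.ones([1, 2]), actions[:-1])) #First action fixed, last unused
    starts = 0.5*img_side + step_size*(np.cumsum(actions, axis=0) - actions)
    positions = np.stack([starts + i*actions for i in range(step_size)], axis=1)
    return np.clip(positions, 0, img_side - 1) #Shape [num_steps, step_size, 2]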
def target_update_ops(target_network, network, decay=FLAGS.target_decay, l2_norm=False):
t_vars = target_network.variables
v_vars = network.variables
update_ops = []
for t, v in zip(t_vars, v_vars):
if FLAGS.is_generator_batch_norm_tracked or not "BatchNorm" in t.name: #Don't track batch normalization
if l2_norm:
v_new = (1-FLAGS.L2_norm)*v
op = v.assign(v_new)
update_ops.append(op)
op = t.assign(decay*t + (1-decay)*v_new)
update_ops.append(op)
else:
op = t.assign(decay*t + (1-decay)*v)
update_ops.append(op)
print(t.name.replace("target_", "") == v.name, t.name.replace("target_", ""), v.name)
return update_ops
def load_data(shape):
data_ph = tf.placeholder(tf.float32, shape=list(shape))
ds = tf.data.Dataset.from_tensor_slices(tuple([data_ph]))
if FLAGS.is_self_competition:
labels = tf.data.Dataset.range(0, list(shape)[0])
ds = tf.data.Dataset.zip((ds, labels))
ds = ds.shuffle(buffer_size=FLAGS.shuffle_size)
ds = ds.repeat()
ds = ds.batch(FLAGS.batch_size)
ds = ds.prefetch(FLAGS.prefetch_size)
iterator = ds.make_initializable_iterator()
return data_ph, iterator
@tf.custom_gradient
def overwrite_grads(x, y):
print("OG", x, y)
def grad(dy):
return y, None
return x, grad
def infill(data, mask):
return data[tuple(nd.distance_transform_edt(np.equal(mask, 0), return_distances=False, return_indices=True))]
#def infill(data, mask):
# x = np.zeros(data.shape)
# c = (cv2.GaussianBlur(mask.astype(np.float32), (7, 7), 3.5, None, 3.5) > 0).astype(np.float32)
# truth = data[tuple(nd.distance_transform_edt(np.equal(mask, 0), return_distances=False, return_indices=True))]
# x = (truth*c).astype(np.float32)
# return x
def fill(input):
return np.expand_dims(np.stack([infill(img, mask) for img, mask in zip(input[:,:,:,0], input[:,:,:,1])]), -1)
def flip_rotate(img, choice):
"""Applies a random flip || rotation to the image, possibly leaving it unchanged"""
if choice == 0:
return img
elif choice == 1:
return np.rot90(img, 1)
elif choice == 2:
return np.rot90(img, 2)
elif choice == 3:
return np.rot90(img, 3)
elif choice == 4:
return np.flip(img, 0)
elif choice == 5:
return np.flip(img, 1)
elif choice == 6:
return np.flip(np.rot90(img, 1), 0)
else:
return np.flip(np.rot90(img, 1), 1)
def draw_spiral(coverage, side, num_steps=10_000):
"""Duration spent at each location as a particle falls in a magnetic
field. Trajectory chosen so that the duration density is (approx.)
evenly distributed. Trajectory is calculated stepwise.
Args:
        coverage: Average amount of time spent at a random pixel.
        side: Sidelength of the square image that the motion is
            inscribed on.
        num_steps: Number of steps used to trace the trajectory.
    Returns:
        A [side, side] float image of the spiral trajectory.
"""
#Use size that is larger than the image
size = int(np.ceil(np.sqrt(2)*side))
#Maximum radius of motion
R = size/2
#Get constant in equation of motion
k = 1/ (2*np.pi*coverage)
#Maximum theta that is in the image
theta_max = R / k
#Equispaced steps
theta = np.arange(0, theta_max, theta_max/num_steps)
r = k * theta
#Convert to cartesian, with (0,0) at the center of the image
x = r*np.cos(theta) + R
y = r*np.sin(theta) + R
#Draw spiral
z = np.empty((x.size + y.size,), dtype=x.dtype)
z[0::2] = x
z[1::2] = y
z = list(z)
img = Image.new('F', (size,size), "black")
img_draw = ImageDraw.Draw(img)
img_draw = img_draw.line(z)
img = np.asarray(img)
img = img[size//2-side//2:size//2+side//2+side%2,
size//2-side//2:size//2+side//2+side%2]
return img
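
#Illustrative sketch (not called by the training loop): draw_spiral returns a
#[side, side] float image after its central crop, which can be thresholded
#into a binary mask of pixels visited by the spiral path. The coverage value
#is made up.
def _example_spiral_mask():
    spiral = draw_spiral(coverage=0.1, side=FLAGS.img_side)
    mask = (spiral > 0).astype(np.float32) #Pixels visited by the spiral
    return spiral, mask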
def average_filter(image):
kernel = tf.ones([5,5,1,1])
filtered_image = tf.nn.conv2d(image, kernel, strides=[1, 1, 1, 1], padding="VALID")
return filtered_image
def pad(tensor, size):
d1_pad = size[0]
d2_pad = size[1]
paddings = tf.constant([[0, 0], [d1_pad, d1_pad], [d2_pad, d2_pad], [0, 0]], dtype=tf.int32)
padded = tf.pad(tensor, paddings, mode="REFLECT")
return padded
def gaussian_kernel(size: int,
mean: float,
std: float,
):
"""Makes 2D gaussian Kernel for convolution."""
d = tf.distributions.Normal(mean, std)
vals = d.prob(tf.range(start = -size, limit = size + 1, dtype = tf.float32))
gauss_kernel = tf.einsum('i,j->ij', vals, vals)
return gauss_kernel / tf.reduce_sum(gauss_kernel)
def blur(image):
gauss_kernel = gaussian_kernel( 2, 0., 2.5 )
#Expand dimensions of `gauss_kernel` for `tf.nn.conv2d` signature
gauss_kernel = gauss_kernel[:, :, tf.newaxis, tf.newaxis]
#Convolve
image = pad(image, (2,2))
return tf.nn.conv2d(image, gauss_kernel, strides=[1, 1, 1, 1], padding="VALID")
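
#Illustrative sketch of the Gaussian blur above: the kernel is the outer
#product of 1D normal densities and is normalised to sum to 1, and the
#reflection padding keeps the blurred output the same spatial size as the
#input. The input shape is an assumption for demonstration.
def _example_blur():
    image = tf.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 1])
    blurred = blur(image) #Same spatial size as `image`
    return blurred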
def calc_generator_losses(img1, img2):
if FLAGS.data_file == "//Desktop-sa1evjv/h/96x96_stem_crops.npy":
img2 = blur(img2) #Gaussian blur
generator_losses = 10*tf.reduce_mean( (img1 - img2)**2, axis=[1,2,3] )
losses = generator_losses
if FLAGS.style_loss:
edges1 = tf.image.sobel_edges(img1)
edges2 = tf.image.sobel_edges(img2)
print("Edges:", edges1)
generator_losses += FLAGS.style_loss*tf.reduce_mean( (edges1 - edges2)**2, axis=[1,2,3,4] )
return generator_losses, losses
def main(unused_argv):
"""Trains the DNC and periodically reports the loss."""
graph = tf.get_default_graph()
action_shape = [FLAGS.batch_size, FLAGS.num_steps, FLAGS.num_actions]
observation_shape = [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size]
full_scan_shape = [FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 1]
partial_scan_shape = [FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 2]
images = np.load(FLAGS.data_file)
images[np.logical_not(np.isfinite(images))] = 0
images = np.stack([norm_img(x) for x in images])
train_images = images[:int(0.8*len(images))]
val_images = images[int(0.8*len(images)):]
train_data_ph, train_iterator = load_data(train_images.shape)
val_data_ph, val_iterator = load_data(val_images.shape)
if FLAGS.is_self_competition:
(full_scans, labels) = train_iterator.get_next()
(val_full_scans, val_labels) = val_iterator.get_next()
full_scans = full_scans[0]
val_full_scans = val_full_scans[0]
else:
(full_scans, ) = train_iterator.get_next()
(val_full_scans, ) = val_iterator.get_next()
if hasattr(tf, 'ensure_shape'):
full_scans = tf.ensure_shape(full_scans, full_scan_shape)
val_full_scans = tf.ensure_shape(val_full_scans, full_scan_shape)
else:
full_scans = tf.reshape(full_scans, full_scan_shape)
        val_full_scans = tf.reshape(val_full_scans, full_scan_shape)
replay = RingBuffer(
action_shape=action_shape,
observation_shape=observation_shape,
full_scan_shape=full_scan_shape,
batch_size=FLAGS.batch_size,
buffer_size=FLAGS.replay_size,
num_past_losses=train_images.shape[0],
)
replay_actions_ph = tf.placeholder(tf.float32, shape=action_shape, name="replay_action")
replay_observations_ph = tf.placeholder(tf.float32, shape=observation_shape, name="replay_observation")
replay_full_scans_ph = tf.placeholder(tf.float32, shape=full_scan_shape, name="replay_full_scan")
partial_scans_ph = tf.placeholder(tf.float32, shape=partial_scan_shape, name="replay_partial_scan")
is_training_ph = tf.placeholder(tf.bool, name="is_training")
if FLAGS.is_noise_decay:
noise_decay_ph = tf.placeholder(tf.float32, shape=(), name="noise_decay")
else:
noise_decay_ph = None
if FLAGS.supervision_iters:
supervision_ph = tf.placeholder(tf.float32, name="supervision")
else:
supervision_ph = FLAGS.supervision
if FLAGS.is_prioritized_replay:
priority_weights_ph = tf.placeholder(tf.float32, shape=[FLAGS.batch_size], name="priority_weights")
if FLAGS.is_self_competition:
past_losses_ph = tf.placeholder(tf.float32, shape=[FLAGS.batch_size], name="past_losses")
batch_size = FLAGS.batch_size
if FLAGS.is_relative_to_spirals:
coverage = FLAGS.num_steps*FLAGS.step_size/FLAGS.img_side**2
spiral = draw_spiral(coverage=coverage, side=FLAGS.img_side)
ys = [1/i**2 for i in range(9, 2, -1)]
xs = [np.sum(draw_spiral(coverage=c, side=FLAGS.img_side)) / FLAGS.img_side**2 for c in ys]
        ub_idx = next(i for i, x in enumerate(xs) if x > coverage)
lb = xs[ub_idx-1]
ub = xs[ub_idx]
input_coverage = ( (coverage - lb)*X + (ub - coverage)*Y ) / (lb - ub)
actor = Agent(
num_outputs=FLAGS.num_actions,
is_new=True,
noise_decay=noise_decay_ph,
sampled_full_scans=full_scans,
val_full_scans=val_full_scans,
name="actor"
)
target_actor = Agent(num_outputs=FLAGS.num_actions, name="target_actor")
critic = Agent(num_outputs=1, is_double_critic=True, name="critic")
target_critic = Agent(num_outputs=1, is_double_critic=True, name="target_critic")
new_observations, new_actions = actor.get_new_experience()
#Last actions are unused
replay_observations = replay_observations_ph[:,:-1,:]
replay_actions = replay_actions_ph[:,:-1,:]
#First action must be added for actors (not critics)
start_actions = tf.ones([FLAGS.batch_size, 1, FLAGS.num_actions])/np.sqrt(2)
started_replay_actions = tf.concat([start_actions, replay_actions[:,:-1,:]], axis=1)
actions = actor(replay_observations, started_replay_actions)
if FLAGS.is_target_actor:
target_actions = target_actor(replay_observations, started_replay_actions)
elif FLAGS.supervision != 1:
target_actions = tf.stop_gradient(actions)
#The last action is never used, and the first action is diagonally north-east
#Shifting because network expect actions from previous steps to be inputted
#start_actions = tf.ones([FLAGS.batch_size, 1, FLAGS.num_actions])/np.sqrt(2)
#actions = tf.concat([start_actions, actions[:, :-1, :]], axis=1)
#target_actions = tf.concat([start_actions, target_actions[:, :-1, :]], axis=1)
actor_actions = tf.concat([replay_actions, actions], axis=-1)
qs = critic(replay_observations, actor_actions)
critic_qs = qs[:,:,:1]
actor_qs = qs[:,:,1:]
if FLAGS.is_target_critic:
target_actor_actions = tf.concat([replay_actions, target_actions], axis=-1)
target_actor_qs = target_critic(replay_observations, target_actor_actions)[:,:,1:]
target_actor_qs = tf.stop_gradient(target_actor_qs)
elif FLAGS.supervision != 1:
target_actor_qs = actor_qs#critic(replay_observations, target_actor_actions)[:,:,1:]
target_actor_qs = tf.stop_gradient(target_actor_qs)
if not FLAGS.is_infilled:
generator = Generator(name="generator", is_training=is_training_ph)
generation = generator(partial_scans_ph)
else:
generation = tf.py_func(fill, [partial_scans_ph], tf.float32)
if hasattr(tf, 'ensure_shape'):
generation = tf.ensure_shape(generation, full_scan_shape)
else:
generation = tf.reshape(generation, full_scan_shape)
generator_losses, losses = calc_generator_losses(generation, replay_full_scans_ph)
if FLAGS.is_target_generator and not FLAGS.is_infilled:
target_generator = Generator(name="target_generator", is_training=is_training_ph)
target_generation = target_generator(partial_scans_ph)
if FLAGS.is_minmax_reward:
errors = (target_generation - replay_full_scans_ph)**2
losses = tf.reduce_max( average_filter(errors), reduction_indices=[1,2,3] )
else:
target_generator_losses, losses = calc_generator_losses(target_generation, replay_full_scans_ph)
losses = target_generator_losses #For RL
else:
if FLAGS.is_minmax_reward:
errors = (generation - replay_full_scans_ph)**2
losses = tf.reduce_max( average_filter(errors), reduction_indices=[1,2,3] )
val_observations, val_actions = actor.get_val_experience()
unclipped_losses = losses
if FLAGS.is_positive_qs and (FLAGS.is_target_critic or FLAGS.supervision != 1):
target_actor_qs = tf.nn.relu(target_actor_qs)
if FLAGS.norm_generator_losses_decay:
mu = tf.get_variable(name="loss_mean", initializer=tf.constant(1., dtype=tf.float32))
mu_op = mu.assign(FLAGS.norm_generator_losses_decay*mu+(1-FLAGS.norm_generator_losses_decay)*tf.reduce_mean(losses))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mu_op)
losses /= tf.stop_gradient(mu)
if FLAGS.is_clipped_reward:
losses = alrc(losses)
if FLAGS.is_self_competition:
self_competition_losses = tf.where(
past_losses_ph > unclipped_losses,
tf.ones([FLAGS.batch_size]),
tf.zeros([FLAGS.batch_size])
)
losses += self_competition_losses
if FLAGS.over_edge_penalty:
positions = (
0.5 + #middle of image
FLAGS.step_size/(np.sqrt(2)*FLAGS.img_side) + #First step
(FLAGS.step_size/FLAGS.img_side)*tf.cumsum(replay_actions_ph[:,:-1,:], axis=1) # Actions
)
#new_positions = (
# positions - replay_actions_ph[:,:-1,:] + #Go back one action
# (FLAGS.step_size/FLAGS.img_side)*actions #New actions
# )
is_over_edge = tf.logical_or(tf.greater(positions, 1), tf.less(positions, 0))
is_over_edge = tf.logical_or(is_over_edge[:,:,0], is_over_edge[:,:,1])
over_edge_losses = tf.where(
is_over_edge,
FLAGS.over_edge_penalty*tf.ones(is_over_edge.get_shape()),
tf.zeros(is_over_edge.get_shape())
)
over_edge_losses = tf.cumsum(over_edge_losses, axis=1)
if FLAGS.supervision > 0 or FLAGS.is_advantage_actor_critic:
supervised_losses = []
for i in reversed(range(FLAGS.num_steps-1)):
if i == FLAGS.num_steps-1 - 1: #Extra -1 as idxs start from 0
step_loss = tf.expand_dims(losses, axis=-1)
else:
step_loss = FLAGS.gamma*step_loss
if FLAGS.over_edge_penalty:
step_loss += over_edge_losses[:,i:i+1]
supervised_losses.append(step_loss)
supervised_losses = tf.concat(supervised_losses, axis=-1)
if FLAGS.supervision < 1:
bellman_losses = tf.concat(
[FLAGS.gamma*target_actor_qs[:,1:,0], tf.expand_dims(losses, axis=-1)],
axis=-1
)
if FLAGS.over_edge_penalty:
bellman_losses += over_edge_losses
bellman_losses = supervision_ph * supervised_losses + (1 - supervision_ph) * bellman_losses
else:
bellman_losses = supervised_losses
if FLAGS.is_prioritized_replay:
unweighted_critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2, axis=-1 )
critic_losses = tf.reduce_mean( priority_weights_ph*unweighted_critic_losses )
else:
critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2 )
if FLAGS.is_biased_prioritized_replay:
unweighted_critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2, axis=-1 )
if FLAGS.is_clipped_critic:
actor_qs = alrc(actor_qs)
if FLAGS.is_advantage_actor_critic:
actor_losses = tf.reduce_mean( supervised_losses - actor_qs[:,:,0] )
else:
actor_losses = tf.reduce_mean( actor_qs )
#critic_losses /= FLAGS.num_steps
#actor_losses /= FLAGS.num_steps
#Outputs to provide feedback for the developer
info = {
"actor_losses": actor_losses,
"critic_losses": critic_losses,
"generator_losses": tf.reduce_mean(unclipped_losses)
}
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
info.update( {"priority_weights": unweighted_critic_losses} )
if FLAGS.is_self_competition:
info.update( {"unclipped_losses": unclipped_losses} )
outputs = {
"generation": generation[0,:,:,0],
"truth": replay_full_scans_ph[0,:,:,0],
"input": partial_scans_ph[0,:,:,0]
}
history_op = {
"actions": new_actions,
"observations": new_observations,
"full_scans": full_scans
}
if FLAGS.is_self_competition:
history_op.update( {"labels": labels} )
##Modify actor gradients
#[actor_grads] = tf.gradients(actor_losses, replay_actions_ph)
#actor_losses = overwrite_grads(actions, actor_grads)
start_iter = FLAGS.start_iter
train_iters = FLAGS.train_iters
config = tf.ConfigProto()
config.gpu_options.allow_growth = True #Only use required GPU memory
#config.gpu_options.force_gpu_compatible = True
model_dir = FLAGS.model_dir
log_filepath = model_dir + "log.txt"
save_period = 1; save_period *= 3600
log_file = open(log_filepath, "a")
with tf.Session(config=config) as sess:
        if FLAGS.is_target_actor:
            if FLAGS.update_frequency <= 1:
                update_target_actor_op = target_update_ops(target_actor, actor)
            else:
                update_target_actor_op = []
            initial_update_target_actor_op = target_update_ops(target_actor, actor, decay=0)
        else:
            update_target_actor_op = []
            initial_update_target_actor_op = []
        if FLAGS.is_target_critic:
            if FLAGS.update_frequency <= 1:
                update_target_critic_op = target_update_ops(target_critic, critic)
            else:
                update_target_critic_op = []
            initial_update_target_critic_op = target_update_ops(target_critic, critic, decay=0)
        else:
            update_target_critic_op = []
            initial_update_target_critic_op = []
if FLAGS.is_target_generator and not FLAGS.is_infilled:
if FLAGS.update_frequency <= 1:
update_target_generator_op = target_update_ops(target_generator, generator, l2_norm=FLAGS.L2_norm)
else:
update_target_generator_op = []
initial_update_target_generator_op = target_update_ops(target_generator, generator, decay=0)
else:
update_target_generator_op = []
initial_update_target_generator_op = []
initial_update_target_network_ops = (
initial_update_target_actor_op +
initial_update_target_critic_op +
initial_update_target_generator_op
)
actor_lr = FLAGS.actor_lr
critic_lr = FLAGS.critic_lr
if FLAGS.is_cyclic_generator_learning_rate and not FLAGS.is_infilled:
generator_lr = tf.placeholder(tf.float32, name="generator_lr")
else:
generator_lr = FLAGS.generator_lr
#critic_rep = (critic_qs[:,:,0] - bellman_losses)**2
#ps = [critic_qs[0,:,0], target_actor_qs[0,:,0], bellman_losses[0], critic_rep[0]]
#ps = [critic.trainable_variables[0], target_critic.trainable_variables[0]]
ps = []
#p = bellman_losses[0]
#p = generation[0,:,:,0]
train_op_dependencies = [tf.print(p) for p in ps] + tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if not FLAGS.update_frequency:
update_target_network_ops = (
update_target_actor_op +
update_target_critic_op +
update_target_generator_op
)
train_op_dependencies += update_target_network_ops
train_ops = []
with tf.control_dependencies(train_op_dependencies):
actor_train_op = tf.train.AdamOptimizer(learning_rate=actor_lr).minimize(
loss=actor_losses, var_list=actor.trainable_variables)
critic_train_op = tf.train.AdamOptimizer(learning_rate=critic_lr).minimize(
loss=critic_losses, var_list=critic.trainable_variables)
train_ops += [actor_train_op, critic_train_op]
if not FLAGS.is_infilled:
generator_train_op = tf.train.AdamOptimizer(learning_rate=generator_lr).minimize(
loss=generator_losses, var_list=generator.trainable_variables)
train_ops.append(generator_train_op)
else:
generator_train_op = tf.no_op()
feed_dict = {}
sess.run(tf.global_variables_initializer(), feed_dict=feed_dict)
saver = tf.train.Saver(max_to_keep=1)
noteable_saver = tf.train.Saver(max_to_keep=2)
if start_iter:
saver.restore(
sess,
tf.train.latest_checkpoint(model_dir+"model/")
)
else:
if len(initial_update_target_network_ops):
sess.run(initial_update_target_network_ops, feed_dict=feed_dict)
sess.run(train_iterator.initializer, feed_dict={train_data_ph: train_images})
sess.run(val_iterator.initializer, feed_dict={val_data_ph: val_images})
time0 = time.time()
for iter in range(start_iter, train_iters):
if iter < FLAGS.replay_size or not iter % FLAGS.avg_replays:
#Add experiences to the replay
feed_dict = {is_training_ph: np.bool(True)}
if FLAGS.is_noise_decay:
noise_decay = np.float32( (train_iters - iter)/train_iters )
feed_dict.update( {noise_decay_ph: noise_decay} )
history = sess.run(
history_op,
feed_dict=feed_dict)
replay.add(**history)
#Sample experiences from the replay
if FLAGS.is_prioritized_replay:
sampled_actions, sampled_observations, replay_sampled_full_scans, sample_idxs, sampled_priority_weights = replay.get()
elif FLAGS.is_biased_prioritized_replay:
sampled_actions, sampled_observations, replay_sampled_full_scans, sample_idxs = replay.get()
elif FLAGS.is_self_competition:
sampled_actions, sampled_observations, replay_sampled_full_scans, sampled_labels, sampled_past_losses = replay.get()
else:
sampled_actions, sampled_observations, replay_sampled_full_scans = replay.get()
replay_partial_scans = construct_partial_scans(sampled_actions, sampled_observations)
if not FLAGS.is_infilled:
sampled_full_scans = []
partial_scans = []
spiral_scans = []
for sampled_full_scan, partial_scan in zip(replay_sampled_full_scans, replay_partial_scans):
c = np.random.randint(0, 8)
sampled_full_scans.append( flip_rotate(sampled_full_scan, c) )
partial_scans.append( flip_rotate(partial_scan, c) )
if FLAGS.is_relative_to_spirals:
spiral_scan = spiral * sampled_full_scan
spiral_scans.append( flip_rotate(spiral_scan, c) )
sampled_full_scans = np.stack( sampled_full_scans )
partial_scans = np.stack( partial_scans )
else:
sampled_full_scans = replay_sampled_full_scans
partial_scans = replay_partial_scans
feed_dict = {
replay_actions_ph: sampled_actions,
replay_observations_ph: sampled_observations,
replay_full_scans_ph: sampled_full_scans,
partial_scans_ph: partial_scans,
is_training_ph: np.bool(True)
}
if FLAGS.is_prioritized_replay:
feed_dict.update({priority_weights_ph: sampled_priority_weights})
if FLAGS.supervision_iters:
supervision = FLAGS.supervision_start + min(iter, FLAGS.supervision_iters)*(FLAGS.supervision_end-FLAGS.supervision_start) / FLAGS.supervision_iters
feed_dict.update( {supervision_ph: supervision } )
if FLAGS.is_self_competition:
feed_dict.update( {past_losses_ph: sampled_past_losses} )
if FLAGS.is_cyclic_generator_learning_rate and not FLAGS.is_infilled:
envelope = FLAGS.generator_lr * 0.75**(iter/(train_iters//5))
cycle_half = train_iters//(10 - 1)
cycle_full = 2*cycle_half
cyclic_sawtooth = 1 - (min(iter%cycle_full, cycle_half) - min(iter%cycle_full - cycle_half, 0))/cycle_half
cyclic_lr = envelope*(0.2 + 0.8*cyclic_sawtooth)
feed_dict.update( {generator_lr: np.float32(cyclic_lr)} )
#Train
if iter in [0, 100, 500] or not iter % 25_000 or (0 <= iter < 10_000 and not iter % 1000) or iter == start_iter:
_, step_info, step_outputs = sess.run([train_ops, info, outputs], feed_dict=feed_dict)
for k in step_outputs:
save_loc = FLAGS.model_dir + k + str(iter)+".tif"
Image.fromarray( (0.5*step_outputs[k]+0.5).astype(np.float32) ).save( save_loc )
else:
_, step_info = sess.run([train_ops, info], feed_dict=feed_dict)
if FLAGS.update_frequency and not iter % FLAGS.update_frequency:
sess.run(initial_update_target_network_ops, feed_dict=feed_dict)
                if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
                    replay.update_priorities(sample_idxs, step_info["priority_weights"])
if FLAGS.is_self_competition:
replay.update_past_losses(sampled_labels, step_info["unclipped_losses"])
output = f"Iter: {iter}"
for k in step_info:
if k not in ["priority_weights", "unclipped_losses"]:
output += f", {k}: {step_info[k]}"
if not iter % FLAGS.report_freq:
print(output)
#if "nan" in output:
# saver.restore(
# sess,
# tf.train.latest_checkpoint(model_dir+"model/")
# )
try:
log_file.write(output)
except:
while True:
print("Issue writing log.")
time.sleep(1)
log_file = open(log_filepath, "a")
try:
log_file.write(output)
break
except:
continue
if iter in [train_iters//2-1, train_iters-1]:
noteable_saver.save(sess, save_path=model_dir+"noteable_ckpt/model", global_step=iter)
time0 = time.time()
start_iter = iter
elif time.time() >= time0 + save_period:
saver.save(sess, save_path=model_dir+"model/model", global_step=iter)
time0 = time.time()
val_losses_list = []
for iter in range(0, FLAGS.val_examples//FLAGS.batch_size):
#Add experiences to the replay
feed_dict = {is_training_ph: np.bool(True)}
sampled_actions, sampled_observations, sampled_full_scans = sess.run(
[val_actions, val_observations, val_full_scans],
feed_dict=feed_dict
)
partial_scans = construct_partial_scans(sampled_actions, sampled_observations)
feed_dict = {
replay_actions_ph: sampled_actions,
replay_observations_ph: sampled_observations,
replay_full_scans_ph: sampled_full_scans,
partial_scans_ph: partial_scans,
is_training_ph: np.bool(False)
}
val_losses = sess.run( unclipped_losses, feed_dict=feed_dict )
val_losses_list.append( val_losses )
val_losses = np.concatenate(tuple(val_losses_list), axis=0)
np.save(model_dir + "val_losses.npy", val_losses)
if __name__ == "__main__":
tf.app.run()
|
the-stack_0_27977
|
# adapted from https://ogertranslate.ml/translation/de-oger.js
# originally authored by https://github.com/MoriPastaPizza
translations = {
"Rainer": ["Ruiner", "Reiner", "Dreger", "Kaschber", "Legasdeniger", "Fettsack", "viddl Iddaliener", "Butter", "Mastoger", "Suppengmogo", "Speggi", "Vierteltonner"],
"Rainerle": ["Ruinerle", "Reinerle", "Dregerle", "Kaschberle", "Legasdenigerle", "Fettsackle", "viddl Iddaliener", "Butter", "Mastogerle", "Suppengmogole", "Speggi"],
"Winkler": ["Wongl", "Winggl", "Lard", "Bummsdi", "Golem", "Oger", "Jutubber", "Fettsack", "Mastoger", "Winklar", "Dr.Dr.h.c.M.A Winkler", "Speggi"],
"Drachenlord": ["Fettsack", "Wiggsgrübbl", "Speckeimer", "Lügenlord", "Hoch IQ Lord", "Jutubber", "Butter Golem", "Mastoger", "Mastsau", "Oger", "Suppengmogo", "Gumbllord", "Lustlord", "Informatiklord", "Speggi", "Vierteltonner", "Animewelt Lord", "Diabetesdumbo", "selbständiger Schichtarbeiter"],
"Lord": ["Lard", "Lerd", "Bummsdi", "Lügenlord", "Schwitzlord", "Sifflord", "Fettsack", "begnadeter Jutubber", "Butter Golem", "Oger", "Suppengmogo", "Arbeitslord"],
"Video": ["fideo", "fidio"],
"Videos": ["fideos", "fidios"],
"Drache": ["Dreche", "Dreger", "Fettsack", "viddl Iddaliener", "Wiggsgrübbl", "Net der Drache verdammte Aggst", "Speckeimer", "Suppengmogo", "Wuchtbrummer"],
"Bauch": ["Plautze", "Wampe", "Kessel", "Wuchtbrumme"],
"Haus": ["Schanze", "Schimmelschanze", "Ruine"],
"Schanze": ["Schanze", "Schimmelschanze", "Ruine"],
"alter": ["alda", "alla "],
"Alter": ["Alda", "Alla "],
"hallo": [*["meddl loide"]*2, "servus und wilkommne", "servus unn an herdsliches wilkommne", "gonitschiwa"],
"Hallo": [*["Meddl loide"]*2, "Servus und wilkommne", "Servus unn an herdsliches wilkommne", "Gonitschiwa"],
"leute": ["loide"],
"Leute": ["Loide"],
"schmeißen": ["nausgschmaßen"],
"geschmissen": ["nausgschmaßt"],
"schmeißt": ["nausschmaßen"],
"schmeiß": ["schmuaß"],
"blasen": ["blubber"],
"boxen": ["Brüggel Rausschmiß"],
"billig": ["billich"],
"Axt": ["Aggst"],
"Polizei": ["Bolizei"],
"Popcorn": ["Bobgorn"],
"popcorn": ["Bobgorn"],
"Post": ["Boscht"],
"post": ["boscht"],
"Idiot": ["Kaggnazi", "Wiggsgrübbl", "Affennecher", "Spaggn"],
"idiot": ["Kaggnazi", "Wiggsgrübbl", "Affennecher", "Spaggn"],
"Nazi": ["Kaggnazi"],
"nazi": ["kaggnazi"],
"Rudi": ["Rudi", "Rudi aus Buddeln", "SS-Obersturmbannführer Rudolf Winkler", "Nazi-Kriegsverbrecher Rudolf Winkler"],
"Rita": ["Rita", "Riter", "Rita aus Weiden"],
"Ramona": ["Ramona", "Ramoner"],
"reddit": ["Lachschon"],
"Musik hören": ["headbangen"],
"Musik gehört": ["geheadbangt"],
"dumm": ["brainschaden"],
"Energy": ["Enertschie"],
"energy": ["enertschie"],
"ein": ["a", "oa"],
"fit": ["fidd"],
"nicht hören": ["Ich hör dich ned du Spack, ich hab Kopfhörer auf", "can you not hear"],
"Job": ["Tschobb"],
"job": ["Tschobb"],
"Hater": ["Haider"],
"hater": ["Haider"],
"Hate": ["Haid"],
"hate": ["Haid"],
"zerstören": ["zermeddln"],
"zerstört": ["zermeddld"],
"kann": ["koa"],
"Boneclinks": ["Bonklix"],
"boneclinks": ["bonklix"],
"jetzt": ["etzadla"],
"ja?": ["und weida?"],
"na und?": ["und weida?"],
"na und": ["und weida?"],
"Geld": ["Barrne"],
"Euro": ["Barrne €"],
"geld": ["Barrne"],
"euro": ["Barrne €"],
"Upvote": ["Barrne"],
"upvote": ["Barrne"],
"wütend": ["neutral"],
"Wütend": ["Neutral"],
"gemacht": ["nausgrendert"],
"raus gehaun": ["nausgrendert"],
"raus hauen": ["nausrendern"],
"gegessen": ["neigschürt", "neigerendert", "neigstopft", "neimeddld", "in's gsicht gedrüggd", "gnadenlos in's gsicht nei gedrüggt"],
"fressen": ["neinschüren", "reinnrendern", "neistopfen", "neimeddln"],
"gefressen": ["neigschürt", "neigerendert", "neigstopft", "neimeddld"],
"essen": ["neinschüren", "reinnrendern", "neistopfen", "neimeddln", "in's gsicht drüggen", "gnadenlos in's gsicht drüggen"
],
"isst": ["neirenderd", "neischürd", "neistopft", "neimeddlt", "in's gsicht drüggt", "gnadenlos in's gsicht drüggt"],
"Angst": ["wie se immer zuggen"],
"angst": ["wie se immer zuggen"],
"freundin": ["froindin", "lel als ob", "Gummipubbe"],
"Freundin": ["Froindin", "Lel als ob", "Gummipubbe"],
"Gummipuppe": ["Froindin"],
"gummipuppe": ["Froindin"],
"Freund": ["Gumbl", "Froind"],
"freund": ["Gumbl", "Froind"],
"Freunde": ["ihr Wahnsinnichen"],
"Penis": ["Speer", "Pimml", "Späher", "Pensi"],
"penis": ["Speer", "Pimml", "Späher", "pensi"],
"Katana": ["Blechklinge", "hamstori hanser schwerd"],
"katana": ["Blechklinge", "hamstori hanser schwerd"],
"sagst": ["sachste"],
"programmieren": ["HDML proggrammiert", "in Gimb nei speichern"],
"sauer": ["mett"],
"Kumpel": ["Gumbl"],
"kumpel": ["gumbl"],
"Jungfrau": ["ungefickt"],
"jungfräulich": ["ungefickt"],
"Frau": ["Mulle", "Weib"],
"frau": ["mulle", "weib"],
"Frauen": ["Mullen", "Weiber"],
"bedeuten": ["imblementieren"],
"bedeutet": ["imblementiert"],
"nice": ["nais"],
"gelaufen": ["gemeddeld"],
"laufen": ["meddeln"],
"amen": ["OHNE DREGG ALDER"],
"so ist es": ["OHNE DREGG ALDER"],
"Stock": ["Sdogg"],
"mal": ["mol", "moal"],
"einma": ["amol ", "amoal "],
"einmal": ["amol ", "amoal "],
"Gimp": ["Gimb"],
"Cola": ["Koler"],
"cola": ["Koler"],
"raus": ["naus"],
"Raus": ["Naus"],
"wie heißt du": ["anaddanowanaiwa Nandeska"],
"freut mich": ["hadschimi masde"],
"Zelda": ["Zelad bride of the Wind", "Tselda bref of se wild", "Tselda"],
"Bier": ["Löschzwerg", "Vigginger-Blud"],
"Manieren": ["Alda kein Erziehung genossen?", "Eure Eltern würden sich schämen"],
"manieren": ["Alda kein Erziehung genossen?", "Eure Eltern würden sich schämen"],
"Franken": ["Meddlfranggen"],
"franken": ["meddlfranggen"],
"mittel": ["meddl"],
"Mittel": ["Meddl"],
"schlau": ["hoher Ikuh"],
"clever": ["hoher Ikuh"],
"klug": ["hoher Ikuh"],
"gut in": ["hoher Ikuh in"],
"IQ": ["Ikuh"],
"japanisch": ["jaboanisch"],
"Früher": [" mit 4 mit'm Vadda im Wald "],
"früher": [" mit 4 mit'm Vadda im Wald "],
"Life is strange": ["Life is stranger"],
"Kleinstadt": ["Klingelständer"],
"speichern": ["in Gimb neispeichern"],
"Kampfsport": ["Muay-Thai"],
"kampfsport": ["Muay-Thai"],
"hacken": ["in Gimb neispeichern", "in Gimb neirendern"],
"gehackt": ["in Gimb neispeichert", "in Gimb neigrendert"],
"arbeitet": ["Schneidet, rendert, läd-hoch", "Frisst, Wiggst, Zockt", "Led's plaiet"],
"Arbeit": ["Schneiden, rendern, hochladen", "Fressne, Wiggsne, Zockne", "Led's plaien"],
"arbeit": ["schneiden, rendern, hochladen", "fressne, Wiggsne, Zockne", "Led's plaien"],
"arbeiten": ["schneiden, rendern, hochladen", "fressne, Wiggsne, Zockne", "Led's plain"],
"besonders": ["höchste Wesne"],
"Gott": ["Dreche himself"],
"gott": ["Dreche himself"],
"Wahnsinnig": ["Wahnsinnicn"],
"Wahnsinnigen": ["Wahnsinnichen"],
"geschaut": ["gschaut"],
"rein": ["'nei"],
"arbeitslos": ["abbeidslos"],
"derjenige": ["derjeniche "],
"Derjenige": ["Derjeniche "],
"poetry slam": ["boedri slemm"],
"Poetry slam": ["boedri slemm"],
"Haut": ["kein Organ"],
"haut": ["kein Organ"],
"wtf": ["Wottse fack"],
"WTF": ["Wottse fack"],
"Wtf": ["Wottse fack"],
"what the fuck": ["Wottse fack"],
"What the fuck": ["Wottse fack"],
"Wow": ["Alta Falta"],
"wow": ["Alta Falta"],
"verpiss": ["fapiss"],
"hübsch": ["geiles stück du geiles"],
"Geil": ["geiles stück du geiles"],
"geil": ["geiles stück du geiles"],
"ist gut": ["bin gerückt"],
"Ist gut": ["Bin gerückt"],
"gut": ["richtig und wichtig!", "naise Sache"],
"nicht": ["ned"],
"auf": ["uff"],
"ist er": ["issa"],
"wieder": ["widda"],
"Pizza": ["Piddsa"],
"pizza": ["piddsa"],
"Bombe": ["risiche Bombe"],
"Böller": ["risiche Bombe"],
"Sprengstoff": ["risiche Bombe"],
"Dynamit": ["risiche Bombe"],
"TNT": ["risiche Bombe"],
"bannen": ["den Banhammer schwingen"],
"gebannt": ["vom Banhammer erschlagen"],
"bann": ["schwing den Banhammer"],
"Frosch": ["HAHAHAHA a Frosch"],
"frosch": ["HAHAHAHA a Frosch"],
"Biker": ["Beiker"],
"biker": ["beiker"],
"awesome": ["awsem"],
"Thread": ["Treid"],
"thread": ["treid"],
"Why": ["Wai"],
"why": ["wai"],
"Bandana": ["Tabaktuch"],
"Claire": ["Klähger"],
"Cloe": ["Klöten"],
"cloe": ["klöten"],
"Collage": ["Kolasche"],
"collage": ["kolasche"],
"Computer": ["Gombjuta"],
"computer": ["gombjuta"],
"Challenge": ["Schälensch"],
"challenge": ["schälensch"],
"Details": ["Deteis"],
"details": ["deteis"],
"Flughafen": ["Flugstation"],
"flughafen": ["flugstation"],
"Footage": ["Futitch"],
"footage": ["futitch"],
"Genre": ["Genere"],
"genre": ["genere"],
"Hass": ["-"],
"hass": ["-"],
"Interview": ["Intervieu"],
"interview": ["intervieu"],
"Ironie": ["Iruni"],
"ironie": ["iruni"],
"Mettwurst": ["Meddworsd"],
"mettwurst": ["meddworsd"],
"Mittwoch": ["Mettwoch", "Meddlwoch"],
"mittwoch": ["mettwoch", "meddlwoch"],
"nationalsozialistisch": ["narzistisch"],
"Nationalsozialist": ["Narzist"],
"nationalsozialist": ["narzist"],
"Paparazzi": ["Parpazins"],
"paparazzi": ["parpazins"],
"PC": ["Bezeh"],
"Pc": ["Bezeh"],
"pc": ["Bezeh"],
"Pointe": ["Puernte"],
"pointe": ["puernte"],
"Realtalk": ["Real Tack"],
"realtalk": ["Real Tack"],
"recherchieren": ["reschaschieren"],
"Snapchat": ["Snapchet"],
"snapchat": ["snapchet"],
"Spaghetti": ["Spaketie"],
"spaghetti": ["spaketie"],
"Summer Breeze": ["Zummer Briis"],
"summer breeze": ["Zummer Briis"],
"summerbreeze": ["Zummer Briis"],
"Summerbreeze": ["Zummer Briis"],
"Twitter": ["Twidda"],
"twitter": ["Twidda"],
"Weiter so": ["Meddl on"],
"weiter so": ["meddl on"],
"Polizist": ["Herr Müller"],
"polizist": ["Herr Müller"],
"im ernst": ["ohne Spass alda", "ohne dregg alda"],
"im Ernst": ["ohne Spass alda", "ohne dregg alda"],
"Verhütung": ["Ferienhütte"],
"verhütung": ["Ferienhütte"],
"Verprügeln": ["Brügel rausschmeißen"],
"verprügeln": ["Brügel rausschmeißen"],
"Younow": ["Junau"],
"younow": ["Junau"],
"you now": ["Junau"],
"You Now": ["Junau"],
"You-Now": ["Junau"],
"you-now": ["Junau"],
"Youtube": ["Jutjub"],
"YouTube": ["Jutjub"],
"youtube": ["Jutjub"],
"You tube": ["Jutjub"],
"You Tube": ["Jutjub"],
"You-tube": ["Jutjub"],
"You-Tube": ["Jutjub"],
"typ": ["düb"],
"Typ": ["Düb"],
"dünn": ["dünn (Du meintest wohl fett)", "hahaha"],
"Dünn": ["Dünn (Du meintest wohl fett)", "hahaha"],
"schlank": ["schlank (Du meintest wohl fett)", "hahaha"],
"Schlank": ["Schlank (Du meintest wohl fett)", "hahaha"],
"Gehirn": ["Hörn"],
"gehirn": ["hörn"],
"Rewe": ["Zwingel"],
"rewe": ["Zwingel"],
"Ezio": ["ETZIO, ALDA", "ETZIO", "ÄZZIOH", "ÄZZIO DU BIST EIN VERDAMMTER HURENSOHN"],
"ETZIO": ["ETZIO, ALDA", "ETZIO", "ÄZZIOH", "ÄZZIO DU BIST EIN VERDAMMTER HURENSOHN"],
"trivial": ["trivago"],
"Trivial": ["Trivago"],
"Photoshop": ["fotoschobb"],
"dass": ["das"],
"wenn": ["wen"],
"ficken": ["positzioniren", "bositzioniren"],
"wichsen": ["wiggsne"],
"masturbieren": ["wiggsne"],
"Sexschreiben": ["seggsschreibne"],
"PornHub": ["PH"],
"Pornhub": ["PH"],
"pornhub": ["PH"],
"warum": ["warummäh"],
"Warum": ["Warummäh"],
"Juden": ["jetzt der Drache"],
"juden": ["jetzt der Drache"],
"Jude": ["jetzt der Drache"],
"jude": ["jetzt der Drache"],
"TL;DR": ["könnt ihr selber lesen, bin zu faul"],
"tl;dr": ["könnt ihr selber lesen, bin zu faul"],
"Danke": ["Danke dafür iBlali"],
"weniger": ["wenicher"],
"wenig": ["a weng"],
"fake": ["feek"],
"Fakemulle": ["Feekmulle", "Beerchen"],
"Halts Maul": ["HALT *atmen* DEINE *atmen* FRESSE"],
"halts Maul": ["HALT *atmen* DEINE *atmen* FRESSE"],
"Halts maul": ["HALT *atmen* DEINE *atmen* FRESSE"],
"halts maul": ["HALT *atmen* DEINE *atmen* FRESSE"],
"tatsächlich": ["tazächlich", "tadzächlich", "etzala tazächlich sogar", "etzala tadzächlich sogar"],
"Besuche": ["Pilgerreisen"],
"Besuch": ["Pilgerreise"],
"implizier": ["implementier"],
"Mund": ["Schlund"],
"Gerade": ["Grad", "Jeds grad", "Etzadla grad", "Etzadla"],
"gerade": ["grad", "jeds grad", "etzadla grad", "etzadla"],
"...": ["und so", "und so weida", "joaa", "ähh", "... ..."],
"Metal": ["Meddl"],
"metal": ["meddl"]
}
questionMark = [
"? Oda wat?",
", hä?",
" und weida?",
*["?"]*3
]
exclamationMark = [
", etzala!",
", tazächlich!",
", tazächlich sogar!",
", hätt ich gsachd!",
", alda!",
", des bascht scho!",
"!",
"!"
]
dot = [" hätt ich gsachd.",
", des bascht scho.",
", alda.",
", tadsächlich sogar.",
". Etzala.",
*["."]*4
]
colon = [", des heißd im Glaadegsd:",
":",
" häd ich jeds gsacht:"
]
twistedChars = {
"hen$": ["hn$"],
"gegen": ["gechen"],
"Gegen": ["Gechen"],
"en$": ["ne$"],
"tz": ["ds"],
"tel$": ["dl$"],
"ph": ["f"],
"Ph": ["F"],
"pa": ["boa"],
"ck": ["gg"],
"Sp": ["Schb"],
"sp": ["schb"],
"St": ["Schd"],
"st": ["schd"],
"eug$": ["euch$"],
"t": ["d"],
"T": ["D"],
"v": ["f"],
"V": ["F"],
"v": ["f"],
"v": ["w"],
"V": ["W"],
"xx": ["ggs"],
"XX": ["gss"],
"x": ["ggs"],
"X": ["ggs"],
"p": ["b"],
"P": ["B"],
"k": ["g"],
"K": ["G"],
"^gem": ["^gm"],
"^Gem": ["^Gm"],
"^ges": ["^gs"],
"^Ges": ["Gs"],
"tag$": ["dach$"],
"...$": [" und so$", " und so weida$", " joaa$", " ähh$", "... ...$"]
}
quotationMark = [" *ferstellt Stimme:* \"Üh, "]
punctuations = {'.': dot, '!': exclamationMark, '?': questionMark, ',': [','], ':': colon}
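
# Minimal illustrative sketch of how the `translations` table might be applied;
# the real replacement logic (including `twistedChars`, whose "^" and "$"
# markers denote word starts and ends, and the punctuation tables above) lives
# in the original de-oger.js and is not reproduced here.
def _example_translate(text):
    import random
    words = text.split(" ")
    return " ".join(random.choice(translations[w]) if w in translations else w
                    for w in words)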
|
the-stack_0_27978
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Print a list of pages, as defined by page generator parameters.
Optionally, it also prints page content to STDOUT or save it to a file
in the current directory.
These parameters are supported to specify which pages titles to print:
-format Defines the output format.
Can be a custom string according to python string.format() notation
or can be selected by a number from following list
(1 is default format):
1 - u'{num:4d} {page.title}'
--> 10 PageTitle
2 - u'{num:4d} [[{page.title}]]'
--> 10 [[PageTitle]]
3 - u'{page.title}'
--> PageTitle
4 - u'[[{page.title}]]'
--> [[PageTitle]]
5 - u'{num:4d} \03{{lightred}}{page.loc_title:<40}\03{{default}}'
--> 10 localised_Namespace:PageTitle (colorised in lightred)
6 - u'{num:4d} {page.loc_title:<40} {page.can_title:<40}'
--> 10 localised_Namespace:PageTitle
canonical_Namespace:PageTitle
7 - u'{num:4d} {page.loc_title:<40} {page.trs_title:<40}'
--> 10 localised_Namespace:PageTitle
outputlang_Namespace:PageTitle
(*) requires "outputlang:lang" set.
num is the sequential number of the listed page.
An empty format is equal to -notitle and just shows the total
amount of pages.
-outputlang Language for translation of namespaces.
-notitle Page title is not printed.
-get Page content is printed.
-save Save Page content to a file named as page.title(as_filename=True).
Directory can be set with -save:dir_name
            If no dir is specified, the current directory will be used.
-encode File encoding can be specified with '-encode:name' (name must be
a valid python encoding: utf-8, etc.).
If not specified, it defaults to config.textfile_encoding.
-put: Save the list to the defined page of the wiki. By default it does
            not overwrite an exisiting page.
-overwrite  Overwrite the page if it exists. Can only be applied with -put.
-summary: The summary text when the page is written. If it's one word just
containing letters, dashes and underscores it uses that as a
translation key.
Custom format can be applied to the following items extrapolated from a
page object:
site: obtained from page._link._site.
title: obtained from page._link._title.
loc_title: obtained from page._link.canonical_title().
can_title: obtained from page._link.ns_title().
based either the canonical namespace name or on the namespace name
in the language specified by the -trans param;
a default value '******' will be used if no ns is found.
onsite: obtained from pywikibot.Site(outputlang, self.site.family).
trs_title: obtained from page._link.ns_title(onsite=onsite).
If selected format requires trs_title, outputlang must be set.
¶ms;
"""
#
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import os
import re
import pywikibot
from pywikibot import config2 as config, i18n
from pywikibot.pagegenerators import GeneratorFactory, parameterHelp
docuReplacements = {'¶ms;': parameterHelp}
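# Example invocation (illustrative; the page generator option -cat comes from
# the parameters documented in ¶ms; above):
#
#   python pwb.py listpages -cat:"Example category" -format:3 -save:dumps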
class Formatter(object):
"""Structure with Page attributes exposed for formatting from cmd line."""
fmt_options = {
'1': u"{num:4d} {page.title}",
'2': u"{num:4d} [[{page.title}]]",
'3': u"{page.title}",
'4': u"[[{page.title}]]",
'5': u"{num:4d} \03{{lightred}}{page.loc_title:<40}\03{{default}}",
'6': u"{num:4d} {page.loc_title:<40} {page.can_title:<40}",
'7': u"{num:4d} {page.loc_title:<40} {page.trs_title:<40}",
}
# Identify which formats need outputlang
fmt_need_lang = [k for k, v in fmt_options.items() if 'trs_title' in v]
def __init__(self, page, outputlang=None, default='******'):
"""
Constructor.
@param page: the page to be formatted.
@type page: Page object.
@param outputlang: language code in which namespace before title should
be translated.
Page ns will be searched in Site(outputlang, page.site.family)
and, if found, its custom name will be used in page.title().
@type outputlang: str or None, if no translation is wanted.
@param default: default string to be used if no corresponding
namespace is found when outputlang is not None.
"""
self.site = page._link.site
self.title = page._link.title
self.loc_title = page._link.canonical_title()
self.can_title = page._link.ns_title()
self.outputlang = outputlang
if outputlang is not None:
# Cache onsite in case of translations.
if not hasattr(self, "onsite"):
self.onsite = pywikibot.Site(outputlang, self.site.family)
try:
self.trs_title = page._link.ns_title(onsite=self.onsite)
# Fallback if no corresponding namespace is found in onsite.
except pywikibot.Error:
self.trs_title = u'%s:%s' % (default, page._link.title)
def output(self, num=None, fmt=1):
"""Output formatted string."""
fmt = self.fmt_options.get(fmt, fmt)
# If selected format requires trs_title, outputlang must be set.
if (fmt in self.fmt_need_lang or
'trs_title' in fmt and
self.outputlang is None):
raise ValueError(
u"Required format code needs 'outputlang' parameter set.")
if num is None:
return fmt.format(page=self)
else:
return fmt.format(num=num, page=self)
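
# Illustrative sketch (not used by main): formatting a single page with the
# numbered wiki-link style. The site and page title are assumptions.
def _example_format_page():
    site = pywikibot.Site('en', 'wikipedia')
    page = pywikibot.Page(site, 'Python (programming language)')
    # Uses format '2', i.e. u'{num:4d} [[{page.title}]]'
    return Formatter(page).output(num=1, fmt='2')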
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
gen = None
notitle = False
fmt = '1'
outputlang = None
page_get = False
base_dir = None
encoding = config.textfile_encoding
page_target = None
overwrite = False
summary = 'listpages-save-list'
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
genFactory = GeneratorFactory()
for arg in local_args:
option, sep, value = arg.partition(':')
if option == '-notitle':
notitle = True
elif option == '-format':
fmt = value.replace('\\03{{', '\03{{')
if not fmt.strip():
notitle = True
        elif option == '-outputlang':
outputlang = value
elif option == '-get':
page_get = True
elif option == '-save':
base_dir = value or '.'
elif option == '-encode':
encoding = value
elif option == '-put':
page_target = value
elif option == '-overwrite':
overwrite = True
elif option == '-summary':
summary = value
else:
genFactory.handleArg(arg)
if base_dir:
base_dir = os.path.expanduser(base_dir)
if not os.path.isabs(base_dir):
base_dir = os.path.normpath(os.path.join(os.getcwd(), base_dir))
if not os.path.exists(base_dir):
pywikibot.output(u'Directory "%s" does not exist.' % base_dir)
choice = pywikibot.input_yn(
u'Do you want to create it ("No" to continue without saving)?')
if choice:
os.makedirs(base_dir, mode=0o744)
else:
base_dir = None
elif not os.path.isdir(base_dir):
# base_dir is a file.
pywikibot.warning(u'Not a directory: "%s"\n'
u'Skipping saving ...'
% base_dir)
base_dir = None
if page_target:
site = pywikibot.Site()
page_target = pywikibot.Page(site, page_target)
if not overwrite and page_target.exists():
pywikibot.bot.suggest_help(
additional_text='Page {0} already exists.\n'
'You can use the -overwrite argument to '
'replace the content of this page.'
.format(page_target.title(asLink=True)))
return False
if re.match('^[a-z_-]+$', summary):
summary = i18n.twtranslate(site, summary)
gen = genFactory.getCombinedGenerator()
if gen:
i = 0
output_list = []
for i, page in enumerate(gen, start=1):
if not notitle:
page_fmt = Formatter(page, outputlang)
output_list += [page_fmt.output(num=i, fmt=fmt)]
pywikibot.stdout(output_list[-1])
if page_get:
try:
pywikibot.stdout(page.text)
except pywikibot.Error as err:
pywikibot.output(err)
if base_dir:
filename = os.path.join(base_dir, page.title(as_filename=True))
pywikibot.output(u'Saving %s to %s' % (page.title(), filename))
with open(filename, mode='wb') as f:
f.write(page.text.encode(encoding))
pywikibot.output(u"%i page(s) found" % i)
if page_target:
page_target.text = '\n'.join(output_list)
page_target.save(summary=summary)
return True
else:
pywikibot.bot.suggest_help(missing_generator=True)
return False
if __name__ == "__main__":
main()
|
the-stack_0_27979
|
"""Implement your own custom search agent using any combination of techniques
you choose. This agent will compete against other students (and past
champions) in a tournament.
COMPLETING AND SUBMITTING A COMPETITION AGENT IS OPTIONAL
"""
import random
from math import sqrt
class SearchTimeout(Exception):
"""Subclass base exception for code clarity. """
pass
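

# custom_score combines three signals: the best one-step-ahead mobility of the
# player (future_moves, weighted x2), the current mobility differential against
# the opponent, and proximity to the board centre, which scales the differential
# term through the 1 / (1 + dist_center) factor.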
def custom_score(game, player):
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
w, h = game.width / 2., game.height / 2.
y_me, x_me = game.get_player_location(player)
dist_center = sqrt((h - y_me) ** 2 + (w - x_me) ** 2)
future_moves = 0
for move in game.get_legal_moves(player):
_ = len(game.forecast_move(move).get_legal_moves(player))
if _ > future_moves:
future_moves = _
own_moves = len(game.get_legal_moves(player))
opp_moves = len(game.get_legal_moves(game.get_opponent(player)))
return float(2*future_moves + (own_moves - opp_moves)/(1 + dist_center))
class CustomPlayer:
    def __init__(self, data=None, timeout=1., search_depth=3):
        self.score = custom_score
        self.time_left = None
        self.TIMER_THRESHOLD = timeout
        # Fixed-depth fallback used when iterative deepening is disabled in
        # get_move(); the default of 3 is an assumption.
        self.search_depth = search_depth
def get_move(self, game, time_left, it_dp=True):
self.time_left = time_left
# Initialize the best move so that this function returns something
# in case the search fails due to timeout
best_move = (-1, -1)
try:
if it_dp:
total_depth = 1
while True:
depth = total_depth
best_move = self.alphabeta(game, depth)
total_depth += 1
else:
best_move = self.alphabeta(game, self.search_depth)
except SearchTimeout:
pass # Handle any actions required after timeout as needed
# Return the best move from the last completed search iteration
return best_move
def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf")):
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
best_score = float("-inf")
best_move = None
for move in game.get_legal_moves():
v = self.min_value(game.forecast_move(move), depth - 1, alpha, beta)
if v > best_score:
best_score = v
best_move = move
alpha = max(alpha, v)
return best_move
def max_value(self, game, depth, alpha, beta):
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
if depth <= 0:
return self.score(game, self)
v = float('-inf')
for move in game.get_legal_moves():
v = max(v, self.min_value(game.forecast_move(move), depth - 1, alpha, beta))
if v >= beta:
return v
alpha = max(alpha, v)
return v
def min_value(self, game, depth, alpha, beta):
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
if depth <= 0:
return self.score(game, self)
v = float('inf')
for move in game.get_legal_moves():
v = min(v, self.max_value(game.forecast_move(move), depth - 1, alpha, beta))
if v <= alpha:
return v
beta = min(beta, v)
return v
|
the-stack_0_27981
|
######################################################################################
#
# Copyright (c) 2015 Twist Bioscience
#
# File: app/routes/pages.py
#
# These are the handlers for all the web pages of the application. (These are not JSON/REST routes, they
# are only web page routes.)
#
######################################################################################
from flask import g, render_template
from app import db
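# NOTE: the Transfer, TransferDetail and TransferType models referenced by the
# handlers below are assumed to be imported elsewhere in the application
# (e.g. something like `from app.models import Transfer, TransferDetail,
# TransferType`; the exact module path is an assumption).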
#
# This is the "home" page, which is actually the "enter a sample movement" page.
#
def home():
raise DeprecationWarning
sample_transfer_types = db.session.query(TransferType).order_by(TransferType.name)
return render_template('recordTransfer.html',sample_transfer_types=sample_transfer_types,
current_user_first_and_last=g.user.first_and_last_name)
#
# The list of sample transfers
#
def sample_transfers_page():
raise DeprecationWarning
rows = db.session.query(Transfer, TransferDetail).filter(
TransferDetail.transfer_id==Transfer.id).order_by(
Transfer.date_transfer.desc()).all()
sample_transfer_details = []
seen = []
for transfer,details in rows:
if (transfer.id,details.source_plate_id,details.destination_plate_id) not in seen:
seen.append((transfer.id,details.source_plate_id,details.destination_plate_id))
sample_transfer_details.append((transfer,details))
return render_template('viewTransfers.html',
sample_transfer_details=sample_transfer_details,
current_user_first_and_last=g.user.first_and_last_name)
#
# This is the page allowing the user to add a barcode to a sample plate.
#
def edit_sample_plate():
raise DeprecationWarning
return render_template('edit_plate.html',current_user_first_and_last=g.user.first_and_last_name)
#
# This is "Sample Report" page
#
def sample_report_page(sample_id):
raise DeprecationWarning
return render_template('sample_report.html',sample_id=sample_id,current_user_first_and_last=g.user.first_and_last_name)
#
# This is the "Plate Details Report" page
#
def plate_report_page(plate_barcode):
raise DeprecationWarning
return render_template('plate_report.html',plate_barcode=plate_barcode,current_user_first_and_last=g.user.first_and_last_name)
|
the-stack_0_27982
|
#############################################################################################
# CREATOR: ANJAL.P #
# ON: 2020 NOV. #
# AIM: To Extend the capability of the PySide2 and PyQt5 Python library with easy to #
# use extension containing commonly used widgets which are not natively supported   #
# by the Qt framework (or at least for the Python version of Qt).                   #
# VERSION: v1.0.0 #
# NOTES: Demo Application #
# REFER: Github: https://github.com/anjalp/PySide2extn #
#############################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
from PySide2extn.RoundProgressBar import roundProgressBar
from PySide2extn.SpiralProgressBar import spiralProgressBar
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
if not MainWindow.objectName():
MainWindow.setObjectName(u"MainWindow")
MainWindow.resize(800, 415)
MainWindow.setMinimumSize(QSize(800, 415))
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(u"centralwidget")
self.gridLayout = QGridLayout(self.centralwidget)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName(u"gridLayout")
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.tabWidget = QTabWidget(self.centralwidget)
self.tabWidget.setObjectName(u"tabWidget")
self.tab = QWidget()
self.tab.setObjectName(u"tab")
self.verticalLayout = QVBoxLayout(self.tab)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(u"verticalLayout")
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.widget = QWidget(self.tab)
self.widget.setObjectName(u"widget")
self.gridLayout_2 = QGridLayout(self.widget)
self.gridLayout_2.setSpacing(0)
self.gridLayout_2.setObjectName(u"gridLayout_2")
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.b1 = QPushButton(self.widget)
self.b1.setObjectName(u"b1")
self.b1.setMinimumSize(QSize(20, 20))
self.b1.setMaximumSize(QSize(20, 20))
self.gridLayout_2.addWidget(self.b1, 0, 1, 1, 1)
self.b3 = QPushButton(self.widget)
self.b3.setObjectName(u"b3")
self.b3.setMinimumSize(QSize(20, 20))
self.b3.setMaximumSize(QSize(20, 20))
self.gridLayout_2.addWidget(self.b3, 0, 5, 1, 1)
self.rpb4 = roundProgressBar(self.widget)
self.rpb4.setObjectName(u"rpb4")
self.gridLayout_2.addWidget(self.rpb4, 0, 6, 3, 1)
self.rpb3 = roundProgressBar(self.widget)
self.rpb3.setObjectName(u"rpb3")
self.gridLayout_2.addWidget(self.rpb3, 0, 4, 3, 1)
self.rpb2 = roundProgressBar(self.widget)
self.rpb2.setObjectName(u"rpb2")
self.rpb2.setStyleSheet(u"")
self.gridLayout_2.addWidget(self.rpb2, 0, 2, 3, 1)
self.rpb1 = roundProgressBar(self.widget)
self.rpb1.setObjectName(u"rpb1")
self.gridLayout_2.addWidget(self.rpb1, 0, 0, 3, 1)
self.b2 = QPushButton(self.widget)
self.b2.setObjectName(u"b2")
self.b2.setMinimumSize(QSize(20, 20))
self.b2.setMaximumSize(QSize(20, 20))
self.gridLayout_2.addWidget(self.b2, 2, 1, 1, 1)
self.vs1 = QSlider(self.widget)
self.vs1.setObjectName(u"vs1")
self.vs1.setOrientation(Qt.Vertical)
self.gridLayout_2.addWidget(self.vs1, 0, 3, 3, 1)
self.b4 = QPushButton(self.widget)
self.b4.setObjectName(u"b4")
self.b4.setMinimumSize(QSize(20, 20))
self.b4.setMaximumSize(QSize(20, 20))
self.gridLayout_2.addWidget(self.b4, 2, 5, 1, 1)
self.verticalLayout.addWidget(self.widget)
self.hs1 = QSlider(self.tab)
self.hs1.setObjectName(u"hs1")
self.hs1.setOrientation(Qt.Horizontal)
self.verticalLayout.addWidget(self.hs1)
self.widget_2 = QWidget(self.tab)
self.widget_2.setObjectName(u"widget_2")
self.gridLayout_3 = QGridLayout(self.widget_2)
self.gridLayout_3.setSpacing(0)
self.gridLayout_3.setObjectName(u"gridLayout_3")
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.vs2 = QSlider(self.widget_2)
self.vs2.setObjectName(u"vs2")
self.vs2.setOrientation(Qt.Vertical)
self.vs2.setInvertedAppearance(True)
self.gridLayout_3.addWidget(self.vs2, 0, 3, 4, 1)
self.b6 = QPushButton(self.widget_2)
self.b6.setObjectName(u"b6")
self.b6.setMinimumSize(QSize(20, 20))
self.b6.setMaximumSize(QSize(20, 20))
self.gridLayout_3.addWidget(self.b6, 3, 5, 1, 1)
self.b5 = QPushButton(self.widget_2)
self.b5.setObjectName(u"b5")
self.b5.setMinimumSize(QSize(20, 20))
self.b5.setMaximumSize(QSize(20, 20))
self.gridLayout_3.addWidget(self.b5, 0, 5, 1, 1)
self.b8 = QPushButton(self.widget_2)
self.b8.setObjectName(u"b8")
self.b8.setMinimumSize(QSize(20, 20))
self.b8.setMaximumSize(QSize(20, 20))
self.gridLayout_3.addWidget(self.b8, 3, 1, 1, 1)
self.rpb5 = roundProgressBar(self.widget_2)
self.rpb5.setObjectName(u"rpb5")
self.gridLayout_3.addWidget(self.rpb5, 0, 0, 4, 1)
self.b7 = QPushButton(self.widget_2)
self.b7.setObjectName(u"b7")
self.b7.setMinimumSize(QSize(20, 20))
self.b7.setMaximumSize(QSize(20, 20))
self.gridLayout_3.addWidget(self.b7, 0, 1, 1, 1)
self.rpb7 = roundProgressBar(self.widget_2)
self.rpb7.setObjectName(u"rpb7")
self.gridLayout_3.addWidget(self.rpb7, 0, 4, 4, 1)
self.rpb8 = roundProgressBar(self.widget_2)
self.rpb8.setObjectName(u"rpb8")
self.gridLayout_3.addWidget(self.rpb8, 0, 6, 4, 1)
self.rpb6 = roundProgressBar(self.widget_2)
self.rpb6.setObjectName(u"rpb6")
self.gridLayout_3.addWidget(self.rpb6, 0, 2, 4, 1)
self.verticalLayout.addWidget(self.widget_2)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QWidget()
self.tab_2.setObjectName(u"tab_2")
self.verticalLayout_2 = QVBoxLayout(self.tab_2)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.widget_3 = QWidget(self.tab_2)
self.widget_3.setObjectName(u"widget_3")
self.gridLayout_4 = QGridLayout(self.widget_3)
self.gridLayout_4.setSpacing(0)
self.gridLayout_4.setObjectName(u"gridLayout_4")
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.pushButton_9 = QPushButton(self.widget_3)
self.pushButton_9.setObjectName(u"pushButton_9")
self.pushButton_9.setMinimumSize(QSize(20, 20))
self.pushButton_9.setMaximumSize(QSize(20, 20))
self.gridLayout_4.addWidget(self.pushButton_9, 0, 1, 1, 1)
self.pushButton_11 = QPushButton(self.widget_3)
self.pushButton_11.setObjectName(u"pushButton_11")
self.pushButton_11.setMinimumSize(QSize(20, 20))
self.pushButton_11.setMaximumSize(QSize(20, 20))
self.gridLayout_4.addWidget(self.pushButton_11, 0, 5, 1, 1)
self.pushButton_10 = QPushButton(self.widget_3)
self.pushButton_10.setObjectName(u"pushButton_10")
self.pushButton_10.setMinimumSize(QSize(20, 20))
self.pushButton_10.setMaximumSize(QSize(20, 20))
self.gridLayout_4.addWidget(self.pushButton_10, 3, 1, 1, 1)
self.vs3 = QSlider(self.widget_3)
self.vs3.setObjectName(u"vs3")
self.vs3.setOrientation(Qt.Vertical)
self.gridLayout_4.addWidget(self.vs3, 0, 3, 4, 1)
self.spb2 = spiralProgressBar(self.widget_3)
self.spb2.setObjectName(u"spb2")
self.gridLayout_4.addWidget(self.spb2, 0, 2, 4, 1)
self.spb1 = spiralProgressBar(self.widget_3)
self.spb1.setObjectName(u"spb1")
self.gridLayout_4.addWidget(self.spb1, 0, 0, 4, 1)
self.spb3 = spiralProgressBar(self.widget_3)
self.spb3.setObjectName(u"spb3")
self.gridLayout_4.addWidget(self.spb3, 0, 4, 4, 1)
self.pushButton_12 = QPushButton(self.widget_3)
self.pushButton_12.setObjectName(u"pushButton_12")
self.pushButton_12.setMinimumSize(QSize(20, 20))
self.pushButton_12.setMaximumSize(QSize(20, 20))
self.gridLayout_4.addWidget(self.pushButton_12, 3, 5, 1, 1)
self.spb4 = spiralProgressBar(self.widget_3)
self.spb4.setObjectName(u"spb4")
self.gridLayout_4.addWidget(self.spb4, 0, 6, 4, 1)
self.verticalLayout_2.addWidget(self.widget_3)
self.hs2 = QSlider(self.tab_2)
self.hs2.setObjectName(u"hs2")
self.hs2.setOrientation(Qt.Horizontal)
self.verticalLayout_2.addWidget(self.hs2)
self.widget_4 = QWidget(self.tab_2)
self.widget_4.setObjectName(u"widget_4")
self.gridLayout_5 = QGridLayout(self.widget_4)
self.gridLayout_5.setSpacing(0)
self.gridLayout_5.setObjectName(u"gridLayout_5")
self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
self.pushButton_15 = QPushButton(self.widget_4)
self.pushButton_15.setObjectName(u"pushButton_15")
self.pushButton_15.setMinimumSize(QSize(20, 20))
self.pushButton_15.setMaximumSize(QSize(20, 20))
self.gridLayout_5.addWidget(self.pushButton_15, 0, 5, 1, 1)
self.pushButton_13 = QPushButton(self.widget_4)
self.pushButton_13.setObjectName(u"pushButton_13")
self.pushButton_13.setMinimumSize(QSize(20, 20))
self.pushButton_13.setMaximumSize(QSize(20, 20))
self.gridLayout_5.addWidget(self.pushButton_13, 0, 1, 1, 1)
self.pushButton_16 = QPushButton(self.widget_4)
self.pushButton_16.setObjectName(u"pushButton_16")
self.pushButton_16.setMinimumSize(QSize(20, 20))
self.pushButton_16.setMaximumSize(QSize(20, 20))
self.gridLayout_5.addWidget(self.pushButton_16, 3, 5, 1, 1)
self.pushButton_14 = QPushButton(self.widget_4)
self.pushButton_14.setObjectName(u"pushButton_14")
self.pushButton_14.setMinimumSize(QSize(20, 20))
self.pushButton_14.setMaximumSize(QSize(20, 20))
self.gridLayout_5.addWidget(self.pushButton_14, 3, 1, 1, 1)
self.spb5 = spiralProgressBar(self.widget_4)
self.spb5.setObjectName(u"spb5")
self.gridLayout_5.addWidget(self.spb5, 0, 0, 4, 1)
self.spb6 = spiralProgressBar(self.widget_4)
self.spb6.setObjectName(u"spb6")
self.gridLayout_5.addWidget(self.spb6, 0, 2, 4, 1)
self.spb7 = spiralProgressBar(self.widget_4)
self.spb7.setObjectName(u"spb7")
self.gridLayout_5.addWidget(self.spb7, 0, 4, 4, 1)
self.spb8 = spiralProgressBar(self.widget_4)
self.spb8.setObjectName(u"spb8")
self.gridLayout_5.addWidget(self.spb8, 0, 6, 4, 1)
self.vs4 = QSlider(self.widget_4)
self.vs4.setObjectName(u"vs4")
self.vs4.setOrientation(Qt.Vertical)
self.vs4.setInvertedAppearance(True)
self.gridLayout_5.addWidget(self.vs4, 0, 3, 4, 1)
self.verticalLayout_2.addWidget(self.widget_4)
self.tabWidget.addTab(self.tab_2, "")
self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QMetaObject.connectSlotsByName(MainWindow)
# setupUi
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"Demo Application PySide2extn", None))
self.b1.setText(QCoreApplication.translate("MainWindow", u"S", None))
self.b3.setText(QCoreApplication.translate("MainWindow", u"D", None))
self.b2.setText(QCoreApplication.translate("MainWindow", u"I", None))
self.b4.setText(QCoreApplication.translate("MainWindow", u"R", None))
self.b6.setText(QCoreApplication.translate("MainWindow", u"I", None))
self.b5.setText(QCoreApplication.translate("MainWindow", u"S", None))
self.b8.setText(QCoreApplication.translate("MainWindow", u"D", None))
self.b7.setText(QCoreApplication.translate("MainWindow", u"R", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QCoreApplication.translate("MainWindow", u"RoundProgressBar", None))
self.pushButton_9.setText(QCoreApplication.translate("MainWindow", u"R", None))
self.pushButton_11.setText(QCoreApplication.translate("MainWindow", u"S", None))
self.pushButton_10.setText(QCoreApplication.translate("MainWindow", u"D", None))
self.pushButton_12.setText(QCoreApplication.translate("MainWindow", u"I", None))
self.pushButton_15.setText(QCoreApplication.translate("MainWindow", u"R", None))
self.pushButton_13.setText(QCoreApplication.translate("MainWindow", u"I", None))
self.pushButton_16.setText(QCoreApplication.translate("MainWindow", u"D", None))
self.pushButton_14.setText(QCoreApplication.translate("MainWindow", u"S", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QCoreApplication.translate("MainWindow", u"SpiralProgressBar", None))
self.addDesignTothis()
def addDesignTothis(self):
self.widget_rpb()
self.widget_spb()
def widget_rpb(self):
self.rpb1.rpb_setValue(55)
self.rpb2.rpb_setValue(84)
self.rpb3.rpb_setValue(0)
self.rpb4.rpb_setValue(46)
self.rpb5.rpb_setValue(75)
self.rpb6.rpb_setValue(66)
self.rpb7.rpb_setValue(5)
self.rpb8.rpb_setValue(95)
self.rpb2.rpb_setBarStyle("Line")
self.rpb2.rpb_setLineColor((0, 10, 15))
self.rpb2.rpb_setTextColor((0, 10, 15))
self.rpb3.rpb_setBarStyle("Pie")
self.rpb3.rpb_setMaximum(360)
self.rpb3.rpb_setTextFormat('Value')
self.rpb3.rpb_setTextColor((210, 240, 210))
self.rpb3.rpb_setPieColor((0, 125, 125))
self.rpb4.rpb_setBarStyle("Pizza")
self.rpb4.rpb_setRange(0, 200)
self.rpb4.rpb_setCircleColor((210, 100, 0))
self.rpb4.rpb_setLineColor((160, 50, 0))
self.rpb4.rpb_setTextColor((250, 250, 250))
self.rpb4.rpb_setCircleRatio(1)
self.rpb5.rpb_setBarStyle("Hybrid1")
self.rpb5.rpb_setRange(0, 360)
self.rpb5.rpb_setTextFormat('Value')
self.rpb5.rpb_setPathWidth(2)
self.rpb5.rpb_setLineWidth(8)
self.rpb5.rpb_setPathColor((100, 100, 100))
self.rpb5.rpb_setCircleColor((100, 100, 100))
self.rpb5.rpb_setTextColor((250, 250, 250))
self.rpb6.rpb_setBarStyle("Hybrid2")
self.rpb8.rpb_setBarStyle("Hybrid1")
self.rpb8.rpb_setRange(0, 360)
self.rpb7.rpb_setLineWidth(2)
self.rpb7.rpb_setLineColor((20, 20, 20))
self.rpb7.rpb_setMaximum(200)
self.rpb7.rpb_enableText(False)
self.rpb7.rpb_setPathWidth(8)
self.hs1.valueChanged.connect(lambda: self.rpb1.rpb_setValue(self.hs1.value()))
self.hs1.valueChanged.connect(lambda: self.rpb8.rpb_setValue(100 - self.hs1.value()))
self.hs1.valueChanged.connect(lambda: self.rpb4.rpb_setValue(200 - self.hs1.value()))
self.hs1.valueChanged.connect(lambda: self.rpb5.rpb_setValue(2*self.hs1.value()))
self.vs1.valueChanged.connect(lambda: self.rpb2.rpb_setValue(self.vs1.value()))
self.vs1.valueChanged.connect(lambda: self.rpb3.rpb_setValue(360 - self.vs1.value()))
self.vs2.valueChanged.connect(lambda: self.rpb6.rpb_setValue(self.vs2.value()))
self.vs2.valueChanged.connect(lambda: self.rpb7.rpb_setValue(self.vs2.value()))
self.b1.clicked.connect(lambda: self.rpb1.rpb_setLineColor((128, 40, 152)))
self.b2.clicked.connect(lambda: self.rpb8.rpb_setCircleColor((0, 192, 175)))
self.b5.clicked.connect(lambda: self.rpb6.rpb_setTextColor((0, 192, 175)))
self.b3.clicked.connect(lambda: self.rpb3.rpb_setValue(0))
self.b4.clicked.connect(lambda: self.rpb3.rpb_setValue(100))
self.b6.clicked.connect(lambda: self.rpb5.rpb_setValue(0))
self.b7.clicked.connect(lambda: self.rpb6.rpb_setValue(100))
self.b8.clicked.connect(lambda: self.rpb8.rpb_setValue(360))
def widget_spb(self):
self.spb1.spb_setValue((82, 56, 5))
self.spb2.spb_setNoProgressBar(2)
self.spb2.spb_lineWidth(15)
self.spb2.spb_setGap(18)
self.spb2.spb_setValue((65, 60))
self.spb2.spb_lineColor(((28, 129, 196), (90, 193, 211)))
self.spb2.spb_pathColor(((195, 225, 242), (208, 234, 244)))
self.spb3.spb_setRange((0, 0, 0), (360, 360, 360))
self.spb3.spb_lineWidth(15)
self.spb3.spb_setGap(17)
self.spb3.spb_setInitialPos(('East', 'East', 'East'))
self.spb3.spb_setValue((246, 315, 198))
self.spb3.spb_setPathHidden(True)
self.spb4.spb_setNoProgressBar(6)
self.spb4.spb_lineWidth(10)
self.spb4.spb_setGap(11)
self.spb4.spb_setValue((59, 16, 27, 65, 84, 95))
self.spb5.spb_lineStyle(('DotLine', 'DotLine', 'DotLine'))
self.spb5.spb_setValue((65, 90, 25))
self.spb6.spb_setNoProgressBar(5)
self.spb6.spb_lineWidth(10)
self.spb6.spb_setGap(11)
self.spb6.spb_setDirection(('Clockwise', 'AntiClockwise', 'AntiClockwise', 'Clockwise', 'Clockwise'))
self.spb6.spb_setValue((65, 25, 86, 45, 75))
self.spb7.spb_setGap(12)
self.spb7.variableWidth(True)
self.spb7.spb_widthIncrement(2)
self.spb8.spb_lineWidth(8)
self.spb8.spb_setGap(9)
self.spb8.spb_lineCap(('RoundCap', 'SquareCap', 'SquareCap'))
self.spb8.spb_setValue((65, 23, 95))
self.hs2.valueChanged.connect(lambda: self.spb1.spb_setValue((self.hs2.value(), self.hs2.value()*1.5, self.hs2.value()*1.75)))
self.hs2.valueChanged.connect(lambda: self.spb4.spb_setValue((self.hs2.value()*1.25, self.hs2.value()*1.35, self.hs2.value()*1, self.hs2.value()*1.75, self.hs2.value()*1.55, self.hs2.value()*0.45)))
self.hs2.valueChanged.connect(lambda: self.spb5.spb_setValue((360 - self.hs2.value()*3.6, 360 - self.hs2.value()*4, 360 - self.hs2.value()*4.2)))
self.hs2.valueChanged.connect(lambda: self.spb8.spb_setValue((self.hs2.value(), self.hs2.value()*1.26, self.hs2.value()*2)))
self.vs3.valueChanged.connect(lambda: self.spb2.spb_setValue((100 - self.vs3.value()*1.2, 100 - self.vs3.value())))
self.vs3.valueChanged.connect(lambda: self.spb3.spb_setValue((self.vs3.value()*3.6, 3.6*0.75*self.vs3.value(), 3.6*0.5*self.vs3.value())))
self.vs4.valueChanged.connect(lambda: self.spb6.spb_setValue((self.vs4.value(), self.vs4.value()*0.9, self.vs4.value()*0.7, self.vs4.value()*0.6, self.vs4.value()*0.5)))
self.vs4.valueChanged.connect(lambda: self.spb7.spb_setValue((self.vs4.value(), self.vs4.value(), self.vs4.value())))
def main():
import sys
app = QApplication(sys.argv)
MainWindow = QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
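

# Standard entry point (added for completeness; the original demo may launch
# main() differently):
if __name__ == "__main__":
    main()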
|
the-stack_0_27983
|
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from nnabla.config import nnabla_config
from nnabla.utils.create_cache import CreateCache
from nnabla.utils.data_source import DataSourceWithFileCache
from nnabla.utils.data_source_implements import CacheDataSource, CsvDataSource
from tqdm import tqdm
def _convert(args, source):
_, ext = os.path.splitext(args.destination)
if ext.lower() == '.cache':
with DataSourceWithFileCache(source, cache_dir=args.destination, shuffle=args.shuffle) as ds:
print('Number of Data: {}'.format(ds.size))
print('Shuffle: {}'.format(args.shuffle))
print('Normalize: {}'.format(args.normalize))
pbar = None
if nnabla_config.get('MISC', 'misc_show_progress') == 'True':
pbar = tqdm(total=ds.size)
for i in range(ds.size):
ds._get_data(i)
if pbar is not None:
pbar.update(1)
else:
print('Command `conv_dataset` only supports CACHE as destination.')
def conv_dataset_command(args):
if type(args.num_of_threads) == int and args.num_of_threads <= 0:
print(
"The numbers of threads [{}] must be positive integer.".format(args.num_of_threads))
return False
if os.path.exists(args.destination):
if not args.force:
print(
                'File or directory [{}] already exists; use the `-F` option to overwrite it.'.format(args.destination))
return False
elif os.path.isdir(args.destination):
print('Overwrite destination [{}].'.format(args.destination))
shutil.rmtree(args.destination, ignore_errors=True)
os.mkdir(args.destination)
else:
print('Cannot overwrite file [{}] please delete it.'.format(
args.destination))
return False
else:
os.mkdir(args.destination)
_, ext = os.path.splitext(args.source)
if ext.lower() == '.csv':
if os.path.exists(args.source):
cc = CreateCache(args.source, shuffle=args.shuffle,
num_of_threads=args.num_of_threads)
print('Number of Data: {}'.format(cc._size))
print('Shuffle: {}'.format(cc._shuffle))
print('Normalize: {}'.format(args.normalize))
cc.create(args.destination, normalize=args.normalize)
else:
with CsvDataSource(args.source, shuffle=args.shuffle, normalize=args.normalize) as source:
_convert(args, source)
elif ext.lower() == '.cache':
with CacheDataSource(args.source, shuffle=args.shuffle, normalize=args.normalize) as source:
_convert(args, source)
else:
print('Command `conv_dataset` only supports CSV or CACHE as source.')
return True
def add_conv_dataset_command(subparsers):
# Convert dataset
subparser = subparsers.add_parser(
'conv_dataset', help='Convert CSV dataset to cache.')
subparser.add_argument('-F', '--force', action='store_true',
help='force overwrite destination', required=False)
subparser.add_argument(
'-S', '--shuffle', action='store_true', help='shuffle data', required=False)
subparser.add_argument('-N', '--normalize', action='store_true',
help='normalize data range', required=False)
subparser.add_argument('-t', "--num_of_threads", type=int, required=False,
help='use multithreading to convert cache, default to 10')
subparser.add_argument('source')
subparser.add_argument('destination')
subparser.set_defaults(func=conv_dataset_command)
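

# Illustrative CLI usage (file names are placeholders): convert a CSV dataset into
# a cache directory with shuffling and normalization enabled, e.g.
#   nnabla_cli conv_dataset -S -N -t 10 training.csv training.cache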
|
the-stack_0_27984
|
import tensorflow as tf
from absl import app, flags, logging
from absl.flags import FLAGS
from core.yolov4 import YOLO, decode, filter_boxes
import core.utils as utils
from core.config import cfg
flags.DEFINE_string('weights', './data/custom-yolov4-tiny-detector_final (2).weights', 'path to weights file')
flags.DEFINE_string('output', './checkpoints/yolov4-416-tiny-tflite', 'path to output')
flags.DEFINE_boolean('tiny', False, 'is yolo-tiny or not')
flags.DEFINE_integer('input_size', 416, 'define input size of export model')
flags.DEFINE_float('score_thres', 0.2, 'define score threshold')
flags.DEFINE_string('framework', 'tf', 'define which framework to convert to (tf, trt, tflite)')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
def save_tf():
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_layer = tf.keras.layers.Input([FLAGS.input_size, FLAGS.input_size, 3])
feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model, FLAGS.tiny)
bbox_tensors = []
prob_tensors = []
if FLAGS.tiny:
for i, fm in enumerate(feature_maps):
if i == 0:
output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)
else:
output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)
bbox_tensors.append(output_tensors[0])
prob_tensors.append(output_tensors[1])
else:
for i, fm in enumerate(feature_maps):
if i == 0:
output_tensors = decode(fm, FLAGS.input_size // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)
elif i == 1:
output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)
else:
output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)
bbox_tensors.append(output_tensors[0])
prob_tensors.append(output_tensors[1])
pred_bbox = tf.concat(bbox_tensors, axis=1)
pred_prob = tf.concat(prob_tensors, axis=1)
if FLAGS.framework == 'tflite':
pred = (pred_bbox, pred_prob)
else:
boxes, pred_conf = filter_boxes(pred_bbox, pred_prob, score_threshold=FLAGS.score_thres, input_shape=tf.constant([FLAGS.input_size, FLAGS.input_size]))
pred = tf.concat([boxes, pred_conf], axis=-1)
model = tf.keras.Model(input_layer, pred)
utils.load_weights(model, FLAGS.weights, FLAGS.model, FLAGS.tiny)
model.summary()
model.save(FLAGS.output)
def main(_argv):
save_tf()
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
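

# Illustrative invocation (assuming this module is saved as save_model.py; flag
# values are examples only):
#   python save_model.py --weights ./data/yolov4.weights --output ./checkpoints/yolov4-416 \
#       --input_size 416 --model yolov4 --framework tf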
|
the-stack_0_27985
|
"""
Class Features
Name: drv_configuration_time_ascat
Author(s): Fabio Delogu ([email protected])
Date: '20190729'
Version: '1.0.0'
"""
#######################################################################################
# Library
import logging
import time
import pandas as pd
from src.hyde.algorithm.settings.satellite.hsaf.lib_ascat_args import logger_name, time_format
from src.hyde.driver.configuration.generic.drv_configuration_debug import Exc
# Log
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
class DataObject(dict):
pass
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Class Time
class DataTime:
# -------------------------------------------------------------------------------------
# Global Variable(s)
time_now = None
time_settings = None
time_run = None
time_from = None
time_to = None
time_frequency = None
time_period = None
time_rounding = None
time_steps = {}
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method class initialization
def __init__(self, time_arg=time.strftime(time_format, time.gmtime()),
time_settings=None,
time_now=None,
time_period_past=0, time_period_future=0, time_frequency='H',
time_rounding='H'):
# -------------------------------------------------------------------------------------
# Store information in global workspace
self.time_arg = time_arg
        self.time_settings = time_settings
self.time_now = time_now
self.time_period_past = int(time_period_past)
self.time_period_future = int(time_period_future)
self.time_frequency = time_frequency
self.time_rounding = time_rounding
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to set times
def getDataTime(self, time_reverse=False):
# -------------------------------------------------------------------------------------
# Info start
log_stream.info(' ---> Configure time ... ')
# Get time now
self.time_now = self.__getTimeNow()
# Get time argument
self.time_arg = self.__getTimeArg()
# Set time run
self.time_run = self.__setTimeRun(self.time_now, self.time_arg)
# Round time to reference
self.time_run = self.__computeTimeRound(self.time_rounding)
# Get initial time step (taking care restart time condition)
self.time_from = self.__getTimeFrom(self.time_run,
time_period=self.time_period_past,
time_frequency=self.time_frequency)
# Get ending time step
self.time_to = self.__getTimeTo(self.time_run,
time_period=self.time_period_future,
time_frequency=self.time_frequency)
# Compute period time steps
self.time_steps = self.__computeTimePeriod(self.time_from, self.time_to,
time_frequency=self.time_frequency,
time_reverse=time_reverse)
# Info end
log_stream.info(' ---> Configure time ... OK')
return DataObject(self.__dict__)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to round time to reference
def __computeTimeRound(self, time_rounding):
log_stream.info(' ----> Round time run ... ')
time_round = self.time_run.round(time_rounding)
if time_round > self.time_run:
time_round = pd.date_range(end=time_round, periods=2, freq=time_rounding)[0]
log_stream.info(' -----> Algorithm time run: [' + time_round.strftime(time_format) + ']')
log_stream.info(' ----> Round time run ... DONE')
return time_round
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get time now
def __getTimeNow(self):
log_stream.info(' ----> Configure time now ... ')
time_now = None
try:
if self.time_now is None:
log_stream.info(' -----> Time now is not set. Time will be taken using time library.')
self.time_now = time.strftime(time_format, time.gmtime())
else:
                log_stream.info(' -----> Time now is set using script configuration file')
time_now = pd.to_datetime(self.time_now, format=time_format)
time_now = time_now.floor('min')
time_now = time_now.replace(minute=0)
self.time_now = time_now.strftime(time_format)
log_stream.info(' ----> Configure time now ... DONE [' + self.time_now + ']')
except BaseException:
Exc.getExc(' =====> ERROR: time now definition failed! Check your data and settings!', 1, 1)
return time_now
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get time set in argument(s)
def __getTimeArg(self):
log_stream.info(' ----> Configure time argument ... ')
time_arg = None
try:
if self.time_arg is None:
if self.time_settings is not None:
self.time_arg = self.time_settings
log_stream.info(' -----> Time argument is not set. Time will be taken using time in settings file.')
else:
log_stream.info(' -----> Time argument is not set. Time will be taken using time library.')
self.time_arg = time.strftime(time_format, time.gmtime())
else:
log_stream.info(' -----> Time argument is set using script arg(s)')
time_arg = pd.to_datetime(self.time_arg, format=time_format)
time_arg = time_arg.floor('min')
time_arg = time_arg.replace(minute=0)
self.time_arg = time_arg.strftime(time_format)
log_stream.info(' ----> Configure time argument ... DONE [' + self.time_arg + ']')
except BaseException:
Exc.getExc(' =====> ERROR: time argument definition failed! Check your data and settings!', 1, 1)
return time_arg
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to set time run
@staticmethod
def __setTimeRun(time_now, time_arg):
log_stream.info(' ----> Set time run ... ')
if time_arg is not None:
log_stream.info(' -----> Time argument is used as time run [' + time_arg.strftime(time_format) + ']')
log_stream.info(' ----> Set time run ... DONE')
return time_arg
else:
log_stream.info(' -----> Time now is used as time run [' + time_now.strftime(time_format) + ']')
log_stream.info(' ----> Set time run ... DONE')
return time_now
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define time restart
@staticmethod
def __getTimeFrom(time_run, time_period=0, time_frequency='H'):
if time_period == 0:
time_from = time_run
else:
time_period = time_period + 1
time_from = pd.date_range(end=time_run, periods=time_period, freq=time_frequency)[0]
return time_from
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get time to
@staticmethod
def __getTimeTo(time_run, time_period=0, time_frequency='H'):
if time_period == 0:
time_to = time_run
else:
time_period = time_period + 1
time_to = pd.date_range(start=time_run, periods=time_period, freq=time_frequency)[-1]
return time_to
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute period time steps
@staticmethod
def __computeTimePeriod(time_from, time_to, time_frequency='H', time_reverse=False):
time_range = pd.date_range(time_from, time_to, freq=time_frequency)
time_range = time_range.floor(time_frequency)
if time_reverse:
time_range = time_range.sort_values(return_indexer=False, ascending=False)
return time_range
# -------------------------------------------------------------------------------------
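
# Illustrative usage (the time string must match the configured time_format; the
# values below are placeholders):
#   driver_time = DataTime(time_arg='2019-07-29 06:00', time_period_past=3,
#                          time_frequency='H', time_rounding='H')
#   time_info = driver_time.getDataTime(time_reverse=True)
#   time_info['time_steps']   # pandas DatetimeIndex from time_from to time_to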
|
the-stack_0_27988
|
#!/usr/bin/env python3
import os
import re
import subprocess
import sys
from common import HOME, config
PAT_MAPS_KEY = re.compile(r'"com\.google\.android\.maps\.v2\.API_KEY" android:value=".*"')
def main(release):
generate_build_config(release)
subprocess.check_call(r'''
builddir="$MOD_HOME/build"
rm -rf $builddir/ifc $builddir/src $builddir/proguard $builddir/dex.apk $builddir/smali $MOD_HOME/app/smali/blind $MOD_HOME/app/smali/a/*.smali
mkdir -p $builddir/ifc $builddir/src
javac -Xlint:-options -g:none -source 6 -target 6 -cp $MOD_HOME/lib/android.jar:$MOD_HOME/lib/google-play-services.jar:$MOD_HOME/lib/maps.jar -d $builddir/ifc `find -H $MOD_HOME/ifc -type f -iname "*.java"`
javac -Xlint:-options -g -source 6 -target 6 -cp $builddir/ifc:$MOD_HOME/lib/android.jar:$MOD_HOME/lib/google-play-services.jar:$MOD_HOME/lib/maps.jar -d $builddir/src `find -H $MOD_HOME/src -type f -iname "*.java"`
proguard.sh @$MOD_HOME/res/%s.pg
dx --dex --output=$builddir/dex.apk $builddir/proguard.zip
java -jar $MOD_HOME/lib/baksmali.jar $builddir/dex.apk -o $builddir/smali
cp -r $builddir/smali/* $MOD_HOME/app/smali/
java -jar $MOD_HOME/lib/apktool.jar b%s $MOD_HOME/app
$MOD_HOME/bin/sign_apk.py `ls $MOD_HOME/app/dist/*.apk` %s
''' % (('release', '', 'release') if release else ('debug', ' -d', 'debug')), shell=True)
def generate_build_config(release):
maps_key = config['maps_key']['release' if release else 'debug']
if maps_key:
s = open('%s/app/AndroidManifest.xml' % HOME).read()
s = PAT_MAPS_KEY.sub('"com.google.android.maps.v2.API_KEY" android:value="%s"' % maps_key, s)
open('%s/app/AndroidManifest.xml' % HOME, 'w').write(s)
mod_version = '"' + config['version'] \
+ ('' if os.path.exists(HOME + '/app/assets/sounds') else '-mute') \
+ ('' if release else '-dev') + '"'
# bc_str = open(HOME + '/src/broot/ingress/mod/BuildConfig.java').read()
# bc_str = bc_str.replace('MOD_VERSION = null', 'MOD_VERSION = ' + mod_version)
# open(HOME + '/build/BuildConfig.java', 'w').write(bc_str)
def generate_asset_string(asset):
name = 'normal' if asset=='data' else asset.replace('data-','').replace('-','_')
caption = config['assets'].get(asset, {'caption': "Custom " + name.replace('_', ' ')})['caption']
parent = config['assets'].get(asset,{}).get('parent')
string = [asset, caption, parent] if parent else [asset, caption]
return '%s(%s)' % (name, ', '.join(['"%s"' % s for s in string]))
if __name__ == '__main__':
main(len(sys.argv) > 1 and sys.argv[1] == 'release')
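
# Typical invocation (illustrative; the script name depends on the repository
# layout): run with no arguments for a debug build, or pass "release" to build
# and sign with the release configuration, e.g. `./build.py release`.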
|
the-stack_0_27989
|
"""Parser of Excel data files."""
from datetime import datetime
import xlsxio
from xlrd import open_workbook
from virtual_warehouse.data.data_model import (
Inventory,
Item,
ItemUnit,
Location,
Order,
RackLocation,
)
from virtual_warehouse.data.utils import convert_date, convert_type, estimate_sheet_type
class Document:
"""Document class which loads xls or xlsx file and parse different data objects."""
def __init__(self, file_path):
# Determines backend for loading documents (xlsx files uses openpyxl)
self.is_xlsx = Document.check_xlsx(file_path)
self.locations = {}
self.items = {}
self.balance = {}
self.orders = {}
if self.is_xlsx:
self.doc = xlsxio.XlsxioReader(file_path)
else:
self.doc = open_workbook(file_path)
@staticmethod
def check_xlsx(file_path):
"""Check if document is .xlsx document (required for openpyxl library)."""
return file_path[-5:] == ".xlsx"
@staticmethod
def get_sheet_names(file_path):
"""Get names of all sheets in document."""
if Document.check_xlsx(file_path):
with xlsxio.XlsxioReader(file_path) as reader:
names = reader.get_sheet_names()
else:
doc = open_workbook(file_path, on_demand=True)
names = doc.sheet_names()
return [[n, estimate_sheet_type(n)] for n in names]
def close(self):
"""Release resources owned by the document."""
if self.is_xlsx:
self.doc.close()
def parse_locations(self, sheet_name="LOCATIONmaster"):
"""Parse LOCATIONmaster sheet."""
if self.is_xlsx:
types = [str, str, str, str, float, float, float, str, float, str, str]
with self.doc.get_sheet(sheet_name, types=types) as sheet:
sheet.read_header()
for row in sheet.iter_rows():
if len(row) == 0 or not row[0]:
continue
location_id = row[0]
if convert_type(row[1]) == "rack":
self.locations[location_id] = RackLocation.create(*row[:11])
else:
self.locations[location_id] = Location.create(*row[:11])
else:
sheet = self.doc.sheet_by_name(sheet_name)
for row in range(1, sheet.nrows):
location_id = str(sheet.cell(row, 0).value)
if not location_id:
continue
values = [sheet.cell(row, i).value for i in range(11)]
if convert_type(values[1]) == "rack":
self.locations[location_id] = RackLocation.create(*values)
else:
self.locations[location_id] = Location.create(*values)
return self.locations
def parse_coordinates(self, sheet_name="XYZ_coordinates"):
"""Parse XYZ_coordinates sheet."""
if self.is_xlsx:
types = [str, float, float, float]
with self.doc.get_sheet(sheet_name, types=types) as sheet:
sheet.read_header()
for row in sheet.iter_rows():
if len(row) == 0 or not row[0]:
continue
location_id = row[0]
self.locations[location_id].set_coord(*row[1:4])
else:
sheet = self.doc.sheet_by_name(sheet_name)
for row in range(1, sheet.nrows):
location_id = str(sheet.cell(row, 0).value)
if not location_id:
continue
self.locations[location_id].set_coord(
*(sheet.cell(row, i).value for i in range(1, 4))
)
return self.locations
def parse_items(self, sheet_name="ITEMmaster"):
"""Parse ITEMmaster sheet."""
if self.is_xlsx:
types = [str, str, str, str] + 5 * [
int,
str,
float,
float,
float,
str,
float,
str,
]
with self.doc.get_sheet(sheet_name, types=types) as sheet:
sheet.read_header()
for row in sheet.iter_rows():
if len(row) == 0 or not row[0]:
continue
item_id, description, gtype, zone = row[:4]
unit_levels = []
for col in range(4, len(row), 8):
unit_levels.append(
ItemUnit.create(f"{item_id}-u{col}", *row[col : col + 8],)
)
self.items[item_id] = Item.create(
item_id, description, gtype, zone, unit_levels[0], unit_levels
)
else:
sheet = self.doc.sheet_by_name(sheet_name)
for row in range(1, sheet.nrows):
item_id, description, gtype, zone = (
sheet.cell(row, i).value for i in range(4)
)
item_id = str(item_id)
if not item_id:
continue
unit_levels = []
for col in range(4, sheet.ncols, 8):
unit_levels.append(
ItemUnit.create(
f"{item_id}-u{col}",
*(sheet.cell(row, col + i).value for i in range(8)),
)
)
self.items[item_id] = Item.create(
item_id, description, gtype, zone, unit_levels[0], unit_levels
)
return self.items
def parse_inventory_balance(self, sheet_name="Inventory Ballance"):
"""Parse Inventory Balance sheet ('balance' in final version, most likely)."""
if self.is_xlsx:
types = [datetime, str, str, str, datetime, int, int, int, int, int]
with self.doc.get_sheet(sheet_name, types=types) as sheet:
sheet.read_header()
for row in sheet.iter_rows():
if len(row) == 0 or not row[0]:
continue
date, location_id = row[:2]
if date not in self.balance:
self.balance[date] = {location_id: []}
elif location_id not in self.balance[date]:
self.balance[date][location_id] = []
self.balance[date][location_id].append(Inventory.create(*row[:10]))
else:
sheet = self.doc.sheet_by_name(sheet_name)
for row in range(1, sheet.nrows):
date = convert_date(sheet.cell(row, 0).value, "%d.%m.%Y")
location_id = str(sheet.cell(row, 1).value)
if not date:
continue
if date not in self.balance:
self.balance[date] = {location_id: []}
elif location_id not in self.balance[date]:
self.balance[date][location_id] = []
self.balance[date][location_id].append(
Inventory.create(
date, *(sheet.cell(row, i).value for i in range(1, 10))
)
)
return self.balance
def parse_orders(self, sheet_name="Order"):
"""Parse Order sheet."""
if self.is_xlsx:
types = [str, str, str, str, str, str, int, str, int, int, str]
with self.doc.get_sheet(sheet_name, types=types) as sheet:
sheet.read_header()
for row in sheet.iter_rows():
if len(row) == 0 or not row[0]:
continue
order_id = row[0]
if order_id in self.orders:
self.orders[order_id].add_item(*row[7:11])
else:
self.orders[order_id] = Order.create(*row[:11])
else:
sheet = self.doc.sheet_by_name(sheet_name)
for row in range(1, sheet.nrows):
order_id = str(sheet.cell(row, 0).value)
if not order_id:
continue
if order_id in self.orders:
self.orders[order_id].add_item(
*(sheet.cell(row, i).value for i in range(7, 11))
)
else:
self.orders[order_id] = Order.create(
*(sheet.cell(row, i).value for i in range(11))
)
return self.orders
def parse_document(self):
"""Parse the whole document."""
locations = self.parse_locations()
locations = self.parse_coordinates()
items = self.parse_items()
balance = self.parse_inventory_balance()
orders = self.parse_orders()
return locations, items, balance, orders
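

# Illustrative usage (the file name is a placeholder):
#   doc = Document("warehouse_data.xlsx")
#   locations, items, balance, orders = doc.parse_document()
#   doc.close()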
|
the-stack_0_27990
|
import threading
from contextlib import contextmanager
import sys
import logging
import copy
from honeybadger.plugins import default_plugin_manager
import honeybadger.connection as connection
import honeybadger.fake_connection as fake_connection
from .payload import create_payload
from .config import Configuration
logging.getLogger('honeybadger').addHandler(logging.NullHandler())
class Honeybadger(object):
def __init__(self):
self.config = Configuration()
self.thread_local = threading.local()
self.thread_local.context = {}
def _send_notice(self, exception, exc_traceback=None, context=None):
payload = create_payload(exception, exc_traceback, config=self.config, context=context)
if self.config.is_dev() and not self.config.force_report_data:
fake_connection.send_notice(self.config, payload)
else:
connection.send_notice(self.config, payload)
def _get_context(self):
return getattr(self.thread_local, 'context', {})
def begin_request(self, request):
self.thread_local.context = self._get_context()
def wrap_excepthook(self, func):
self.existing_except_hook = func
sys.excepthook = self.exception_hook
def exception_hook(self, type, value, exc_traceback):
self._send_notice(value, exc_traceback, context=self._get_context())
self.existing_except_hook(type, value, exc_traceback)
def notify(self, exception=None, error_class=None, error_message=None, context={}):
if exception and exception.__class__.__name__ in self.config.excluded_exceptions:
            return  # skip reporting exceptions that are explicitly excluded
if exception is None:
exception = {
'error_class': error_class,
'error_message': error_message
}
merged_context = self._get_context()
if context:
merged_context.update(context)
self._send_notice(exception, context=merged_context)
def configure(self, **kwargs):
self.config.set_config_from_dict(kwargs)
self.auto_discover_plugins()
def auto_discover_plugins(self):
        # Import here to avoid a circular import error
from honeybadger import contrib
if self.config.is_aws_lambda_environment:
default_plugin_manager.register(contrib.AWSLambdaPlugin())
def set_context(self, **kwargs):
# This operation is an update, not a set!
self.thread_local.context = self._get_context()
self.thread_local.context.update(kwargs)
def reset_context(self):
self.thread_local.context = {}
@contextmanager
def context(self, **kwargs):
original_context = copy.copy(self._get_context())
self.set_context(**kwargs)
try:
yield
except:
raise
else:
self.thread_local.context = original_context
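

# Illustrative usage (the API key below is a placeholder):
#   hb = Honeybadger()
#   hb.configure(api_key='hbp_xxxxxxxx', environment='production')
#   try:
#       1 / 0
#   except ZeroDivisionError as exc:
#       hb.notify(exc, context={'user_id': 42})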
|
the-stack_0_27995
|
# Copyright (C) By StarkGang [@STARKXD]
# Don't edit credits
# Works on the basis of Cyberboysumanjay's Inshorts News API
# Test
import requests
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
from var import Var
newslog = Var.NEWS_CHANNEL_ID
@borg.on(admin_cmd("news (.*)"))
@borg.on(sudo_cmd("news (.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
if Var.NEWS_CHANNEL_ID is None:
await edit_or_reply(
event, "`Please ADD NEWS_CHANNEL_ID For This Module To Work`"
)
return
infintyvar = event.pattern_match.group(1)
main_url = f"https://inshortsapi.vercel.app/news?category={infintyvar}"
stuber = await edit_or_reply(
event,
f"Ok ! Fectching {infintyvar} From inshortsapi Server And Sending To News Channel",
)
await stuber.edit("All News Has Been Sucessfully Send To News Channel")
starknews = requests.get(main_url).json()
for item in starknews["data"]:
sedlyf = item["content"]
img = item["imageUrl"]
writter = item["author"]
dateis = item["date"]
readthis = item["readMoreUrl"]
titles = item["title"]
sed1 = img
sedm = f"**Title : {titles}** \n{sedlyf} \nDate : {dateis} \nAuthor : {writter} \nReadMore : {readthis}"
        await borg.send_file(newslog, sed1, caption=sedm)
    await stuber.edit("All news has been successfully sent to the news channel.")
|
the-stack_0_27996
|
# This is a modified version of cocoeval.py where we also have the densepose evaluation.
__author__ = 'tsungyi'
import numpy as np
import datetime
import time
from collections import defaultdict
from pycocotools import mask as maskUtils
import copy
import h5py
import pickle
from scipy.io import loadmat
import scipy.spatial.distance as ssd
import os
import itertools
class denseposeCOCOeval:
# Interface for evaluating detection on the Microsoft COCO dataset.
#
# The usage for CocoEval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
# E.params.recThrs = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# recThrs - [0:.01:1] R=101 recall thresholds for evaluation
# areaRng - [...] A=4 object area ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# iouType - ['segm'] set iouType to 'segm', 'bbox', 'keypoints' or 'uv'
# iouType replaced the now DEPRECATED useSegm parameter.
# useCats - [1] if true use category labels for evaluation
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
def __init__(self, cocoGt=None, cocoDt=None, iouType='segm', sigma=1.):
'''
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
'''
if not iouType:
print('iouType not specified. use default iouType segm')
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.params = {} # evaluation parameters
self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iouType=iouType) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if not cocoGt is None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
if iouType == 'uv':
self.sigma = sigma
self.ignoreThrBB = 0.7
self.ignoreThrUV = 0.9
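
    # Illustrative evaluation loop, mirroring the comment block above (object
    # names are placeholders):
    #   E = denseposeCOCOeval(cocoGt, cocoDt, iouType='uv')
    #   E.evaluate(); E.accumulate(); E.summarize()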
def _loadGEval(self):
print('Loading densereg GT..')
# prefix = os.path.dirname(__file__) + '/../../DensePoseData/eval_data/'
prefix = "data/coco_densepose/eval_data/"
print(prefix)
SMPL_subdiv = loadmat(prefix + 'SMPL_subdiv.mat')
self.PDIST_transform = loadmat(prefix + 'SMPL_SUBDIV_TRANSFORM.mat')
self.PDIST_transform = self.PDIST_transform['index'].squeeze()
UV = np.array([
SMPL_subdiv['U_subdiv'],
SMPL_subdiv['V_subdiv']
]).squeeze()
ClosestVertInds = np.arange(UV.shape[1])+1
self.Part_UVs = []
self.Part_ClosestVertInds = []
for i in np.arange(24):
self.Part_UVs.append(
UV[:, SMPL_subdiv['Part_ID_subdiv'].squeeze()==(i+1)]
)
self.Part_ClosestVertInds.append(
ClosestVertInds[SMPL_subdiv['Part_ID_subdiv'].squeeze()==(i+1)]
)
arrays = {}
f = h5py.File( prefix + 'Pdist_matrix.mat', 'r')
for k, v in f.items():
arrays[k] = np.array(v)
self.Pdist_matrix = arrays['Pdist_matrix']
self.Part_ids = np.array( SMPL_subdiv['Part_ID_subdiv'].squeeze())
# Mean geodesic distances for parts.
self.Mean_Distances = np.array( [0, 0.351, 0.107, 0.126,0.237,0.173,0.142,0.128,0.150] )
# Coarse Part labels.
self.CoarseParts = np.array( [ 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
6, 6, 6, 6, 7, 7, 7, 7, 8, 8] )
print('Loaded')
def _prepare(self):
'''
Prepare ._gts and ._dts for evaluation based on params
:return: None
'''
def _toMask(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
rle = coco.annToRLE(ann)
ann['segmentation'] = rle
def _getIgnoreRegion(iid, coco):
img = coco.imgs[iid]
if not 'ignore_regions_x' in img.keys():
return None
if len(img['ignore_regions_x']) == 0:
return None
rgns_merged = []
for region_x, region_y in zip(img['ignore_regions_x'], img['ignore_regions_y']):
rgns = [iter(region_x), iter(region_y)]
rgns_merged.append(list(it.next() for it in itertools.cycle(rgns)))
rles = maskUtils.frPyObjects(rgns_merged, img['height'], img['width'])
rle = maskUtils.merge(rles)
return maskUtils.decode(rle)
def _checkIgnore(dt, iregion):
if iregion is None:
return True
bb = np.array(dt['bbox']).astype(np.int)
x1,y1,x2,y2 = bb[0],bb[1],bb[0]+bb[2],bb[1]+bb[3]
x2 = min([x2,iregion.shape[1]])
y2 = min([y2,iregion.shape[0]])
if bb[2]* bb[3] == 0:
return False
crop_iregion = iregion[y1:y2, x1:x2]
if crop_iregion.sum() == 0:
return True
if not 'uv' in dt.keys(): # filtering boxes
return crop_iregion.sum()/bb[2]/bb[3] < self.ignoreThrBB
# filtering UVs
ignoremask = np.require(crop_iregion, requirements=['F'])
uvmask = np.require(np.asarray(dt['uv'][0]>0), dtype = np.uint8,
requirements=['F'])
uvmask_ = maskUtils.encode(uvmask)
ignoremask_ = maskUtils.encode(ignoremask)
uviou = maskUtils.iou([uvmask_], [ignoremask_], [1])[0]
return uviou < self.ignoreThrUV
p = self.params
if p.useCats:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
# if iouType == 'uv', add point gt annotations
if p.iouType == 'uv':
self._loadGEval()
# convert ground truth to mask if iouType == 'segm'
if p.iouType == 'segm':
_toMask(gts, self.cocoGt)
_toMask(dts, self.cocoDt)
# set ignore flag
for gt in gts:
gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
if p.iouType == 'keypoints':
gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']
if p.iouType == 'uv':
gt['ignore'] = ('dp_x' in gt)==0
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self._igrgns = defaultdict(list)
for gt in gts:
iid = gt['image_id']
if not iid in self._igrgns.keys():
self._igrgns[iid] = _getIgnoreRegion(iid, self.cocoGt)
if _checkIgnore(gt, self._igrgns[iid]):
self._gts[iid, gt['category_id']].append(gt)
for dt in dts:
if _checkIgnore(dt, self._igrgns[dt['image_id']]):
self._dts[dt['image_id'], dt['category_id']].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
tic = time.time()
print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
        if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params=p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType in ['segm', 'bbox']:
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
elif p.iouType == 'uv':
computeIoU = self.computeOgps
self.ious = {(imgId, catId): computeIoU(imgId, catId) \
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print('DONE (t={:0.2f}s).'.format(toc-tic))
def computeIoU(self, imgId, catId):
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) ==0:
return []
inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt=dt[0:p.maxDets[-1]]
if p.iouType == 'segm':
g = [g['segmentation'] for g in gt]
d = [d['segmentation'] for d in dt]
elif p.iouType == 'bbox':
g = [g['bbox'] for g in gt]
d = [d['bbox'] for d in dt]
else:
raise Exception('unknown iouType for iou computation')
# compute iou between each dt and gt region
iscrowd = [int(o['iscrowd']) for o in gt]
ious = maskUtils.iou(d, g, iscrowd)
return ious
def computeOks(self, imgId, catId):
p = self.params
        # dimension here should be Nxm
gts = self._gts[imgId, catId]
dts = self._dts[imgId, catId]
inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
dts = [dts[i] for i in inds]
if len(dts) > p.maxDets[-1]:
dts = dts[0:p.maxDets[-1]]
# if len(gts) == 0 and len(dts) == 0:
if len(gts) == 0 or len(dts) == 0:
return []
ious = np.zeros((len(dts), len(gts)))
sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0
vars = (sigmas * 2)**2
k = len(sigmas)
# compute oks between each detection and ground truth object
for j, gt in enumerate(gts):
# create bounds for ignore regions(double the gt bbox)
g = np.array(gt['keypoints'])
xg = g[0::3]; yg = g[1::3]; vg = g[2::3]
k1 = np.count_nonzero(vg > 0)
bb = gt['bbox']
x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
for i, dt in enumerate(dts):
d = np.array(dt['keypoints'])
xd = d[0::3]; yd = d[1::3]
if k1>0:
# measure the per-keypoint distance if keypoints visible
dx = xd - xg
dy = yd - yg
else:
# measure minimum distance to keypoints in (x0,y0) & (x1,y1)
z = np.zeros((k))
dx = np.max((z, x0-xd), axis=0) + np.max((z, xd-x1), axis=0)
dy = np.max((z, y0-yd), axis=0) + np.max((z, yd-y1), axis=0)
e = (dx**2 + dy**2) / vars / (gt['area'] + np.spacing(1)) / 2
if k1 > 0:
e=e[vg > 0]
ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
return ious
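    # Added note (commentary, not in the original pycocotools code): for each
    # detection/ground-truth pair the loop above evaluates the standard COCO
    # Object Keypoint Similarity,
    #     OKS = mean_i exp( -d_i^2 / (2 * s^2 * k_i^2) )
    # over the visible keypoints i, where d_i is the prediction/annotation
    # distance, s^2 is the ground-truth area and k_i = 2*sigma_i (the `vars`
    # array above); when no keypoint is visible, d_i is replaced by the
    # distance to the doubled ground-truth box.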
def computeOgps(self, imgId, catId):
p = self.params
        # dimension here should be Nxm
g = self._gts[imgId, catId]
d = self._dts[imgId, catId]
inds = np.argsort([-d_['score'] for d_ in d], kind='mergesort')
d = [d[i] for i in inds]
if len(d) > p.maxDets[-1]:
d = d[0:p.maxDets[-1]]
# if len(gts) == 0 and len(dts) == 0:
if len(g) == 0 or len(d) == 0:
return []
ious = np.zeros((len(d), len(g)))
        # compute ogps between each detection and ground truth object
sigma = self.sigma #0.255 # dist = 0.3m corresponds to ogps = 0.5
# 1 # dist = 0.3m corresponds to ogps = 0.96
# 1.45 # dist = 1.7m (person height) corresponds to ogps = 0.5)
for j, gt in enumerate(g):
if not gt['ignore']:
g_ = gt['bbox']
for i, dt in enumerate(d):
#
dx = dt['bbox'][3]
dy = dt['bbox'][2]
dp_x = np.array( gt['dp_x'] )*g_[2]/255.
dp_y = np.array( gt['dp_y'] )*g_[3]/255.
px = ( dp_y + g_[1] - dt['bbox'][1]).astype(np.int)
py = ( dp_x + g_[0] - dt['bbox'][0]).astype(np.int)
#
pts = np.zeros(len(px))
pts[px>=dx] = -1; pts[py>=dy] = -1
pts[px<0] = -1; pts[py<0] = -1
#print(pts.shape)
if len(pts) < 1:
ogps = 0.
elif np.max(pts) == -1:
ogps = 0.
else:
px[pts==-1] = 0; py[pts==-1] = 0;
ipoints = dt['uv'][0, px, py]
upoints = dt['uv'][1, px, py]/255. # convert from uint8 by /255.
vpoints = dt['uv'][2, px, py]/255.
ipoints[pts==-1] = 0
## Find closest vertices in subsampled mesh.
cVerts, cVertsGT = self.findAllClosestVerts(gt, upoints, vpoints, ipoints)
## Get pairwise geodesic distances between gt and estimated mesh points.
dist = self.getDistances(cVertsGT, cVerts)
## Compute the Ogps measure.
# Find the mean geodesic normalization distance for each GT point, based on which part it is on.
Current_Mean_Distances = self.Mean_Distances[ self.CoarseParts[ self.Part_ids [ cVertsGT[cVertsGT>0].astype(int)-1] ] ]
# Compute gps
ogps_values = np.exp(-(dist**2)/(2*(Current_Mean_Distances**2)))
#
if len(dist)>0:
ogps = np.sum(ogps_values)/ len(dist)
ious[i, j] = ogps
gbb = [gt['bbox'] for gt in g]
dbb = [dt['bbox'] for dt in d]
# compute iou between each dt and gt region
iscrowd = [int(o['iscrowd']) for o in g]
ious_bb = maskUtils.iou(dbb, gbb, iscrowd)
return ious, ious_bb
def evaluateImg(self, imgId, catId, aRng, maxDet):
'''
perform evaluation for single category and image
:return: dict (single image results)
'''
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) == 0:
return None
for g in gt:
#g['_ignore'] = g['ignore']
if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):
g['_ignore'] = True
else:
g['_ignore'] = False
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in dtind[0:maxDet]]
iscrowd = [int(o['iscrowd']) for o in gt]
# load computed ious
if p.iouType == 'uv':
#print('Checking the length', len(self.ious[imgId, catId]))
#if len(self.ious[imgId, catId]) == 0:
# print(self.ious[imgId, catId])
ious = self.ious[imgId, catId][0][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
ioubs = self.ious[imgId, catId][1][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
else:
ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
T = len(p.iouThrs)
G = len(gt)
D = len(dt)
gtm = np.zeros((T,G))
dtm = np.zeros((T,D))
gtIg = np.array([g['_ignore'] for g in gt])
dtIg = np.zeros((T,D))
if np.all(gtIg) == True and p.iouType == 'uv':
dtIg = np.logical_or(dtIg, True)
if len(ious)>0: # and not p.iouType == 'uv':
for tind, t in enumerate(p.iouThrs):
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([t,1-1e-10])
m = -1
for gind, g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[tind,gind]>0 and not iscrowd[gind]:
continue
# if dt matched to reg gt, and on ignore gt, stop
if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
break
# continue to next gt unless better match made
if ious[dind,gind] < iou:
continue
if ious[dind,gind] == 0.:
continue
# if match successful and best so far, store appropriately
iou = ious[dind, gind]
m = gind
# if match made store id of match for both dt and gt
if m == -1:
continue
dtIg[tind, dind] = gtIg[m]
dtm[tind, dind] = gt[m]['id']
gtm[tind, m] = d['id']
if p.iouType == 'uv':
if not len(ioubs)==0:
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
if dtm[tind, dind] == 0:
ioub = 0.8
m = -1
for gind, g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[tind,gind]>0 and not iscrowd[gind]:
continue
# continue to next gt unless better match made
if ioubs[dind,gind] < ioub:
continue
# if match successful and best so far, store appropriately
ioub = ioubs[dind,gind]
m = gind
# if match made store id of match for both dt and gt
if m > -1:
dtIg[:, dind] = gtIg[m]
if gtIg[m]:
dtm[tind, dind] = gt[m]['id']
gtm[tind, m] = d['id']
# set unmatched detections outside of area range to ignore
a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
# store results for given image and category
#print('Done with the function', len(self.ious[imgId, catId]))
return {
'image_id': imgId,
'category_id': catId,
'aRng': aRng,
'maxDet': maxDet,
'dtIds': [d['id'] for d in dt],
'gtIds': [g['id'] for g in gt],
'dtMatches': dtm,
'gtMatches': gtm,
'dtScores': [d['score'] for d in dt],
'gtIgnore': gtIg,
'dtIgnore': dtIg,
}
def accumulate(self, p = None):
'''
Accumulate per image evaluation results and store the result in self.eval
:param p: input params for evaluation
:return: None
'''
print('Accumulating evaluation results...')
tic = time.time()
if not self.evalImgs:
print('Please run evaluate() first')
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.areaRng)
M = len(p.maxDets)
precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
recall = -np.ones((T,K,A,M))
# create dictionary for future indexing
print('Categories:', p.catIds)
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.areaRng))
setM = set(_pe.maxDets)
setI = set(_pe.imgIds)
# get inds to evaluate
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
A0 = len(_pe.areaRng)
# retrieve E at each category, area range, and max number of detections
for k, k0 in enumerate(k_list):
Nk = k0 * A0 * I0
for a, a0 in enumerate(a_list):
Na = a0 * I0
for m, maxDet in enumerate(m_list):
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if not e is None]
if len(E) == 0:
continue
dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
# different sorting method generates slightly different results.
# mergesort is used to be consistent as Matlab implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg==0)
#print('DTIG', np.sum(np.logical_not(dtIg)), len(dtIg))
#print('GTIG', np.sum(np.logical_not(gtIg)), len(gtIg))
if npig == 0:
continue
tps = np.logical_and( dtm, np.logical_not(dtIg))
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
#print('TP_SUM', tp_sum, 'FP_SUM', fp_sum)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp+tp+np.spacing(1))
q = np.zeros((R,))
if nd:
recall[t,k,a,m] = rc[-1]
else:
recall[t,k,a,m] = 0
# numpy is slow without cython optimization for accessing elements
# use python array gets significant speed improvement
pr = pr.tolist(); q = q.tolist()
for i in range(nd-1, 0, -1):
if pr[i] > pr[i-1]:
pr[i-1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
except:
pass
precision[t,:,k,a,m] = np.array(q)
print('Final', np.max(precision), np.min(precision))
self.eval = {
'params': p,
'counts': [T, R, K, A, M],
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'precision': precision,
'recall': recall,
}
toc = time.time()
print('DONE (t={:0.2f}s).'.format( toc-tic))
def summarize(self):
'''
Compute and display summary metrics for evaluation results.
        Note this function can *only* be applied to the default parameter setting
'''
def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
p = self.params
iStr = ' {:<18} {} @[ {}={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap==1 else '(AR)'
measure = 'IoU'
if self.params.iouType == 'keypoints':
measure = 'OKS'
elif self.params.iouType =='uv':
measure = 'OGPS'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(np.abs(iouThr - p.iouThrs)<0.001)[0]
s = s[t]
s = s[:,:,:,aind,mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,aind,mind]
if len(s[s>-1])==0:
mean_s = -1
else:
mean_s = np.mean(s[s>-1])
print(iStr.format(titleStr, typeStr, measure, iouStr, areaRng, maxDets, mean_s))
return mean_s
def _summarizeDets():
stats = np.zeros((12,))
stats[0] = _summarize(1)
stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
return stats
def _summarizeKps():
stats = np.zeros((10,))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=.5)
stats[2] = _summarize(1, maxDets=20, iouThr=.75)
stats[3] = _summarize(1, maxDets=20, areaRng='medium')
stats[4] = _summarize(1, maxDets=20, areaRng='large')
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=.5)
stats[7] = _summarize(0, maxDets=20, iouThr=.75)
stats[8] = _summarize(0, maxDets=20, areaRng='medium')
stats[9] = _summarize(0, maxDets=20, areaRng='large')
return stats
def _summarizeUvs():
stats = np.zeros((18,))
stats[0] = _summarize(1, maxDets=self.params.maxDets[0])
stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.5)
stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.55)
stats[3] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.60)
stats[4] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.65)
stats[5] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.70)
stats[6] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.75)
stats[7] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.80)
stats[8] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.85)
stats[9] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.90)
stats[10] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.95)
stats[11] = _summarize(1, maxDets=self.params.maxDets[0], areaRng='medium')
stats[12] = _summarize(1, maxDets=self.params.maxDets[0], areaRng='large')
stats[13] = _summarize(0, maxDets=self.params.maxDets[0])
stats[14] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=.5)
stats[15] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=.75)
stats[16] = _summarize(0, maxDets=self.params.maxDets[0], areaRng='medium')
stats[17] = _summarize(0, maxDets=self.params.maxDets[0], areaRng='large')
return stats
if not self.eval:
raise Exception('Please run accumulate() first')
iouType = self.params.iouType
if iouType in ['segm','bbox']:
summarize = _summarizeDets
elif iouType in ['keypoints']:
summarize = _summarizeKps
elif iouType in ['uv']:
summarize = _summarizeUvs
self.stats = summarize()
def __str__(self):
self.summarize()
# ================ functions for dense pose ==============================
def findAllClosestVerts(self, gt, U_points, V_points, Index_points):
#
I_gt = np.array(gt['dp_I'])
U_gt = np.array(gt['dp_U'])
V_gt = np.array(gt['dp_V'])
#
#print(I_gt)
#
ClosestVerts = np.ones(Index_points.shape)*-1
for i in np.arange(24):
#
if sum(Index_points == (i+1))>0:
UVs = np.array( [U_points[Index_points == (i+1)],V_points[Index_points == (i+1)]])
Current_Part_UVs = self.Part_UVs[i]
Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i]
D = ssd.cdist( Current_Part_UVs.transpose(), UVs.transpose()).squeeze()
ClosestVerts[Index_points == (i+1)] = Current_Part_ClosestVertInds[ np.argmin(D,axis=0) ]
#
ClosestVertsGT = np.ones(Index_points.shape)*-1
for i in np.arange(24):
if sum(I_gt==(i+1))>0:
UVs = np.array([
U_gt[I_gt==(i+1)],
V_gt[I_gt==(i+1)]
])
Current_Part_UVs = self.Part_UVs[i]
Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i]
D = ssd.cdist( Current_Part_UVs.transpose(), UVs.transpose()).squeeze()
ClosestVertsGT[I_gt==(i+1)] = Current_Part_ClosestVertInds[ np.argmin(D,axis=0) ]
#
return ClosestVerts, ClosestVertsGT
def getDistances(self, cVertsGT, cVerts):
ClosestVertsTransformed = self.PDIST_transform[cVerts.astype(int)-1]
ClosestVertsGTTransformed = self.PDIST_transform[cVertsGT.astype(int)-1]
#
ClosestVertsTransformed[cVerts<0] = 0
ClosestVertsGTTransformed[cVertsGT<0] = 0
#
cVertsGT = ClosestVertsGTTransformed
cVerts = ClosestVertsTransformed
#
n = 27554
dists = []
for d in range(len(cVertsGT)):
if cVertsGT[d] > 0:
if cVerts[d] > 0:
i = cVertsGT[d] - 1
j = cVerts[d] - 1
if j == i:
dists.append(0)
elif j > i:
ccc = i
i = j
j = ccc
i = n-i-1
j = n-j-1
k = (n*(n-1)/2) - (n-i)*((n-i)-1)/2 + j - i - 1
k = ( n*n - n )/2 -k -1
dists.append(self.Pdist_matrix[int(k)][0])
else:
i= n-i-1
j= n-j-1
k = (n*(n-1)/2) - (n-i)*((n-i)-1)/2 + j - i - 1
k = ( n*n - n )/2 -k -1
dists.append(self.Pdist_matrix[int(k)][0])
else:
dists.append(np.inf)
return np.array(dists).squeeze()
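    # Added note (commentary, not in the original DensePose code): the index
    # arithmetic above addresses the condensed (flattened triangular)
    # Pdist_matrix of pairwise geodesic distances between the n = 27554 SMPL
    # vertices. After the swap that guarantees i > j, the two-step formula
    # collapses to
    #     k = i*(i-1)/2 + j
    # i.e. a row-major walk over the strict lower triangle; e.g. i = 3, j = 1
    # lands on k = 4, the fifth stored pair in (1,0),(2,0),(2,1),(3,0),(3,1).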
class Params:
'''
Params for coco evaluation api
'''
def setDetParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [1, 10, 100]
self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'small', 'medium', 'large']
self.useCats = 1
def setKpParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [20]
self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'medium', 'large']
self.useCats = 1
def setUvParams(self):
self.imgIds = []
self.catIds = []
self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [20]
self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'medium', 'large']
self.useCats = 1
def __init__(self, iouType='segm'):
if iouType == 'segm' or iouType == 'bbox':
self.setDetParams()
elif iouType == 'keypoints':
self.setKpParams()
elif iouType == 'uv':
self.setUvParams()
else:
raise Exception('iouType not supported')
self.iouType = iouType
# useSegm is deprecated
self.useSegm = None
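# ---------------------------------------------------------------------------
# Usage sketch (added commentary, not part of the original module). A minimal
# example of the intended evaluate -> accumulate -> summarize flow, assuming
# `coco_gt` / `coco_dt` are already-built COCO API objects and
# `DensePoseCocoEval` stands for whatever name this evaluator class is
# imported under:
#
#     evaluator = DensePoseCocoEval(coco_gt, coco_dt, iouType='uv', sigma=0.255)
#     evaluator.evaluate()     # per-image, per-category matching -> evalImgs
#     evaluator.accumulate()   # fills eval['precision'] and eval['recall']
#     evaluator.summarize()    # prints the AP/AR lines and fills .stats
# ---------------------------------------------------------------------------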
|
the-stack_0_27997
|
import requests
import json
class UrlscanException(Exception):
pass
class Urlscan:
def __init__(self):
self.base_url = "https://urlscan.io/api/"
self.version = "v1"
def search(self, query):
assert len(query) > 0, "Qeury must be defined"
payload = {"q": query}
url = self.url_for("/search/")
r = requests.get(url, params=payload)
if r.status_code == 200:
return r.json()
else:
raise UrlscanException("urlscan.io returns %s" % r.status_code)
def result(self, uuid):
assert len(uuid) > 0, "UUID must be defined"
url = self.url_for("/result/{}/".format(uuid))
r = requests.get(url)
if r.status_code == 200:
return r.json()
else:
raise UrlscanException("urlscan.io returns %s" % r.status_code)
def url_for(self, path):
return "{}{}{}".format(self.base_url, self.version, path)
|
the-stack_0_27998
|
# -*- coding: utf-8 -*-
# Load calculation for MOMO during atmospheric flight
# Computes the axial force and the bending moment
# Reference: "Structural System Design of Liquid Rockets"
from __future__ import print_function
import sys
import numpy as np
import scipy.integrate as integrate
from scipy.interpolate import interp1d
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager
from matplotlib.font_manager import FontProperties
from matplotlib.backends.backend_pdf import PdfPages
import time as tm
from fitting import fitting_6lines
plt.close("all")
process_start = tm.time()
class Component:
def __init__(self, length, weight_dry, prop_init, prop_end, burntime, press=0.0):
self.length = length
self.weight_dry = weight_dry
self.prop_init = prop_init
self.prop_end = prop_end
self.burntime = burntime
self.prop = np.linspace(prop_init, prop_end, burntime)
self.weight_wet = np.linspace(weight_dry, weight_dry, burntime) + self.prop
self.press = press # NOT USED NOW
        # Set the inert (dry) mass distribution
self.density = self.weight_dry/self.length*np.ones([burntime,self.length])
        # Set the propellant mass distribution
if self.prop_init != 0.0:
density_prop_init = prop_init/self.length # 初期推進剤線密度
for time in range(self.burntime):
num_block_prop = int(self.prop[time]/density_prop_init+1e-8)
array_current_prop = density_prop_init*np.ones(self.length)
array_current_prop[0:self.length-num_block_prop]=0
self.density[time] = self.density[time] + array_current_prop
amari = self.weight_wet[time]-sum(self.density[time])
if amari < -1e-8:
print("ERROR: NEGATIVE MASS")
print(time,self.burntime,num_block_prop,self.length,array_current_prop,amari)
exit(1)
if abs(amari)>1e-8:
self.density[time][-num_block_prop-1] = self.density[time][-num_block_prop-1] + amari
        # Consistency check
for time in range(self.burntime):
if abs(sum(self.density[time])-self.weight_wet[time])>1e-8:
print(u"ERROR: 質量と密度積分値が一致しません。")
print(self.weight_wet[time]-sum(self.density[time]),density_prop_init)
exit(1)
def show(self):
print(u"[%d mm,\t%d kg,\t%d kg,\t%d kg]" %(self.length, self.weight_dry, self.prop_init, self.prop_end))
class Rocket:
def __init__(self, burntime, diameter):
        # The following need finalize() to be called
# dmdx
self.burntime = burntime
self.length = 0
self.weight_dry = 0
self.weight_wet = 0
self.prop = np.zeros(self.burntime)
self.dmdx = np.zeros([self.burntime, self.length])
self.mass = np.zeros(self.burntime)
self.inertia = np.zeros(self.burntime)
self.x_CG = np.zeros(self.burntime)
self.x_CP = np.zeros(self.burntime)
self.x = np.arange(self.length)
#self.inertia_distribution = np.zeros([self.burntime, self.length+1])
self.diameter= diameter
self.area = 1.0/4 * np.pi * diameter**2 # 面積 m2
self.divid = [0]
def add_component(self, component):
        # Add a component and update the rocket parameters
length_start = self.length
self.length = self.length + component.length
self.weight_dry = self.weight_dry + component.weight_dry
self.prop = self.prop + component.prop
self.weight_wet = self.weight_wet + component.weight_wet
self.divid.append(self.length)
self.dmdx = np.concatenate((self.dmdx,np.zeros([self.burntime, component.length])),axis=1)
#self.inertia_distribution = np.zeros([self.burntime, self.length])
for time in range(self.burntime):
self.dmdx[time][length_start:self.length] = component.density[time]
def set_C_A(self, C_A):
        # Set the axial force coefficient after the components have been added
        # C_A: axial force coefficient of the whole vehicle; only the function is provided for now so the input can later become a distribution
self.C_A = C_A
self.dC_Adx = np.zeros([self.burntime, self.length+1])
for time in range(self.burntime):
self.dC_Adx[time][0] = C_A
def set_C_N(self, C_N, nose, fin, engine):
        # Set the normal force coefficient after the components have been added
        # C_N: normal force coefficient of the whole vehicle; a function is provided so the input can later become a distribution
self.dC_Ndx = np.zeros([self.burntime, self.length+1])
pos_nose = int(nose.length/2)
pos_fin = int(self.length - (engine.length + fin.length / 2))
for time in range(self.burntime):
self.C_N_nose = C_N * (self.x_CP[time] - pos_fin ) / (pos_nose - pos_fin)
self.C_N_tail = C_N * (self.x_CP[time] - pos_nose) / (pos_fin - pos_nose)
self.dC_Ndx[time][pos_nose] = self.C_N_nose
self.dC_Ndx[time][pos_fin] = self.C_N_tail
def set_x_CP(self, x_CP):
self.x_CP = x_CP*np.ones(self.burntime)
def finalize(self):
        # Boundary handling of dmdx (set the aft end of the vehicle to 0)
self.x = np.arange(rocket.length+1)
self.dmdx = np.concatenate((self.dmdx,np.zeros([self.burntime, 1])),axis=1)
        # Compute the inertial properties
for time in range(self.burntime):
self.mass[time] = np.sum(self.dmdx[time])
self.x_CG[time] = np.sum(self.dmdx[time] * self.x)
self.inertia[time] = np.sum(self.dmdx[time] * self.x ** 2)
self.x_CG[time] = self.x_CG[time] / self.mass[time] # [mm]
self.inertia[time] = self.inertia[time] - self.mass[time] * self.x_CG[time] ** 2 # [kg*mm2]
def calc_axial_load(rocket,thrust_a,q_a,rating_time):
drag = np.zeros([rocket.burntime, rocket.length+1])
force_A = np.zeros([rocket.burntime, rocket.length+1])
X_dotdot = np.zeros(rocket.burntime)
#for time in range(burntime): # for all the duration.
for time in rating_time:
X_dotdot[time] = (thrust_a[time] - rocket.area * q_a[time] * rocket.C_A) / rocket.mass[time] # [m/s2]
drag[time] = rocket.area * q_a[time] * np.cumsum(rocket.dC_Adx[time])
force_A[time] = - drag[time] - X_dotdot[time] * np.cumsum(rocket.dmdx[time])
return [X_dotdot,drag,force_A]
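# Added note (not in the original script): calc_axial_load first solves the
# rigid-body axial acceleration Xdd = (T - q*S*C_A) / m and then accumulates
# drag and inertia from the nose backwards, so the axial force at station x is
#     F_A(x) = -D(x) - Xdd * m(x)
# where D(x) and m(x) are the drag and mass integrated from the nose up to x.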
def calc_bending_moment(rocket,thrust_a,q_a,rating_time):
M_a = np.zeros([rocket.burntime, rocket.length+1])
M_a1 = np.zeros([rocket.burntime, rocket.length+1])
M_a2 = np.zeros([rocket.burntime, rocket.length+1])
M_d = np.zeros([rocket.burntime, rocket.length+1])
M_d1 = np.zeros([rocket.burntime, rocket.length+1])
M_d2 = np.zeros([rocket.burntime, rocket.length+1])
M1 = np.zeros([rocket.burntime, rocket.length+1])
M2 = np.zeros([rocket.burntime, rocket.length+1])
Z_dotdot_d = np.zeros(rocket.burntime)
Z_dotdot_a = np.zeros(rocket.burntime)
Omega_dot_d = np.zeros(rocket.burntime)
Omega_dot_a = np.zeros(rocket.burntime)
#for time in range(burntime): # for all the duration
for time in rating_time:
#if(time%10==0):print(u"曲げモーメント計算:燃焼時間 %d 秒"% (time)) # for all the duration
## Symbol:
# *_d: Moment by Gimbal
# *_a: Moment by Air Force
# gimbal
Z_dotdot_d[time] = T_g_a[time] / rocket.mass[time] # [m/s2]
Omega_dot_d[time] = T_g_a[time] * rocket.x_CG[time] / rocket.inertia[time] # [m/mm/s2]
for pos in range(rocket.length+1):
M_d1[time][pos] = T_g_a[time] * pos * 1e-3 # [Nm]
M_d2[time][pos] = - np.sum(\
(Z_dotdot_d[time] + Omega_dot_d[time]*(rocket.x_CG[time]- rocket.x[:pos]))\
* rocket.dmdx[time][:pos]\
* (pos - rocket.x[:pos])) * 1e-3 # [Nm]
# air force
Z_dotdot_a[time] = rocket.area * q_a[time] * np.sum(rocket.dC_Ndx[time]) / rocket.mass[time] # [m/s2]
Omega_dot_a[time] = rocket.area * q_a[time] * np.sum(rocket.dC_Ndx[time]*(rocket.x_CG[time]-rocket.x)) / rocket.inertia[time] # [m/mm/s2]
for pos in range(rocket.length+1):
M_a1[time][pos] = + rocket.area * q_a[time] * np.sum(rocket.dC_Ndx[time][:pos] * (pos - rocket.x[:pos])) * 1e-3 # [Nm]
M_a2[time][pos] = - np.sum(\
(Z_dotdot_a[time] + Omega_dot_a[time]*(rocket.x_CG[time]- rocket.x[:pos]))\
* rocket.dmdx[time][:pos]\
* (pos - rocket.x[:pos])) * 1e-3 # [Nm]
M_a = M_a1 + M_a2
M_d = M_d1 + M_d2
M1 = M_a + M_d
M2 = M_a - M_d
M_max = np.maximum (abs(M1),abs(M2))
return [M_a, M_d, M1, M2, M_max, Z_dotdot_d, Z_dotdot_a, Omega_dot_d, Omega_dot_a]
def calc_equivalent_axial_force(force_A,M_max,diameter):
F_eq_comp = force_A-M_max*4/diameter
F_eq_tens = force_A+M_max*4/diameter
return [F_eq_comp, F_eq_tens]
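# Added note (not in the original script): the equivalent axial force folds the
# bending moment into an axial load on a thin-walled shell of diameter D,
#     F_eq = F_A -/+ 4*M/D
# For example, with F_A = -50 kN (compression), M = 40 kN*m and D = 2.0 m the
# compressive equivalent is -50e3 - 4*40e3/2.0 = -130 kN.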
def calc_rating_load(load,divid,rating_time,sign):
load_rating = []
for i in range(len(divid)-1):
i_start = divid[i]
i_end = divid[i+1]+1
val = 0
for j,time in enumerate(rating_time):
cur = max(load[time][i_start:i_end]*sign)
val = max(val,cur)
load_rating.append(val*sign)
return load_rating
if __name__ == "__main__":
print(u"****** IST 荷重計算プログラム ******")
g0 = 9.80665
    #====== Input specification zone =====================================================================================================
#=====================================================================================================================================
#=====================================================================================================================================
    # Trajectory data, based on the ZERO Phase6E trajectory design
    burntime = 163 # burn time, s
    rating_label = ["LiftOff" , "MaxQ" , "MaxDrag" , "MECO" ]
    rating_time = [0 , 55 , 60 , 162 ] # rating (design load) times, s
    thrust = [500400 , 570600 , 570600 , 571000 ] # thrust, N
    q = [6.4E+00 , 3.1E+04 , 3.1E+04 , 0.2E+03 ] # dynamic pressure, Pa
    max_gimbal_angle = 8 * np.pi/180 # maximum gimbal deflection angle [rad]
    T_g = [v*np.sin(max_gimbal_angle) for v in thrust] # lateral thrust due to the gimbal, N
    # Aerodynamic data, from MISSILE DATCOM or CFD
    dia = 2.0 # vehicle diameter, m
    C_A = 0.746 # axial force coefficient, ND
    C_N = 2.1 # normal force coefficient, ND
    x_CP = 6600 # center-of-pressure location from the nose, mm
    savefig_flag = True # whether to save the figure output
    savepdf_flag = True # whether to also write a PDF
save_name = u"ZERO_Ph6F_NP_Case1"
if(savepdf_flag):pdf = PdfPages(save_name + u"_plot.pdf")
sys.stdout = sys.__stdout__
print(u"コンポーネント設定開始")
    # === Components ====
# comp = Component(length_mm, weight_kg, prop_init_kg, prop_end_kg, burntime_sec, press_MPa)
nose = Component(3000, 235, 0, 0, burntime, 0.0)
tank_2nd_LOx = Component(500, 142, 3967, 3967, burntime, 0.5)
tank_2nd_inter = Component(2180, 364, 0, 0, burntime, 0.0)
tank_2nd_fuel = Component(900, 80, 1853, 1853, burntime, 0.5)
tank_interstage = Component(3000, 300, 0, 0, burntime, 0.0)
tank_1st_LOx = Component(5400, 686, 19477, 109, burntime, 0.5)
tank_1st_inter = Component(1500, 2457, 0, 0, burntime, 0.0)
tank_1st_fuel = Component(3800, 517, 10223, 97, burntime, 0.5)
fin = Component(1000, 200, 0, 0, burntime, 0.0)
engine = Component(1000, 1080, 0, 0, burntime, 0.0)
    # === Curve fitting of the bending moment ===
    fitting_flag = False # whether to perform the curve fitting
    ## x1-x6 : distances from the nose at which the concentrated loads act, mm
#x1 = 0
#x2 = 1096
#x3 = 3497
#x4 = 5605
#x5 = 6105
#x6 = 8905
#x_end = 9884
    # ==== End of inputs ====================================================================================================================
#=====================================================================================================================================
#=====================================================================================================================================
    # === Add the components to the Rocket class ====
rocket = Rocket(burntime, dia)
rocket.add_component(nose)
rocket.add_component(tank_2nd_LOx)
rocket.add_component(tank_2nd_inter)
rocket.add_component(tank_2nd_fuel)
rocket.add_component(tank_interstage)
rocket.add_component(tank_1st_LOx)
rocket.add_component(tank_1st_inter)
rocket.add_component(tank_1st_fuel)
rocket.add_component(fin)
rocket.add_component(engine)
rocket.set_x_CP(x_CP)
rocket.set_C_A(C_A)
rocket.set_C_N(C_N,nose,fin,engine)
rocket.finalize()
print(u"コンポーネント設定終了")
print(u"入力値出力開始")
    # ==== Interpolation ====
time_a = np.linspace(0,burntime, burntime+1)
T_g_f = interp1d(rating_time, T_g, fill_value="extrapolate")
thrust_f = interp1d(rating_time, thrust, fill_value="extrapolate")
q_f = interp1d(rating_time, q, fill_value="extrapolate")
T_g_a = T_g_f(time_a)
thrust_a = thrust_f(time_a)
q_a = q_f(time_a)
    # ==== Plot the mass distribution ====
plt.figure(1)
for (i, time) in enumerate(rating_time):
plt.plot(range(rocket.length+1),rocket.dmdx[time], label= "%s" % (rating_label[i]))
for j in rocket.divid:
plt.axvline(x=j, color = "k", linestyle="--", alpha = 0.1)
plt.xlabel("STA mm")
plt.ylabel("mass distribution kg/mm")
plt.title("mass distribution")
plt.legend(loc="best")
if(savefig_flag):plt.savefig(save_name + "_load_calculation_mass distribution.png")
if(savepdf_flag):pdf.savefig()
    # ==== Plot the axial and normal force coefficients =====
time_force = 0
plt.figure(2)
plt.plot(range(rocket.length+1),rocket.dC_Adx[time_force], linewidth=5)
for j in rocket.divid:
plt.axvline(x=j, color = "k", linestyle="--", alpha = 0.2)
plt.xlabel("STA [mm]")
plt.ylabel("dC_A/dx [kg/mm]")
plt.xlim([-100, rocket.length+100])
plt.title(r"$\frac{dC_A}{dx}$")
if(savefig_flag):plt.savefig(save_name + "_load_calculation_dCAdx.png")
if(savepdf_flag):pdf.savefig()
plt.figure(3)
plt.plot(range(rocket.length+1),rocket.dC_Ndx[time_force], linewidth=5)
for j in rocket.divid:
plt.axvline(x=j, color = "k", linestyle="--", alpha = 0.2)
plt.xlabel("STA [mm]")
plt.ylabel("dC_N/dx [kg/mm]")
plt.xlim([-100, rocket.length+100])
plt.title(r"$\frac{dC_N}{dx}$")
if(savefig_flag):plt.savefig(save_name + "_load_calculation_dCNdx.png")
if(savepdf_flag):pdf.savefig()
print(u"入力値出力終了")
    # ==== Axial force ====
print(u"軸力計算中...")
[X_dotdot,drag,force_A] = calc_axial_load(rocket,thrust_a,q_a,rating_time)
print(u"軸力計算終了")
print(u"軸力出力開始")
plt.figure(4)
X_dotdot_a =[]
for (i, time) in enumerate(rating_time):
plt.plot(range(rocket.length+1),force_A[time], label = "%s" % (rating_label[i]))
X_dotdot_a.append(X_dotdot[time])
for j in rocket.divid:
plt.axvline(x=j, color = "k", linestyle="--", alpha = 0.2)
plt.xlabel("STA mm")
plt.ylabel("Axial load N")
plt.ylim(ymin=-18000)
plt.title("Axial load")
plt.legend(loc="best")
if(savefig_flag):plt.savefig(save_name + "_load_calculation_axial_load.png")
if(savepdf_flag):pdf.savefig()
print(u"軸力出力終了")
    # ==== Bending moment ====
print(u"曲げモーメント計算中...")
[M_a, M_d, M1, M2, M_max, Z_dotdot_d, Z_dotdot_a, Omega_dot_d, Omega_dot_a] = calc_bending_moment(rocket,thrust_a,q_a,rating_time)
print(u"曲げモーメント計算終了")
    # ==== Plot the bending moment ====
print(u"曲げモーメント出力開始")
Z_dotdot_d_a = []
Z_dotdot_a_a = []
Omega_dot_d_a = []
Omega_dot_a_a = []
for (i, time) in enumerate(rating_time):
Z_dotdot_d_a.append(Z_dotdot_d[time])
Z_dotdot_a_a.append(Z_dotdot_a[time])
        # Build the summary plots
plt.figure(5)
plt.plot(range(rocket.length+1),M1[time], label="%s" % (rating_label[i]))
plt.figure(6)
plt.plot(range(rocket.length+1),M2[time], label="%s" % (rating_label[i]))
plt.figure(7)
plt.plot(range(rocket.length+1),M_max[time], label="%s" % (rating_label[i]))
        # Breakdown of the bending moment
plt.figure()
#plt.plot(range(rocket.length+1),M_a1[time], label = "Air Force")
#plt.plot(range(rocket.length+1),M_a2[time], label = "Air Force Inertia")
#plt.plot(range(rocket.length+1),M_d1[time], label = "Gimbal")
#plt.plot(range(rocket.length+1),M_d2[time], label = "Gimbal Inertia")
plt.plot(range(rocket.length+1),M_a[time], label = "Air Force")
plt.plot(range(rocket.length+1),M_d[time], label = "Gimbal")
plt.axhline(y=0, color = "k", linestyle="--", alpha = 0.2)
for j in rocket.divid:
plt.axvline(x=j, color = "k", linestyle="--", alpha = 0.2)
plt.legend(loc = "best")
plt.xlabel("STA mm")
plt.ylabel("Bending Moment Nm")
plt.title("%s Breakdown of BMD" % (rating_label[i]))
if(savefig_flag):plt.savefig(save_name + "_load_calculation_bending_moment_%s.png" % (rating_label[i]))
if(savepdf_flag):pdf.savefig()
if(fitting_flag): # 曲げモーメント曲線のフィッティング
fitting_6lines(rocket.x, M1[time], x1, x2, x3, x4, x5, x6, x_end,True, True, True,
"%s_M++_%s" % (save_name, rating_label[i]), "%s_M++_%s" % (save_name, rating_label[i]))
if(savepdf_flag):pdf.savefig()
fitting_6lines(rocket.x, M2[time], x1, x2, x3, x4, x5, x6, x_end,True, False, True,
"%s_M+-_%s" % (save_name, rating_label[i]), "%s_M+-_%s" % (save_name, rating_label[i]))
if(savepdf_flag):pdf.savefig()
plt.figure(5)
plt.axhline(y=0, color = "k", linestyle="--", alpha = 0.2)
for j in rocket.divid:
plt.axvline(x=j, color = "k", linestyle="--", alpha = 0.2)
plt.xlabel("STA mm")
plt.ylabel("Bending Moment Nm")
plt.title("Bending Moment (Airforce + Gimbal)")
plt.legend(loc="best")
if(savefig_flag):plt.savefig(save_name + u"_load_calculation_BendingMoment_same_sign.png")
if(savepdf_flag):pdf.savefig()
plt.figure(6)
plt.axhline(y=0, color = "k", linestyle="--", alpha = 0.2)
for j in rocket.divid:
plt.axvline(x=j, color = "k", linestyle="--", alpha = 0.2)
plt.xlabel("STA mm")
plt.ylabel("Bending Moment Nm")
plt.title("Bending Moment (Airforce - Gimbal)")
plt.legend(loc="best")
if(savefig_flag):plt.savefig(save_name + u"_load_calculation_BendingMoment_different_sign.png")
if(savepdf_flag):pdf.savefig()
plt.figure(7)
plt.axhline(y=0, color = "k", linestyle="--", alpha = 0.2)
for j in rocket.divid:
plt.axvline(x=j, color = "k", linestyle="--", alpha = 0.2)
plt.xlabel("STA mm")
plt.ylabel("Bending Moment Nm")
plt.title("Max Bending Moment(abs)")
plt.legend(loc="best")
if(savefig_flag):plt.savefig(save_name + u"_load_calculation_BendingMoment_Max.png")
if(savepdf_flag):pdf.savefig()
print(u"曲げモーメント出力終了")
    # ==== Equivalent axial force ====
print(u"等価軸力計算中...")
[F_eq_comp, F_eq_tens]=calc_equivalent_axial_force(force_A,M_max,dia)
print(u"等価軸力計算終了")
print(u"等価軸力出力開始")
plt.figure()
F_eq_comp_a =[]
for (i, time) in enumerate(rating_time):
plt.plot(range(rocket.length+1),-F_eq_comp[time], label = "%s" % (rating_label[i]))
F_eq_comp_a.append(F_eq_comp[time])
for j in rocket.divid:
plt.axvline(x=j, color = "k", linestyle="--", alpha = 0.2)
plt.xlabel("STA mm")
plt.ylabel("Axial force N")
plt.ylim(ymin=0)
plt.title("Equivalent Axial Force (Compression)")
plt.legend(loc="best")
if(savefig_flag):plt.savefig(save_name + "_load_calculation_equivalent_axial_force_compressive.png")
if(savepdf_flag):pdf.savefig()
#plt.figure()
#F_eq_tens_a =[]
#for (i, time) in enumerate(rating_time):
# plt.plot(range(rocket.length+1),F_eq_tens[time], label = "%s" % (rating_label[i]))
# F_eq_tens_a.append(F_eq_tens[time])
#for j in rocket.divid:
# plt.axvline(x=j, color = "k", linestyle="--", alpha = 0.2)
#plt.xlabel("STA mm")
#plt.ylabel("Axial force N")
#plt.ylim(ymin=0)
#plt.title("Equivalent Axial Force (Tensile)")
#plt.legend(loc="best")
#if(savefig_flag):plt.savefig(save_name + "_load_calculation_equivalent_axial_force_tensile.png")
#if(savepdf_flag):pdf.savefig()
print(u"等価軸力出力終了")
    # ==== Rating loads ====
print(u"評定荷重計算中...")
force_A_rating = calc_rating_load(force_A, rocket.divid,rating_time,-1)
M_max_rating = calc_rating_load(M_max, rocket.divid,rating_time, 1)
F_eq_tens_rating = calc_rating_load(F_eq_tens,rocket.divid,rating_time, 1)
F_eq_comp_rating = calc_rating_load(F_eq_comp,rocket.divid,rating_time,-1)
print(u"評定荷重計算終了")
print(u"評定荷重出力開始")
fp = open(save_name + "_rating.csv", "w") # 出力先をファイルに変更
rocket_divid_sta = [v for v in rocket.divid]
fp.write("開始位置[mm]," + ",".join(map(str,rocket_divid_sta[0:-1]))+"\n")
fp.write("終了位置[mm]," + ",".join(map(str,rocket_divid_sta[1:]))+"\n")
fp.write("軸力[N]," + ",".join(map(str,force_A_rating))+"\n")
fp.write("曲げモーメント[Nm]," + ",".join(map(str,M_max_rating))+"\n")
#fp.write("等価軸引張力[N]," + ",".join(map(str,F_eq_tens_rating))+"\n")
fp.write("等価軸圧縮力[N]," + ",".join(map(str,F_eq_comp_rating))+"\n")
fp.close()
print(u"評定荷重出力終了")
    # ==== Post-processing ====
# plt.show()
if(savepdf_flag):pdf.close()
print(u"処理時間:%.1f sec" % (tm.time() - process_start))
    # ==== Text output ====
sys.stdout = open(save_name + u"_output.txt", "w") # 出力先をファイルに変更
print(u"==== 結果出力 ====")
print(u"★ 入力値")
print(u"機体直径 = %d mm,\t機体断面積 = %.3f m2" % (dia*1e3, rocket.area))
print(u"燃焼時間 = %d 秒" % (burntime))
# print(u"")
print(u"凡例 :\t\t\t[LiftOff, MaxQ, MaxDrag, MECO]")
print(u"時刻 :\t\t\t[%d sec, %d sec, %d sec, %d sec]" % (rating_time[0], rating_time[1], rating_time[2], rating_time[3]))
print(u"動圧 :\t\t\t[%.1f Pa, %.1f Pa, %.1f Pa, %.1f Pa]" % (q[0], q[1], q[2], q[3]))
print(u"推力 :\t\t\t[%.1f N, %.1f N, %.1f N, %.1f N]" % (thrust[0], thrust[1], thrust[2], thrust[3]))
print(u"ジンバル横推力 :\t[%.1f N, %.1f N, %.1f N, %.1f N]" % (T_g[0], T_g[1], T_g[2], T_g[3]))
print(u"軸力係数C_A :\t\t[%.1f , %.1f , %.1f , %.1f ]" % (C_A, C_A, C_A, C_A))
print(u"法線力係数C_N :\t\t[%.1f , %.1f , %.1f , %.1f ]" % (C_N, C_N, C_N, C_N))
print(u"風圧中心x_CP :\t\t[%.1f mm, %.1f mm, %.1f mm, %.1f mm]" % (rocket.x_CP[rating_time[0]], rocket.x_CP[rating_time[1]], rocket.x_CP[rating_time[2]], rocket.x_CP[rating_time[3]]))
print(u"重心x_CG(参考) :\t[%.1f mm, %.1f mm, %.1f mm, %.1f mm]" % (rocket.x_CG[rating_time[0]], rocket.x_CG[rating_time[1]], rocket.x_CG[rating_time[2]], rocket.x_CG[rating_time[3]]))
print(u"分割点[mm] :\t\t", end="")
print(rocket.divid)
print(u"")
print(u"★ 計算結果")
print(u"軸方向加速度 :\t\t\t[%.1f m/s2, %.1f m/s2, %.1f m/s2, %.1f m/s2]" % (X_dotdot_a[0], X_dotdot_a[1], X_dotdot_a[2], X_dotdot_a[3]))
print(u"垂直方向加速度(空気力) :\t[%.1f m/s2, %.1f m/s2, %.1f m/s2, %.1f m/s2]" % (Z_dotdot_a_a[0], Z_dotdot_a_a[1], Z_dotdot_a_a[2], Z_dotdot_a_a[3]))
print(u"垂直方向加速度(ジンバル) :\t[%.1f m/s2, %.1f m/s2, %.1f m/s2, %.1f m/s2]" % (Z_dotdot_d_a[0], Z_dotdot_d_a[1], Z_dotdot_d_a[2], Z_dotdot_d_a[3]))
print(u"")
print(u"==== コンポーネント ====")
print(u"[長さ mm,\tドライ重量 kg,\t推進剤重量 kg,\t推進剤空時 kg]")
print(u"fairing :\t", end="")
nose.show()
print(u"2nd_LOx_tank :\t", end="")
tank_2nd_LOx.show()
print(u"2nd_fuel_tank :\t", end="")
tank_2nd_fuel.show()
print(u"1st_LOx_tank :\t", end="")
tank_1st_LOx.show()
print(u"1st_fuel_tank :\t", end="")
tank_1st_fuel.show()
print(u"fin :\t", end="")
fin.show()
print(u"engine :\t", end="")
engine.show()
    # Read back what was written to the file and echo it to standard output
sys.stdout.close()
sys.stdout = sys.__stdout__
print(open(save_name + u"_output.txt","r").read())
|
the-stack_0_27999
|
# Unpacking of parameters:
def contador(* num):
    # the * signals parameter unpacking
print(num)
contador(2, 1, 4, 6)
contador(4, 9)
contador(5, 6, 2, 7, 0, 9, 6, 7)
print('*' * 30)
print('*' * 30)
# ---------------------------
# another way to print, using a for loop
def contador2(* num2):
for valor in num2:
print(f'{valor} ', end='')
print('Fim')
contador2(2, 1, 4, 6)
contador2(4, 9)
contador2(5, 6, 2, 7, 0, 9, 6, 7)
print('*' * 30)
print('*' * 30)
#---------------------------
# using len:
def contador3(* num):
tamanho = len(num)
print(f'Recebi os valores {num} e são ao todo {tamanho} números!')
contador3(2, 1, 4, 6)
contador3(4, 9)
contador3(5, 6, 2, 7, 0, 9, 6, 7)
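# ---------------------------
# Extra example (added sketch, not part of the original exercise): the same
# packing idea works for keyword arguments, which ** collects into a dict.
def person_data(**info):
    for key, value in info.items():
        print(f'{key} = {value}', end='; ')
    print('End')
person_data(name='Ana', age=31)
person_data(city='Lisbon')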
|
the-stack_0_28000
|
# https://gist.github.com/adoc/8550490
def unpad(bytestring, k=16):
"""
Remove the PKCS#7 padding from a text bytestring.
"""
val = bytestring[-1]
if val > k:
raise ValueError('Input is not padded or padding is corrupt')
l = len(bytestring) - val
return bytestring[:l]
## @param bytestring The text to encode.
## @param k The padding block size.
# @return bytestring The padded bytestring.
def pad(bytestring, k=16):
"""
Pad an input bytestring according to PKCS#7
"""
l = len(bytestring)
val = k - (l % k)
return bytestring + bytearray([val] * val)
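# Example round trip (added sketch, not part of the original gist):
if __name__ == "__main__":
    message = bytearray(b"secret message")
    padded = pad(message, k=16)
    assert len(padded) % 16 == 0
    assert unpad(padded, k=16) == b"secret message"
    print("PKCS#7 round trip OK, padded length:", len(padded))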
|
the-stack_0_28001
|
from helper import unittest, PillowTestCase, hopper
from array import array
import sys
from PIL import Image
class TestImagePutData(PillowTestCase):
def test_sanity(self):
im1 = hopper()
data = list(im1.getdata())
im2 = Image.new(im1.mode, im1.size, 0)
im2.putdata(data)
self.assert_image_equal(im1, im2)
# readonly
im2 = Image.new(im1.mode, im2.size, 0)
im2.readonly = 1
im2.putdata(data)
self.assertFalse(im2.readonly)
self.assert_image_equal(im1, im2)
def test_long_integers(self):
# see bug-200802-systemerror
def put(value):
im = Image.new("RGBA", (1, 1))
im.putdata([value])
return im.getpixel((0, 0))
self.assertEqual(put(0xFFFFFFFF), (255, 255, 255, 255))
self.assertEqual(put(0xFFFFFFFF), (255, 255, 255, 255))
self.assertEqual(put(-1), (255, 255, 255, 255))
self.assertEqual(put(-1), (255, 255, 255, 255))
if sys.maxsize > 2**32:
self.assertEqual(put(sys.maxsize), (255, 255, 255, 255))
else:
self.assertEqual(put(sys.maxsize), (255, 255, 255, 127))
def test_pypy_performance(self):
im = Image.new('L', (256, 256))
im.putdata(list(range(256))*256)
def test_mode_i(self):
src = hopper('L')
data = list(src.getdata())
im = Image.new('I', src.size, 0)
im.putdata(data, 2, 256)
target = [2 * elt + 256 for elt in data]
self.assertEqual(list(im.getdata()), target)
def test_mode_F(self):
src = hopper('L')
data = list(src.getdata())
im = Image.new('F', src.size, 0)
im.putdata(data, 2.0, 256.0)
target = [2.0 * float(elt) + 256.0 for elt in data]
self.assertEqual(list(im.getdata()), target)
def test_array_B(self):
# shouldn't segfault
# see https://github.com/python-pillow/Pillow/issues/1008
arr = array('B', [0])*15000
im = Image.new('L', (150, 100))
im.putdata(arr)
self.assertEqual(len(im.getdata()), len(arr))
def test_array_F(self):
# shouldn't segfault
# see https://github.com/python-pillow/Pillow/issues/1008
im = Image.new('F', (150, 100))
arr = array('f', [0.0])*15000
im.putdata(arr)
self.assertEqual(len(im.getdata()), len(arr))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_28002
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import pickle
import io, os
import lockdir
class DontHaveLockError(Exception):
def __init__(self):
Exception.__init__(self, 'error: you don\'t have lock (thread_safe is enabled so you need to get_lock())')
# When locking is set to True, multiprocess safety is ensured using the lockdir module:
#
# * The load() method automatically locks the object's file before reading it.
# * The create() method locks the file before it creates it.
# * The delete() method releases the file after it deletes it.
# * The save() method doesn't do anything with the lock, as it works with a locked file.
#
# The PickleStorage object keeps track of all its locked files and releases them
# when it is destructed (the __del__() method).
#
# The user should call the release() method on an object when no longer using it.
# This makes it possible to once again load() it. Calling load() on an object whose
# file is locked fails because it is unable to lock it.
#
# The file being locked only prevents other processes from accessing it,
# not the process that created it (see "locked_by_me" in _lock_file()).
# So the user of PickleStorage doesn't really need to care about releasing
# the files if the PickleStorage object only lasts for a moment. In that
# case it is enough that the destructor (the __del__() method) takes care
# of releasing the locks.
#
# Still, it is better to release the object as soon as it is no longer used.
#
# An alternative to waiting for the PickleStorage object's destruction is
# to call its close() method. Like the destructor, it releases all the locks.
# After calling close(), the PickleStorage object is no longer usable.
#
class PickleStorage:
PERMISSIONS = 0o777
def __init__(self, directory, locking=True):
self._storage_dir = directory
self._closed = False
if locking:
self._locking = True
self._my_locked_files = set() # the files locked by this instance of PickleStorage
else:
self._locking = False
def storage_dir(self):
return self._storage_dir
def load(self, class_, args_to_key_func, args, obj_to_key_func):
if self._closed:
            raise Exception('this %s object has been closed' % self.__class__.__name__)
classkey = class_.__name__
key = args_to_key_func(**args)
filename = self._filename(classkey, key)
if self._locking:
self._lock_file(filename) # lock the file before reading it
try:
text_io = io.open(filename, mode='rb')
except IOError:
raise KeyError('cannot load: \n\
object %s %s \n\
does not exist in the storage \n\
(failed to open file "%s" for reading)' % (classkey, key, filename))
obj = pickle.load(text_io)
text_io.close()
assert key == obj_to_key_func(obj)
return obj
def save(self, obj_to_key_func, obj):
if self._closed:
            raise Exception('this %s object has been closed' % self.__class__.__name__)
classkey = obj.__class__.__name__
key = obj_to_key_func(obj)
filename = self._filename(classkey, key)
try:
text_io = io.open(filename, mode='rb+')
except IOError:
raise KeyError('cannot save: \n\
object %s %s \n\
does not exist in the storage \n\
(failed to open file "%s" for modification)' % (classkey, key, filename))
pickle.dump(obj, text_io)
text_io.close()
def create(self, obj_to_key_func, obj):
if self._closed:
            raise Exception('this %s object has been closed' % self.__class__.__name__)
classkey = obj.__class__.__name__
key = obj_to_key_func(obj)
filename = self._filename(classkey, key)
if os.path.isfile(filename):
raise KeyError('cannot create: \n\
object %s %s \n\
already exists in the storage \n\
(file %s exists)' % (classkey, key, filename))
self._ensure_class_dir(classkey)
if self._locking:
self._lock_file(filename) # lock the file before creating it
text_io = io.open(filename, mode='wb')
pickle.dump(obj, text_io)
text_io.close()
if self.PERMISSIONS is not None:
os.chmod(filename, self.PERMISSIONS)
def delete(self, obj_to_key_func, obj):
if self._closed:
            raise Exception('this %s object has been closed' % self.__class__.__name__)
classkey = obj.__class__.__name__
key = obj_to_key_func(obj)
filename = self._filename(classkey, key)
if not os.path.isfile(filename):
raise KeyError('cannot delete: \n\
object %s %s \n\
does not exist in the storage \n\
(file %s does not exist)' % (classkey, key, filename))
os.remove(filename)
if self._locking:
self._release_file(filename) # release the file after deleting it
def _filename(self, classkey, objkey):
return self._storage_dir + '/' + classkey + '/' + objkey + '.pickle'
def _class_dir_name(self, classkey):
return self._storage_dir + '/' + classkey
def _ensure_class_dir(self, classkey):
class_dir = self._class_dir_name(classkey)
if not os.path.isdir(class_dir):
os.mkdir(class_dir)
if self.PERMISSIONS is not None:
os.chmod(class_dir, self.PERMISSIONS)
assert os.path.isdir(class_dir)
def release(self, obj_to_key_func, obj):
assert self._locking
classkey = obj.__class__.__name__
key = obj_to_key_func(obj)
filename = self._filename(classkey, key)
self._release_file(filename)
def close(self):
if self._locking:
files = self._my_locked_files.copy()
for filename in files:
self._release_file(filename)
self._closed = True
def __del__(self):
if self._locking:
files = self._my_locked_files.copy()
for filename in files:
self._release_file(filename)
def _lock_file(self, filename):
assert self._locking
# if the file is locked and it is locked by this process, then allow access without locking;
# otherwise, lock the file
if lockdir.is_locked(filename, lockdir.RWLOCK) and lockdir.pid_of_lock(filename, lockdir.RWLOCK) == os.getpid():
locked_by_me = True
else:
locked_by_me = False
if not locked_by_me:
# lock the file
lockdir.lock(filename, lockdir.RWLOCK)
assert lockdir.is_locked(filename, lockdir.RWLOCK)
assert lockdir.pid_of_lock(filename, lockdir.RWLOCK) == os.getpid()
# register the filename to the set of open files so the lock can be released
# automatically by __del__()
self._my_locked_files.add(filename)
def _release_file(self, filename):
assert self._locking
lockdir.release(filename, lockdir.RWLOCK)
self._my_locked_files.remove(filename)
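# --- Usage sketch (added commentary, not part of the original module) ---
# A minimal, hedged example of the intended API. `Counter` and the two key
# functions are invented for illustration only; locking=False keeps the sketch
# from exercising the lock protocol (the top-level `import lockdir` still has
# to succeed for the module to import).
if __name__ == '__main__':
    import tempfile
    class Counter(object):
        def __init__(self, name, value=0):
            self.name = name
            self.value = value
    obj_key = lambda obj: obj.name          # object -> storage key
    args_key = lambda name: name            # load() arguments -> storage key
    storage = PickleStorage(tempfile.mkdtemp(), locking=False)
    storage.create(obj_key, Counter('hits', 1))
    loaded = storage.load(Counter, args_key, {'name': 'hits'}, obj_key)
    loaded.value += 1
    storage.save(obj_key, loaded)
    print('stored value:', storage.load(Counter, args_key, {'name': 'hits'}, obj_key).value)
    storage.delete(obj_key, loaded)
    storage.close()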
|
the-stack_0_28003
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import six
from airflow.contrib.utils.weekday import WeekDay
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class DayOfWeekSensor(BaseSensorOperator):
"""
Waits until the first specified day of the week. For example, if the execution
day of the task is '2018-12-22' (Saturday) and you pass 'FRIDAY', the task will wait
until next Friday.
**Example** (with single day): ::
weekend_check = DayOfWeekSensor(
task_id='weekend_check',
week_day='Saturday',
use_task_execution_day=True,
dag=dag)
**Example** (with multiple day using set): ::
weekend_check = DayOfWeekSensor(
task_id='weekend_check',
week_day={'Saturday', 'Sunday'},
use_task_execution_day=True,
dag=dag)
**Example** (with :class:`~airflow.contrib.utils.weekday.WeekDay` enum): ::
# import WeekDay Enum
from airflow.contrib.utils.weekday import WeekDay
weekend_check = DayOfWeekSensor(
task_id='weekend_check',
week_day={WeekDay.SATURDAY, WeekDay.SUNDAY},
use_task_execution_day=True,
dag=dag)
:param week_day: Day of the week to check (full name). Optionally, a set
of days can also be provided using a set.
Example values:
* ``"MONDAY"``,
* ``{"Saturday", "Sunday"}``
* ``{WeekDay.TUESDAY}``
* ``{WeekDay.SATURDAY, WeekDay.SUNDAY}``
:type week_day: set or str or WeekDay
:param use_task_execution_day: If ``True``, uses task's execution day to compare
with week_day. Execution Date is Useful for backfilling.
If ``False``, uses system's day of the week. Useful when you
don't want to run anything on weekdays on the system.
:type use_task_execution_day: bool
"""
@apply_defaults
def __init__(self, week_day,
use_task_execution_day=False,
*args, **kwargs):
super(DayOfWeekSensor, self).__init__(*args, **kwargs)
self.week_day = week_day
self.use_task_execution_day = use_task_execution_day
if isinstance(self.week_day, six.string_types):
self._week_day_num = {WeekDay.get_weekday_number(week_day_str=self.week_day)}
elif isinstance(self.week_day, WeekDay):
self._week_day_num = {self.week_day}
elif isinstance(self.week_day, set):
if all(isinstance(day, six.string_types) for day in self.week_day):
self._week_day_num = {WeekDay.get_weekday_number(day) for day in week_day}
elif all(isinstance(day, WeekDay) for day in self.week_day):
self._week_day_num = self.week_day
else:
raise TypeError(
'Unsupported Type for week_day parameter: {}. It should be one of str'
', set or Weekday enum type'.format(type(week_day)))
def poke(self, context):
self.log.info('Poking until weekday is in %s, Today is %s',
self.week_day,
WeekDay(timezone.utcnow().isoweekday()).name)
if self.use_task_execution_day:
return context['execution_date'].isoweekday() in self._week_day_num
else:
return timezone.utcnow().isoweekday() in self._week_day_num
|
the-stack_0_28004
|
from __future__ import unicode_literals
import sys
import warnings
from collections import deque
from functools import total_ordering
from django.db.migrations.state import ProjectState
from django.utils import six
from django.utils.datastructures import OrderedSet
from django.utils.encoding import python_2_unicode_compatible
from .exceptions import CircularDependencyError, NodeNotFoundError
RECURSION_DEPTH_WARNING = (
"Maximum recursion depth exceeded while generating migration graph, "
"falling back to iterative approach. If you're experiencing performance issues, "
"consider squashing migrations as described at "
"https://docs.djangoproject.com/en/dev/topics/migrations/#squashing-migrations."
)
@python_2_unicode_compatible
@total_ordering
class Node(object):
"""
A single node in the migration graph. Contains direct links to adjacent
nodes in either direction.
"""
def __init__(self, key):
self.key = key
self.children = set()
self.parents = set()
def __eq__(self, other):
return self.key == other
def __lt__(self, other):
return self.key < other
def __hash__(self):
return hash(self.key)
def __getitem__(self, item):
return self.key[item]
def __str__(self):
return str(self.key)
def __repr__(self):
return '<Node: (%r, %r)>' % self.key
def add_child(self, child):
self.children.add(child)
def add_parent(self, parent):
self.parents.add(parent)
# Use manual caching, @cached_property effectively doubles the
# recursion depth for each recursion.
def ancestors(self):
# Use self.key instead of self to speed up the frequent hashing
# when constructing an OrderedSet.
if '_ancestors' not in self.__dict__:
ancestors = deque([self.key])
for parent in sorted(self.parents):
ancestors.extendleft(reversed(parent.ancestors()))
self.__dict__['_ancestors'] = list(OrderedSet(ancestors))
return self.__dict__['_ancestors']
# Use manual caching, @cached_property effectively doubles the
# recursion depth for each recursion.
def descendants(self):
# Use self.key instead of self to speed up the frequent hashing
# when constructing an OrderedSet.
if '_descendants' not in self.__dict__:
descendants = deque([self.key])
for child in sorted(self.children):
descendants.extendleft(reversed(child.descendants()))
self.__dict__['_descendants'] = list(OrderedSet(descendants))
return self.__dict__['_descendants']
class DummyNode(Node):
def __init__(self, key, origin, error_message):
super(DummyNode, self).__init__(key)
self.origin = origin
self.error_message = error_message
def __repr__(self):
return '<DummyNode: (%r, %r)>' % self.key
def promote(self):
"""
Transition dummy to a normal node and clean off excess attribs.
Creating a Node object from scratch would be too much of a
hassle as many dependencies would need to be remapped.
"""
del self.origin
del self.error_message
self.__class__ = Node
def raise_error(self):
raise NodeNotFoundError(self.error_message, self.key, origin=self.origin)
@python_2_unicode_compatible
class MigrationGraph(object):
"""
Represents the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
merely a convention to aid file listing. Every new numbered migration
has a declared dependency to the previous number, meaning that VCS
branch merges can be detected and resolved.
Migrations files can be marked as replacing another set of migrations -
this is to support the "squash" feature. The graph handler isn't responsible
for these; instead, the code to load them in here should examine the
migration files and if the replaced migrations are all either unapplied
or not present, it should ignore the replaced ones, load in just the
replacing migration, and repoint any dependencies that pointed to the
replaced migrations to point to the replacing one.
A node should be a tuple: (app_path, migration_name). The tree special-cases
things within an app - namely, root nodes and leaf nodes ignore dependencies
to other apps.
"""
def __init__(self):
self.node_map = {}
self.nodes = {}
self.cached = False
def add_node(self, key, migration):
# If the key already exists, then it must be a dummy node.
dummy_node = self.node_map.get(key)
if dummy_node:
# Promote DummyNode to Node.
dummy_node.promote()
else:
node = Node(key)
self.node_map[key] = node
self.nodes[key] = migration
self.clear_cache()
def add_dummy_node(self, key, origin, error_message):
node = DummyNode(key, origin, error_message)
self.node_map[key] = node
self.nodes[key] = None
def add_dependency(self, migration, child, parent, skip_validation=False):
"""
This may create dummy nodes if they don't yet exist.
If `skip_validation` is set, validate_consistency should be called afterwards.
"""
if child not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" child node %r" % (migration, child)
)
self.add_dummy_node(child, migration, error_message)
if parent not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" parent node %r" % (migration, parent)
)
self.add_dummy_node(parent, migration, error_message)
self.node_map[child].add_parent(self.node_map[parent])
self.node_map[parent].add_child(self.node_map[child])
if not skip_validation:
self.validate_consistency()
self.clear_cache()
def remove_replaced_nodes(self, replacement, replaced):
"""
Removes each of the `replaced` nodes (when they exist). Any
dependencies that were referencing them are changed to reference the
`replacement` node instead.
"""
# Cast list of replaced keys to set to speed up lookup later.
replaced = set(replaced)
try:
replacement_node = self.node_map[replacement]
except KeyError as exc:
exc_value = NodeNotFoundError(
"Unable to find replacement node %r. It was either never added"
" to the migration graph, or has been removed." % (replacement, ),
replacement
)
exc_value.__cause__ = exc
if not hasattr(exc, '__traceback__'):
exc.__traceback__ = sys.exc_info()[2]
six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2])
for replaced_key in replaced:
self.nodes.pop(replaced_key, None)
replaced_node = self.node_map.pop(replaced_key, None)
if replaced_node:
for child in replaced_node.children:
child.parents.remove(replaced_node)
# We don't want to create dependencies between the replaced
# node and the replacement node as this would lead to
# self-referencing on the replacement node at a later iteration.
if child.key not in replaced:
replacement_node.add_child(child)
child.add_parent(replacement_node)
for parent in replaced_node.parents:
parent.children.remove(replaced_node)
# Again, to avoid self-referencing.
if parent.key not in replaced:
replacement_node.add_parent(parent)
parent.add_child(replacement_node)
self.clear_cache()
def remove_replacement_node(self, replacement, replaced):
"""
The inverse operation to `remove_replaced_nodes`. Almost. Removes the
replacement node `replacement` and remaps its child nodes to
`replaced` - the list of nodes it would have replaced. Its parent
nodes are not remapped as they are expected to be correct already.
"""
self.nodes.pop(replacement, None)
try:
replacement_node = self.node_map.pop(replacement)
except KeyError as exc:
exc_value = NodeNotFoundError(
"Unable to remove replacement node %r. It was either never added"
" to the migration graph, or has been removed already." % (replacement, ),
replacement
)
exc_value.__cause__ = exc
if not hasattr(exc, '__traceback__'):
exc.__traceback__ = sys.exc_info()[2]
six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2])
replaced_nodes = set()
replaced_nodes_parents = set()
for key in replaced:
replaced_node = self.node_map.get(key)
if replaced_node:
replaced_nodes.add(replaced_node)
replaced_nodes_parents |= replaced_node.parents
# We're only interested in the latest replaced node, so filter out
# replaced nodes that are parents of other replaced nodes.
replaced_nodes -= replaced_nodes_parents
for child in replacement_node.children:
child.parents.remove(replacement_node)
for replaced_node in replaced_nodes:
replaced_node.add_child(child)
child.add_parent(replaced_node)
for parent in replacement_node.parents:
parent.children.remove(replacement_node)
# NOTE: There is no need to remap parent dependencies as we can
# assume the replaced nodes already have the correct ancestry.
self.clear_cache()
def validate_consistency(self):
"""
Ensure there are no dummy nodes remaining in the graph.
"""
[n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)]
def clear_cache(self):
if self.cached:
for node in self.nodes:
self.node_map[node].__dict__.pop('_ancestors', None)
self.node_map[node].__dict__.pop('_descendants', None)
self.cached = False
def forwards_plan(self, target):
"""
Given a node, returns a list of which previous nodes (dependencies)
must be applied, ending with the node itself.
This is the list you would follow if applying the migrations to
a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
# Use parent.key instead of parent to speed up the frequent hashing in ensure_not_cyclic
self.ensure_not_cyclic(target, lambda x: (parent.key for parent in self.node_map[x].parents))
self.cached = True
node = self.node_map[target]
try:
return node.ancestors()
except RuntimeError:
# fallback to iterative dfs
warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
return self.iterative_dfs(node)
def backwards_plan(self, target):
"""
Given a node, returns a list of which dependent nodes (dependencies)
must be unapplied, ending with the node itself.
This is the list you would follow if removing the migrations from
a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
# Use child.key instead of child to speed up the frequent hashing in ensure_not_cyclic
self.ensure_not_cyclic(target, lambda x: (child.key for child in self.node_map[x].children))
self.cached = True
node = self.node_map[target]
try:
return node.descendants()
except RuntimeError:
# fallback to iterative dfs
warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
return self.iterative_dfs(node, forwards=False)
def iterative_dfs(self, start, forwards=True):
"""
Iterative depth first search, for finding dependencies.
"""
visited = deque()
visited.append(start)
if forwards:
stack = deque(sorted(start.parents))
else:
stack = deque(sorted(start.children))
while stack:
node = stack.popleft()
visited.appendleft(node)
if forwards:
children = sorted(node.parents, reverse=True)
else:
children = sorted(node.children, reverse=True)
# reverse sorting is needed because prepending using deque.extendleft
# also effectively reverses values
stack.extendleft(children)
return list(OrderedSet(visited))
def root_nodes(self, app=None):
"""
Returns all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.node_map[node].parents) and (not app or app == node[0]):
roots.add(node)
return sorted(roots)
def leaf_nodes(self, app=None):
"""
Returns all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
result of a VCS merge and needs some user input.
"""
leaves = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.node_map[node].children) and (not app or app == node[0]):
leaves.add(node)
return sorted(leaves)
def ensure_not_cyclic(self, start, get_children):
# Algo from GvR:
# http://neopythonic.blogspot.co.uk/2009/01/detecting-cycles-in-directed-graph.html
todo = set(self.nodes)
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for node in get_children(top):
if node in stack:
cycle = stack[stack.index(node):]
raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle))
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
def __str__(self):
return 'Graph: %s nodes, %s edges' % self._nodes_and_edges()
def __repr__(self):
nodes, edges = self._nodes_and_edges()
return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges)
def _nodes_and_edges(self):
return len(self.nodes), sum(len(node.parents) for node in self.node_map.values())
def make_state(self, nodes=None, at_end=True, real_apps=None):
"""
Given a migration node or nodes, returns a complete ProjectState for it.
If at_end is False, returns the state before the migration has run.
If nodes is not provided, returns the overall most current project state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
if len(nodes) == 0:
return ProjectState()
if not isinstance(nodes[0], tuple):
nodes = [nodes]
plan = []
for node in nodes:
for migration in self.forwards_plan(node):
if migration not in plan:
if not at_end and migration in nodes:
continue
plan.append(migration)
project_state = ProjectState(real_apps=real_apps)
for node in plan:
project_state = self.nodes[node].mutate_state(project_state, preserve=False)
return project_state
def __contains__(self, node):
return node in self.nodes
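# --- Illustrative usage sketch (not part of Django) --------------------------
# A minimal, hypothetical example of driving the graph above: two migrations in
# one app, where "0002_add_field" depends on "0001_initial". The app and
# migration names are invented purely for illustration.
if __name__ == '__main__':
    demo = MigrationGraph()
    demo.add_node(('app', '0001_initial'), migration=None)
    demo.add_node(('app', '0002_add_field'), migration=None)
    # The first argument is only used in error messages about missing nodes.
    demo.add_dependency('app.0002_add_field',
                        ('app', '0002_add_field'), ('app', '0001_initial'))
    # Apply order, dependencies first:
    # [('app', '0001_initial'), ('app', '0002_add_field')]
    print(demo.forwards_plan(('app', '0002_add_field')))
    # Leaf ("most current") migration of the app: [('app', '0002_add_field')]
    print(demo.leaf_nodes('app'))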
|
the-stack_0_28008
|
#
# Tencent is pleased to support the open source community by making Angel available.
#
# Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/Apache-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
from hadoop.local_fs import LocalFileSystem
from pyangel.conf import AngelConf
from pyangel.context import Configuration
from pyangel.ml.conf import MLConf
from pyangel.ml.regression.runner import LinearRegRunner
class LinearRegLocalExample(object):
"""
Linear Regression Example used for user test, similar to "com.tencent.angel.example.ml.LinearRegLocalExample".
"""
def __init__(self):
self.conf= Configuration()
def set_conf(self):
"""
Set up self.configuration for runtime environment.
"""
# Feature number of train data
feature_num = 101
# Total iteration number
epoch_num = 20
# Validation sample ratio
v_ratio = 0.5
# Data format,libsvm or dummy
data_fmt = 'libsvm'
# Train batch number per epoch
sp_ratio = 1
# Learning rate
learn_rate = 0.1
# Decay of learning rate
decay = 0.01
# Regularization coefficient
reg = 0
# Set job queue, if you use YARN deploy mode, you can set job queue by
# self.conf.set('mapreduce.job.queue.name', 'default')
# Set local deploy mode
self.conf.set(AngelConf.ANGEL_DEPLOY_MODE, 'LOCAL')
# Set basic self.configuration keys
self.conf.set_boolean('mapred.mapper.new-api', True)
self.conf.set(AngelConf.ANGEL_INPUTFORMAT_CLASS, 'org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat')
self.conf.set_boolean(AngelConf.ANGEL_JOB_OUTPUT_PATH_DELETEONEXIST, True)
# Set data format
self.conf.set(MLConf.ML_DATAFORMAT, data_fmt)
# set angel resource parameters #worker, #task, #ps
self.conf.set_int(AngelConf.ANGEL_WORKERGROUP_NUMBER, 2)
self.conf.set_int(AngelConf.ANGEL_WORKER_TASK_NUMBER, 10)
self.conf.set_int(AngelConf.ANGEL_PS_NUMBER, 2)
# set sgd LR algorithm parameters # feature # epoch
self.conf.set(MLConf.ML_FEATURE_NUM, str(feature_num))
self.conf.set(MLConf.ML_EPOCH_NUM, str(epoch_num))
self.conf.set(MLConf.ML_BATCH_SAMPLE_Ratio, str(sp_ratio))
self.conf.set(MLConf.ML_VALIDATE_RATIO, str(v_ratio))
self.conf.set(MLConf.ML_LEARN_RATE, str(learn_rate))
self.conf.set(MLConf.ML_LEARN_DECAY, str(decay))
self.conf.set(MLConf.ML_REG_L2, str(reg))
def train_on_local_cluster(self):
"""
Train model on local cluster
"""
self.set_conf()
input_path = '../data/exampledata/LinearRegression'
LOCAL_FS = LocalFileSystem.DEFAULT_FS
TMP_PATH = tempfile.gettempdir()
log_path = ".src/test/log"
model_path = 'file:///tmp/angel/model'
self.conf.set(AngelConf.ANGEL_TRAIN_DATA_PATH, input_path)
self.conf.set(AngelConf.ANGEL_SAVE_MODEL_PATH, model_path)
self.conf.set(AngelConf.ANGEL_LOG_PATH, log_path)
self.conf.set(AngelConf.ANGEL_ACTION_TYPE, MLConf.ANGEL_ML_TRAIN)
self.conf.set("fs.defaultFS", LOCAL_FS + TMP_PATH)
runner = LinearRegRunner()
runner.train(self.conf)
def inc_train(self):
self.set_conf()
input_path = "../data/exampledata/LinearRegression/LinearReg100.train"
LOCAL_FS = LocalFileSystem.DEFAULT_FS
TMP_PATH = tempfile.gettempdir()
log_path = "./src/test/log"
# Set training data path
self.conf.set(AngelConf.ANGEL_TRAIN_DATA_PATH, input_path)
# Set load model path
self.conf.set(AngelConf.ANGEL_LOAD_MODEL_PATH, LOCAL_FS + TMP_PATH + "/model")
# Set save model path
self.conf.set(AngelConf.ANGEL_SAVE_MODEL_PATH, LOCAL_FS + TMP_PATH + "/newmodel")
# Set actionType incremental train
self.conf.set(AngelConf.ANGEL_ACTION_TYPE, MLConf.ANGEL_ML_INC_TRAIN())
# Set log path
self.conf.set(AngelConf.ANGEL_LOG_PATH, log_path)
runner = LinearRegRunner()
runner.incTrain(self.conf)
def predict(self):
self.set_conf()
input_path = "../data/exampledata/LinearRegression/LinearReg100.train"
LOCAL_FS = LocalFileSystem.DEFAULT_FS
TMP_PATH = tempfile.gettempdir()
# Set training data path
self.conf.set(AngelConf.ANGEL_TRAIN_DATA_PATH, input_path)
# Set load model path
self.conf.set(AngelConf.ANGEL_LOAD_MODEL_PATH, LOCAL_FS + TMP_PATH + "/model")
# Set predict result path
self.conf.set(AngelConf.ANGEL_PREDICT_PATH, LOCAL_FS + TMP_PATH + "/predict")
# Set actionType prediction
self.conf.set(AngelConf.ANGEL_ACTION_TYPE, MLConf.ANGEL_ML_PREDICT())
runner = LinearRegRunner()
runner.predict(self.conf)
example = LinearRegLocalExample()
example.train_on_local_cluster()
|
the-stack_0_28010
|
# 653. Two Sum IV - Input is a BST easy
# Given a Binary Search Tree and a target number, return true if there exist two elements in the BST such that their sum is equal to the given target.
#
# Example 1:
#
# Input:
# 5
# / \
# 3 6
# / \ \
# 2 4 7
#
# Target = 9
#
# Output: True
#
#
# Example 2:
#
# Input:
# 5
# / \
# 3 6
# / \ \
# 2 4 7
#
# Target = 28
#
# Output: False
import queue


class TreeNode:
    """Minimal binary-tree node so the solution below runs standalone."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def findTarget(self, root: TreeNode, k: int) -> bool:
        # BFS over the tree, storing the complement (k - val) of every value
        # seen so far; a later node whose value matches a stored complement
        # means two elements sum to k.
        if root is None:
            return False
        seen_nums = {}
        q = queue.Queue()
        q.put(root)
        while not q.empty():
            node = q.get()
            if node.val in seen_nums:
                return True
            seen_nums[k - node.val] = 1
            if node.left:
                q.put(node.left)
            if node.right:
                q.put(node.right)
        return False
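# --- Illustrative usage (hypothetical, using the TreeNode/Solution above) ----
# Builds the tree from Example 1 and checks both sample targets.
if __name__ == '__main__':
    root = TreeNode(5,
                    TreeNode(3, TreeNode(2), TreeNode(4)),
                    TreeNode(6, None, TreeNode(7)))
    print(Solution().findTarget(root, 9))   # True  (e.g. 2 + 7)
    print(Solution().findTarget(root, 28))  # False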
|
the-stack_0_28013
|
import pandas as pd
import numpy as np
import random as rd
from pathlib import Path
from datetime import datetime as dt
import tensorflow as tf
import tensorflow.keras.optimizers as opt
from tensorflow.keras import Input
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential, load_model, clone_model, Model
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization, concatenate
from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler, ModelCheckpoint
def initGPU():
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
def getBiasFreeIndex(boolSeries, size, seed=1337):
rd.seed(seed)
def getCenteredIndex(onSize=True):
def flatter(a, b):
c = []
for i in range(len(a)):
c.append(a[i])
c.append(b[i])
return c
positive = boolSeries[boolSeries==True]
negative = boolSeries[boolSeries==False]
if onSize:
positive = rd.sample(list(positive.index), size//2)
negative = rd.sample(list(negative.index), size//2)
else:
if len(positive) > len(negative):
positive = rd.sample(list(positive.index), len(negative))
negative = negative.index.tolist()
else:
positive = positive.index.tolist()
negative = rd.sample(list(negative.index), len(positive))
return flatter(positive, negative)
training = getCenteredIndex()
boolSeries = boolSeries.loc[list(set(boolSeries.index)-set(training))]
validation = getCenteredIndex(False)
return training, validation
def divideDataByIndex(data, index):
return data.loc[index[0]], data.loc[index[1]]
def saveData(path, dataTuple):
dataTuple[0][0].to_csv(path/'predictors_training.csv')
dataTuple[0][1].to_csv(path/'predictors_validation.csv')
dataTuple[1][0].to_csv(path/'targets_training.csv')
dataTuple[1][1].to_csv(path/'targets_validation.csv')
def loadData(path):
predictors = [None, None]
targets = [None, None]
predictors[0] = pd.read_csv(path/'predictors_training.csv', index_col=0)
predictors[1] = pd.read_csv(path/'predictors_validation.csv', index_col=0)
targets[0] = pd.read_csv(path/'targets_training.csv', index_col=0)
targets[1] = pd.read_csv(path/'targets_validation.csv', index_col=0)
return tuple(predictors), tuple(targets)
def getModel(blueprint, predictors, targets, metric):
def getOutput():
if bool==targets[0].dtypes[0]:
activation = 'sigmoid'
loss = 'binary_crossentropy'
else:
activation = None
loss = 'MSE'
model.add(Dense(targets[0].columns.size, activation=activation, kernel_initializer='ones', name=("T_"+str(hash(name))[-4:]+"_"+str(len(model.layers)+2))))
model.compile(optimizer=blueprint['optimizer'], loss=loss, metrics=[metric])
return model
name = blueprint['identifier']
model = Sequential(name=name)
model.add(Input(shape=(predictors[0].columns.size,), name=("I_"+str(hash(name))[-8:]+"_"+str(0))))
for index, nodes in enumerate(blueprint['layers']):
activation = blueprint['activations'][index]
if activation=='None':
activation = None
model.add(Dense(nodes, activation, kernel_initializer='ones', name=("D_"+str(hash(name))[-4:]+"_"+str(index+1))))
if blueprint['dropouts'][index]>0:
model.add(Dropout(blueprint['dropouts'][index]/nodes, name=("O_"+str(hash(name))[-4:]+"_"+str(index+1))))
model.add(BatchNormalization(name=("B_"+str(hash(name))[-4:]+"_"+str(len(model.layers)+1))))
return getOutput()
def getBatchSize(size, minimum=1000):
sizes = []
for i in range((size//2)+1, 2, -1):
if ((size % i)) == 0 and (size//i>1000) and (size//i<size//6):
sizes.append(size//i)
return sizes[len(sizes)//2]
def row2string(row):
s = ""
for value in row.values():
if isinstance(value, list):
v = '"'+str(value)+'",'
else:
v = str(value)+","
s = s + v
return s[0:-1]
def metrics2row(blueprint, metrics):
def sumNodes(layers):
return len(layers), sum(layers)
row = {}
row['timestamp'] = dt.now()
row.update(blueprint.copy())
row['dimensions'] = len(blueprint['predictors'])
row['length'], row['nodes'] = sumNodes(blueprint['layers'])
row.update(metrics)
return row2string(row)
def training(path, blueprint, predictors, targets, metric, epochs=100, start=0.1, stop=0.01, output='row'):
stepping = round(epochs/(start/stop)**0.7)
epochRange = range(epochs, 0, -stepping)
decrease = (stop/start)**(1/(len(epochRange)-1))
model = getModel(blueprint, predictors, targets, metric)
model.optimizer.lr = start
lr = start
modelPath = path/(blueprint['identifier']+'.h5')
model.save(modelPath)
trained = 0
start = dt.now()
for epoch in epochRange:
monitor = EarlyStopping(monitor=('val_'+metric),restore_best_weights=True, patience=epoch)
history = model.fit(predictors[0], targets[0], getBatchSize(len(targets[0])), epoch, 0, [monitor], validation_data=(predictors[1], targets[1]))
image = load_model(modelPath)
imageMetric = image.evaluate(predictors[1], targets[1], return_dict=True, verbose=0)[metric]
modelMetric = model.evaluate(predictors[1], targets[1], return_dict=True, verbose=0)[metric]
if imageMetric>modelMetric:
model = image
else:
trained = trained+len(history.history[metric])
model.save(modelPath)
lr = lr*decrease
model.optimizer.lr = lr
time = round((dt.now() - start).total_seconds(), 2)
metrics = model.evaluate(predictors[1], targets[1], return_dict=True, verbose=0)
metrics['time'] = time
metrics['epochs'] = trained
if output=='metric':
return metrics
elif output=='row':
return metrics2row(blueprint, metrics)
elif output=='ensemble':
return metrics2row(blueprint, metrics),(
pd.DataFrame(model.predict(predictors[0]), columns=targets[0].columns, index=targets[0].index),
pd.DataFrame(model.predict(predictors[1]), columns=targets[1].columns, index=targets[1].index))
def trainingRoutine(path, predictors, targets, metric, minimise, minDuration, maxDuration, start, stop):
def row2log(name, row):
with open(path/name,'a') as logCSV:
logCSV.write('\n')
logCSV.write(row)
def loadLog(name):
df = pd.read_csv(path/name, index_col=False)
if df.empty:
return df
df['identifier'] = df['identifier'].astype(str)
for column in df:
if df[column].dtype==object:
if (df[column][0].find("[")>-1 and df[column][0].find("]")>-1):
df[column] = df[column].str.replace("'","").str.replace(", ",",").str.replace("[","").str.replace("]","").str.split(",")
if column=='layers' or column=='dropouts':
newCol = []
for element in df[column].tolist():
newElement = []
for value in element:
newElement.append(int(value))
newCol.append(newElement)
df[column] = pd.Series(newCol)
return df
def getBest(frame, identifier, output=[]):
if not output:
output = list(frame.columns)
frame = frame[frame['identifier']==identifier]
if minimise:
frame = frame[frame[metric]==frame[metric].min()]
else:
frame = frame[frame[metric]==frame[metric].max()]
if len(frame)>1:
frame = frame[frame['loss']==frame['loss'].min()]
if len(frame)>1:
frame = frame[frame['epochs']==frame['epochs'].min()]
if len(frame)>1:
frame = frame[frame['nodes']==frame['nodes'].min()]
if len(frame)>1:
frame = frame[frame['time']==frame['time'].min()]
return frame[output].to_dict('records')[0]
else:
return frame[output].to_dict('records')[0]
else:
return frame[output].to_dict('records')[0]
else:
return frame[output].to_dict('records')[0]
else:
return frame[output].to_dict('records')[0]
def getIdentifier(sample):
def string2int(string):
value = 0
for char in string:
value = value+ord(char)*1337*len(string)*31
return value
identifier = 0
for string in sample:
identifier = identifier + string2int(string)
return str(identifier)[-16:]
def predictorTraining(epsilon=8):
def sample(columns, bias, maxNodes):
tries = []
def appendTry(tries, row):
if row[metric]<bias:
return
if len(tries)<epsilon:
tries.append(row)
tries = sorted(tries, key=lambda tup: tup[metric])
elif tries[0][metric]<row[metric]:
tries[0] = row
tries = sorted(tries, key=lambda tup: tup[metric])
def check(identifier):
frame = loadLog('predictors_log.csv')
if frame.empty:
return frame
else:
return frame[frame['identifier']==identifier]
pool = list(set(predictors[0].columns)-set(columns))
for column in pool:
samples = columns+[column]
identifier = getIdentifier(samples)
backlog = check(identifier)
if backlog.empty:
samples = (predictors[0][samples], predictors[1][samples])
print("Trying: ", column)
print("Identifier:", identifier)
tryStart = dt.now()
row = parameterTraining(samples, maxNodes, identifier, maxNodes*10)
print("Endurance:", dt.now()-tryStart)
row2log('predictors_log.csv', row)
row = getBest(loadLog('predictors_log.csv'), identifier)
print("Metric: ", row[metric])
appendTry(tries, row)
else:
backlog = getBest(backlog, identifier)
print("Skipping: ", column)
print("Identifier:", identifier)
print("Metric: ", backlog[metric])
appendTry(tries, backlog)
return tries
def trace(line, bias, nodes):
trial = sample(line, bias, nodes)
if not trial:
return
for entry in trial:
preds = entry['predictors']
maxNodes = max([(entry['nodes']/len(preds))*(len(preds)+1), len(preds)*5])
print(maxNodes)
trace(preds, entry[metric], round(maxNodes))
trace([], 0.5, 10)
def parameterTraining(predictors, maxNodes, identifier, epochs):
keys = ['predictors','identifier','optimizer','layers','activations','dropouts']
def getDuration(nodes):
return minDuration+round((maxDuration-minDuration)*((nodes-1)/(maxNodes-1)))
def check(blueprint):
frame = loadLog('parameter_log.csv').astype(str)
return frame[frame[list(blueprint.keys())].isin(pd.Series(blueprint).astype(str).tolist()).all(axis=1)]
def evaluating(model, epochs):
monitor = EarlyStopping(monitor=('val_'+metric),restore_best_weights=True, patience=minDuration)
start = dt.now()
history = model.fit(predictors[0], targets[0], getBatchSize(len(targets[0])), epochs, 0, [monitor], validation_data=(predictors[1], targets[1]))
time = (dt.now()-start).total_seconds()
metrics = model.evaluate(predictors[1], targets[1], return_dict=True, verbose=0)
metrics['time'] = time
metrics['epochs'] = len(history.history[metric])
return metrics
def getSize():
blueprint = dict(zip(keys, [list(predictors[0].columns),identifier,'adam',[0],['None'],[0]]))
for i in range(maxNodes):
i = i
for width in range(1, maxNodes-sum(blueprint['layers'])+1):
blueprint['layers'][-1] = width
backlog = check(blueprint)
if backlog.empty:
model = getModel(blueprint, predictors, targets, metric)
metrics = evaluating(model, getDuration(sum(blueprint['layers'])))
row2log('parameter_log.csv', metrics2row(blueprint, metrics))
blueprint = getBest(loadLog('parameter_log.csv'), identifier, output=list(blueprint.keys()))
if blueprint['layers'][-1]==1:
break
blueprint['layers'] = blueprint['layers']+[0]
blueprint['activations'] = blueprint['activations']+[blueprint['activations'][0]]
blueprint['dropouts'] = blueprint['dropouts']+[blueprint['dropouts'][0]]
def getActivations():
maxD = maxDuration
possibilities = ['None','relu','selu','elu','tanh','softsign','softplus']
blueprint = getBest(loadLog('parameter_log.csv'), identifier, output=keys)
for i in range(len(blueprint['layers'])):
for activation in possibilities:
blueprint['activations'][i] = activation
backlog = check(blueprint)
if backlog.empty:
model = getModel(blueprint, predictors, targets, metric)
metrics = evaluating(model, maxD)
row2log('parameter_log.csv', metrics2row(blueprint, metrics))
else:
best = getBest(backlog, identifier=identifier)
maxD = int(best['epochs'])
blueprint = getBest(loadLog('parameter_log.csv'), identifier, output=keys)
def getDropouts():
maxD = maxDuration
blueprint = getBest(loadLog('parameter_log.csv'), identifier, output=keys)
for i, v in enumerate(blueprint['layers']):
for drop in range(v):
blueprint['dropouts'][i] = drop
backlog = check(blueprint)
if backlog.empty:
model = getModel(blueprint, predictors, targets, metric)
metrics = evaluating(model, maxD)
row2log('parameter_log.csv', metrics2row(blueprint, metrics))
else:
best = getBest(backlog, identifier=identifier)
maxD = int(best['epochs'])
blueprint = getBest(loadLog('parameter_log.csv'), identifier, output=keys)
def getOptimizer():
maxD = maxDuration
blueprint = getBest(loadLog('parameter_log.csv'), identifier, output=keys)
possibilities = ['adam','sgd','rmsprop','adadelta','adagrad','adamax','nadam']
for optimizer in possibilities:
blueprint['optimizer'] = optimizer
backlog = check(blueprint)
if backlog.empty:
model = getModel(blueprint, predictors, targets, metric)
metrics = evaluating(model, maxD)
row2log('parameter_log.csv', metrics2row(blueprint, metrics))
else:
best = getBest(backlog, identifier=identifier)
maxD = int(best['epochs'])
getSize()
getActivations()
getDropouts()
getOptimizer()
return training(path/'Models', getBest(loadLog('parameter_log.csv'), identifier, output=keys), predictors, targets, metric, epochs, start, stop)
predictorTraining()
path = Path(__file__).parent.absolute()/'Learning'/'Deep Training'
#targets = pd.read_csv(Path(__file__).parent.absolute()/'Learning'/'None_Targets.csv', index_col=False, usecols=['Home: Win', 'Visiting: Win'])
#index = getBiasFreeIndex(targets['Home: Win'], 72500)
#targets = divideDataByIndex(targets, index)
#predictors = pd.read_csv(Path(__file__).parent.absolute()/'Learning'/'None_Predictors.csv', index_col=False)
#predictors = divideDataByIndex(predictors, index)
#saveData(path/'Data', (predictors, targets))
predictors, targets = loadData(path/'Data')
trainingRoutine(path, predictors, targets, 'binary_accuracy', False, 20, 100, 0.1, 0.01)
|
the-stack_0_28014
|
from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Domain(BaseTraceHierarchyType):
# column
# ------
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this parcoords trace .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self['column']
@column.setter
def column(self, val):
self['column'] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this parcoords trace .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self['row']
@row.setter
def row(self, val):
self['row'] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this parcoords trace (in plot
fraction).
The 'x' property is an info array that may be specified as a
list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self['x']
@x.setter
def x(self, val):
self['x'] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this parcoords trace (in plot
fraction).
The 'y' property is an info array that may be specified as a
list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self['y']
@y.setter
def y(self, val):
self['y'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'parcoords'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this parcoords trace .
row
If there is a layout grid, use the domain for this row
in the grid for this parcoords trace .
x
Sets the horizontal domain of this parcoords trace (in
plot fraction).
y
Sets the vertical domain of this parcoords trace (in
plot fraction).
"""
def __init__(
self, arg=None, column=None, row=None, x=None, y=None, **kwargs
):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.parcoords.Domain
column
If there is a layout grid, use the domain for this
column in the grid for this parcoords trace .
row
If there is a layout grid, use the domain for this row
in the grid for this parcoords trace .
x
Sets the horizontal domain of this parcoords trace (in
plot fraction).
y
Sets the vertical domain of this parcoords trace (in
plot fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__('domain')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.parcoords.Domain
constructor must be a dict or
an instance of plotly.graph_objs.parcoords.Domain"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.parcoords import (domain as v_domain)
# Initialize validators
# ---------------------
self._validators['column'] = v_domain.ColumnValidator()
self._validators['row'] = v_domain.RowValidator()
self._validators['x'] = v_domain.XValidator()
self._validators['y'] = v_domain.YValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('column', None)
self['column'] = column if column is not None else _v
_v = arg.pop('row', None)
self['row'] = row if row is not None else _v
_v = arg.pop('x', None)
self['x'] = x if x is not None else _v
_v = arg.pop('y', None)
self['y'] = y if y is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
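# --- Illustrative usage sketch (hypothetical values) --------------------------
# Construct a Domain spanning the left half of the figure horizontally and the
# full height vertically; the same keys could also be passed as a dict via the
# `arg` parameter.
if __name__ == '__main__':
    dom = Domain(x=[0, 0.5], y=[0, 1])
    print(dom.to_plotly_json())  # plain-dict form of the column/row/x/y values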
|
the-stack_0_28015
|
"""
The 'instructions' widget for Morse Trainer.
Used to show a read-only QTextEdit containing instruction text.
instructions = Instructions(text)
"""
from PyQt5.QtWidgets import (QWidget, QTextEdit, QVBoxLayout)
class Instructions(QWidget):
def __init__(self, text):
"""Create instructions containing 'text'."""
QWidget.__init__(self)
self.initUI(text)
self.show()
def initUI(self, text):
# define the widgets in this group
doc = QTextEdit(self)
doc.setReadOnly(True)
doc.insertPlainText(text)
# start the layout
layout = QVBoxLayout()
layout.addWidget(doc)
self.setLayout(layout)
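# --- Illustrative usage sketch ------------------------------------------------
# Minimal standalone demo; assumes a PyQt5 environment with a display is
# available, and the instruction text is invented for illustration.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    instructions = Instructions('Key the characters you hear at a steady pace.')
    sys.exit(app.exec_())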
|
the-stack_0_28016
|
from pydantic import BaseModel, EmailStr, HttpUrl, ValidationError
class User(BaseModel):
email: EmailStr
website: HttpUrl
# Invalid email
try:
User(email="jdoe", website="https://www.example.com")
except ValidationError as e:
print(str(e))
# Invalid URL
try:
User(email="[email protected]", website="jdoe")
except ValidationError as e:
print(str(e))
# Valid
user = User(email="[email protected]", website="https://www.example.com")
# email='[email protected]' website=HttpUrl('https://www.example.com', scheme='https', host='www.example.com', tld='com', host_type='domain')
print(user)
|
the-stack_0_28017
|
from klampt import *
from klampt.control.robotinterfaceutils import RobotInterfaceCompleter,MultiRobotInterface
from klampt.control.robotinterface import RobotInterfaceBase
from klampt.control.interop import RobotInterfacetoVis
from klampt.control.simrobotinterface import *
from klampt.control.cartesian_drive import *
from klampt.math import vectorops,so3
from klampt import vis
import math
import time
import csv
def testCompleter():
w = WorldModel()
w.readFile("../../data/tx90scenario0.xml")
r = w.robot(0)
sim = Simulator(w)
#TODO: CHANGE ME
#controller = RobotInterfaceCompleter(KinematicSimControlInterface(r))
#controller = RobotInterfaceCompleter(SimPositionControlInterface(sim.controller(0),sim))
#controller = RobotInterfaceCompleter(SimMoveToControlInterface(sim.controller(0),sim))
#controller = RobotInterfaceCompleter(SimVelocityControlInterface(sim.controller(0),sim))
controller = RobotInterfaceCompleter(SimFullControlInterface(sim.controller(0),sim))
testProperties = ['controlRate','parts','sensors','numDOFs','indices']
testFuncs = ['clock','status','isMoving',
'sensedPosition','sensedVelocity','sensedTorque','commandedPosition','commandedVelocity','commandedTorque',
'destinationPosition','destinationVelocity','destinationTime']
if not controller.initialize():
raise RuntimeError("There was some problem initializing controller "+str(controller))
#start logger
testFile = open('controllertest_results.csv','w',newline='')
testWriter = csv.writer(testFile)
testWriter.writerow(testProperties)
results = []
for prop in testProperties:
try:
results.append(str(getattr(controller,prop)()))
except Exception as e:
results.append('Error '+str(e))
testWriter.writerow(results)
testWriter.writerow(['emulatorControlMode','baseControlMode']+testFuncs)
if controller.numDOFs() != r.numDrivers():
raise RuntimeError("Invalid DOFs")
if controller.klamptModel() is None:
raise RuntimeError("Can't get Klampt model")
q = r.getConfig()[1:]
q2 = [x for x in q]
q2[2] -= 1.0
q2[3] -= 1.0
controller.setToolCoordinates([0,0,0])
#TODO: CHANGE ME
"""
#testing a single movement
moves = [(1.0,lambda: controller.setPiecewiseLinear([1],[q[:2]+[q[2]+1.0]+q[3:]]))]
"""
#testing general movements with interruption
moves = [(0.5,lambda: controller.setVelocity([0]*2+[1.0]+[0]*(len(q)-3),1.0)),
(1.0,lambda: controller.setPiecewiseLinear([1],[q[:2]+[q[2]+1.0]+q[3:]])),
(3.0,lambda: controller.setPiecewiseCubic([1],[q],[[0]*len(q)])),
(3.5,lambda: controller.moveToPosition(q[:2]+[q[2]-1.0]+q[3:])),
(5.0,lambda: controller.moveToPosition(q,0.1)),
(5.5,lambda: controller.moveToPosition(q2,1.0)),
(8.0,lambda: controller.moveToCartesianPosition((so3.identity(),[0.5,0,1.0]))),
(10.0,lambda: controller.setCartesianVelocity([0,0,0.2],3.0)),
(11.0,lambda: controller.moveToCartesianPosition((so3.identity(),[0.5,0,1.0])))
]
"""
#testing interrupted cartesian velocity movements
moves = [(0.5,lambda: controller.moveToCartesianPosition((so3.identity(),[0.5,0,1.0]))),
(2.0,lambda: controller.setCartesianVelocity([0,0,0.1],5.0)) ,
#(3.0,lambda: controller.moveToPosition(q,1))
(3.0,lambda: controller.moveToCartesianPosition((so3.identity(),[0.5,0,1.0])))
]
"""
"""
#testing cartesian velocity movements
moves = [(0.5,lambda: controller.moveToCartesianPosition((so3.identity(),[0.5,0,1.0]))),
(2.0,lambda: controller.setCartesianVelocity([0,0,0.1],5.0))
]
"""
visplugin = RobotInterfacetoVis(controller)
#visplugin.tag = ''
endTime = 13.0
lastClock = 0
dt = 1.0/controller.controlRate()
vis.add("world",w)
vis.show()
while controller.status() == 'ok' and vis.shown(): #no error handling done here...
t0 = time.time()
vis.lock()
controller.startStep()
clock = controller.clock()
if (clock % 1.0) <= dt:
controller.print_status()
for (trigger,callback) in moves:
if clock > trigger and lastClock <= trigger:
print("Calling trigger",trigger)
callback()
lastClock = clock
visplugin.update()
if controller.clock() > endTime:
vis.unlock()
break
controller.endStep()
#log results to disk
results = [controller._emulatorControlMode,controller._baseControlMode]
for func in testFuncs:
try:
results.append(str(getattr(controller,func)()))
except Exception as e:
results.append('Error '+str(e))
testWriter.writerow(results)
if isinstance(controller._base,KinematicSimControlInterface):
r.setConfig(controller.configToKlampt(controller.commandedPosition()))
else:
sim.updateWorld()
#give visualization some chance to update
vis.unlock()
t1 = time.time()
telapsed = t1 - t0
time.sleep(max(dt - telapsed,0))
if vis.shown():
print("STATUS CHANGED TO",controller.status())
print("FINAL CLOCK",controller.clock())
controller.print_status()
vis.show(False)
vis.clear()
testFile.close()
def testCartesianDrive():
w = WorldModel()
#w.readFile("../../data/tx90scenario0.xml")
w.readFile("../../data/robots/jaco.rob")
r = w.robot(0)
solver = CartesianDriveSolver(r)
#set a non-singular configuration
q = r.getConfig()
q[3] = 0.5
r.setConfig(q)
solver.start(q,6)
vis.add("world",w)
vis.addPlot("timing")
vis.addPlot("info")
vis.show()
time.sleep(0.1)
dt = 0.01
t = 0
while t < 20 and vis.shown():
vis.lock()
if t < 2:
v = [0,0,0.25]
elif t < 3:
v = [0,0,-0.1]
elif t < 3.2:
v = [0,0,-1]
elif t < 8:
v = [0,0,0]
elif t < 10:
v = [-1,0,0]
else:
v = [1,0,0]
if t < 4:
w = [0,0,0]
elif t < 10:
w = [0,-0.25,0]
else:
w = None
t0 = time.time()
progress, qnext = solver.drive(q,w,v,dt)
t1 = time.time()
vis.addText("debug","Vel %s"%(str(v),))
vis.logPlot("timing","t",t1-t0)
vis.logPlot("info","progress",progress)
vis.logPlot("info","adj",solver.driveSpeedAdjustment)
r.setConfig(qnext)
q = qnext
vis.unlock()
vis.add("tgt",solver.driveTransforms[0])
t += dt
time.sleep(max(0.005-(t1-t0),0))
vis.show(False)
vis.clear()
def testMultiRobot():
#Create a world with two robots -- this will be the simulation world
w = WorldModel()
w.readFile("../../data/tx90scenario0.xml")
w.readFile("../../data/robots/jaco.rob")
r1 = w.robot(0)
r2 = w.robot(1)
#Create a world with a unified robot -- this will be the controller's model of the robot
w2 = w.copy()
w2.robot(0).mount(-1,w2.robot(1),so3.identity(),[1,0,0.5])
w2.remove(w2.robot(1))
whole_robot_model = w2.robot(0)
robot_1_indices = list(range(r1.numLinks()))
robot_2_indices = list(range(r1.numLinks(),r1.numLinks()+r2.numLinks()))
#update the base transform of robot 2
T0 = r2.link(0).getParentTransform()
r2.link(0).setParentTransform(T0[0],vectorops.add(T0[1],[1,0,0.5]))
r2.setConfig(r2.getConfig())
#Note: don't pass sim as the second argument to SimXControlInterface; we will need to simulate ourselves
sim = Simulator(w)
sim_controller1 = RobotInterfaceCompleter(SimFullControlInterface(sim.controller(0)))
sim_controller2 = RobotInterfaceCompleter(SimFullControlInterface(sim.controller(1)))
whole_robot_controller = MultiRobotInterface()
whole_robot_controller.addPart("Robot 1",sim_controller1,whole_robot_model,robot_1_indices)
whole_robot_controller.addPart("Robot 2",sim_controller2,whole_robot_model,robot_2_indices)
print("Num total DOFs",whole_robot_controller.numDOFs())
print("Parts:")
for k,v in whole_robot_controller.parts().items():
print(" ",k,":",v)
print("Control rate",whole_robot_controller.controlRate())
print(whole_robot_controller.getPartInterface("Robot 1").__class__.__name__)
print(whole_robot_controller.getPartInterface("Robot 2").__class__.__name__)
if not whole_robot_controller.initialize():
raise RuntimeError("Failed to initialize")
visplugin1 = RobotInterfacetoVis(whole_robot_controller.getPartInterface("Robot 1"),0)
visplugin1.text_x = 10
visplugin1.tag = ''
visplugin2 = RobotInterfacetoVis(whole_robot_controller.getPartInterface("Robot 2"),1)
visplugin2.text_x = 200
visplugin2.tag = 'a'
vis.add("world",w)
#vis.add("world",w2)
#vis.edit(("world",whole_robot_model))
vis.add("qdes",sim_controller1.configToKlampt(sim_controller1.sensedPosition()),color=[1,0,0,0.5],robot=0)
vis.add("qdes2",sim_controller2.configToKlampt(sim_controller2.sensedPosition()),color=[1,1,0,0.5],robot=1)
vis.show()
dt = 1.0/whole_robot_controller.controlRate()
while vis.shown():
t0 = time.time()
vis.lock()
whole_robot_controller.startStep()
#send commands here
clock = whole_robot_controller.clock()
if clock > 0.5 and clock < 2.5:
velocity = [0]*whole_robot_controller.numDOFs()
velocity[2] = -0.1
velocity[10] = 0.3
whole_robot_controller.setVelocity(velocity,None)
elif clock >= 2.5 and clock < 2.75:
velocity = [0]*whole_robot_controller.numDOFs()
whole_robot_controller.setVelocity(velocity)
elif clock > 2.75 and clock < 2.80:
tgt = [0]*sim_controller1.numDOFs()
tgt[2] = 1.0
whole_robot_controller.getPartInterface("Robot 1").moveToPosition(tgt)
visplugin1.update()
visplugin2.update()
whole_robot_controller.endStep()
#update the simulator
sim.simulate(dt)
#update the visualization world
sim.updateWorld()
vis.add("qdes",sim_controller1.configToKlampt(sim_controller1.sensedPosition()),color=[1,0,0,0.5],robot=0)
vis.add("qdes2",sim_controller2.configToKlampt(sim_controller2.sensedPosition()),color=[1,1,0,0.5],robot=1)
#whole_robot_model.setConfig(r1.getConfig()+r2.getConfig())
vis.unlock()
t1 = time.time()
telapsed = t1 - t0
time.sleep(max(dt - telapsed,0))
vis.clear()
#testCartesianDrive()
#testCompleter()
#testJoints()
testMultiRobot()
vis.kill()
|
the-stack_0_28018
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test beam search helper methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.transformer.model import beam_search
class BeamSearchHelperTests(tf.test.TestCase):
def setUp(self):
super(BeamSearchHelperTests, self).setUp()
tf.compat.v1.disable_eager_execution()
def test_expand_to_beam_size(self):
x = tf.ones([7, 4, 2, 5])
x = beam_search._expand_to_beam_size(x, 3)
with self.session() as sess:
shape = sess.run(tf.shape(x))
self.assertAllEqual([7, 3, 4, 2, 5], shape)
def test_shape_list(self):
y = tf.compat.v1.placeholder(dtype=tf.int32, shape=[])
x = tf.ones([7, y, 2, 5])
shape = beam_search._shape_list(x)
self.assertIsInstance(shape[0], int)
self.assertIsInstance(shape[1], tf.Tensor)
self.assertIsInstance(shape[2], int)
self.assertIsInstance(shape[3], int)
def test_get_shape_keep_last_dim(self):
y = tf.constant(4.0)
x = tf.ones([7, tf.cast(tf.sqrt(y), tf.int32), 2, 5])
shape = beam_search._get_shape_keep_last_dim(x)
self.assertAllEqual([None, None, None, 5],
shape.as_list())
def test_flatten_beam_dim(self):
x = tf.ones([7, 4, 2, 5])
x = beam_search._flatten_beam_dim(x)
with self.session() as sess:
shape = sess.run(tf.shape(x))
self.assertAllEqual([28, 2, 5], shape)
def test_unflatten_beam_dim(self):
x = tf.ones([28, 2, 5])
x = beam_search._unflatten_beam_dim(x, 7, 4)
with self.session() as sess:
shape = sess.run(tf.shape(x))
self.assertAllEqual([7, 4, 2, 5], shape)
def test_gather_beams(self):
x = tf.reshape(tf.range(24), [2, 3, 4])
# x looks like: [[[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
#
# [[12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]]]
y = beam_search._gather_beams(x, [[1, 2], [0, 2]], 2, 2)
with self.session() as sess:
y = sess.run(y)
self.assertAllEqual([[[4, 5, 6, 7],
[8, 9, 10, 11]],
[[12, 13, 14, 15],
[20, 21, 22, 23]]],
y)
def test_gather_topk_beams(self):
x = tf.reshape(tf.range(24), [2, 3, 4])
x_scores = [[0, 1, 1], [1, 0, 1]]
y = beam_search._gather_topk_beams(x, x_scores, 2, 2)
with self.session() as sess:
y = sess.run(y)
self.assertAllEqual([[[4, 5, 6, 7],
[8, 9, 10, 11]],
[[12, 13, 14, 15],
[20, 21, 22, 23]]],
y)
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_28019
|
#!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
from ambari_commons.str_utils import string_set_equals
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions.get_bare_principal import get_bare_principal
class ADH15StackAdvisor(ADH14StackAdvisor):
def __init__(self):
super(ADH15StackAdvisor, self).__init__()
Logger.initialize_logger()
self.HIVE_INTERACTIVE_SITE = 'hive-interactive-site'
self.YARN_ROOT_DEFAULT_QUEUE_NAME = 'default'
self.AMBARI_MANAGED_LLAP_QUEUE_NAME = 'llap'
self.CONFIG_VALUE_UINITIALIZED = 'SET_ON_FIRST_INVOCATION'
self.CLUSTER_CREATE_OPERATION = "ClusterCreate"
def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
super(ADH15StackAdvisor,self).recommendOozieConfigurations(configurations, clusterData, services, hosts)
putOozieEnvProperty = self.putProperty(configurations, "oozie-env", services)
if not "oozie-env" in services["configurations"] :
Logger.info("No oozie configurations available")
return
if not "FALCON_SERVER" in clusterData["components"] :
Logger.info("Falcon is not part of the installation")
return
falconUser = 'falcon'
if "falcon-env" in services["configurations"] :
if "falcon_user" in services["configurations"]["falcon-env"]["properties"] :
falconUser = services["configurations"]["falcon-env"]["properties"]["falcon_user"]
Logger.info("Falcon user from configuration: %s " % falconUser)
Logger.info("Falcon user : %s" % falconUser)
oozieUser = 'oozie'
if "oozie_user" \
in services["configurations"]["oozie-env"]["properties"] :
oozieUser = services["configurations"]["oozie-env"]["properties"]["oozie_user"]
Logger.info("Oozie user from configuration %s" % oozieUser)
Logger.info("Oozie user %s" % oozieUser)
if "oozie_admin_users" \
in services["configurations"]["oozie-env"]["properties"] :
currentAdminUsers = services["configurations"]["oozie-env"]["properties"]["oozie_admin_users"]
Logger.info("Oozie admin users from configuration %s" % currentAdminUsers)
else :
currentAdminUsers = "{0}, oozie-admin".format(oozieUser)
Logger.info("Setting default oozie admin users to %s" % currentAdminUsers)
if falconUser in currentAdminUsers :
Logger.info("Falcon user %s already member of oozie admin users " % falconUser)
return
newAdminUsers = "{0},{1}".format(currentAdminUsers, falconUser)
Logger.info("new oozie admin users : %s" % newAdminUsers)
services["forced-configurations"].append({"type" : "oozie-env", "name" : "oozie_admin_users"})
putOozieEnvProperty("oozie_admin_users", newAdminUsers)
def createComponentLayoutRecommendations(self, services, hosts):
parentComponentLayoutRecommendations = super(ADH15StackAdvisor, self).createComponentLayoutRecommendations(
services, hosts)
return parentComponentLayoutRecommendations
def getComponentLayoutValidations(self, services, hosts):
parentItems = super(ADH15StackAdvisor, self).getComponentLayoutValidations(services, hosts)
childItems = []
hsi_hosts = self.getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
if len(hsi_hosts) > 1:
message = "Only one host can install HIVE_SERVER_INTERACTIVE. "
childItems.append(
{"type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'HIVE_SERVER_INTERACTIVE'})
parentItems.extend(childItems)
return parentItems
def getServiceConfigurationValidators(self):
parentValidators = super(ADH15StackAdvisor, self).getServiceConfigurationValidators()
childValidators = {
"ATLAS": {"application-properties": self.validateAtlasConfigurations},
"HIVE": {"hive-interactive-env": self.validateHiveInteractiveEnvConfigurations,
"hive-interactive-site": self.validateHiveInteractiveSiteConfigurations,
"hive-env": self.validateHiveConfigurationsEnv},
"YARN": {"yarn-site": self.validateYARNConfigurations},
"RANGER": {"ranger-tagsync-site": self.validateRangerTagsyncConfigurations},
"SPARK2": {"spark2-defaults": self.validateSpark2Defaults,
"spark2-thrift-sparkconf": self.validateSpark2ThriftSparkConf},
"STORM": {"storm-site": self.validateStormConfigurations},
}
self.mergeValidators(parentValidators, childValidators)
return parentValidators
def validateStormConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
super(ADH15StackAdvisor, self).validateStormConfigurations(properties, recommendedDefaults, configurations, services, hosts)
validationItems = []
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
# Storm AMS integration
if 'AMBARI_METRICS' in servicesList:
if "storm.cluster.metrics.consumer.register" in properties and \
'null' in properties.get("storm.cluster.metrics.consumer.register"):
validationItems.append({"config-name": 'storm.cluster.metrics.consumer.register',
"item": self.getWarnItem(
"Should be set to recommended value to report metrics to Ambari Metrics service.")})
if "topology.metrics.consumer.register" in properties and \
'null' in properties.get("topology.metrics.consumer.register"):
validationItems.append({"config-name": 'topology.metrics.consumer.register',
"item": self.getWarnItem(
"Should be set to recommended value to report metrics to Ambari Metrics service.")})
return self.toConfigurationValidationProblems(validationItems, "storm-site")
def getCardinalitiesDict(self, hosts):
result = super(ADH15StackAdvisor, self).getCardinalitiesDict(hosts)
min_val = 1
if len(hosts["items"]) > 999:
min_val = 2
result['METRICS_COLLECTOR'] = {"min": min_val}
return result
def validateAtlasConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
application_properties = self.getSiteProperties(configurations, "application-properties")
validationItems = []
auth_type = application_properties['atlas.authentication.method.ldap.type']
auth_ldap_enable = application_properties['atlas.authentication.method.ldap'].lower() == 'true'
Logger.info("Validating Atlas configs, authentication type: %s" % str(auth_type))
# Required props
ldap_props = {"atlas.authentication.method.ldap.url": "",
"atlas.authentication.method.ldap.userDNpattern": "uid=",
"atlas.authentication.method.ldap.groupSearchBase": "",
"atlas.authentication.method.ldap.groupSearchFilter": "",
"atlas.authentication.method.ldap.groupRoleAttribute": "cn",
"atlas.authentication.method.ldap.base.dn": "",
"atlas.authentication.method.ldap.bind.dn": "",
"atlas.authentication.method.ldap.bind.password": "",
"atlas.authentication.method.ldap.user.searchfilter": ""
}
ad_props = {"atlas.authentication.method.ldap.ad.domain": "",
"atlas.authentication.method.ldap.ad.url": "",
"atlas.authentication.method.ldap.ad.base.dn": "",
"atlas.authentication.method.ldap.ad.bind.dn": "",
"atlas.authentication.method.ldap.ad.bind.password": "",
"atlas.authentication.method.ldap.ad.user.searchfilter": "(sAMAccountName={0})"
}
props_to_require = set()
if auth_type.lower() == "ldap":
props_to_require = set(ldap_props.keys())
elif auth_type.lower() == "ad":
props_to_require = set(ad_props.keys())
elif auth_type.lower() == "none":
pass
if auth_ldap_enable:
for prop in props_to_require:
if prop not in application_properties or application_properties[prop] is None or application_properties[prop].strip() == "":
validationItems.append({"config-name": prop,
"item": self.getErrorItem("If authentication type is %s, this property is required." % auth_type)})
if application_properties['atlas.graph.index.search.backend'] == 'solr5' and \
not application_properties['atlas.graph.index.search.solr.zookeeper-url']:
validationItems.append({"config-name": "atlas.graph.index.search.solr.zookeeper-url",
"item": self.getErrorItem(
"If AMBARI_INFRA is not installed then the SOLR zookeeper url configuration must be specified.")})
if not application_properties['atlas.kafka.bootstrap.servers']:
validationItems.append({"config-name": "atlas.kafka.bootstrap.servers",
"item": self.getErrorItem(
"If KAFKA is not installed then the Kafka bootstrap servers configuration must be specified.")})
if not application_properties['atlas.kafka.zookeeper.connect']:
validationItems.append({"config-name": "atlas.kafka.zookeeper.connect",
"item": self.getErrorItem(
"If KAFKA is not installed then the Kafka zookeeper quorum configuration must be specified.")})
if application_properties['atlas.graph.storage.backend'] == 'hbase' and 'hbase-site' in services['configurations']:
hbase_zookeeper_quorum = services['configurations']['hbase-site']['properties']['hbase.zookeeper.quorum']
if not application_properties['atlas.graph.storage.hostname']:
validationItems.append({"config-name": "atlas.graph.storage.hostname",
"item": self.getErrorItem(
"If HBASE is not installed then the hbase zookeeper quorum configuration must be specified.")})
elif string_set_equals(application_properties['atlas.graph.storage.hostname'], hbase_zookeeper_quorum):
validationItems.append({"config-name": "atlas.graph.storage.hostname",
"item": self.getWarnItem(
"Atlas is configured to use the HBase installed in this cluster. If you would like Atlas to use another HBase instance, please configure this property and HBASE_CONF_DIR variable in atlas-env appropriately.")})
if not application_properties['atlas.audit.hbase.zookeeper.quorum']:
validationItems.append({"config-name": "atlas.audit.hbase.zookeeper.quorum",
"item": self.getErrorItem(
"If HBASE is not installed then the audit hbase zookeeper quorum configuration must be specified.")})
elif application_properties['atlas.graph.storage.backend'] == 'hbase' and 'hbase-site' not in services[
'configurations']:
if not application_properties['atlas.graph.storage.hostname']:
validationItems.append({"config-name": "atlas.graph.storage.hostname",
"item": self.getErrorItem(
"Atlas is not configured to use the HBase installed in this cluster. If you would like Atlas to use another HBase instance, please configure this property and HBASE_CONF_DIR variable in atlas-env appropriately.")})
if not application_properties['atlas.audit.hbase.zookeeper.quorum']:
validationItems.append({"config-name": "atlas.audit.hbase.zookeeper.quorum",
"item": self.getErrorItem(
"If HBASE is not installed then the audit hbase zookeeper quorum configuration must be specified.")})
validationProblems = self.toConfigurationValidationProblems(validationItems, "application-properties")
return validationProblems
def validateSpark2Defaults(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [
{
"config-name": 'spark.yarn.queue',
"item": self.validatorYarnQueue(properties, recommendedDefaults, 'spark.yarn.queue', services)
}
]
return self.toConfigurationValidationProblems(validationItems, "spark2-defaults")
def validateSpark2ThriftSparkConf(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [
{
"config-name": 'spark.yarn.queue',
"item": self.validatorYarnQueue(properties, recommendedDefaults, 'spark.yarn.queue', services)
}
]
return self.toConfigurationValidationProblems(validationItems, "spark2-thrift-sparkconf")
def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
parentValidationProblems = super(ADH15StackAdvisor, self).validateYARNConfigurations(properties, recommendedDefaults, configurations, services, hosts)
yarn_site_properties = self.getSiteProperties(configurations, "yarn-site")
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
componentsListList = [service["components"] for service in services["services"]]
componentsList = [item["StackServiceComponents"] for sublist in componentsListList for item in sublist]
validationItems = []
hsi_hosts = self.getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
if len(hsi_hosts) > 0:
# HIVE_SERVER_INTERACTIVE is mapped to a host
if 'yarn.resourcemanager.work-preserving-recovery.enabled' not in yarn_site_properties or \
'true' != yarn_site_properties['yarn.resourcemanager.work-preserving-recovery.enabled']:
validationItems.append({"config-name": "yarn.resourcemanager.work-preserving-recovery.enabled",
"item": self.getWarnItem(
"While enabling HIVE_SERVER_INTERACTIVE it is recommended that you enable work preserving restart in YARN.")})
validationProblems = self.toConfigurationValidationProblems(validationItems, "yarn-site")
validationProblems.extend(parentValidationProblems)
return validationProblems
def validateHiveInteractiveSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
"""
Does the following validation checks for HIVE_SERVER_INTERACTIVE's hive-interactive-site configs.
1. Queue selected in 'hive.llap.daemon.queue.name' config should be sized >= to minimum required to run LLAP
and Hive2 app.
2. Queue selected in 'hive.llap.daemon.queue.name' config state should not be 'STOPPED'.
3. 'hive.server2.enable.doAs' config should be set to 'false' for Hive2.
4. 'Maximum Total Concurrent Queries' (hive.server2.tez.sessions.per.default.queue) should not consume more than 50% of the queue selected for LLAP.
5. If the 'llap' queue is selected, the 'remaining available capacity' in the cluster must be at least 512 MB in order to run Service Checks.
"""
validationItems = []
hsi_hosts = self.getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
llap_queue_name = None
llap_queue_cap_perc = None
MIN_ASSUMED_CAP_REQUIRED_FOR_SERVICE_CHECKS = 512
llap_queue_cap = None
hsi_site = self.getServicesSiteProperties(services, self.HIVE_INTERACTIVE_SITE)
if len(hsi_hosts) == 0:
return []
# Get total cluster capacity
node_manager_host_list = self.getHostsForComponent(services, "YARN", "NODEMANAGER")
node_manager_cnt = len(node_manager_host_list)
yarn_nm_mem_in_mb = self.get_yarn_nm_mem_in_mb(services, configurations)
total_cluster_cap = node_manager_cnt * yarn_nm_mem_in_mb
capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
if not capacity_scheduler_properties:
Logger.warning("Couldn't retrieve 'capacity-scheduler' properties while doing validation checks for Hive Server Interactive.")
return []
if hsi_site:
if "hive.llap.daemon.queue.name" in hsi_site and hsi_site['hive.llap.daemon.queue.name']:
llap_queue_name = hsi_site['hive.llap.daemon.queue.name']
llap_queue_cap = self.__getSelectedQueueTotalCap(capacity_scheduler_properties, llap_queue_name, total_cluster_cap)
if llap_queue_cap:
llap_queue_cap_perc = float(llap_queue_cap * 100 / total_cluster_cap)
min_reqd_queue_cap_perc = self.min_queue_perc_reqd_for_llap_and_hive_app(services, hosts, configurations)
# Validate that the selected queue in 'hive.llap.daemon.queue.name' should be sized >= to minimum required
# to run LLAP and Hive2 app.
if llap_queue_cap_perc < min_reqd_queue_cap_perc:
errMsg1 = "Selected queue '{0}' capacity ({1}%) is less than minimum required capacity ({2}%) for LLAP " \
"app to run".format(llap_queue_name, llap_queue_cap_perc, min_reqd_queue_cap_perc)
validationItems.append({"config-name": "hive.llap.daemon.queue.name", "item": self.getErrorItem(errMsg1)})
else:
Logger.error("Couldn't retrieve '{0}' queue's capacity from 'capacity-scheduler' while doing validation checks for "
"Hive Server Interactive.".format(llap_queue_name))
# Validate that current selected queue in 'hive.llap.daemon.queue.name' state is not STOPPED.
llap_selected_queue_state = self.__getQueueStateFromCapacityScheduler(capacity_scheduler_properties, llap_queue_name)
if llap_selected_queue_state:
if llap_selected_queue_state == "STOPPED":
errMsg2 = "Selected queue '{0}' current state is : '{1}'. It is required to be in 'RUNNING' state for LLAP to run"\
.format(llap_queue_name, llap_selected_queue_state)
validationItems.append({"config-name": "hive.llap.daemon.queue.name","item": self.getErrorItem(errMsg2)})
else:
Logger.error("Couldn't retrieve '{0}' queue's state from 'capacity-scheduler' while doing validation checks for "
"Hive Server Interactive.".format(llap_queue_name))
else:
Logger.error("Couldn't retrieve 'hive.llap.daemon.queue.name' config from 'hive-interactive-site' while doing "
"validation checks for Hive Server Interactive.")
# Validate that 'hive.server2.enable.doAs' config is not set to 'true' for Hive2.
if 'hive.server2.enable.doAs' in hsi_site and hsi_site['hive.server2.enable.doAs'] == "true":
validationItems.append({"config-name": "hive.server2.enable.doAs", "item": self.getErrorItem("Value should be set to 'false' for Hive2.")})
# Validate that 'Maximum Total Concurrent Queries' (hive.server2.tez.sessions.per.default.queue) is not consuming more than
# 50% of the queue selected for LLAP.
if llap_queue_cap and 'hive.server2.tez.sessions.per.default.queue' in hsi_site:
num_tez_sessions = hsi_site['hive.server2.tez.sessions.per.default.queue']
if num_tez_sessions:
num_tez_sessions = long(num_tez_sessions)
yarn_min_container_size = long(self.get_yarn_min_container_size(services, configurations))
tez_am_container_size = self.calculate_tez_am_container_size(services, long(total_cluster_cap))
normalized_tez_am_container_size = self._normalizeUp(tez_am_container_size, yarn_min_container_size)
llap_selected_queue_cap_remaining = llap_queue_cap - (normalized_tez_am_container_size * num_tez_sessions)
if llap_selected_queue_cap_remaining <= llap_queue_cap/2:
errMsg3 = " Reducing the 'Maximum Total Concurrent Queries' (value: {0}) is advisable as it is consuming more than 50% of " \
"'{1}' queue for LLAP.".format(num_tez_sessions, llap_queue_name)
validationItems.append({"config-name": "hive.server2.tez.sessions.per.default.queue","item": self.getWarnItem(errMsg3)})
# Validate that 'remaining available capacity' in cluster is at least 512 MB, after 'llap' queue is selected,
# in order to run Service Checks.
if llap_queue_name and llap_queue_cap_perc and llap_queue_name == self.AMBARI_MANAGED_LLAP_QUEUE_NAME:
curr_selected_queue_for_llap_cap = float(llap_queue_cap_perc) / 100 * total_cluster_cap
available_cap_in_cluster = total_cluster_cap - curr_selected_queue_for_llap_cap
if available_cap_in_cluster < MIN_ASSUMED_CAP_REQUIRED_FOR_SERVICE_CHECKS:
errMsg4 = "Capacity used by '{0}' queue is '{1}'. Service checks may not run as remaining available capacity " \
"({2}) in cluster is less than 512 MB.".format(self.AMBARI_MANAGED_LLAP_QUEUE_NAME, curr_selected_queue_for_llap_cap, available_cap_in_cluster)
validationItems.append({"config-name": "hive.llap.daemon.queue.name","item": self.getWarnItem(errMsg4)})
validationProblems = self.toConfigurationValidationProblems(validationItems, "hive-interactive-site")
return validationProblems
def validateHiveConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
parentValidationProblems = super(ADH15StackAdvisor, self).validateHiveConfigurationsEnv(properties, recommendedDefaults, configurations, services, hosts)
hive_site_properties = self.getSiteProperties(configurations, "hive-site")
hive_env_properties = self.getSiteProperties(configurations, "hive-env")
validationItems = []
if 'hive.server2.authentication' in hive_site_properties and "LDAP" == hive_site_properties['hive.server2.authentication']:
if 'alert_ldap_username' not in hive_env_properties or hive_env_properties['alert_ldap_username'] == "":
validationItems.append({"config-name": "alert_ldap_username",
"item": self.getWarnItem(
"Provide an user to be used for alerts. Hive authentication type LDAP requires valid LDAP credentials for the alerts.")})
if 'alert_ldap_password' not in hive_env_properties or hive_env_properties['alert_ldap_password'] == "":
validationItems.append({"config-name": "alert_ldap_password",
"item": self.getWarnItem(
"Provide the password for the alert user. Hive authentication type LDAP requires valid LDAP credentials for the alerts.")})
validationProblems = self.toConfigurationValidationProblems(validationItems, "hive-env")
validationProblems.extend(parentValidationProblems)
return validationProblems
def validateHiveInteractiveEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
hive_site_env_properties = self.getSiteProperties(configurations, "hive-interactive-env")
yarn_site_properties = self.getSiteProperties(configurations, "yarn-site")
validationItems = []
hsi_hosts = self.getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
# Expect 'enable_hive_interactive' to be ON given that HIVE_SERVER_INTERACTIVE is present on at least one host.
if len(hsi_hosts) > 0:
# HIVE_SERVER_INTERACTIVE is mapped to a host
if 'enable_hive_interactive' not in hive_site_env_properties or (
'enable_hive_interactive' in hive_site_env_properties and
hive_site_env_properties['enable_hive_interactive'].lower() != 'true'):
validationItems.append({"config-name": "enable_hive_interactive",
"item": self.getErrorItem(
"HIVE_SERVER_INTERACTIVE requires enable_hive_interactive in hive-interactive-env set to true.")})
else:
# no HIVE_SERVER_INTERACTIVE
if 'enable_hive_interactive' in hive_site_env_properties and hive_site_env_properties[
'enable_hive_interactive'].lower() != 'false':
validationItems.append({"config-name": "enable_hive_interactive",
"item": self.getErrorItem(
"enable_hive_interactive in hive-interactive-env should be set to false.")})
# Check for 'yarn.resourcemanager.scheduler.monitor.enable' config to be true if HSI is ON.
if yarn_site_properties and 'yarn.resourcemanager.scheduler.monitor.enable' in yarn_site_properties:
scheduler_monitor_enabled = yarn_site_properties['yarn.resourcemanager.scheduler.monitor.enable']
if scheduler_monitor_enabled.lower() == 'false' and hive_site_env_properties and 'enable_hive_interactive' in hive_site_env_properties and \
hive_site_env_properties['enable_hive_interactive'].lower() == 'true':
validationItems.append({"config-name": "enable_hive_interactive",
"item": self.getWarnItem(
"When enabling LLAP, set 'yarn.resourcemanager.scheduler.monitor.enable' to true to ensure that LLAP gets the full allocated capacity.")})
validationProblems = self.toConfigurationValidationProblems(validationItems, "hive-interactive-env")
return validationProblems
def getServiceConfigurationRecommenderDict(self):
parentRecommendConfDict = super(ADH15StackAdvisor, self).getServiceConfigurationRecommenderDict()
childRecommendConfDict = {
"RANGER": self.recommendRangerConfigurations,
"HBASE": self.recommendHBASEConfigurations,
"HIVE": self.recommendHIVEConfigurations,
"ATLAS": self.recommendAtlasConfigurations,
"RANGER_KMS": self.recommendRangerKMSConfigurations,
"STORM": self.recommendStormConfigurations,
"OOZIE": self.recommendOozieConfigurations,
"SPARK2": self.recommendSpark2Configurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
def recommendSpark2Configurations(self, configurations, clusterData, services, hosts):
"""
:type configurations dict
:type clusterData dict
:type services dict
:type hosts dict
"""
putSparkProperty = self.putProperty(configurations, "spark2-defaults", services)
putSparkThriftSparkConf = self.putProperty(configurations, "spark2-thrift-sparkconf", services)
spark_queue = self.recommendYarnQueue(services, "spark2-defaults", "spark.yarn.queue")
if spark_queue is not None:
putSparkProperty("spark.yarn.queue", spark_queue)
spark_thrift_queue = self.recommendYarnQueue(services, "spark2-thrift-sparkconf", "spark.yarn.queue")
if spark_thrift_queue is not None:
putSparkThriftSparkConf("spark.yarn.queue", spark_thrift_queue)
def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
super(ADH15StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
storm_site = self.getServicesSiteProperties(services, "storm-site")
storm_env = self.getServicesSiteProperties(services, "storm-env")
putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
security_enabled = self.isSecurityEnabled(services)
if storm_env and storm_site:
if security_enabled:
_storm_principal_name = storm_env['storm_principal_name'] if 'storm_principal_name' in storm_env else None
storm_bare_jaas_principal = get_bare_principal(_storm_principal_name)
if 'nimbus.impersonation.acl' in storm_site:
storm_nimbus_impersonation_acl = storm_site["nimbus.impersonation.acl"]
# str.replace returns a new string, so capture the result before writing the property back
storm_nimbus_impersonation_acl = storm_nimbus_impersonation_acl.replace('{{storm_bare_jaas_principal}}', storm_bare_jaas_principal)
putStormSiteProperty('nimbus.impersonation.acl', storm_nimbus_impersonation_acl)
else:
if 'nimbus.impersonation.acl' in storm_site:
putStormSiteAttributes('nimbus.impersonation.acl', 'delete', 'true')
if 'nimbus.impersonation.authorizer' in storm_site:
putStormSiteAttributes('nimbus.impersonation.authorizer', 'delete', 'true')
rangerPluginEnabled = ''
if 'ranger-storm-plugin-properties' in configurations and 'ranger-storm-plugin-enabled' in configurations['ranger-storm-plugin-properties']['properties']:
rangerPluginEnabled = configurations['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
elif 'ranger-storm-plugin-properties' in services['configurations'] and 'ranger-storm-plugin-enabled' in services['configurations']['ranger-storm-plugin-properties']['properties']:
rangerPluginEnabled = services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
storm_authorizer_class = 'org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer'
ranger_authorizer_class = 'org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer'
# Cluster is kerberized
if security_enabled:
if rangerPluginEnabled and (rangerPluginEnabled.lower() == 'Yes'.lower()):
putStormSiteProperty('nimbus.authorizer',ranger_authorizer_class)
else:
putStormSiteProperty('nimbus.authorizer', storm_authorizer_class)
else:
putStormSiteAttributes('nimbus.authorizer', 'delete', 'true')
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
# Storm AMS integration
if 'AMBARI_METRICS' in servicesList:
putStormSiteProperty('storm.cluster.metrics.consumer.register', '[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"}]')
putStormSiteProperty('topology.metrics.consumer.register',
'[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", '
'"parallelism.hint": 1, '
'"whitelist": ["kafkaOffset\\\..+/", "__complete-latency", "__process-latency", '
'"__receive\\\.population$", "__sendqueue\\\.population$", "__execute-count", "__emit-count", '
'"__ack-count", "__fail-count", "memory/heap\\\.usedBytes$", "memory/nonHeap\\\.usedBytes$", '
'"GC/.+\\\.count$", "GC/.+\\\.timeMs$"]}]')
else:
putStormSiteProperty('storm.cluster.metrics.consumer.register', 'null')
putStormSiteProperty('topology.metrics.consumer.register', 'null')
def constructAtlasRestAddress(self, services, hosts):
"""
:param services: Collection of services in the cluster with configs
:param hosts: Collection of hosts in the cluster
:return: The suggested property for atlas.rest.address if it is valid, otherwise, None
"""
atlas_rest_address = None
services_list = [service["StackServices"]["service_name"] for service in services["services"]]
is_atlas_in_cluster = "ATLAS" in services_list
atlas_server_hosts_info = self.getHostsWithComponent("ATLAS", "ATLAS_SERVER", services, hosts)
if is_atlas_in_cluster and atlas_server_hosts_info and len(atlas_server_hosts_info) > 0:
# Multiple Atlas Servers can exist, so sort by hostname to create deterministic csv
atlas_host_names = [e['Hosts']['host_name'] for e in atlas_server_hosts_info]
if len(atlas_host_names) > 1:
atlas_host_names = sorted(atlas_host_names)
scheme = "http"
metadata_port = "21000"
atlas_server_default_https_port = "21443"
tls_enabled = "false"
if 'application-properties' in services['configurations']:
if 'atlas.enableTLS' in services['configurations']['application-properties']['properties']:
tls_enabled = services['configurations']['application-properties']['properties']['atlas.enableTLS']
if 'atlas.server.http.port' in services['configurations']['application-properties']['properties']:
metadata_port = str(services['configurations']['application-properties']['properties']['atlas.server.http.port'])
if str(tls_enabled).lower() == "true":
scheme = "https"
if 'atlas.server.https.port' in services['configurations']['application-properties']['properties']:
metadata_port = str(services['configurations']['application-properties']['properties']['atlas.server.https.port'])
else:
metadata_port = atlas_server_default_https_port
atlas_rest_address_list = ["{0}://{1}:{2}".format(scheme, hostname, metadata_port) for hostname in atlas_host_names]
atlas_rest_address = ",".join(atlas_rest_address_list)
Logger.info("Constructing atlas.rest.address=%s" % atlas_rest_address)
return atlas_rest_address
def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
putAtlasApplicationProperty = self.putProperty(configurations, "application-properties", services)
putAtlasRangerPluginProperty = self.putProperty(configurations, "ranger-atlas-plugin-properties", services)
putAtlasEnvProperty = self.putProperty(configurations, "atlas-env", services)
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
# Generate atlas.rest.address since the value is always computed
atlas_rest_address = self.constructAtlasRestAddress(services, hosts)
if atlas_rest_address is not None:
putAtlasApplicationProperty("atlas.rest.address", atlas_rest_address)
if "AMBARI_INFRA" in servicesList and 'infra-solr-env' in services['configurations']:
if 'infra_solr_znode' in services['configurations']['infra-solr-env']['properties']:
infra_solr_znode = services['configurations']['infra-solr-env']['properties']['infra_solr_znode']
else:
infra_solr_znode = None
zookeeper_hosts = self.getHostNamesWithComponent("ZOOKEEPER", "ZOOKEEPER_SERVER", services)
zookeeper_host_arr = []
zookeeper_port = self.getZKPort(services)
for i in range(len(zookeeper_hosts)):
zookeeper_host = zookeeper_hosts[i] + ':' + zookeeper_port
if infra_solr_znode is not None:
zookeeper_host += infra_solr_znode
zookeeper_host_arr.append(zookeeper_host)
solr_zookeeper_url = ",".join(zookeeper_host_arr)
putAtlasApplicationProperty('atlas.graph.index.search.solr.zookeeper-url', solr_zookeeper_url)
else:
putAtlasApplicationProperty('atlas.graph.index.search.solr.zookeeper-url', "")
# Kafka section
if "KAFKA" in servicesList and 'kafka-broker' in services['configurations']:
kafka_hosts = self.getHostNamesWithComponent("KAFKA", "KAFKA_BROKER", services)
if 'port' in services['configurations']['kafka-broker']['properties']:
kafka_broker_port = services['configurations']['kafka-broker']['properties']['port']
else:
kafka_broker_port = '6667'
if 'kafka-broker' in services['configurations'] and 'listeners' in services['configurations']['kafka-broker']['properties']:
kafka_server_listeners = services['configurations']['kafka-broker']['properties']['listeners']
else:
kafka_server_listeners = 'PLAINTEXT://localhost:6667'
security_enabled = self.isSecurityEnabled(services)
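# The parsing below picks the broker port out of the 'listeners' string, e.g. (illustrative)
# 'PLAINTEXT://localhost:6667,SASL_PLAINTEXT://localhost:6668': with security enabled the SASL listener's
# port (6668) wins, otherwise the non-SASL port (6667) is used.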
if ',' in kafka_server_listeners and len(kafka_server_listeners.split(',')) > 1:
for listener in kafka_server_listeners.split(','):
listener = listener.strip().split(':')
if len(listener) == 3:
if 'SASL' in listener[0] and security_enabled:
kafka_broker_port = listener[2]
break
elif 'SASL' not in listener[0] and not security_enabled:
kafka_broker_port = listener[2]
else:
listener = kafka_server_listeners.strip().split(':')
if len(listener) == 3:
kafka_broker_port = listener[2]
kafka_host_arr = []
for i in range(len(kafka_hosts)):
kafka_host_arr.append(kafka_hosts[i] + ':' + kafka_broker_port)
kafka_bootstrap_servers = ",".join(kafka_host_arr)
if 'zookeeper.connect' in services['configurations']['kafka-broker']['properties']:
kafka_zookeeper_connect = services['configurations']['kafka-broker']['properties']['zookeeper.connect']
else:
kafka_zookeeper_connect = None
putAtlasApplicationProperty('atlas.kafka.bootstrap.servers', kafka_bootstrap_servers)
putAtlasApplicationProperty('atlas.kafka.zookeeper.connect', kafka_zookeeper_connect)
else:
putAtlasApplicationProperty('atlas.kafka.bootstrap.servers', "")
putAtlasApplicationProperty('atlas.kafka.zookeeper.connect', "")
if "HBASE" in servicesList and 'hbase-site' in services['configurations']:
if 'hbase.zookeeper.quorum' in services['configurations']['hbase-site']['properties']:
hbase_zookeeper_quorum = services['configurations']['hbase-site']['properties']['hbase.zookeeper.quorum']
else:
hbase_zookeeper_quorum = ""
putAtlasApplicationProperty('atlas.graph.storage.hostname', hbase_zookeeper_quorum)
putAtlasApplicationProperty('atlas.audit.hbase.zookeeper.quorum', hbase_zookeeper_quorum)
else:
putAtlasApplicationProperty('atlas.graph.storage.hostname', "")
putAtlasApplicationProperty('atlas.audit.hbase.zookeeper.quorum', "")
if "ranger-env" in services["configurations"] and "ranger-atlas-plugin-properties" in services["configurations"] and \
"ranger-atlas-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
ranger_atlas_plugin_enabled = services["configurations"]["ranger-env"]["properties"]["ranger-atlas-plugin-enabled"]
putAtlasRangerPluginProperty('ranger-atlas-plugin-enabled', ranger_atlas_plugin_enabled)
ranger_atlas_plugin_enabled = ''
if 'ranger-atlas-plugin-properties' in configurations and 'ranger-atlas-plugin-enabled' in configurations['ranger-atlas-plugin-properties']['properties']:
ranger_atlas_plugin_enabled = configurations['ranger-atlas-plugin-properties']['properties']['ranger-atlas-plugin-enabled']
elif 'ranger-atlas-plugin-properties' in services['configurations'] and 'ranger-atlas-plugin-enabled' in services['configurations']['ranger-atlas-plugin-properties']['properties']:
ranger_atlas_plugin_enabled = services['configurations']['ranger-atlas-plugin-properties']['properties']['ranger-atlas-plugin-enabled']
if ranger_atlas_plugin_enabled and (ranger_atlas_plugin_enabled.lower() == 'Yes'.lower()):
putAtlasApplicationProperty('atlas.authorizer.impl','ranger')
else:
putAtlasApplicationProperty('atlas.authorizer.impl','simple')
#atlas server memory settings
if 'atlas-env' in services['configurations']:
atlas_server_metadata_size = 50000
if 'atlas_server_metadata_size' in services['configurations']['atlas-env']['properties']:
atlas_server_metadata_size = float(services['configurations']['atlas-env']['properties']['atlas_server_metadata_size'])
atlas_server_xmx = 2048
if 300000 <= atlas_server_metadata_size < 500000:
atlas_server_xmx = 1024*5
if 500000 <= atlas_server_metadata_size < 1000000:
atlas_server_xmx = 1024*10
if atlas_server_metadata_size >= 1000000:
atlas_server_xmx = 1024*16
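# Heap sizing heuristic above (values from the branches): < 300k metadata entities -> 2048 MB,
# 300k-500k -> 5120 MB, 500k-1M -> 10240 MB, >= 1M -> 16384 MB. The following lines then set
# atlas_server_max_new_size to roughly 30% of the chosen heap (e.g. 2048 / 100 * 30 = 600 MB).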
atlas_server_max_new_size = (atlas_server_xmx / 100) * 30
putAtlasEnvProperty("atlas_server_xmx", atlas_server_xmx)
putAtlasEnvProperty("atlas_server_max_new_size", atlas_server_max_new_size)
def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
super(ADH15StackAdvisor, self).recommendHBASEConfigurations(configurations, clusterData, services, hosts)
putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
if "cluster-env" in services["configurations"] \
and "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
# Set the master's UI to readonly
putHbaseSiteProperty('hbase.master.ui.readonly', 'true')
phoenix_query_server_hosts = self.get_phoenix_query_server_hosts(services, hosts)
Logger.debug("Calculated Phoenix Query Server hosts: %s" % str(phoenix_query_server_hosts))
if phoenix_query_server_hosts:
Logger.debug("Attempting to update hadoop.proxyuser.HTTP.hosts with %s" % str(phoenix_query_server_hosts))
# The PQS hosts we want to ensure are set
new_value = ','.join(phoenix_query_server_hosts)
# Update the proxyuser setting, deferring to our callback to merge results together
self.put_proxyuser_value("HTTP", new_value, services=services, configurations=configurations, put_function=putCoreSiteProperty)
else:
Logger.debug("No phoenix query server hosts to update")
else:
putHbaseSiteProperty('hbase.master.ui.readonly', 'false')
"""
Returns the list of Phoenix Query Server host names, or None.
"""
def get_phoenix_query_server_hosts(self, services, hosts):
if len(hosts['items']) > 0:
phoenix_query_server_hosts = self.getHostsWithComponent("HBASE", "PHOENIX_QUERY_SERVER", services, hosts)
if phoenix_query_server_hosts is None:
return []
return [host['Hosts']['host_name'] for host in phoenix_query_server_hosts]
def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
Logger.info("DBG: Invoked recommendHiveConfiguration")
super(ADH15StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
putHiveInteractiveEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-env")
# For 'Hive Server Interactive', if the component exists.
hsi_hosts = self.getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
hsi_properties = self.getServicesSiteProperties(services, self.HIVE_INTERACTIVE_SITE)
if len(hsi_hosts) > 0:
putHiveInteractiveEnvProperty('enable_hive_interactive', 'true')
# Update 'hive.llap.daemon.queue.name' property attributes if capacity scheduler is changed.
if hsi_properties and 'hive.llap.daemon.queue.name' in hsi_properties:
self.setLlapDaemonQueuePropAttributes(services, configurations)
hsi_conf_properties = self.getSiteProperties(configurations, self.HIVE_INTERACTIVE_SITE)
hive_tez_default_queue = hsi_properties["hive.llap.daemon.queue.name"]
if hsi_conf_properties and "hive.llap.daemon.queue.name" in hsi_conf_properties:
hive_tez_default_queue = hsi_conf_properties['hive.llap.daemon.queue.name']
if hive_tez_default_queue:
putHiveInteractiveSiteProperty("hive.server2.tez.default.queues", hive_tez_default_queue)
Logger.debug("Updated 'hive.server2.tez.default.queues' config : '{0}'".format(hive_tez_default_queue))
else:
Logger.info("DBG: Setting 'num_llap_nodes' config's READ ONLY attribute as 'True'.")
putHiveInteractiveEnvProperty('enable_hive_interactive', 'false')
putHiveInteractiveEnvPropertyAttribute("num_llap_nodes", "read_only", "true")
if hsi_properties and "hive.llap.zk.sm.connectionString" in hsi_properties:
zookeeper_host_port = self.getZKHostPortString(services)
if zookeeper_host_port:
putHiveInteractiveSiteProperty("hive.llap.zk.sm.connectionString", zookeeper_host_port)
def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
super(ADH15StackAdvisor, self).recommendYARNConfigurations(configurations, clusterData, services, hosts)
hsi_env_poperties = self.getServicesSiteProperties(services, "hive-interactive-env")
cluster_env = self.getServicesSiteProperties(services, "cluster-env")
# Queue 'llap' creation/removal logic (Used by Hive Interactive server and associated LLAP)
if hsi_env_poperties and 'enable_hive_interactive' in hsi_env_poperties:
enable_hive_interactive = hsi_env_poperties['enable_hive_interactive']
LLAP_QUEUE_NAME = 'llap'
# Hive Server interactive is already added or getting added
if enable_hive_interactive == 'true':
self.updateLlapConfigs(configurations, services, hosts, LLAP_QUEUE_NAME)
else: # When Hive Interactive Server is in 'off/removed' state.
self.checkAndStopLlapQueue(services, configurations, LLAP_QUEUE_NAME)
putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
stack_root = "/usr/lib"
if cluster_env and "stack_root" in cluster_env:
stack_root = cluster_env["stack_root"]
timeline_plugin_classes_values = []
timeline_plugin_classpath_values = []
if self.__isServiceDeployed(services, "TEZ"):
timeline_plugin_classes_values.append('org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl')
if self.__isServiceDeployed(services, "SPARK2"):
timeline_plugin_classes_values.append('org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin')
timeline_plugin_classpath_values.append(stack_root + "/spark/jars/*")
putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes', ",".join(timeline_plugin_classes_values))
putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath', ":".join(timeline_plugin_classpath_values))
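# Example of the resulting values (illustrative, default stack_root): with both TEZ and SPARK2 deployed,
# ...group-id-plugin-classes becomes 'org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl,'
# 'org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin' and ...group-id-plugin-classpath becomes '/usr/lib/spark/jars/*'.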
def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name):
"""
Entry point for updating Hive's 'LLAP app' configs namely :
(1). num_llap_nodes (2). hive.llap.daemon.yarn.container.mb
(3). hive.llap.daemon.num.executors (4). hive.llap.io.memory.size (5). llap_heap_size (6). slider_am_container_mb,
(7). hive.server2.tez.sessions.per.default.queue, (8). tez.am.resource.memory.mb (9). hive.tez.container.size
(10). tez.runtime.io.sort.mb (11). tez.runtime.unordered.output.buffer.size-mb (12). hive.llap.io.threadpool.size, and
(13). hive.llap.io.enabled.
The trigger point for updating LLAP configs (mentioned above) is change in values of any of the following:
(1). 'enable_hive_interactive' set to 'true' (2). 'num_llap_nodes' (3). 'hive.server2.tez.sessions.per.default.queue'
(4). Change in queue selection for config 'hive.llap.daemon.queue.name'.
If a change in value for 'num_llap_nodes' or 'hive.server2.tez.sessions.per.default.queue' is detected, that config
value is not calculated, but read and used in the calculation of dependent configs.
Note: All memory calculations are in MB, unless specified otherwise.
"""
Logger.info("DBG: Entered updateLlapConfigs");
# Determine if we entered here during cluster creation.
operation = getUserOperationContext(services, "operation")
is_cluster_create_opr = False
if operation == self.CLUSTER_CREATE_OPERATION:
is_cluster_create_opr = True
Logger.info("Is cluster create operation ? = {0}".format(is_cluster_create_opr))
putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, self.HIVE_INTERACTIVE_SITE)
putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
putHiveInteractiveEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-env")
putTezInteractiveSiteProperty = self.putProperty(configurations, "tez-interactive-site", services)
llap_daemon_selected_queue_name = None
selected_queue_is_ambari_managed_llap = None # Queue named 'llap' at root level is Ambari managed.
llap_selected_queue_am_percent = None
DEFAULT_EXECUTOR_TO_AM_RATIO = 20
MIN_EXECUTOR_TO_AM_RATIO = 10
MAX_CONCURRENT_QUERIES = 32
MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS = 4 # Concurrency for clusters with <10 executors
leafQueueNames = None
MB_TO_BYTES = 1048576
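# Illustrative use of the ratios above (assumed numbers): with ~60 executor threads available in the queue,
# the default target is about 60 / DEFAULT_EXECUTOR_TO_AM_RATIO = 3 concurrent Tez AMs, bounded above by
# 60 / MIN_EXECUTOR_TO_AM_RATIO = 6 and always capped at MAX_CONCURRENT_QUERIES (32).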
hsi_site = self.getServicesSiteProperties(services, self.HIVE_INTERACTIVE_SITE)
yarn_site = self.getServicesSiteProperties(services, "yarn-site")
min_memory_required = 0
# Update 'hive.llap.daemon.queue.name' prop combo entries
self.setLlapDaemonQueuePropAttributes(services, configurations)
if not services["changed-configurations"]:
read_llap_daemon_yarn_cont_mb = long(self.get_yarn_min_container_size(services, configurations))
putHiveInteractiveSiteProperty("hive.llap.daemon.yarn.container.mb", read_llap_daemon_yarn_cont_mb)
if hsi_site and "hive.llap.daemon.queue.name" in hsi_site:
llap_daemon_selected_queue_name = hsi_site["hive.llap.daemon.queue.name"]
# Update Visibility of 'num_llap_nodes' slider. Visible only if selected queue is Ambari created 'llap'.
capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
if capacity_scheduler_properties:
# Get all leaf queues.
leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
Logger.info("YARN leaf Queues = {0}".format(leafQueueNames))
if len(leafQueueNames) == 0:
Logger.error("Queue(s) couldn't be retrieved from capacity-scheduler.")
return
# Check if it's 1st invocation after enabling Hive Server Interactive (config: enable_hive_interactive).
changed_configs_has_enable_hive_int = self.isConfigPropertiesChanged(services, "hive-interactive-env", ['enable_hive_interactive'], False)
llap_named_queue_selected_in_curr_invocation = None
# Check if this is: 1. the 1st invocation from the UI ('enable_hive_interactive' in changed-configurations)
# OR 2. the 1st invocation from a Blueprint (services['changed-configurations'] should be empty in this case)
if (changed_configs_has_enable_hive_int or 0 == len(services['changed-configurations']))\
and services['configurations']['hive-interactive-env']['properties']['enable_hive_interactive']:
if len(leafQueueNames) == 1 or (len(leafQueueNames) == 2 and llap_queue_name in leafQueueNames):
llap_named_queue_selected_in_curr_invocation = True
putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', llap_queue_name)
putHiveInteractiveSiteProperty('hive.server2.tez.default.queues', llap_queue_name)
else:
first_leaf_queue = list(leafQueueNames)[0] # 1st invocation, pick the 1st leaf queue and set it as selected.
putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', first_leaf_queue)
putHiveInteractiveSiteProperty('hive.server2.tez.default.queues', first_leaf_queue)
llap_named_queue_selected_in_curr_invocation = False
Logger.info("DBG: llap_named_queue_selected_in_curr_invocation = {0}".format(llap_named_queue_selected_in_curr_invocation))
if (len(leafQueueNames) == 2 and (llap_daemon_selected_queue_name and llap_daemon_selected_queue_name == llap_queue_name) or
llap_named_queue_selected_in_curr_invocation) or \
(len(leafQueueNames) == 1 and llap_daemon_selected_queue_name == 'default' and llap_named_queue_selected_in_curr_invocation):
Logger.info("DBG: Setting 'num_llap_nodes' config's READ ONLY attribute as 'False'.")
putHiveInteractiveEnvPropertyAttribute("num_llap_nodes", "read_only", "false")
selected_queue_is_ambari_managed_llap = True
Logger.info("DBG: Selected YARN queue for LLAP is : '{0}'. Current YARN queues : {1}. Setting 'Number of LLAP nodes' "
"slider visibility to 'True'".format(llap_queue_name, list(leafQueueNames)))
else:
Logger.info("DBG: Setting 'num_llap_nodes' config's READ ONLY attribute as 'True'.")
putHiveInteractiveEnvPropertyAttribute("num_llap_nodes", "read_only", "true")
Logger.info("Selected YARN queue for LLAP is : '{0}'. Current YARN queues : {1}. Setting 'Number of LLAP nodes' "
"visibility to 'False'.".format(llap_daemon_selected_queue_name, list(leafQueueNames)))
selected_queue_is_ambari_managed_llap = False
if not llap_named_queue_selected_in_curr_invocation: # We would be creating the 'llap' queue later. Thus, cap-sched doesn't have
# state information pertaining to 'llap' queue.
# Check: State of the selected queue should not be STOPPED.
if llap_daemon_selected_queue_name:
llap_selected_queue_state = self.__getQueueStateFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name)
if llap_selected_queue_state is None or llap_selected_queue_state == "STOPPED":
Logger.error("Selected LLAP app queue '{0}' current state is : '{1}'. Setting LLAP configs to default "
"values.".format(llap_daemon_selected_queue_name, llap_selected_queue_state))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
else:
Logger.error("Retrieved LLAP app queue name is : '{0}'. Setting LLAP configs to default values."
.format(llap_daemon_selected_queue_name))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
else:
Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive."
" Not calculating LLAP configs.")
return
changed_configs_in_hive_int_env = None
llap_concurrency_in_changed_configs = None
llap_daemon_queue_in_changed_configs = None
# Calculations are triggered only if there is change in any one of the following props :
# 'num_llap_nodes', 'enable_hive_interactive', 'hive.server2.tez.sessions.per.default.queue'
# or 'hive.llap.daemon.queue.name' has change in value selection.
# OR
# services['changed-configurations'] is empty implying that this is the Blueprint call. (1st invocation)
if 'changed-configurations' in services.keys():
config_names_to_be_checked = set(['num_llap_nodes', 'enable_hive_interactive'])
changed_configs_in_hive_int_env = self.isConfigPropertiesChanged(services, "hive-interactive-env", config_names_to_be_checked, False)
# Determine if there is change detected in "hive-interactive-site's" configs based on which we calculate llap configs.
llap_concurrency_in_changed_configs = self.isConfigPropertiesChanged(services, self.HIVE_INTERACTIVE_SITE, ['hive.server2.tez.sessions.per.default.queue'], False)
llap_daemon_queue_in_changed_configs = self.isConfigPropertiesChanged(services, self.HIVE_INTERACTIVE_SITE, ['hive.llap.daemon.queue.name'], False)
if not changed_configs_in_hive_int_env and not llap_concurrency_in_changed_configs and \
not llap_daemon_queue_in_changed_configs and services["changed-configurations"]:
Logger.info("DBG: LLAP parameters not modified. Not adjusting LLAP configs.")
Logger.info("DBG: Current 'changed-configuration' received is : {0}".format(services["changed-configurations"]))
return
Logger.info("\nDBG: Performing LLAP config calculations ......")
node_manager_host_list = self.getHostsForComponent(services, "YARN", "NODEMANAGER")
node_manager_cnt = len(node_manager_host_list)
yarn_nm_mem_in_mb = self.get_yarn_nm_mem_in_mb(services, configurations)
total_cluster_capacity = node_manager_cnt * yarn_nm_mem_in_mb
Logger.info("DBG: Calculated total_cluster_capacity : {0}, using following : node_manager_cnt : {1}, "
"yarn_nm_mem_in_mb : {2}".format(total_cluster_capacity, node_manager_cnt, yarn_nm_mem_in_mb))
yarn_min_container_size = float(self.get_yarn_min_container_size(services, configurations))
tez_am_container_size = self.calculate_tez_am_container_size(services, long(total_cluster_capacity), is_cluster_create_opr,
changed_configs_has_enable_hive_int)
normalized_tez_am_container_size = self._normalizeUp(tez_am_container_size, yarn_min_container_size)
if yarn_site and "yarn.nodemanager.resource.cpu-vcores" in yarn_site:
cpu_per_nm_host = float(yarn_site["yarn.nodemanager.resource.cpu-vcores"])
else:
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
Logger.info("DBG Calculated normalized_tez_am_container_size : {0}, using following : tez_am_container_size : {1}, "
"total_cluster_capacity : {2}".format(normalized_tez_am_container_size, tez_am_container_size,
total_cluster_capacity))
# Calculate the available memory for LLAP app
yarn_nm_mem_in_mb_normalized = self._normalizeDown(yarn_nm_mem_in_mb, yarn_min_container_size)
mem_per_thread_for_llap = float(self.calculate_mem_per_thread_for_llap(services, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host,
is_cluster_create_opr, changed_configs_has_enable_hive_int))
Logger.info("DBG: Calculated mem_per_thread_for_llap : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, "
"cpu_per_nm_host : {2}".format(mem_per_thread_for_llap, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host))
if mem_per_thread_for_llap is None:
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
# Get calculated value for Slider AM container Size
slider_am_container_size = self._normalizeUp(self.calculate_slider_am_size(yarn_min_container_size),
yarn_min_container_size)
Logger.info("DBG: Calculated 'slider_am_container_size' : {0}, using following: yarn_min_container_size : "
"{1}".format(slider_am_container_size, yarn_min_container_size))
min_memory_required = normalized_tez_am_container_size + slider_am_container_size + self._normalizeUp(mem_per_thread_for_llap, yarn_min_container_size)
Logger.info("DBG: Calculated 'min_memory_required': {0} using following : slider_am_container_size: {1}, "
"normalized_tez_am_container_size : {2}, mem_per_thread_for_llap : {3}, yarn_min_container_size : "
"{4}".format(min_memory_required, slider_am_container_size, normalized_tez_am_container_size, mem_per_thread_for_llap, yarn_min_container_size))
min_nodes_required = int(math.ceil( min_memory_required / yarn_nm_mem_in_mb_normalized))
Logger.info("DBG: Calculated 'min_node_required': {0}, using following : min_memory_required : {1}, yarn_nm_mem_in_mb_normalized "
": {2}".format(min_nodes_required, min_memory_required, yarn_nm_mem_in_mb_normalized))
if min_nodes_required > node_manager_cnt:
Logger.warning("ERROR: Not enough memory/nodes to run LLAP");
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
mem_per_thread_for_llap = float(mem_per_thread_for_llap)
Logger.info("DBG: selected_queue_is_ambari_managed_llap = {0}".format(selected_queue_is_ambari_managed_llap))
if not selected_queue_is_ambari_managed_llap:
llap_daemon_selected_queue_cap = self.__getSelectedQueueTotalCap(capacity_scheduler_properties, llap_daemon_selected_queue_name, total_cluster_capacity)
if llap_daemon_selected_queue_cap <= 0:
Logger.warning("'{0}' queue capacity percentage retrieved = {1}. Expected > 0.".format(
llap_daemon_selected_queue_name, llap_daemon_selected_queue_cap))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
total_llap_mem_normalized = self._normalizeDown(llap_daemon_selected_queue_cap, yarn_min_container_size)
Logger.info("DBG: Calculated '{0}' queue available capacity : {1}, using following: llap_daemon_selected_queue_cap : {2}, "
"yarn_min_container_size : {3}".format(llap_daemon_selected_queue_name, total_llap_mem_normalized,
llap_daemon_selected_queue_cap, yarn_min_container_size))
# Rounding up numNodes so that we run more daemons and utilize more CPUs. The rest of the calculations will take care of cutting this down if required.
num_llap_nodes_requested = math.ceil(total_llap_mem_normalized / yarn_nm_mem_in_mb_normalized)
Logger.info("DBG: Calculated 'num_llap_nodes_requested' : {0}, using following: total_llap_mem_normalized : {1}, "
"yarn_nm_mem_in_mb_normalized : {2}".format(num_llap_nodes_requested, total_llap_mem_normalized, yarn_nm_mem_in_mb_normalized))
# Populate 'num_llap_nodes_requested' into config 'num_llap_nodes', a read-only config in the non-Ambari-managed queue case.
putHiveInteractiveEnvProperty('num_llap_nodes', num_llap_nodes_requested)
Logger.info("Setting config 'num_llap_nodes' as : {0}".format(num_llap_nodes_requested))
queue_am_fraction_perc = float(self.__getQueueAmFractionFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name))
hive_tez_am_cap_available = queue_am_fraction_perc * total_llap_mem_normalized
Logger.info("DBG: Calculated 'hive_tez_am_cap_available' : {0}, using following: queue_am_fraction_perc : {1}, "
"total_llap_mem_normalized : {2}".format(hive_tez_am_cap_available, queue_am_fraction_perc, total_llap_mem_normalized))
else: # Ambari managed 'llap' named queue at root level.
# Set 'num_llap_nodes_requested' on the 1st invocation (as it otherwise gets passed as 1); on later invocations, read it from config.
# Check if this is: 1. the 1st invocation from the UI ('enable_hive_interactive' in changed-configurations)
# OR 2. the 1st invocation from a Blueprint (services['changed-configurations'] should be empty in this case)
if (changed_configs_has_enable_hive_int or 0 == len(services['changed-configurations'])) \
and services['configurations']['hive-interactive-env']['properties']['enable_hive_interactive']:
num_llap_nodes_requested = min_nodes_required
else:
num_llap_nodes_requested = self.get_num_llap_nodes(services, configurations) #Input
total_llap_mem = num_llap_nodes_requested * yarn_nm_mem_in_mb_normalized
Logger.info("DBG: Calculated 'total_llap_mem' : {0}, using following: num_llap_nodes_requested : {1}, "
"yarn_nm_mem_in_mb_normalized : {2}".format(total_llap_mem, num_llap_nodes_requested, yarn_nm_mem_in_mb_normalized))
total_llap_mem_normalized = float(self._normalizeDown(total_llap_mem, yarn_min_container_size))
Logger.info("DBG: Calculated 'total_llap_mem_normalized' : {0}, using following: total_llap_mem : {1}, "
"yarn_min_container_size : {2}".format(total_llap_mem_normalized, total_llap_mem, yarn_min_container_size))
# What percent is 'total_llap_mem' of 'total_cluster_capacity' ?
llap_named_queue_cap_fraction = math.ceil(total_llap_mem_normalized / total_cluster_capacity * 100)
Logger.info("DBG: Calculated '{0}' queue capacity percent = {1}.".format(llap_queue_name, llap_named_queue_cap_fraction))
if llap_named_queue_cap_fraction > 100:
Logger.warning("Calculated '{0}' queue size = {1}. Cannot be > 100.".format(llap_queue_name, llap_named_queue_cap_fraction))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
# Adjust capacity scheduler for the 'llap' named queue.
self.checkAndManageLlapQueue(services, configurations, hosts, llap_queue_name, llap_named_queue_cap_fraction)
hive_tez_am_cap_available = total_llap_mem_normalized
Logger.info("DBG: hive_tez_am_cap_available : {0}".format(hive_tez_am_cap_available))
# Common calculations now, irrespective of the queue selected.
llap_mem_for_tezAm_and_daemons = total_llap_mem_normalized - slider_am_container_size
Logger.info("DBG: Calculated 'llap_mem_for_tezAm_and_daemons' : {0}, using following : total_llap_mem_normalized : {1}, "
"slider_am_container_size : {2}".format(llap_mem_for_tezAm_and_daemons, total_llap_mem_normalized, slider_am_container_size))
if llap_mem_for_tezAm_and_daemons < 2 * yarn_min_container_size:
Logger.warning("Not enough capacity available on the cluster to run LLAP")
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
# Calculate llap concurrency (i.e. Number of Tez AM's)
max_executors_per_node = self.get_max_executors_per_node(yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap)
# Read 'hive.server2.tez.sessions.per.default.queue' prop if it's in changed-configs, else calculate it.
if not llap_concurrency_in_changed_configs:
if max_executors_per_node <= 0:
Logger.warning("Calculated 'max_executors_per_node' = {0}. Expected value >= 1.".format(max_executors_per_node))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
Logger.info("DBG: Calculated 'max_executors_per_node' : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, "
"mem_per_thread_for_llap: {3}".format(max_executors_per_node, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap))
# Default 1 AM for every 20 executor threads.
# The second part of the min calculates based on mem required for DEFAULT_EXECUTOR_TO_AM_RATIO executors + 1 AM,
# making use of total memory. However, it's possible that total memory will not be used - and the numExecutors is
# instead limited by #CPUs. Use maxPerNode to factor this in.
llap_concurreny_limit = min(math.floor(max_executors_per_node * num_llap_nodes_requested / DEFAULT_EXECUTOR_TO_AM_RATIO), MAX_CONCURRENT_QUERIES)
Logger.info("DBG: Calculated 'llap_concurreny_limit' : {0}, using following : max_executors_per_node : {1}, num_llap_nodes_requested : {2}, DEFAULT_EXECUTOR_TO_AM_RATIO "
": {3}, MAX_CONCURRENT_QUERIES : {4}".format(llap_concurreny_limit, max_executors_per_node, num_llap_nodes_requested, DEFAULT_EXECUTOR_TO_AM_RATIO, MAX_CONCURRENT_QUERIES))
llap_concurrency = min(llap_concurreny_limit, math.floor(llap_mem_for_tezAm_and_daemons / (DEFAULT_EXECUTOR_TO_AM_RATIO * mem_per_thread_for_llap + normalized_tez_am_container_size)))
Logger.info("DBG: Calculated 'llap_concurrency' : {0}, using following : llap_concurreny_limit : {1}, llap_mem_for_tezAm_and_daemons : "
"{2}, DEFAULT_EXECUTOR_TO_AM_RATIO : {3}, mem_per_thread_for_llap : {4}, normalized_tez_am_container_size : "
"{5}".format(llap_concurrency, llap_concurreny_limit, llap_mem_for_tezAm_and_daemons, DEFAULT_EXECUTOR_TO_AM_RATIO,
mem_per_thread_for_llap, normalized_tez_am_container_size))
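# Illustrative numbers only: max_executors_per_node = 8 and num_llap_nodes_requested = 10 give
# llap_concurreny_limit = min(floor(80 / 20), 32) = 4; with llap_mem_for_tezAm_and_daemons = 195584,
# mem_per_thread_for_llap = 4096 and normalized_tez_am_container_size = 4096 the memory bound is
# floor(195584 / 86016) = 2, so llap_concurrency = 2.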
if llap_concurrency == 0:
llap_concurrency = 1
Logger.info("DBG: Readjusted 'llap_concurrency' to : 1. Earlier calculated value : 0")
if llap_concurrency * normalized_tez_am_container_size > hive_tez_am_cap_available:
llap_concurrency = long(math.floor(hive_tez_am_cap_available / normalized_tez_am_container_size))
Logger.info("DBG: Readjusted 'llap_concurrency' to : {0}, as llap_concurrency({1}) * normalized_tez_am_container_size({2}) > hive_tez_am_cap_available({3}))"
.format(llap_concurrency, llap_concurrency, normalized_tez_am_container_size, hive_tez_am_cap_available))
if llap_concurrency <= 0:
Logger.warning("DBG: Calculated 'LLAP Concurrent Queries' = {0}. Expected value >= 1.".format(llap_concurrency))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
Logger.info("DBG: Adjusted 'llap_concurrency' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: "
"{2}".format(llap_concurrency, hive_tez_am_cap_available, normalized_tez_am_container_size))
else:
# Read current value
if 'hive.server2.tez.sessions.per.default.queue' in hsi_site:
llap_concurrency = long(hsi_site['hive.server2.tez.sessions.per.default.queue'])
if llap_concurrency <= 0:
Logger.warning("'hive.server2.tez.sessions.per.default.queue' current value : {0}. Expected value : >= 1".format(llap_concurrency))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
Logger.info("DBG: Read 'llap_concurrency' : {0}".format(llap_concurrency ))
else:
llap_concurrency = 1
Logger.warning("Couldn't retrieve Hive Server interactive's 'hive.server2.tez.sessions.per.default.queue' config. Setting default value 1.")
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
# Calculate 'Max LLAP Concurrency', irrespective of whether 'llap_concurrency' was read or calculated.
max_llap_concurreny_limit = min(math.floor(max_executors_per_node * num_llap_nodes_requested / MIN_EXECUTOR_TO_AM_RATIO), MAX_CONCURRENT_QUERIES)
Logger.info("DBG: Calculated 'max_llap_concurreny_limit' : {0}, using following : max_executors_per_node : {1}, num_llap_nodes_requested "
": {2}, MIN_EXECUTOR_TO_AM_RATIO : {3}, MAX_CONCURRENT_QUERIES : {4}".format(max_llap_concurreny_limit, max_executors_per_node,
num_llap_nodes_requested, MIN_EXECUTOR_TO_AM_RATIO,
MAX_CONCURRENT_QUERIES))
max_llap_concurreny = long(min(max_llap_concurreny_limit, math.floor(llap_mem_for_tezAm_and_daemons / (MIN_EXECUTOR_TO_AM_RATIO *
mem_per_thread_for_llap + normalized_tez_am_container_size))))
Logger.info("DBG: Calculated 'max_llap_concurreny' : {0}, using following : max_llap_concurreny_limit : {1}, llap_mem_for_tezAm_and_daemons : "
"{2}, MIN_EXECUTOR_TO_AM_RATIO : {3}, mem_per_thread_for_llap : {4}, normalized_tez_am_container_size : "
"{5}".format(max_llap_concurreny, max_llap_concurreny_limit, llap_mem_for_tezAm_and_daemons, MIN_EXECUTOR_TO_AM_RATIO,
mem_per_thread_for_llap, normalized_tez_am_container_size))
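# Note: assuming MIN_EXECUTOR_TO_AM_RATIO < DEFAULT_EXECUTOR_TO_AM_RATIO (as the names suggest), this bound is looser
# than 'llap_concurrency' above; it is surfaced later as the "maximum" attribute of
# 'hive.server2.tez.sessions.per.default.queue' rather than as the recommended value.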
if int(max_llap_concurreny) < MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS:
Logger.info("DBG: Adjusting 'max_llap_concurreny' from {0} to {1}".format(max_llap_concurreny, MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS))
max_llap_concurreny = MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS
if (max_llap_concurreny * normalized_tez_am_container_size) > hive_tez_am_cap_available:
max_llap_concurreny = math.floor(hive_tez_am_cap_available / normalized_tez_am_container_size)
if max_llap_concurreny <= 0:
Logger.warning("Calculated 'Max. LLAP Concurrent Queries' = {0}. Expected value > 1".format(max_llap_concurreny))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
Logger.info("DBG: Adjusted 'max_llap_concurreny' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: "
"{2}".format(max_llap_concurreny, hive_tez_am_cap_available, normalized_tez_am_container_size))
# Calculate value for 'num_llap_nodes', an across cluster config.
tez_am_memory_required = llap_concurrency * normalized_tez_am_container_size
Logger.info("DBG: Calculated 'tez_am_memory_required' : {0}, using following : llap_concurrency : {1}, normalized_tez_am_container_size : "
"{2}".format(tez_am_memory_required, llap_concurrency, normalized_tez_am_container_size))
llap_mem_daemon_size = llap_mem_for_tezAm_and_daemons - tez_am_memory_required
if llap_mem_daemon_size < yarn_min_container_size:
Logger.warning("Calculated 'LLAP Daemon Size = {0}'. Expected >= 'YARN Minimum Container Size' ({1})'".format(
llap_mem_daemon_size, yarn_min_container_size))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
if llap_mem_daemon_size < mem_per_thread_for_llap or llap_mem_daemon_size < yarn_min_container_size:
Logger.warning("Not enough memory available for executors.")
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
Logger.info("DBG: Calculated 'llap_mem_daemon_size' : {0}, using following : llap_mem_for_tezAm_and_daemons : {1}, tez_am_memory_required : "
"{2}".format(llap_mem_daemon_size, llap_mem_for_tezAm_and_daemons, tez_am_memory_required))
llap_daemon_mem_per_node = self._normalizeDown(llap_mem_daemon_size / num_llap_nodes_requested, yarn_min_container_size)
# This value takes into account total cluster capacity, and may not have left enough capacity on each node to launch an AM.
Logger.info("DBG: Calculated 'llap_daemon_mem_per_node' : {0}, using following : llap_mem_daemon_size : {1}, num_llap_nodes_requested : {2}, "
"yarn_min_container_size: {3}".format(llap_daemon_mem_per_node, llap_mem_daemon_size, num_llap_nodes_requested, yarn_min_container_size))
if llap_daemon_mem_per_node == 0:
# Small cluster. No capacity left on a node after running AMs.
llap_daemon_mem_per_node = self._normalizeUp(mem_per_thread_for_llap, yarn_min_container_size)
num_llap_nodes = math.floor(llap_mem_daemon_size / llap_daemon_mem_per_node)
Logger.info("DBG: 'llap_daemon_mem_per_node' : 0, adjusted 'llap_daemon_mem_per_node' : {0}, 'num_llap_nodes' : {1}, using following: llap_mem_daemon_size : {2}, "
"mem_per_thread_for_llap : {3}".format(llap_daemon_mem_per_node, num_llap_nodes, llap_mem_daemon_size, mem_per_thread_for_llap))
elif llap_daemon_mem_per_node < mem_per_thread_for_llap:
# Previously computed value of memory per thread may be too high. Cut the number of nodes. (Alternately reduce memory per node)
llap_daemon_mem_per_node = mem_per_thread_for_llap
num_llap_nodes = math.floor(llap_mem_daemon_size / mem_per_thread_for_llap)
Logger.info("DBG: 'llap_daemon_mem_per_node'({0}) < mem_per_thread_for_llap({1}), adjusted 'llap_daemon_mem_per_node' "
": {2}".format(llap_daemon_mem_per_node, mem_per_thread_for_llap, llap_daemon_mem_per_node))
else:
# All good. We have a proper value for memoryPerNode.
num_llap_nodes = num_llap_nodes_requested
Logger.info("DBG: num_llap_nodes : {0}".format(num_llap_nodes))
# Make sure we have enough memory on each node to run AMs.
# If nodes vs nodes_requested is different - AM memory is already factored in.
# If llap_node_count < total_cluster_nodes - assuming AMs can run on a different node.
# Else factor in min_concurrency_per_node * tez_am_size, and slider_am_size
# Also needs to factor in whether num_llap_nodes = cluster_node_count
min_mem_reserved_per_node = 0
if num_llap_nodes == num_llap_nodes_requested and num_llap_nodes == node_manager_cnt:
min_mem_reserved_per_node = max(normalized_tez_am_container_size, slider_am_container_size)
tez_AMs_per_node = llap_concurrency / num_llap_nodes
tez_AMs_per_node_low = int(math.floor(tez_AMs_per_node))
tez_AMs_per_node_high = int(math.ceil(tez_AMs_per_node))
min_mem_reserved_per_node = int(max(tez_AMs_per_node_high * normalized_tez_am_container_size, tez_AMs_per_node_low * normalized_tez_am_container_size + slider_am_container_size))
Logger.info("DBG: Determined 'AM reservation per node': {0}, using following : concurrency: {1}, num_llap_nodes: {2}, AMsPerNode: {3}"
.format(min_mem_reserved_per_node, llap_concurrency, num_llap_nodes, tez_AMs_per_node))
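# Illustrative (hypothetical values, assuming true division): llap_concurrency = 5 across num_llap_nodes = 2 gives
# 2.5 AMs per node, so the reservation is max(3 * normalized_tez_am_container_size,
# 2 * normalized_tez_am_container_size + slider_am_container_size), i.e. whichever single-node packing of Tez AMs
# (plus possibly the Slider AM) needs more memory.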
max_single_node_mem_available_for_daemon = self._normalizeDown(yarn_nm_mem_in_mb_normalized - min_mem_reserved_per_node, yarn_min_container_size)
if max_single_node_mem_available_for_daemon <=0 or max_single_node_mem_available_for_daemon < mem_per_thread_for_llap:
Logger.warning("Not enough capacity available per node for daemons after factoring in AM memory requirements. NM Mem: {0}, "
"minAMMemPerNode: {1}, available: {2}".format(yarn_nm_mem_in_mb_normalized, min_mem_reserved_per_node, max_single_node_mem_available_for_daemon))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
llap_daemon_mem_per_node = min(max_single_node_mem_available_for_daemon, llap_daemon_mem_per_node)
Logger.info("DBG: Determined final memPerDaemon: {0}, using following: concurrency: {1}, numNMNodes: {2}, numLlapNodes: {3} "
.format(llap_daemon_mem_per_node, llap_concurrency, node_manager_cnt, num_llap_nodes))
num_executors_per_node_max = self.get_max_executors_per_node(yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap)
if num_executors_per_node_max < 1:
Logger.warning("Calculated 'Max. Executors per Node' = {0}. Expected values >= 1.".format(num_executors_per_node_max))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
Logger.info("DBG: Calculated 'num_executors_per_node_max' : {0}, using following : yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, "
"mem_per_thread_for_llap: {3}".format(num_executors_per_node_max, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap))
# NumExecutorsPerNode is not necessarily max - since some capacity would have been reserved for AMs, if this value were based on mem.
num_executors_per_node = min(math.floor(llap_daemon_mem_per_node / mem_per_thread_for_llap), num_executors_per_node_max)
if num_executors_per_node <= 0:
Logger.warning("Calculated 'Number of Executors Per Node' = {0}. Expected value >= 1".format(num_executors_per_node))
self.recommendDefaultLlapConfiguration(configurations, services, hosts)
return
Logger.info("DBG: Calculated 'num_executors_per_node' : {0}, using following : llap_daemon_mem_per_node : {1}, num_executors_per_node_max : {2}, "
"mem_per_thread_for_llap: {3}".format(num_executors_per_node, llap_daemon_mem_per_node, num_executors_per_node_max, mem_per_thread_for_llap))
# Now figure out how much of the memory will be used by the executors, and how much will be used by the cache.
total_mem_for_executors_per_node = num_executors_per_node * mem_per_thread_for_llap
cache_mem_per_node = llap_daemon_mem_per_node - total_mem_for_executors_per_node
Logger.info("DBG: Calculated 'Cache per node' : {0}, using following : llap_daemon_mem_per_node : {1}, total_mem_for_executors_per_node : {2}"
.format(cache_mem_per_node, llap_daemon_mem_per_node, total_mem_for_executors_per_node))
tez_runtime_io_sort_mb = (long((0.8 * mem_per_thread_for_llap) / 3))
tez_runtime_unordered_output_buffer_size = long(0.8 * 0.075 * mem_per_thread_for_llap)
# 'hive_auto_convert_join_noconditionaltask_size' value is in bytes. Thus, multiplying it by 1048576.
hive_auto_convert_join_noconditionaltask_size = (long((0.8 * mem_per_thread_for_llap) / 3)) * MB_TO_BYTES
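# Illustrative arithmetic (hypothetical mem_per_thread_for_llap = 4096 MB): tez.runtime.io.sort.mb = long(0.8 * 4096 / 3) = 1092,
# unordered output buffer = long(0.8 * 0.075 * 4096) = 245 MB, and noconditionaltask size = 1092 * MB_TO_BYTES bytes.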
# Calculate value for prop 'llap_heap_size'
llap_xmx = max(total_mem_for_executors_per_node * 0.8, total_mem_for_executors_per_node - self.get_llap_headroom_space(services, configurations))
Logger.info("DBG: Calculated llap_app_heap_size : {0}, using following : total_mem_for_executors : {1}".format(llap_xmx, total_mem_for_executors_per_node))
# Calculate 'hive_heapsize' for Hive2/HiveServer2 (HSI)
hive_server_interactive_heapsize = None
hive_server_interactive_hosts = self.getHostsWithComponent("HIVE", "HIVE_SERVER_INTERACTIVE", services, hosts)
if hive_server_interactive_hosts is None:
# If it's None, read the base service YARN's NODEMANAGER node memory, as all hosts are considered homogeneous.
hive_server_interactive_hosts = self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts)
if hive_server_interactive_hosts is not None and len(hive_server_interactive_hosts) > 0:
host_mem = long(hive_server_interactive_hosts[0]["Hosts"]["total_mem"])
hive_server_interactive_heapsize = min(max(2048.0, 400.0*llap_concurrency), 3.0/8 * host_mem)
Logger.info("DBG: Calculated 'hive_server_interactive_heapsize' : {0}, using following : llap_concurrency : {1}, host_mem : "
"{2}".format(hive_server_interactive_heapsize, llap_concurrency, host_mem))
# Done with calculations, updating calculated configs.
Logger.info("DBG: Applying the calculated values....")
if is_cluster_create_opr or changed_configs_has_enable_hive_int:
normalized_tez_am_container_size = long(normalized_tez_am_container_size)
putTezInteractiveSiteProperty('tez.am.resource.memory.mb', normalized_tez_am_container_size)
Logger.info("DBG: Setting 'tez.am.resource.memory.mb' config value as : {0}".format(normalized_tez_am_container_size))
if not llap_concurrency_in_changed_configs:
min_llap_concurrency = 1
putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', llap_concurrency)
putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum",
min_llap_concurrency)
putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", max_llap_concurreny)
num_llap_nodes = long(num_llap_nodes)
putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "minimum", min_nodes_required)
putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "maximum", node_manager_cnt)
#TODO A single value is not being set for numNodes in case of a custom queue. Also the attribute is set to non-visible, so the UI likely ends up using an old cached value
if (num_llap_nodes != num_llap_nodes_requested):
Logger.info("DBG: User requested num_llap_nodes : {0}, but used/adjusted value for calculations is : {1}".format(num_llap_nodes_requested, num_llap_nodes))
else:
Logger.info("DBG: Used num_llap_nodes for calculations : {0}".format(num_llap_nodes_requested))
# Safeguard for not adding "num_llap_nodes_for_llap_daemons" if it doesn't exist in hive-interactive-env.
# This can happen if we upgrade from Ambari 2.4 (with HDP 2.5) to Ambari 2.5, as this config is from 2.6 stack onwards only.
if "hive-interactive-env" in services["configurations"] and \
"num_llap_nodes_for_llap_daemons" in services["configurations"]["hive-interactive-env"]["properties"]:
putHiveInteractiveEnvProperty('num_llap_nodes_for_llap_daemons', num_llap_nodes)
Logger.info("DBG: Setting config 'num_llap_nodes_for_llap_daemons' as : {0}".format(num_llap_nodes))
llap_container_size = long(llap_daemon_mem_per_node)
putHiveInteractiveSiteProperty('hive.llap.daemon.yarn.container.mb', llap_container_size)
# Set 'hive.tez.container.size' only if it is read as "SET_ON_FIRST_INVOCATION", implying initialization.
# Else, we don't override (1) the previously calculated value or (2) a user-provided value.
if is_cluster_create_opr or changed_configs_has_enable_hive_int:
mem_per_thread_for_llap = long(mem_per_thread_for_llap)
putHiveInteractiveSiteProperty('hive.tez.container.size', mem_per_thread_for_llap)
Logger.info("DBG: Setting 'hive.tez.container.size' config value as : {0}".format(mem_per_thread_for_llap))
putTezInteractiveSiteProperty('tez.runtime.io.sort.mb', tez_runtime_io_sort_mb)
if "tez-site" in services["configurations"] and "tez.runtime.sorter.class" in services["configurations"]["tez-site"]["properties"]:
if services["configurations"]["tez-site"]["properties"]["tez.runtime.sorter.class"] == "LEGACY":
putTezInteractiveSiteProperty("tez.runtime.io.sort.mb", "maximum", 1800)
putTezInteractiveSiteProperty('tez.runtime.unordered.output.buffer.size-mb', tez_runtime_unordered_output_buffer_size)
putHiveInteractiveSiteProperty('hive.auto.convert.join.noconditionaltask.size', hive_auto_convert_join_noconditionaltask_size)
num_executors_per_node = long(num_executors_per_node)
Logger.info("DBG: Putting num_executors_per_node as {0}".format(num_executors_per_node))
putHiveInteractiveSiteProperty('hive.llap.daemon.num.executors', num_executors_per_node)
putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "minimum", 1)
putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "maximum", long(num_executors_per_node_max))
# 'hive.llap.io.threadpool.size' config value is to be set same as value calculated for
# 'hive.llap.daemon.num.executors' at all times.
cache_mem_per_node = long(cache_mem_per_node)
putHiveInteractiveSiteProperty('hive.llap.io.threadpool.size', num_executors_per_node)
putHiveInteractiveSiteProperty('hive.llap.io.memory.size', cache_mem_per_node)
if hive_server_interactive_heapsize is not None:
putHiveInteractiveEnvProperty("hive_heapsize", int(hive_server_interactive_heapsize))
llap_io_enabled = 'true' if long(cache_mem_per_node) >= 64 else 'false'
putHiveInteractiveSiteProperty('hive.llap.io.enabled', llap_io_enabled)
putHiveInteractiveEnvProperty('llap_heap_size', long(llap_xmx))
putHiveInteractiveEnvProperty('slider_am_container_mb', long(slider_am_container_size))
Logger.info("DBG: Done putting all configs")
#TODO: What is this doing? What error will be displayed on the UI if something like this is hit?
def recommendDefaultLlapConfiguration(self, configurations, services, hosts):
Logger.info("DBG: Something likely went wrong. recommendDefaultLlapConfiguration")
putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, self.HIVE_INTERACTIVE_SITE)
putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
putHiveInteractiveEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-env")
yarn_min_container_size = long(self.get_yarn_min_container_size(services, configurations))
slider_am_container_size = long(self.calculate_slider_am_size(yarn_min_container_size))
node_manager_host_list = self.getHostsForComponent(services, "YARN", "NODEMANAGER")
node_manager_cnt = len(node_manager_host_list)
putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', 1)
putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum", 1)
putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", 1)
putHiveInteractiveEnvProperty('num_llap_nodes', 0)
# Safeguard for not adding "num_llap_nodes_for_llap_daemons" if it doesn't exist in hive-interactive-env.
# This can happen if we upgrade from Ambari 2.4 (with HDP 2.5) to Ambari 2.5, as this config is from 2.6 stack onwards only.
if "hive-interactive-env" in services["configurations"] and \
"num_llap_nodes_for_llap_daemons" in services["configurations"]["hive-interactive-env"]["properties"]:
putHiveInteractiveEnvProperty('num_llap_nodes_for_llap_daemons', 0)
putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "minimum", 1)
putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "maximum", node_manager_cnt)
#putHiveInteractiveSiteProperty('hive.llap.daemon.yarn.container.mb', yarn_min_container_size)
putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.yarn.container.mb', "minimum", yarn_min_container_size)
#putHiveInteractiveSiteProperty('hive.llap.daemon.num.executors', 0)
putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "minimum", 1)
#putHiveInteractiveSiteProperty('hive.llap.io.threadpool.size', 0)
#putHiveInteractiveSiteProperty('hive.llap.io.memory.size', 0)
#putHiveInteractiveEnvProperty('llap_heap_size', 0)
putHiveInteractiveEnvProperty('slider_am_container_mb', slider_am_container_size)
def isConfigPropertiesChanged(self, services, config_type, config_names, all_exists=True):
"""
Checks whether the passed-in configuration properties of the given config type have been changed.
Reads from services["changed-configurations"].
:argument services: Configuration information for the cluster
:argument config_type: Type of the configuration
:argument config_names: Set of configuration properties to be checked if they are changed.
:argument all_exists: If True: returns True only if all the properties mentioned in 'config_names' are found
                      in services["changed-configurations"]; otherwise returns False.
                      If False: returns True if any of the properties mentioned in 'config_names' is found in
                      services["changed-configurations"]; otherwise returns False.
:type services: dict
:type config_type: str
:type config_names: list|set
:type all_exists: bool
"""
changedConfigs = services["changed-configurations"]
changed_config_names_set = set([changedConfig['name'] for changedConfig in changedConfigs if changedConfig['type'] == config_type])
config_names_set = set(config_names)
configs_intersection = changed_config_names_set & config_names_set
if all_exists and configs_intersection == config_names_set:
return True
elif not all_exists and len(configs_intersection) > 0:
return True
return False
def get_num_llap_nodes(self, services, configurations):
"""
Returns current value of number of LLAP nodes in cluster (num_llap_nodes)
:type services: dict
:type configurations: dict
:rtype float
"""
hsi_env = self.getServicesSiteProperties(services, "hive-interactive-env")
hsi_env_properties = self.getSiteProperties(configurations, "hive-interactive-env")
num_llap_nodes = 0
# Check if 'num_llap_nodes' is modified in the current Stack Advisor invocation.
if hsi_env_properties and 'num_llap_nodes' in hsi_env_properties:
num_llap_nodes = hsi_env_properties['num_llap_nodes']
elif hsi_env and 'num_llap_nodes' in hsi_env:
num_llap_nodes = hsi_env['num_llap_nodes']
else:
Logger.error("Couldn't retrieve Hive Server 'num_llap_nodes' config. Setting value to {0}".format(num_llap_nodes))
return float(num_llap_nodes)
def get_max_executors_per_node(self, nm_mem_per_node_normalized, nm_cpus_per_node, mem_per_thread):
# TODO: This potentially takes up the entire node leaving no space for AMs.
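# Illustrative (hypothetical values): nm_mem_per_node_normalized = 12288 MB, mem_per_thread = 4096 MB and
# nm_cpus_per_node = 16 gives min(floor(12288 / 4096), 16) = 3 executors per node at most.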
return min(math.floor(nm_mem_per_node_normalized / mem_per_thread), nm_cpus_per_node)
def calculate_mem_per_thread_for_llap(self, services, nm_mem_per_node_normalized, cpu_per_nm_host, is_cluster_create_opr=False,
enable_hive_interactive_1st_invocation=False):
"""
Calculates 'mem_per_thread_for_llap' on first-time initialization; otherwise returns the value read for 'hive.tez.container.size'.
"""
hive_tez_container_size = self.get_hive_tez_container_size(services)
if is_cluster_create_opr or enable_hive_interactive_1st_invocation:
if nm_mem_per_node_normalized <= 1024:
calculated_hive_tez_container_size = min(512, nm_mem_per_node_normalized)
elif nm_mem_per_node_normalized <= 4096:
calculated_hive_tez_container_size = 1024
elif nm_mem_per_node_normalized <= 10240:
calculated_hive_tez_container_size = 2048
elif nm_mem_per_node_normalized <= 24576:
calculated_hive_tez_container_size = 3072
else:
calculated_hive_tez_container_size = 4096
Logger.info("DBG: Calculated and returning 'hive_tez_container_size' : {0}".format(calculated_hive_tez_container_size))
return calculated_hive_tez_container_size
else:
Logger.info("DBG: Returning 'hive_tez_container_size' : {0}".format(hive_tez_container_size))
return hive_tez_container_size
def get_hive_tez_container_size(self, services):
"""
Gets HIVE Tez container size (hive.tez.container.size).
"""
hive_container_size = None
hsi_site = self.getServicesSiteProperties(services, self.HIVE_INTERACTIVE_SITE)
if hsi_site and 'hive.tez.container.size' in hsi_site:
hive_container_size = hsi_site['hive.tez.container.size']
if not hive_container_size:
# This can happen (1) if the config is missing in hive-interactive-site, or (2) in an
# upgrade scenario from Ambari 2.4 to Ambari 2.5 with HDP 2.5 installed. Read it
# from hive-site.
#
# If Ambari 2.5 (after upgrade from 2.4) is managing HDP 2.6 here, this config would have
# already been added to hive-interactive-site as part of the HDP upgrade from 2.5 to 2.6,
# and we won't end up in this block to look it up in hive-site.
hive_site = self.getServicesSiteProperties(services, "hive-site")
if hive_site and 'hive.tez.container.size' in hive_site:
hive_container_size = hive_site['hive.tez.container.size']
return hive_container_size
def get_llap_headroom_space(self, services, configurations):
"""
Gets HIVE Server Interactive's 'llap_headroom_space' config. (Default value set to 6144 MB).
"""
llap_headroom_space = None
# Check if 'llap_headroom_space' is modified in current SA invocation.
if 'hive-interactive-env' in configurations and 'llap_headroom_space' in configurations['hive-interactive-env']['properties']:
llap_headroom_space = float(configurations['hive-interactive-env']['properties']['llap_headroom_space'])
Logger.info("'llap_headroom_space' read from configurations as : {0}".format(llap_headroom_space))
if llap_headroom_space is None:
# Check if 'llap_headroom_space' is input in services array.
if 'llap_headroom_space' in services['configurations']['hive-interactive-env']['properties']:
llap_headroom_space = float(services['configurations']['hive-interactive-env']['properties']['llap_headroom_space'])
Logger.info("'llap_headroom_space' read from services as : {0}".format(llap_headroom_space))
if not llap_headroom_space or llap_headroom_space < 1:
llap_headroom_space = 6144 # 6GB
Logger.info("Couldn't read 'llap_headroom_space' from services or configurations. Returing default value : 6144 bytes")
return llap_headroom_space
#TODO Convert this to a helper. It can apply to any property. Check config, or check if in the list of changed configurations and read the latest value
def get_yarn_min_container_size(self, services, configurations):
"""
Gets YARN's minimum container size (yarn.scheduler.minimum-allocation-mb).
Reads from:
- configurations (if changed as part of current Stack Advisor invocation (output)), and services["changed-configurations"]
is empty, else
- services['configurations'] (input).
services["changed-configurations"] would be empty if Stack Advisor call is made from Blueprints (1st invocation). Subsequent
Stack Advisor calls will have it non-empty. We do this because in subsequent invocations, even if Stack Advisor calculates this
value (into 'configurations'), it is ultimately not recommended, so the 'input' value survives.
:type services dict
:type configurations dict
:rtype str
"""
yarn_min_container_size = None
yarn_min_allocation_property = "yarn.scheduler.minimum-allocation-mb"
yarn_site = self.getSiteProperties(configurations, "yarn-site")
yarn_site_properties = self.getServicesSiteProperties(services, "yarn-site")
# Check if services["changed-configurations"] is empty and 'yarn.scheduler.minimum-allocation-mb' is modified in current ST invocation.
if not services["changed-configurations"] and yarn_site and yarn_min_allocation_property in yarn_site:
yarn_min_container_size = yarn_site[yarn_min_allocation_property]
Logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from output as : {0}".format(yarn_min_container_size))
# Check if 'yarn.scheduler.minimum-allocation-mb' is input in services array.
elif yarn_site_properties and yarn_min_allocation_property in yarn_site_properties:
yarn_min_container_size = yarn_site_properties[yarn_min_allocation_property]
Logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_min_container_size))
if not yarn_min_container_size:
Logger.error("{0} was not found in the configuration".format(yarn_min_allocation_property))
return yarn_min_container_size
def calculate_slider_am_size(self, yarn_min_container_size):
"""
Calculates the Slider App Master size based on YARN's Minimum Container Size.
:type yarn_min_container_size int
"""
if yarn_min_container_size >= 1024:
return 1024
else:
return 512
def calculate_tez_am_container_size(self, services, total_cluster_capacity, is_cluster_create_opr=False, enable_hive_interactive_1st_invocation=False):
"""
Calculates Tez App Master container size (tez.am.resource.memory.mb) for tez_hive2/tez-site on initialization if values read is 0.
Else returns the read value.
"""
tez_am_resource_memory_mb = self.get_tez_am_resource_memory_mb(services)
calculated_tez_am_resource_memory_mb = None
if is_cluster_create_opr or enable_hive_interactive_1st_invocation:
if total_cluster_capacity <= 4096:
calculated_tez_am_resource_memory_mb = 512
elif total_cluster_capacity > 4096 and total_cluster_capacity <= 98304:
calculated_tez_am_resource_memory_mb = 1024
elif total_cluster_capacity > 98304:
calculated_tez_am_resource_memory_mb = 4096
Logger.info("DBG: Calculated and returning 'tez_am_resource_memory_mb' as : {0}".format(calculated_tez_am_resource_memory_mb))
return float(calculated_tez_am_resource_memory_mb)
else:
Logger.info("DBG: Returning 'tez_am_resource_memory_mb' as : {0}".format(tez_am_resource_memory_mb))
return float(tez_am_resource_memory_mb)
def get_tez_am_resource_memory_mb(self, services):
"""
Gets Tez's AM resource memory (tez.am.resource.memory.mb) from services.
"""
tez_am_resource_memory_mb = None
if 'tez.am.resource.memory.mb' in services['configurations']['tez-interactive-site']['properties']:
tez_am_resource_memory_mb = services['configurations']['tez-interactive-site']['properties']['tez.am.resource.memory.mb']
return tez_am_resource_memory_mb
def min_queue_perc_reqd_for_llap_and_hive_app(self, services, hosts, configurations):
"""
Calculate minimum queue capacity required in order to get LLAP and HIVE2 app into running state.
"""
# Get queue size if sized at 20%
node_manager_hosts = self.getHostsForComponent(services, "YARN", "NODEMANAGER")
yarn_rm_mem_in_mb = self.get_yarn_nm_mem_in_mb(services, configurations)
total_cluster_cap = len(node_manager_hosts) * yarn_rm_mem_in_mb
total_queue_size_at_20_perc = 20.0 / 100 * total_cluster_cap
# Calculate based on minimum size required by containers.
yarn_min_container_size = long(self.get_yarn_min_container_size(services, configurations))
slider_am_size = self.calculate_slider_am_size(float(yarn_min_container_size))
hive_tez_container_size = long(self.get_hive_tez_container_size(services))
tez_am_container_size = self.calculate_tez_am_container_size(services, long(total_cluster_cap))
normalized_val = self._normalizeUp(slider_am_size, yarn_min_container_size) \
+ self._normalizeUp(hive_tez_container_size, yarn_min_container_size) \
+ self._normalizeUp(tez_am_container_size, yarn_min_container_size)
min_required = max(total_queue_size_at_20_perc, normalized_val)
min_required_perc = min_required * 100 / total_cluster_cap
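# Illustrative (hypothetical values): 3 NodeManagers of 8192 MB each give total_cluster_cap = 24576 MB (20% slice = 4915.2 MB);
# if the normalized Slider AM + Hive Tez container + Tez AM sizes add up to 6144 MB, the result is ceil(6144 * 100 / 24576) = 25(%).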
return int(math.ceil(min_required_perc))
def _normalizeDown(self, val1, val2):
"""
Normalizes 'val1' down to the nearest multiple of 'val2'.
"""
tmp = math.floor(val1 / val2)
if tmp < 1.00:
return 0
return tmp * val2
def _normalizeUp(self, val1, val2):
"""
Normalizes 'val1' up to the nearest multiple of 'val2'.
"""
tmp = math.ceil(val1 / val2)
return tmp * val2
def checkAndManageLlapQueue(self, services, configurations, hosts, llap_queue_name, llap_queue_cap_perc):
"""
Checks and (1). Creates 'llap' queue if only the 'default' queue exists at leaf level and is consuming 100% capacity OR
(2). Updates 'llap' queue capacity and state, if current selected queue is 'llap', and only 2 queues exist
at root level : 'default' and 'llap'.
"""
Logger.info("Determining creation/adjustment of 'capacity-scheduler' for 'llap' queue.")
putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
putHiveInteractiveEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-env")
putCapSchedProperty = self.putProperty(configurations, "capacity-scheduler", services)
leafQueueNames = None
hsi_site = self.getServicesSiteProperties(services, self.HIVE_INTERACTIVE_SITE)
capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
if capacity_scheduler_properties:
leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
cap_sched_config_keys = capacity_scheduler_properties.keys()
yarn_default_queue_capacity = -1
if 'yarn.scheduler.capacity.root.default.capacity' in cap_sched_config_keys:
yarn_default_queue_capacity = float(capacity_scheduler_properties.get('yarn.scheduler.capacity.root.default.capacity'))
# Get 'llap' queue state
currLlapQueueState = ''
if 'yarn.scheduler.capacity.root.'+llap_queue_name+'.state' in cap_sched_config_keys:
currLlapQueueState = capacity_scheduler_properties.get('yarn.scheduler.capacity.root.'+llap_queue_name+'.state')
# Get 'llap' queue capacity
currLlapQueueCap = -1
if 'yarn.scheduler.capacity.root.'+llap_queue_name+'.capacity' in cap_sched_config_keys:
currLlapQueueCap = int(float(capacity_scheduler_properties.get('yarn.scheduler.capacity.root.'+llap_queue_name+'.capacity')))
updated_cap_sched_configs_str = ''
enabled_hive_int_in_changed_configs = self.isConfigPropertiesChanged(services, "hive-interactive-env", ['enable_hive_interactive'], False)
"""
We create OR "modify 'llap' queue 'state and/or capacity' " based on below conditions:
- if only 1 queue exists at root level and is 'default' queue and has 100% cap -> Create 'llap' queue, OR
- if 2 queues exists at root level ('llap' and 'default') :
- Queue selected is 'llap' and state is STOPPED -> Modify 'llap' queue state to RUNNING, adjust capacity, OR
- Queue selected is 'llap', state is RUNNING and 'llap_queue_capacity' prop != 'llap' queue current running capacity ->
Modify 'llap' queue capacity to 'llap_queue_capacity'
"""
if 'default' in leafQueueNames and \
((len(leafQueueNames) == 1 and int(yarn_default_queue_capacity) == 100) or \
((len(leafQueueNames) == 2 and llap_queue_name in leafQueueNames) and \
((currLlapQueueState == 'STOPPED' and enabled_hive_int_in_changed_configs) or (currLlapQueueState == 'RUNNING' and currLlapQueueCap != llap_queue_cap_perc)))):
adjusted_default_queue_cap = str(100 - llap_queue_cap_perc)
hive_user = '*' # Open to all
if 'hive_user' in services['configurations']['hive-env']['properties']:
hive_user = services['configurations']['hive-env']['properties']['hive_user']
llap_queue_cap_perc = str(llap_queue_cap_perc)
# If capacity-scheduler configs are received as one concatenated string, we deposit the changed configs back as
# one concatenated string.
updated_cap_sched_configs_as_dict = False
if not received_as_key_value_pair:
for prop, val in capacity_scheduler_properties.items():
if llap_queue_name not in prop:
if prop == 'yarn.scheduler.capacity.root.queues':
updated_cap_sched_configs_str = updated_cap_sched_configs_str \
+ prop + "=default,llap\n"
elif prop == 'yarn.scheduler.capacity.root.default.capacity':
updated_cap_sched_configs_str = updated_cap_sched_configs_str \
+ prop + "=" + adjusted_default_queue_cap + "\n"
elif prop == 'yarn.scheduler.capacity.root.default.maximum-capacity':
updated_cap_sched_configs_str = updated_cap_sched_configs_str \
+ prop + "=" + adjusted_default_queue_cap + "\n"
elif prop == 'yarn.scheduler.capacity.root.ordering-policy':
# Don't put this in again. We're re-writing the llap section.
pass
elif prop.startswith('yarn.') and '.llap.' not in prop:
updated_cap_sched_configs_str = updated_cap_sched_configs_str + prop + "=" + val + "\n"
# Now, append the 'llap' queue related properties
updated_cap_sched_configs_str += """yarn.scheduler.capacity.root.ordering-policy=priority-utilization
yarn.scheduler.capacity.root.{0}.user-limit-factor=1
yarn.scheduler.capacity.root.{0}.state=RUNNING
yarn.scheduler.capacity.root.{0}.ordering-policy=fifo
yarn.scheduler.capacity.root.{0}.priority=10
yarn.scheduler.capacity.root.{0}.minimum-user-limit-percent=100
yarn.scheduler.capacity.root.{0}.maximum-capacity={1}
yarn.scheduler.capacity.root.{0}.capacity={1}
yarn.scheduler.capacity.root.{0}.acl_submit_applications={2}
yarn.scheduler.capacity.root.{0}.acl_administer_queue={2}
yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_queue_name, llap_queue_cap_perc, hive_user)
putCapSchedProperty("capacity-scheduler", updated_cap_sched_configs_str)
Logger.info("Updated 'capacity-scheduler' configs as one concatenated string.")
else:
# If capacity-scheduler configs are received as a dictionary (generally 1st time), we deposit the changed
# values back as dictionary itself.
# Update existing configs in 'capacity-scheduler'.
for prop, val in capacity_scheduler_properties.items():
if llap_queue_name not in prop:
if prop == 'yarn.scheduler.capacity.root.queues':
putCapSchedProperty(prop, 'default,llap')
elif prop == 'yarn.scheduler.capacity.root.default.capacity':
putCapSchedProperty(prop, adjusted_default_queue_cap)
elif prop == 'yarn.scheduler.capacity.root.default.maximum-capacity':
putCapSchedProperty(prop, adjusted_default_queue_cap)
elif prop == 'yarn.scheduler.capacity.root.ordering-policy':
# Don't put this in again. We're re-writing the llap section.
pass
elif prop.startswith('yarn.') and '.llap.' not in prop:
putCapSchedProperty(prop, val)
# Add new 'llap' queue related configs.
putCapSchedProperty("yarn.scheduler.capacity.root.ordering-policy", "priority-utilization")
putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".user-limit-factor", "1")
putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".state", "RUNNING")
putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".ordering-policy", "fifo")
putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".priority", "10")
putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".minimum-user-limit-percent", "100")
putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".maximum-capacity", llap_queue_cap_perc)
putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".capacity", llap_queue_cap_perc)
putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".acl_submit_applications", hive_user)
putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".acl_administer_queue", hive_user)
putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".maximum-am-resource-percent", "1")
Logger.info("Updated 'capacity-scheduler' configs as a dictionary.")
updated_cap_sched_configs_as_dict = True
if updated_cap_sched_configs_str or updated_cap_sched_configs_as_dict:
if len(leafQueueNames) == 1: # 'llap' queue didn't exist before
Logger.info("Created YARN Queue : '{0}' with capacity : {1}%. Adjusted 'default' queue capacity to : {2}%" \
.format(llap_queue_name, llap_queue_cap_perc, adjusted_default_queue_cap))
else: # Queue existed, only adjustments done.
Logger.info("Adjusted YARN Queue : '{0}'. Current capacity : {1}%. State: RUNNING.".format(llap_queue_name, llap_queue_cap_perc))
Logger.info("Adjusted 'default' queue capacity to : {0}%".format(adjusted_default_queue_cap))
# Update Hive 'hive.llap.daemon.queue.name' prop to use 'llap' queue.
putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', llap_queue_name)
putHiveInteractiveSiteProperty('hive.server2.tez.default.queues', llap_queue_name)
# Update 'hive.llap.daemon.queue.name' prop combo entries and llap capacity slider visibility.
self.setLlapDaemonQueuePropAttributes(services, configurations)
else:
Logger.debug("Not creating/adjusting {0} queue. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames)))
else:
Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.")
"""
Checks and sees (1). If only two leaf queues exist at root level, namely: 'default' and 'llap',
and (2). 'llap' is in RUNNING state.
If yes, performs the following actions: (1). 'llap' queue state set to STOPPED,
(2). 'llap' queue capacity set to 0 %,
(3). 'default' queue capacity set to 100 %
"""
def checkAndStopLlapQueue(self, services, configurations, llap_queue_name):
putCapSchedProperty = self.putProperty(configurations, "capacity-scheduler", services)
putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
updated_default_queue_configs = ''
updated_llap_queue_configs = ''
if capacity_scheduler_properties:
# Get all leaf queues.
leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
if len(leafQueueNames) == 2 and llap_queue_name in leafQueueNames and 'default' in leafQueueNames:
# Get 'llap' queue state
currLlapQueueState = 'STOPPED'
if 'yarn.scheduler.capacity.root.'+llap_queue_name+'.state' in capacity_scheduler_properties.keys():
currLlapQueueState = capacity_scheduler_properties.get('yarn.scheduler.capacity.root.'+llap_queue_name+'.state')
else:
Logger.error("{0} queue 'state' property not present in capacity scheduler. Skipping adjusting queues.".format(llap_queue_name))
return
if currLlapQueueState == 'RUNNING':
DEFAULT_MAX_CAPACITY = '100'
for prop, val in capacity_scheduler_properties.items():
# Update 'default' related configs in 'updated_default_queue_configs'
if llap_queue_name not in prop:
if prop == 'yarn.scheduler.capacity.root.default.capacity':
# Set 'default' capacity back to maximum val
updated_default_queue_configs = updated_default_queue_configs \
+ prop + "="+DEFAULT_MAX_CAPACITY + "\n"
elif prop == 'yarn.scheduler.capacity.root.default.maximum-capacity':
# Set 'default' max. capacity back to maximum val
updated_default_queue_configs = updated_default_queue_configs \
+ prop + "="+DEFAULT_MAX_CAPACITY + "\n"
elif prop == 'yarn.scheduler.capacity.root.ordering-policy':
# Don't set this property. The default will be picked up.
pass
elif prop.startswith('yarn.'):
updated_default_queue_configs = updated_default_queue_configs + prop + "=" + val + "\n"
else: # Update 'llap' related configs in 'updated_llap_queue_configs'
if prop == 'yarn.scheduler.capacity.root.'+llap_queue_name+'.state':
updated_llap_queue_configs = updated_llap_queue_configs \
+ prop + "=STOPPED\n"
elif prop == 'yarn.scheduler.capacity.root.'+llap_queue_name+'.capacity':
updated_llap_queue_configs = updated_llap_queue_configs \
+ prop + "=0\n"
elif prop == 'yarn.scheduler.capacity.root.'+llap_queue_name+'.maximum-capacity':
updated_llap_queue_configs = updated_llap_queue_configs \
+ prop + "=0\n"
elif prop.startswith('yarn.'):
updated_llap_queue_configs = updated_llap_queue_configs + prop + "=" + val + "\n"
else:
Logger.debug("{0} queue state is : {1}. Skipping adjusting queues.".format(llap_queue_name, currLlapQueueState))
return
if updated_default_queue_configs and updated_llap_queue_configs:
putCapSchedProperty("capacity-scheduler", updated_default_queue_configs+updated_llap_queue_configs)
Logger.info("Changed YARN '{0}' queue state to 'STOPPED', and capacity to 0%. Adjusted 'default' queue capacity to : {1}%" \
.format(llap_queue_name, DEFAULT_MAX_CAPACITY))
# Update Hive 'hive.llap.daemon.queue.name' prop to use 'default' queue.
putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', self.YARN_ROOT_DEFAULT_QUEUE_NAME)
putHiveInteractiveSiteProperty('hive.server2.tez.default.queues', self.YARN_ROOT_DEFAULT_QUEUE_NAME)
else:
Logger.debug("Not removing '{0}' queue as number of Queues not equal to 2. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames)))
else:
Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.")
def setLlapDaemonQueuePropAttributes(self, services, configurations):
"""
Checks and sets the 'Hive Server Interactive' 'hive.llap.daemon.queue.name' config Property Attributes. Takes into
account that 'capacity-scheduler' may have changed (got updated) in current Stack Advisor invocation.
"""
Logger.info("Determining 'hive.llap.daemon.queue.name' config Property Attributes.")
#TODO Determine if this is doing the right thing if some queue is setup with capacity=0, or is STOPPED. Maybe don't list it.
putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, self.HIVE_INTERACTIVE_SITE)
capacity_scheduler_properties = dict()
# Read 'capacity-scheduler' from configurations if we modified and added recommendation to it, as part of current
# StackAdvisor invocation.
if "capacity-scheduler" in configurations:
cap_sched_props_as_dict = configurations["capacity-scheduler"]["properties"]
if 'capacity-scheduler' in cap_sched_props_as_dict:
cap_sched_props_as_str = configurations['capacity-scheduler']['properties']['capacity-scheduler']
if cap_sched_props_as_str:
cap_sched_props_as_str = str(cap_sched_props_as_str).split('\n')
if len(cap_sched_props_as_str) > 0 and cap_sched_props_as_str[0] != 'null':
# Got 'capacity-scheduler' configs as one "\n" separated string
for property in cap_sched_props_as_str:
key, sep, value = property.partition("=")
capacity_scheduler_properties[key] = value
Logger.info("'capacity-scheduler' configs is set as a single '\\n' separated string in current invocation. "
"count(configurations['capacity-scheduler']['properties']['capacity-scheduler']) = "
"{0}".format(len(capacity_scheduler_properties)))
else:
Logger.info("Read configurations['capacity-scheduler']['properties']['capacity-scheduler'] is : {0}".format(cap_sched_props_as_str))
else:
Logger.info("configurations['capacity-scheduler']['properties']['capacity-scheduler'] : {0}.".format(cap_sched_props_as_str))
# if 'capacity_scheduler_properties' is empty, implies we may have 'capacity-scheduler' configs as dictionary
# in configurations, if 'capacity-scheduler' changed in current invocation.
if not capacity_scheduler_properties:
if isinstance(cap_sched_props_as_dict, dict) and len(cap_sched_props_as_dict) > 1:
capacity_scheduler_properties = cap_sched_props_as_dict
Logger.info("'capacity-scheduler' changed in current Stack Advisor invocation. Retrieved the configs as dictionary from configurations.")
else:
Logger.info("Read configurations['capacity-scheduler']['properties'] is : {0}".format(cap_sched_props_as_dict))
else:
Logger.info("'capacity-scheduler' not modified in the current Stack Advisor invocation.")
# if 'capacity_scheduler_properties' is still empty, it implies 'capacity-scheduler' wasn't changed in the current
# SA invocation. Thus, read it from input : 'services'.
if not capacity_scheduler_properties:
capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
Logger.info("'capacity-scheduler' not changed in current Stack Advisor invocation. Retrieved the configs from services.")
# Get set of current YARN leaf queues.
leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
if leafQueueNames:
leafQueues = [{"label": str(queueName), "value": queueName} for queueName in leafQueueNames]
leafQueues = sorted(leafQueues, key=lambda q: q['value'])
putHiveInteractiveSitePropertyAttribute("hive.llap.daemon.queue.name", "entries", leafQueues)
Logger.info("'hive.llap.daemon.queue.name' config Property Attributes set to : {0}".format(leafQueues))
else:
Logger.error("Problem retrieving YARN queues. Skipping updating HIVE Server Interactve "
"'hive.server2.tez.default.queues' property attributes.")
def __getQueueCapacityKeyFromCapacityScheduler(self, capacity_scheduler_properties, llap_daemon_selected_queue_name):
"""
Retrieves the passed in queue's 'capacity' related key from Capacity Scheduler.
"""
# Identify the key which contains the capacity for 'llap_daemon_selected_queue_name'.
cap_sched_keys = capacity_scheduler_properties.keys()
llap_selected_queue_cap_key = None
current_selected_queue_for_llap_cap = None
for key in cap_sched_keys:
# Expected capacity prop key is of form : 'yarn.scheduler.capacity.<one or more queues in path separated by '.'>.[llap_daemon_selected_queue_name].capacity'
if key.endswith(llap_daemon_selected_queue_name+".capacity") and key.startswith("yarn.scheduler.capacity.root"):
Logger.info("DBG: Selected queue name as: " + key)
llap_selected_queue_cap_key = key
break
return llap_selected_queue_cap_key
def __getQueueStateFromCapacityScheduler(self, capacity_scheduler_properties, llap_daemon_selected_queue_name):
"""
Retrieves the passed in queue's 'state' from Capacity Scheduler.
"""
# Identify the key which contains the state for 'llap_daemon_selected_queue_name'.
cap_sched_keys = capacity_scheduler_properties.keys()
llap_selected_queue_state_key = None
llap_selected_queue_state = None
for key in cap_sched_keys:
if key.endswith(llap_daemon_selected_queue_name+".state"):
llap_selected_queue_state_key = key
break
llap_selected_queue_state = capacity_scheduler_properties.get(llap_selected_queue_state_key)
return llap_selected_queue_state
def __getQueueAmFractionFromCapacityScheduler(self, capacity_scheduler_properties, llap_daemon_selected_queue_name):
"""
Retrieves the passed in queue's 'AM fraction' from Capacity Scheduler. Returns default value of 0.1 if AM Percent
pertaining to passed-in queue is not present.
"""
# Identify the key which contains the AM fraction for 'llap_daemon_selected_queue_name'.
cap_sched_keys = capacity_scheduler_properties.keys()
llap_selected_queue_am_percent_key = None
for key in cap_sched_keys:
if key.endswith("."+llap_daemon_selected_queue_name+".maximum-am-resource-percent"):
llap_selected_queue_am_percent_key = key
Logger.info("AM percent key got for '{0}' queue is : '{1}'".format(llap_daemon_selected_queue_name, llap_selected_queue_am_percent_key))
break
if llap_selected_queue_am_percent_key is None:
Logger.info("Returning default AM percent value : '0.1' for queue : {0}".format(llap_daemon_selected_queue_name))
return 0.1 # Default value to use if we couldn't retrieve queue's corresponding AM Percent key.
else:
llap_selected_queue_am_percent = capacity_scheduler_properties.get(llap_selected_queue_am_percent_key)
Logger.info("Returning read value for key '{0}' as : '{1}' for queue : '{2}'".format(llap_selected_queue_am_percent_key,
llap_selected_queue_am_percent,
llap_daemon_selected_queue_name))
return llap_selected_queue_am_percent
def __getSelectedQueueTotalCap(self, capacity_scheduler_properties, llap_daemon_selected_queue_name, total_cluster_capacity):
"""
Calculates the total available capacity for the passed-in YARN queue of any level based on the percentages.
"""
Logger.info("Entered __getSelectedQueueTotalCap fn() with llap_daemon_selected_queue_name= '{0}'.".format(llap_daemon_selected_queue_name))
available_capacity = total_cluster_capacity
queue_cap_key = self.__getQueueCapacityKeyFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name)
if queue_cap_key:
queue_cap_key = queue_cap_key.strip()
if len(queue_cap_key) >= 34: # len('yarn.scheduler.capacity.<single letter queue name>.capacity') = 34
# Expected capacity prop key is of form : 'yarn.scheduler.capacity.<one or more queues (path)>.capacity'
queue_path = queue_cap_key[24:] # Strip from beginning 'yarn.scheduler.capacity.'
queue_path = queue_path[0:-9] # Strip from end '.capacity'
queues_list = queue_path.split('.')
Logger.info("Queue list : {0}".format(queues_list))
if queues_list:
for queue in queues_list:
queue_cap_key = self.__getQueueCapacityKeyFromCapacityScheduler(capacity_scheduler_properties, queue)
queue_cap_perc = float(capacity_scheduler_properties.get(queue_cap_key))
available_capacity = queue_cap_perc / 100 * available_capacity
Logger.info("Total capacity available for queue {0} is : {1}".format(queue, available_capacity))
# returns the capacity calculated for passed-in queue in 'llap_daemon_selected_queue_name'.
return available_capacity
def recommendRangerKMSConfigurations(self, configurations, clusterData, services, hosts):
super(ADH15StackAdvisor, self).recommendRangerKMSConfigurations(configurations, clusterData, services, hosts)
security_enabled = self.isSecurityEnabled(services)
required_services = [{'service' : 'RANGER', 'config-type': 'ranger-env', 'property-name': 'ranger_user', 'proxy-category': ['hosts', 'users', 'groups']}]
if security_enabled:
# recommendations for kms proxy related properties
self.recommendKMSProxyUsers(configurations, services, hosts, required_services)
else:
self.deleteKMSProxyUsers(configurations, services, hosts, required_services)
def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
super(ADH15StackAdvisor, self).recommendRangerConfigurations(configurations, clusterData, services, hosts)
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
has_ranger_tagsync = False
putTagsyncAppProperty = self.putProperty(configurations, "tagsync-application-properties", services)
putTagsyncSiteProperty = self.putProperty(configurations, "ranger-tagsync-site", services)
putRangerAdminProperty = self.putProperty(configurations, "ranger-admin-site", services)
putRangerEnvProperty = self.putProperty(configurations, "ranger-env", services)
application_properties = self.getServicesSiteProperties(services, "application-properties")
ranger_tagsync_host = self.getHostsForComponent(services, "RANGER", "RANGER_TAGSYNC")
has_ranger_tagsync = len(ranger_tagsync_host) > 0
if 'ATLAS' in servicesList and has_ranger_tagsync:
atlas_hosts = self.getHostNamesWithComponent("ATLAS", "ATLAS_SERVER", services)
atlas_host = 'localhost' if len(atlas_hosts) == 0 else atlas_hosts[0]
protocol = 'http'
atlas_port = '21000'
if application_properties and 'atlas.enableTLS' in application_properties and application_properties['atlas.enableTLS'].lower() == 'true':
protocol = 'https'
if 'atlas.server.https.port' in application_properties:
atlas_port = application_properties['atlas.server.https.port']
else:
protocol = 'http'
if application_properties and 'atlas.server.http.port' in application_properties:
atlas_port = application_properties['atlas.server.http.port']
atlas_rest_endpoint = '{0}://{1}:{2}'.format(protocol, atlas_host, atlas_port)
putTagsyncSiteProperty('ranger.tagsync.source.atlas', 'true')
putTagsyncSiteProperty('ranger.tagsync.source.atlasrest.endpoint', atlas_rest_endpoint)
zookeeper_host_port = self.getZKHostPortString(services)
if zookeeper_host_port and has_ranger_tagsync:
putTagsyncAppProperty('atlas.kafka.zookeeper.connect', zookeeper_host_port)
if 'KAFKA' in servicesList and has_ranger_tagsync:
kafka_hosts = self.getHostNamesWithComponent("KAFKA", "KAFKA_BROKER", services)
kafka_port = '6667'
if 'kafka-broker' in services['configurations'] and (
'port' in services['configurations']['kafka-broker']['properties']):
kafka_port = services['configurations']['kafka-broker']['properties']['port']
kafka_host_port = []
for i in range(len(kafka_hosts)):
kafka_host_port.append(kafka_hosts[i] + ':' + kafka_port)
final_kafka_host = ",".join(kafka_host_port)
putTagsyncAppProperty('atlas.kafka.bootstrap.servers', final_kafka_host)
is_solr_cloud_enabled = False
if 'ranger-env' in services['configurations'] and 'is_solrCloud_enabled' in services['configurations']['ranger-env']['properties']:
is_solr_cloud_enabled = services['configurations']['ranger-env']['properties']['is_solrCloud_enabled'] == 'true'
is_external_solr_cloud_enabled = False
if 'ranger-env' in services['configurations'] and 'is_external_solrCloud_enabled' in services['configurations']['ranger-env']['properties']:
is_external_solr_cloud_enabled = services['configurations']['ranger-env']['properties']['is_external_solrCloud_enabled'] == 'true'
ranger_audit_zk_port = ''
if 'AMBARI_INFRA' in servicesList and zookeeper_host_port and is_solr_cloud_enabled and not is_external_solr_cloud_enabled:
zookeeper_host_port = zookeeper_host_port.split(',')
zookeeper_host_port.sort()
zookeeper_host_port = ",".join(zookeeper_host_port)
infra_solr_znode = '/infra-solr'
if 'infra-solr-env' in services['configurations'] and \
('infra_solr_znode' in services['configurations']['infra-solr-env']['properties']):
infra_solr_znode = services['configurations']['infra-solr-env']['properties']['infra_solr_znode']
ranger_audit_zk_port = '{0}{1}'.format(zookeeper_host_port, infra_solr_znode)
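# Illustrative result (hypothetical hosts): e.g. 'zk1:2181,zk2:2181,zk3:2181/infra-solr' when the Ambari Infra
# managed SolrCloud is used for Ranger audits.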
putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port)
elif zookeeper_host_port and is_solr_cloud_enabled and is_external_solr_cloud_enabled:
ranger_audit_zk_port = '{0}/{1}'.format(zookeeper_host_port, 'ranger_audits')
putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port)
else:
putRangerAdminProperty('ranger.audit.solr.zookeepers', 'NONE')
ranger_services = [
{'service_name': 'HDFS', 'audit_file': 'ranger-hdfs-audit'},
{'service_name': 'YARN', 'audit_file': 'ranger-yarn-audit'},
{'service_name': 'HBASE', 'audit_file': 'ranger-hbase-audit'},
{'service_name': 'HIVE', 'audit_file': 'ranger-hive-audit'},
{'service_name': 'KNOX', 'audit_file': 'ranger-knox-audit'},
{'service_name': 'KAFKA', 'audit_file': 'ranger-kafka-audit'},
{'service_name': 'STORM', 'audit_file': 'ranger-storm-audit'},
{'service_name': 'RANGER_KMS', 'audit_file': 'ranger-kms-audit'},
{'service_name': 'ATLAS', 'audit_file': 'ranger-atlas-audit'}
]
for item in range(len(ranger_services)):
if ranger_services[item]['service_name'] in servicesList:
component_audit_file = ranger_services[item]['audit_file']
if component_audit_file in services["configurations"]:
ranger_audit_dict = [
{'filename': 'ranger-admin-site', 'configname': 'ranger.audit.solr.urls', 'target_configname': 'xasecure.audit.destination.solr.urls'},
{'filename': 'ranger-admin-site', 'configname': 'ranger.audit.solr.zookeepers', 'target_configname': 'xasecure.audit.destination.solr.zookeepers'}
]
putRangerAuditProperty = self.putProperty(configurations, component_audit_file, services)
for item in ranger_audit_dict:
if item['filename'] in services["configurations"] and item['configname'] in services["configurations"][item['filename']]["properties"]:
if item['filename'] in configurations and item['configname'] in configurations[item['filename']]["properties"]:
rangerAuditProperty = configurations[item['filename']]["properties"][item['configname']]
else:
rangerAuditProperty = services["configurations"][item['filename']]["properties"][item['configname']]
putRangerAuditProperty(item['target_configname'], rangerAuditProperty)
if "HDFS" in servicesList:
hdfs_user = None
if "hadoop-env" in services["configurations"] and "hdfs_user" in services["configurations"]["hadoop-env"]["properties"]:
hdfs_user = services["configurations"]["hadoop-env"]["properties"]["hdfs_user"]
putRangerAdminProperty('ranger.kms.service.user.hdfs', hdfs_user)
if "HIVE" in servicesList:
hive_user = None
if "hive-env" in services["configurations"] and "hive_user" in services["configurations"]["hive-env"]["properties"]:
hive_user = services["configurations"]["hive-env"]["properties"]["hive_user"]
putRangerAdminProperty('ranger.kms.service.user.hive', hive_user)
ranger_plugins_serviceuser = [
{'service_name': 'HDFS', 'file_name': 'hadoop-env', 'config_name': 'hdfs_user', 'target_configname': 'ranger.plugins.hdfs.serviceuser'},
{'service_name': 'HIVE', 'file_name': 'hive-env', 'config_name': 'hive_user', 'target_configname': 'ranger.plugins.hive.serviceuser'},
{'service_name': 'YARN', 'file_name': 'yarn-env', 'config_name': 'yarn_user', 'target_configname': 'ranger.plugins.yarn.serviceuser'},
{'service_name': 'HBASE', 'file_name': 'hbase-env', 'config_name': 'hbase_user', 'target_configname': 'ranger.plugins.hbase.serviceuser'},
{'service_name': 'KNOX', 'file_name': 'knox-env', 'config_name': 'knox_user', 'target_configname': 'ranger.plugins.knox.serviceuser'},
{'service_name': 'STORM', 'file_name': 'storm-env', 'config_name': 'storm_user', 'target_configname': 'ranger.plugins.storm.serviceuser'},
{'service_name': 'KAFKA', 'file_name': 'kafka-env', 'config_name': 'kafka_user', 'target_configname': 'ranger.plugins.kafka.serviceuser'},
{'service_name': 'RANGER_KMS', 'file_name': 'kms-env', 'config_name': 'kms_user', 'target_configname': 'ranger.plugins.kms.serviceuser'},
{'service_name': 'ATLAS', 'file_name': 'atlas-env', 'config_name': 'metadata_user', 'target_configname': 'ranger.plugins.atlas.serviceuser'}
]
    for plugin in ranger_plugins_serviceuser:
      if plugin['service_name'] in servicesList:
        file_name = plugin['file_name']
        config_name = plugin['config_name']
        target_configname = plugin['target_configname']
        if file_name in services["configurations"] and config_name in services["configurations"][file_name]["properties"]:
          service_user = services["configurations"][file_name]["properties"][config_name]
          putRangerAdminProperty(target_configname, service_user)
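    # Example of the mapping above (the service user names shown are common stack defaults and may differ per cluster):
    #   hadoop-env/hdfs_user = 'hdfs'  ->  ranger.plugins.hdfs.serviceuser = 'hdfs'
    #   hive-env/hive_user   = 'hive'  ->  ranger.plugins.hive.serviceuser = 'hive'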
if "ATLAS" in servicesList:
if "ranger-env" in services["configurations"]:
putAtlasRangerAuditProperty = self.putProperty(configurations, 'ranger-atlas-audit', services)
xasecure_audit_destination_hdfs = ''
xasecure_audit_destination_hdfs_dir = ''
xasecure_audit_destination_solr = ''
        # Read the (possibly just-recommended) ranger-env audit destinations, falling back to the current config.
        if 'ranger-env' in configurations and 'xasecure.audit.destination.hdfs' in configurations['ranger-env']['properties']:
          xasecure_audit_destination_hdfs = configurations['ranger-env']['properties']['xasecure.audit.destination.hdfs']
        else:
          xasecure_audit_destination_hdfs = services['configurations']['ranger-env']['properties']['xasecure.audit.destination.hdfs']

        if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
          xasecure_audit_destination_hdfs_dir = '{0}/{1}/{2}'.format(services['configurations']['core-site']['properties']['fs.defaultFS'], 'ranger', 'audit')

        if 'ranger-env' in configurations and 'xasecure.audit.destination.solr' in configurations['ranger-env']['properties']:
          xasecure_audit_destination_solr = configurations['ranger-env']['properties']['xasecure.audit.destination.solr']
        else:
          xasecure_audit_destination_solr = services['configurations']['ranger-env']['properties']['xasecure.audit.destination.solr']

        putAtlasRangerAuditProperty('xasecure.audit.destination.hdfs', xasecure_audit_destination_hdfs)
        putAtlasRangerAuditProperty('xasecure.audit.destination.hdfs.dir', xasecure_audit_destination_hdfs_dir)
        putAtlasRangerAuditProperty('xasecure.audit.destination.solr', xasecure_audit_destination_solr)
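        # With a hypothetical fs.defaultFS of 'hdfs://nn.example.com:8020', the recommended
        # xasecure.audit.destination.hdfs.dir becomes 'hdfs://nn.example.com:8020/ranger/audit'.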
required_services = [
{'service_name': 'ATLAS', 'config_type': 'ranger-atlas-security'}
]
# recommendation for ranger url for ranger-supported plugins
self.recommendRangerUrlConfigurations(configurations, services, required_services)
def validateRangerTagsyncConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
ranger_tagsync_properties = properties
validationItems = []
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
    # 'atlas_missing' is True when Ranger is deployed without ATLAS, i.e. tagsync has no
    # Atlas source to pull tags from; warn if the Atlas source is nevertheless enabled.
    atlas_missing = False
    if "RANGER" in servicesList:
      atlas_missing = "ATLAS" not in servicesList
      if atlas_missing and 'ranger.tagsync.source.atlas' in ranger_tagsync_properties and \
        ranger_tagsync_properties['ranger.tagsync.source.atlas'].lower() == 'true':
        validationItems.append({"config-name": "ranger.tagsync.source.atlas",
                                "item": self.getWarnItem(
                                  "Need to install the ATLAS service to set ranger.tagsync.source.atlas to true.")})
return self.toConfigurationValidationProblems(validationItems, "ranger-tagsync-site")
def __isServiceDeployed(self, services, serviceName):
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
return serviceName in servicesList
def isComponentUsingCardinalityForLayout(self, componentName):
    return super(ADH15StackAdvisor, self).isComponentUsingCardinalityForLayout(componentName) or componentName in ['SPARK2_THRIFTSERVER', 'LIVY2_SERVER', 'LIVY_SERVER']
|
the-stack_0_28020
|
from jsonize.utils.json import *
from jsonize.utils.json import _write_item_in_array, _write_item_in_dict, _write_item_in_path
from copy import deepcopy
import unittest
test_json_path = JSONPath('$.key1.key2.key3')
test_json_path_2 = JSONPath('$.key2.key1.key3')
test_json_path_3 = JSONPath('$')
sample_dict_1 = {'key1': {'key2': {'key3': 42,
'other_key': -35,
'key4': {'key5': True,
'key6': False}}}}
sample_dict_2 = {'key1': 42}
sample_dict_3 = {'key2': {'key1': {'key3': True}}}
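# For orientation: test_json_path ('$.key1.key2.key3') addresses
# sample_dict_1['key1']['key2']['key3'], i.e. the value 42.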
class TestGetItemJSONPath(unittest.TestCase):
def test_item_exists(self):
with self.subTest():
self.assertEqual(get_item_from_json_path(test_json_path, deepcopy(sample_dict_1)),
42)
with self.subTest():
self.assertEqual(get_item_from_json_path(JSONPath('$.key1.key2.key4'), deepcopy(sample_dict_1)),
{'key5': True, 'key6': False})
def test_get_root(self):
with self.subTest():
self.assertEqual(get_item_from_json_path(JSONPath('$'), sample_dict_1), deepcopy(sample_dict_1))
with self.subTest():
self.assertEqual(get_item_from_json_path(JSONPath('$'), sample_dict_2), deepcopy(sample_dict_2))
with self.subTest():
self.assertEqual(get_item_from_json_path(JSONPath('$'), sample_dict_3), deepcopy(sample_dict_3))
with self.subTest():
self.assertEqual(get_item_from_json_path(JSONPath('@'), sample_dict_2), deepcopy(sample_dict_2))
def test_item_doesnt_exist(self):
with self.assertRaises(KeyError):
get_item_from_json_path(JSONPath('$.key1.key2.another_key'), deepcopy(sample_dict_1))
with self.assertRaises(KeyError):
get_item_from_json_path(test_json_path_2, deepcopy(sample_dict_1))
    def test_item_not_subscriptable(self):
with self.assertRaises(TypeError):
get_item_from_json_path(test_json_path, deepcopy(sample_dict_2))
def test_get_item_in_array(self):
input_array = [0, 1, 2, 3, 4, 5, 6]
self.assertEqual(get_item_from_json_path(JSONPath('$[3]'), input_array), 3)
def test_get_item_nested_arrays(self):
input = {'key1': 43,
'key2': [0, 1, [{'key3': True}, {'key4': False}]]}
self.assertEqual(get_item_from_json_path(JSONPath('$.key2[2][-1].key4'), input), False)
class TestWriteItemJSONPath(unittest.TestCase):
def test_overwrite_item(self):
with self.subTest():
result = write_item_in_path({'key2': {'key3': 42,
'other_key': -35,
'key4': {'key5': True,
'key6': False}}},
JSONPath('$.key1'), sample_dict_1)
self.assertEqual(result, {'key1': {'key2': {'key3': 42,
'other_key': -35,
'key4': {'key5': True,
'key6': False}}}})
with self.subTest():
result_2 = write_item_in_path(9, JSONPath('$.key1'), deepcopy(sample_dict_2))
self.assertEqual(result_2, {'key1': 9})
def test_write_new_item(self):
        with self.subTest():
            result = write_item_in_path('New value', JSONPath('$.new_key'), deepcopy(sample_dict_1))
            # Build the expected value on a copy so the shared module-level fixture is not mutated.
            reference = deepcopy(sample_dict_1)
            reference['new_key'] = 'New value'
            self.assertEqual(reference, result)
        with self.subTest():
            result_2 = write_item_in_path({'new_subkey': True}, JSONPath('$.key2.new_key'), deepcopy(sample_dict_3))
            reference_2 = deepcopy(sample_dict_3)
            reference_2['key2']['new_key'] = {'new_subkey': True}
            self.assertEqual(result_2, reference_2)
def test_write_new_item_in_new_path(self):
result = write_item_in_path('New value', JSONPath('$.key2.key3'), deepcopy(sample_dict_2))
reference = {'key1': 42,
'key2': {'key3': 'New value'}}
self.assertEqual(reference, result)
def test_write_deeply_nested_item_in_new_path(self):
result = write_item_in_path('New Value', JSONPath('$.key1.key2.key3.key4.key5.key6'), deepcopy(sample_dict_3))
reference = {'key2':
{'key1':
{'key3': True}
},
'key1':
{'key2':
{'key3':
{'key4':
{'key5':
{'key6': 'New Value'}
}
}
}
}
}
self.assertEqual(reference, result)
def test_overwrite_item_conflicting_path(self):
reference = {'key1': {'key2': {'key3': 42,
'other_key': -35,
'key4': {'key5': {'bad_key': 'Overwrite'},
'key6': False}}}}
result = write_item_in_path('Overwrite', JSONPath('$.key1.key2.key4.key5.bad_key'), deepcopy(sample_dict_1))
self.assertEqual(reference, result)
def test_write_item_in_array(self):
with self.subTest('write dictionary in array'):
initial = {'key1': True,
'key2': {'key3': [{'key4': 42}]}}
reference = {'key1': True,
'key2': {'key3': [{'key4': 42}, {'key5': 43}]}
}
result = _write_item_in_array({'key5': 43}, JSONPath('$.key2.key3[-1]'), initial)
self.assertEqual(reference, result)
with self.subTest('write item in array at root'):
initial = []
reference = [3]
result = _write_item_in_array(3, JSONPath('$[0]'), initial)
self.assertEqual(reference, result)
with self.subTest('write item in array at relative root'):
initial = []
reference = [5]
result = _write_item_in_array(5, JSONPath('@[0]'), initial)
self.assertEqual(reference, result)
with self.subTest('write item in array in nested location'):
initial = {'key1': 1,
'key2': {'key3': [1, 1, 2, 3, 5],
'key4': 5}
}
reference = {'key1': 1,
'key2': {'key3': [1, 1, 8, 2, 3, 5],
'key4': 5}
}
result = _write_item_in_array(8, JSONPath('$.key2.key3[2]'), initial)
self.assertEqual(reference, result)
def test_write_item_nested_arrays(self):
with self.subTest():
initial = {'key1': 43,
'key2': [0, 1, [{'key3': True}, {'key4': False}]]}
reference = {'key1': 43,
'key2': [0, 1, [{'key3': True}, {'key4': False, 'key5': 'New Value'}]]}
self.assertEqual(write_item_in_path('New Value', JSONPath('$.key2[-1][-1].key5'), initial), reference)
with self.subTest():
initial = {'key1': 43,
'key2': [[0, 1, 2], [3, 4, 5], [6, 7, 8]]}
reference = {'key1': 43,
'key2': [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]}
self.assertEqual(_write_item_in_array(9, JSONPath('$.key2[2][3]'), initial), reference)
with self.subTest():
initial = {'key1': 43,
'key2': [[0, 1, 2], [3, 4, 5], [6, 7, 8]]}
reference = {'key1': 43,
'key2': [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]}
self.assertEqual(_write_item_in_array(9, JSONPath('$.key2[-1][-1]'), initial), reference)
def test_write_deep_item_in_array(self):
with self.subTest('write new deep item in array'):
initial = {'key1': 1,
'key2': {
'key3': [
{'subelement': 42,
'other': True,
'yet_another': [1, 2]}
],
'key4': 5}
}
reference = {'key1': 1,
'key2': {
'key3': [
{'subelement': 42,
'other': True,
'yet_another': [1, 2]},
{'subelement': 43}
],
'key4': 5}
}
self.assertEqual(write_item_in_path(43, JSONPath('$.key2.key3[1].subelement'), initial), reference)
class TestJSONPath(unittest.TestCase):
def test_split_absolute(self):
absolute_path = JSONPath('$.key1.key2.key3.key4')
split = absolute_path.split(2)
reference = JSONPath('$.key1'), JSONPath('@.key2.key3.key4')
self.assertTupleEqual(split, reference)
def test_split_relative(self):
relative_path = JSONPath('@.key1.key2.key3.key4')
split = relative_path.split(-2)
reference = JSONPath('@.key1.key2'), JSONPath('@.key3.key4')
self.assertTupleEqual(split, reference)
def test_split_root(self):
absolute_path = JSONPath('$')
split = absolute_path.split(-1)
reference = JSONPath('$'), JSONPath('@')
self.assertTupleEqual(split, reference)
def test_fail_out_of_bound(self):
absolute_path = JSONPath('$.key1.key2.key3.key4')
with self.assertRaises(IndexError):
absolute_path.split(6)
def test_split_at_final_node(self):
absolute_path = JSONPath('$.key1.key2.key3.key4')
split = absolute_path.split(5)
reference = absolute_path, JSONPath('@')
self.assertTupleEqual(split, reference)
def test_is_relative(self):
with self.subTest():
self.assertTrue(JSONPath('@.key1').is_relative())
with self.subTest():
self.assertFalse(JSONPath('$.key1.key2').is_relative())
def test_is_absolute(self):
with self.subTest():
self.assertTrue(JSONPath('$.key1').is_absolute())
with self.subTest():
self.assertFalse(JSONPath('@.key1.key2').is_absolute())
def test_append(self):
reference = JSONPath('$.key1.key2.key3.key4')
path = JSONPath('$.key1.key2')
path.append(JSONPath('@.key3.key4'))
self.assertEqual(reference, path)
def test_fail_append(self):
with self.assertRaises(ValueError):
reference = JSONPath('$.key1.key2.key3.key4')
self.assertEqual(reference, JSONPath('$.key1.key2').append(JSONPath('$.key3.key4')))
def test_json_path_structure(self):
with self.subTest():
reference_string = '$'
reference_path_structure = ['$']
self.assertEqual(JSONPath._json_path_structure(reference_string), reference_path_structure)
with self.subTest():
reference_string = '@'
reference_path_structure = ['@']
self.assertEqual(JSONPath._json_path_structure(reference_string), reference_path_structure)
with self.subTest():
reference_string = '$.key1.key2[-1].key3[1:5:2].key4[0:3][-1]'
reference_path_structure = ['$', 'key1', 'key2', -1, 'key3', slice(1, 5, 2), 'key4', slice(0, 3), -1]
self.assertEqual(JSONPath._json_path_structure(reference_string), reference_path_structure)
with self.subTest():
reference_string = '@[1].key2.key3'
reference_path_structure = ['@', 1, 'key2', 'key3']
self.assertEqual(JSONPath._json_path_structure(reference_string), reference_path_structure)
with self.subTest():
reference_string = '@[:3].key2.key3'
reference_path_structure = ['@', slice(None, 3), 'key2', 'key3']
self.assertEqual(JSONPath._json_path_structure(reference_string), reference_path_structure)
def test_string_representation(self):
with self.subTest():
reference_string = '$'
reference_path_structure = ['$']
self.assertEqual(JSONPath.string_representation(reference_path_structure), reference_string)
with self.subTest():
reference_string = '@'
reference_path_structure = ['@']
self.assertEqual(JSONPath.string_representation(reference_path_structure), reference_string)
with self.subTest():
reference_string = '$.key1.key2[-1].key3[1:5:2].key4[1:3][-1]'
reference_path_structure = ['$', 'key1', 'key2', -1, 'key3', slice(1, 5, 2), 'key4', slice(1, 3), -1]
self.assertEqual(JSONPath.string_representation(reference_path_structure), reference_string)
with self.subTest():
reference_string = '@[:3].key2.key3'
reference_path_structure = ['@', slice(None, 3), 'key2', 'key3']
self.assertEqual(JSONPath.string_representation(reference_path_structure), reference_string)
def test_build_from_path_structure(self):
with self.subTest():
from_string = JSONPath('$')
from_path_structure = JSONPath.from_json_path_structure(['$'])
self.assertEqual(from_string, from_path_structure)
with self.subTest():
from_string = JSONPath('@')
from_path_structure = JSONPath.from_json_path_structure(['@'])
self.assertEqual(from_string, from_path_structure)
with self.subTest():
from_string = JSONPath('$.key1.key2[-1].key3[1:5:2].key4[1:3][-1]')
from_path_structure = JSONPath.from_json_path_structure(['$', 'key1', 'key2', -1, 'key3', slice(1, 5, 2), 'key4', slice(1, 3), -1])
self.assertEqual(from_string, from_path_structure)
class TestStringCasting(unittest.TestCase):
value_1 = '3'
value_2 = '2.0'
value_3 = '-4'
value_4 = 'inf'
value_5 = '-inf'
def test_str_is_int(self):
with self.subTest():
self.assertTrue(str_is_int(self.value_1))
with self.subTest():
self.assertTrue(str_is_int(self.value_3))
def test_str_is_not_int(self):
with self.subTest():
self.assertFalse(str_is_int(self.value_2))
with self.subTest():
self.assertFalse(str_is_int(self.value_4))
with self.subTest():
self.assertFalse(str_is_int(self.value_5))
def test_str_is_float(self):
with self.subTest():
self.assertTrue(str_is_float(self.value_2))
def test_str_is_not_float(self):
with self.subTest():
self.assertFalse(str_is_float(self.value_4))
with self.subTest():
self.assertFalse(str_is_float(self.value_5))
if __name__ == '__main__':
unittest.main()
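# The suite can be run directly (python <this file>) via the unittest.main() entry point above,
# or through discovery, e.g. `python -m unittest` from the project root (standard test layout assumed).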
|