the-stack_0_22150
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import os
import numpy as np
from keras import backend as K
from keras import callbacks
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=['keras.callbacks.TensorBoard'])
class TensorBoard(callbacks.TensorBoard):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Args:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network for histograms
computation.
write_images: whether to write model weights to visualize as image in
TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding layers
will be saved. If set to 0, embeddings won't be computed. Data to be
visualized in TensorBoard's Embedding tab must be passed as
`embeddings_data`.
embeddings_layer_names: a list of names of layers to keep an eye on. If
None or an empty list, all the embedding layers will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name in
which metadata for this embedding layer is saved.
[Here are details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about the metadata file format. If the same metadata file is
used for all embedding layers, a single string can be passed.
embeddings_data: data to be embedded at layers specified in
`embeddings_layer_names`. Numpy array (if the model has a single input)
or list of Numpy arrays (if the model has multiple inputs). Learn more
about embeddings [in this guide](
https://www.tensorflow.org/programmers_guide/embedding).
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
@compatibility(eager)
Using the `TensorBoard` callback will work when eager execution is enabled,
with the restriction that outputting histogram summaries of weights and
gradients is not supported. Consequently, `histogram_freq` will be ignored.
@end_compatibility
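Example usage (a minimal sketch; `model`, `x_train` and `y_train` are assumed
to be an already-compiled Keras model and its training data):
```python
tensorboard_callback = TensorBoard(log_dir='./logs', update_freq='epoch')
model.fit(x_train, y_train, epochs=5, callbacks=[tensorboard_callback])
```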
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq='epoch',
profile_batch=2):
# Don't call super's init since it is an eager-only version.
callbacks.Callback.__init__(self)
self.log_dir = log_dir
self.histogram_freq = histogram_freq
if self.histogram_freq and tf.executing_eagerly():
logging.warning(
UserWarning('Weight and gradient histograms not supported for eager '
'execution, setting `histogram_freq` to `0`.'))
self.histogram_freq = 0
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.batch_size = batch_size
self._current_batch = 0
self._total_batches_seen = 0
self._total_val_batches_seen = 0
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata
self.embeddings_data = embeddings_data
if update_freq == 'batch':
self.update_freq = 1
else:
self.update_freq = update_freq
self._samples_seen = 0
self._samples_seen_at_last_write = 0
# TODO(fishx): Add a link to the full profiler tutorial.
self._profile_batch = profile_batch
# One profiler session is running if it is True.
self._is_profiling = False
# TensorBoard should only write summaries on the chief when in a
# Multi-Worker setting.
self._chief_worker_only = True
def _init_writer(self, model):
"""Sets file writer."""
if tf.executing_eagerly():
self.writer = tf.summary.create_file_writer(self.log_dir)
if not model.run_eagerly and self.write_graph:
with self.writer.as_default():
tf.summary.graph(K.get_graph())
elif self.write_graph:
self.writer = tf.compat.v1.summary.FileWriter(self.log_dir, K.get_graph())
else:
self.writer = tf.compat.v1.summary.FileWriter(self.log_dir)
def _make_histogram_ops(self, model):
"""Defines histogram ops when histogram_freq > 0."""
# only make histogram summary op if it hasn't already been made
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
mapped_weight_name = weight.name.replace(':', '_')
tf.compat.v1.summary.histogram(mapped_weight_name, weight)
if self.write_images:
w_img = tf.compat.v1.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = tf.compat.v1.transpose(w_img)
shape = K.int_shape(w_img)
w_img = tf.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # convnet case
if K.image_data_format() == 'channels_last':
# switch to channels_first to display
# every kernel as a separate image
w_img = tf.compat.v1.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = tf.reshape(w_img,
[shape[0], shape[1], shape[2], 1])
elif len(shape) == 1: # bias case
w_img = tf.reshape(w_img, [1, shape[0], 1, 1])
else:
# not possible to handle 3D convnets etc.
continue
shape = K.int_shape(w_img)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf.compat.v1.summary.image(mapped_weight_name, w_img)
if self.write_grads:
for weight in layer.trainable_weights:
mapped_weight_name = weight.name.replace(':', '_')
grads = model.optimizer.get_gradients(model.total_loss, weight)
def is_indexed_slices(grad):
return type(grad).__name__ == 'IndexedSlices'
grads = [
grad.values if is_indexed_slices(grad) else grad
for grad in grads
]
tf.compat.v1.summary.histogram('{}_grad'.format(mapped_weight_name), grads)
if hasattr(layer, 'output'):
if isinstance(layer.output, list):
for i, output in enumerate(layer.output):
tf.compat.v1.summary.histogram('{}_out_{}'.format(layer.name, i), output)
else:
tf.compat.v1.summary.histogram('{}_out'.format(layer.name), layer.output)
def set_model(self, model):
"""Sets Keras model and creates summary ops."""
self.model = model
self._init_writer(model)
# histogram summaries only enabled in graph mode
if not tf.executing_eagerly():
self._make_histogram_ops(model)
self.merged = tf.compat.v1.summary.merge_all()
# If both embedding_freq and embeddings_data are available, we will
# visualize embeddings.
if self.embeddings_freq and self.embeddings_data is not None:
# Avoid circular dependency.
from keras.engine import training_utils_v1 # pylint: disable=g-import-not-at-top
self.embeddings_data = training_utils_v1.standardize_input_data(
self.embeddings_data, model.input_names)
# If embedding_layer_names are not provided, get all of the embedding
# layers from the model.
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name
for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
self.assign_embeddings = []
embeddings_vars = {}
self.batch_id = batch_id = tf.compat.v1.placeholder(tf.int32)
self.step = step = tf.compat.v1.placeholder(tf.int32)
for layer in self.model.layers:
if layer.name in embeddings_layer_names:
embedding_input = self.model.get_layer(layer.name).output
embedding_size = np.prod(embedding_input.shape[1:])
embedding_input = tf.reshape(embedding_input,
(step, int(embedding_size)))
shape = (self.embeddings_data[0].shape[0], int(embedding_size))
embedding = tf.Variable(
tf.zeros(shape), name=layer.name + '_embedding')
embeddings_vars[layer.name] = embedding
batch = tf.compat.v1.assign(embedding[batch_id:batch_id + step],
embedding_input)
self.assign_embeddings.append(batch)
self.saver = tf.compat.v1.train.Saver(list(embeddings_vars.values()))
# Create embeddings_metadata dictionary
if isinstance(self.embeddings_metadata, str):
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings_vars.keys()
}
else:
# If embedding_metadata is already a dictionary
embeddings_metadata = self.embeddings_metadata
try:
from tensorboard.plugins import projector
except ImportError:
raise ImportError('Failed to import TensorBoard. Please make sure that '
'TensorBoard integration is complete.')
# TODO(psv): Add integration tests to test embedding visualization
# with TensorBoard callback. We are unable to write a unit test for this
# because TensorBoard dependency assumes TensorFlow package is installed.
config = projector.ProjectorConfig()
for layer_name, tensor in embeddings_vars.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
if (embeddings_metadata is not None and
layer_name in embeddings_metadata):
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def _fetch_callback(self, summary):
self.writer.add_summary(summary, self._total_val_batches_seen)
self._total_val_batches_seen += 1
def _write_custom_summaries(self, step, logs=None):
"""Writes metrics out as custom scalar summaries.
Args:
step: the global step to use for TensorBoard.
logs: dict. Keys are scalar summary names, values are
NumPy scalars.
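For example, `logs` might look like `{'loss': 0.25, 'val_loss': 0.31}`.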
"""
logs = logs or {}
if tf.executing_eagerly():
# use v2 summary ops
with self.writer.as_default(), tf.summary.record_if(True):
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
tf.summary.scalar(name, value, step=step)
else:
# use FileWriter from v1 summary
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary = tf.compat.v1.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
self.writer.add_summary(summary, step)
self.writer.flush()
def on_train_batch_begin(self, batch, logs=None):
if (not self._is_profiling and
self._total_batches_seen == self._profile_batch - 1):
tf.profiler.experimental.start(self.log_dir)
self._is_profiling = True
def on_train_batch_end(self, batch, logs=None):
return self.on_batch_end(batch, logs)
def on_test_begin(self, logs=None):
pass
def on_test_end(self, logs=None):
pass
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch.
Performs profiling if current batch is in profiler_batches.
"""
# Don't output batch_size and batch number as TensorBoard summaries
logs = logs or {}
self._samples_seen += logs.get('size', 1)
samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
batch_logs = {('batch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
self._write_custom_summaries(self._total_batches_seen, batch_logs)
self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
if self._is_profiling:
tf.profiler.experimental.stop()
self._is_profiling = False
def on_train_begin(self, logs=None):
pass
def on_epoch_begin(self, epoch, logs=None):
"""Add histogram op to Model eval_function callbacks, reset batch count."""
# check if histogram summary should be run for this epoch
if self.histogram_freq and epoch % self.histogram_freq == 0:
self._epoch = epoch
# pylint: disable=protected-access
# add the histogram summary op if it should run this epoch
self.model._make_test_function()
if self.merged not in self.model.test_function.fetches:
self.model.test_function.fetches.append(self.merged)
self.model.test_function.fetch_callbacks[
self.merged] = self._fetch_callback
# pylint: enable=protected-access
def on_epoch_end(self, epoch, logs=None):
"""Checks if summary ops should run next epoch, logs scalar summaries."""
# don't output batch_size and
# batch number as TensorBoard summaries
logs = {('epoch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
if self.update_freq == 'epoch':
step = epoch
else:
step = self._samples_seen
self._write_custom_summaries(step, logs)
# pop the histogram summary op after each epoch
if self.histogram_freq:
# pylint: disable=protected-access
if self.merged in self.model.test_function.fetches:
self.model.test_function.fetches.remove(self.merged)
if self.merged in self.model.test_function.fetch_callbacks:
self.model.test_function.fetch_callbacks.pop(self.merged)
# pylint: enable=protected-access
if self.embeddings_data is None and self.embeddings_freq:
raise ValueError('To visualize embeddings, embeddings_data must '
'be provided.')
if self.embeddings_freq and self.embeddings_data is not None:
if epoch % self.embeddings_freq == 0:
# We need a second forward-pass here because we're passing
the `embeddings_data` explicitly. This design allows passing
# arbitrary data as `embeddings_data` and results from the fact
# that we need to know the size of the `tf.Variable`s which
# hold the embeddings in `set_model`. At this point, however,
# the `validation_data` is not yet set.
embeddings_data = self.embeddings_data
n_samples = embeddings_data[0].shape[0]
i = 0
sess = K.get_session()
while i < n_samples:
step = min(self.batch_size, n_samples - i)
batch = slice(i, i + step)
if isinstance(self.model.input, list):
feed_dict = {
model_input: embeddings_data[idx][batch]
for idx, model_input in enumerate(self.model.input)
}
else:
feed_dict = {self.model.input: embeddings_data[0][batch]}
feed_dict.update({self.batch_id: i, self.step: step})
if not isinstance(K.learning_phase(), int):
feed_dict[K.learning_phase()] = False
sess.run(self.assign_embeddings, feed_dict=feed_dict)
self.saver.save(sess,
os.path.join(self.log_dir, 'keras_embedding.ckpt'),
epoch)
i += self.batch_size
def on_train_end(self, logs=None):
if self._is_profiling:
tf.profiler.experimental.stop()
self._is_profiling = False
self.writer.close()
the-stack_0_22153
#!/usr/bin/env python
import sys
from framework.tee import Tee
from framework.manager import Manager
import tests
if __name__ == "__main__":
saveOutput = False
output = sys.stdout
testsToRun = []
for s in range (1, len(sys.argv)):
if (sys.argv[s] == '--saveOutput'):
saveOutput=True
elif sys.argv[s].startswith('--log='):
output = Tee( open( sys.argv[s].split('=', 1)[1], 'w'), sys.stdout )
else:
testsToRun.append(sys.argv[s])
# Search everywhere in this module for tests
manager = Manager()
manager.collect( sys.modules[__name__] )
if len(testsToRun):
manager.run(testsToRun, output=output, saveOutput=saveOutput)
else:
manager.run(output=output, saveOutput=saveOutput)
the-stack_0_22156
# (C) Copyright 2007-2020 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import logging
from apptools.io.api import File
from pyface.api import FileDialog, OK
from pyface.action.api import Action
from traits.api import Any
from .editor.text_editor import TextEditor
logger = logging.getLogger(__name__)
class NewFileAction(Action):
""" Open a new file in the text editor.
"""
tooltip = "Create a new file for editing"
description = "Create a new file for editing"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
logger.info("NewFileAction.perform()")
self.window.workbench.edit(
File(""), kind=TextEditor, use_existing=False
)
class OpenFileAction(Action):
""" Open an existing file in the text editor.
"""
tooltip = "Open a file for editing"
description = "Open a file for editing"
def perform(self, event=None):
logger.info("OpenFileAction.perform()")
dialog = FileDialog(parent=self.window.control, title="Open File")
if dialog.open() == OK:
self.window.workbench.edit(File(dialog.path), kind=TextEditor)
the-stack_0_22157
import datetime
from typing import Tuple
from aiobotocore.session import get_session
from fastapi_cache.backends import Backend
class DynamoBackend(Backend):
"""
Amazon DynamoDB backend provider
This backend requires an existing table within your AWS environment to be passed during
backend init. If ttl is going to be used, this needs to be manually enabled on the table
using the `ttl` key. Dynamo will take care of deleting outdated objects, but this is not
instant so don't be alarmed when they linger around for a bit.
As with all AWS clients, credentials will be taken from the environment. Check the AWS SDK
for more information.
Usage:
>> dynamodb = DynamoBackend(table_name="your-cache", region="eu-west-1")
>> await dynamodb.init()
>> FastAPICache.init(dynamodb)
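A rough sketch of calling the backend directly (the key and value here are
arbitrary examples, not part of any required schema):
>> await dynamodb.set("greeting", "hello", expire=60)
>> await dynamodb.get("greeting")
>> await dynamodb.get_with_ttl("greeting")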
"""
def __init__(self, table_name, region=None):
self.session = get_session()
self.client = None # Needs async init
self.table_name = table_name
self.region = region
async def init(self):
self.client = await self.session.create_client(
"dynamodb", region_name=self.region
).__aenter__()
async def close(self):
self.client = await self.client.__aexit__(None, None, None)
async def get_with_ttl(self, key: str) -> Tuple[int, str]:
response = await self.client.get_item(TableName=self.table_name, Key={"key": {"S": key}})
if "Item" in response:
value = response["Item"].get("value", {}).get("S")
ttl = response["Item"].get("ttl", {}).get("N")
if not ttl:
return -1, value
# It's only eventually consistent so we need to check ourselves
expire = int(ttl) - int(datetime.datetime.now().timestamp())
if expire > 0:
return expire, value
return 0, None
async def get(self, key) -> str:
response = await self.client.get_item(TableName=self.table_name, Key={"key": {"S": key}})
if "Item" in response:
return response["Item"].get("value", {}).get("S")
async def set(self, key: str, value: str, expire: int = None):
ttl = (
{
"ttl": {
"N": str(
int(
(
datetime.datetime.now() + datetime.timedelta(seconds=expire)
).timestamp()
)
)
}
}
if expire
else {}
)
await self.client.put_item(
TableName=self.table_name,
Item={
**{
"key": {"S": key},
"value": {"S": value},
},
**ttl,
},
)
async def clear(self, namespace: str = None, key: str = None) -> int:
raise NotImplementedError
the-stack_0_22159
from django.contrib import admin
from .models import Star, VariabilityType
class StarAdmin(admin.ModelAdmin):
list_display = (
"get_constellation_display",
"name",
"variability_type",
"min_magnitude",
"max_magnitude",
)
list_display_links = ("name",)
def get_queryset(self, request):
queryset = super().get_queryset(request)
return queryset.select_related("variability_type")
class VariabilityTypeAdmin(admin.ModelAdmin):
list_display = ("code", "short_description")
admin.site.register(Star, StarAdmin)
admin.site.register(VariabilityType, VariabilityTypeAdmin)
the-stack_0_22160
"""
Launcher for experiments for Generalized Hindsight Experience Replay
"""
import torch
import argparse
import rlkit.torch.pytorch_util as ptu
from rlkit.launchers.launcher_util import setup_logger, set_seed, run_experiment
from rlkit.torch.sac.sac_gher import SACTrainer
from rlkit.torch.networks import LatentConditionedMlp
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from rlkit.data_management.task_relabeling_replay_buffer import MultiTaskReplayBuffer
from rlkit.samplers.data_collector.path_collector import TaskConditionedPathCollector
from rlkit.torch.sac.policies import MakeDeterministicLatentPolicy, LatentConditionedTanhGaussianPolicy, \
TanhGaussianPolicy
from rlkit.util.hyperparameter import DeterministicHyperparameterSweeper
# relabelers
from rlkit.torch.multitask.pointmass_rewards import PointMassBestRandomRelabeler
from rlkit.torch.multitask.gym_relabelers import ReacherRelabelerWithGoalAndObs
from rlkit.torch.multitask.fetch_reach_relabelers import FetchReachRelabelerWithGoalAndObs
from rlkit.torch.multitask.half_cheetah_relabeler import HalfCheetahRelabelerMoreFeatures
from rlkit.torch.multitask.ant_direction_relabeler import AntDirectionRelabelerNewSparse
# envs
from gym.spaces import Discrete, MultiBinary
from rlkit.envs.point_robot_new import PointEnv as PointEnv2
from rlkit.envs.point_reacher_env import PointReacherEnv
from rlkit.envs.updated_half_cheetah import HalfCheetahEnv
from rlkit.envs.wrappers import NormalizedBoxEnv, TimeLimit
from rlkit.envs.fetch_reach import FetchReachEnv
from rlkit.envs.updated_ant import AntEnv
NUM_GPUS_AVAILABLE = 4 # change this to the number of gpus on your system
def experiment(variant):
set_seed(int(variant['seed']))
torch.manual_seed(int(args.seed))
if variant['mode'] != 'ec2' and not variant['local_docker'] and torch.cuda.is_available():
ptu.set_gpu_mode(True)
if variant['env_name'] == 'pointmass2':
print("pointmass")
expl_env = NormalizedBoxEnv(PointEnv2(**variant['env_kwargs']))
eval_env = NormalizedBoxEnv(PointEnv2(**variant['env_kwargs']))
relabeler_cls = PointMassBestRandomRelabeler
elif variant['env_name'] in {'antdirectionnewsparse'}:
print(variant['env_name'])
expl_env = NormalizedBoxEnv(AntEnv(**variant['env_kwargs']))
eval_env = NormalizedBoxEnv(AntEnv(**variant['env_kwargs']))
relabeler_cls = AntDirectionRelabelerNewSparse
elif variant['env_name'] in {'halfcheetahhard'}:
print("halfcheetah")
expl_env = NormalizedBoxEnv(HalfCheetahEnv())
eval_env = NormalizedBoxEnv(HalfCheetahEnv())
relabeler_cls = HalfCheetahRelabelerMoreFeatures
elif variant['env_name'] in {'pointreacherobs'}:
print('pointreacher')
expl_env = PointReacherEnv(**variant['env_kwargs'])
eval_env = PointReacherEnv(**variant['env_kwargs'])
relabeler_cls = ReacherRelabelerWithGoalAndObs
elif variant['env_name'] in {'fetchreach'}:
print('fetchreach')
expl_env = TimeLimit(NormalizedBoxEnv(FetchReachEnv(**variant['env_kwargs'])),
max_episode_steps=variant['algo_kwargs']['max_path_length'],
insert_time=variant['insert_time'])
eval_env = TimeLimit(NormalizedBoxEnv(FetchReachEnv(**variant['env_kwargs'])),
max_episode_steps=variant['algo_kwargs']['max_path_length'],
insert_time=variant['insert_time'])
relabeler_cls = FetchReachRelabelerWithGoalAndObs
variant['relabeler_kwargs']['fetchreach'] = variant['env_name'] == 'fetchreach'
else:
raise NotImplementedError
if isinstance(expl_env.observation_space, Discrete) or isinstance(expl_env.observation_space, MultiBinary):
obs_dim = expl_env.observation_space.n
else:
obs_dim = expl_env.observation_space.low.size
action_dim = expl_env.action_space.low.size
latent_dim = variant['replay_buffer_kwargs']['latent_dim']
qf1 = LatentConditionedMlp(
input_size=obs_dim + action_dim,
latent_size=latent_dim,
output_size=1,
**variant['qf_kwargs']
)
qf2 = LatentConditionedMlp(
input_size=obs_dim + action_dim,
latent_size=latent_dim,
output_size=1,
**variant['qf_kwargs']
)
target_qf1 = LatentConditionedMlp(
input_size=obs_dim + action_dim,
latent_size=latent_dim,
output_size=1,
**variant['qf_kwargs']
)
target_qf2 = LatentConditionedMlp(
input_size=obs_dim + action_dim,
latent_size=latent_dim,
output_size=1,
**variant['qf_kwargs']
)
policy = LatentConditionedTanhGaussianPolicy(
obs_dim=obs_dim,
latent_dim=latent_dim,
action_dim=action_dim,
**variant['policy_kwargs']
)
eval_policy = MakeDeterministicLatentPolicy(policy)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['trainer_kwargs']
)
expl_policy = policy
variant['relabeler_kwargs']['discount'] = variant['trainer_kwargs']['discount']
relabeler = relabeler_cls(q1=qf1,
q2=qf2,
action_fn=eval_policy.wrapped_policy,
**variant['relabeler_kwargs'])
eval_relabeler = relabeler_cls(q1=qf1,
q2=qf2,
action_fn=eval_policy.wrapped_policy,
**variant['relabeler_kwargs'],
is_eval=True)
replay_buffer = MultiTaskReplayBuffer(
env=expl_env,
relabeler=relabeler,
**variant['replay_buffer_kwargs']
)
eval_path_collector = TaskConditionedPathCollector(
eval_env,
eval_policy,
eval_relabeler,
is_eval=True, # variant['plot'], # will attempt to plot if it's the pointmass
**variant['path_collector_kwargs']
)
expl_path_collector = TaskConditionedPathCollector(
expl_env,
expl_policy,
relabeler,
# calculate_rewards=False,
**variant['path_collector_kwargs']
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='pointmass', help='name of env to run on')
parser.add_argument('--alg', type=str, default='SAC', help='name of algorithm to run')
parser.add_argument('--n_sampled_latents', type=int, default=5, help="number of latents to sample")
parser.add_argument('--n_to_take', type=int, default=1,
help="number of latents to relabel with, should be less than n_sampled_latents")
parser.add_argument('--relabel', action='store_true', help='whether to relabel')
parser.add_argument('--use_advantages', '-use_adv', action='store_true', help='use_advantages for relabeling')
parser.add_argument('--irl', action='store_true',
help='use approximate irl to choose relabeling latents')
parser.add_argument('--plot', action='store_true', help='plot the trajectories')
parser.add_argument('--cache', action='store_true')
parser.add_argument('--sparse', type=float, default=None)
parser.add_argument('--ngradsteps', type=int, default=100)
parser.add_argument('--nexpl', type=int, default=None)
parser.add_argument('--horizon', type=int, default=None)
parser.add_argument('--tau', type=float, default=5E-3)
parser.add_argument('--lr', type=float, default=None)
parser.add_argument('--buffer_size', type=int, default=None)
parser.add_argument('--discount', type=float, default=None)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--ec2', '-ec2', action='store_true')
parser.add_argument('--local_docker', '-local_docker', action='store_true')
parser.add_argument('--reward_scale', type=float, default=None)
parser.add_argument('--insert_time', action='store_true')
parser.add_argument('--latent_shape_multiplier', type=int, default=1)
parser.add_argument('--latent_to_all_layers', action='store_true')
parser.add_argument('--seed', type=int, default=0, help="random seed")
parser.add_argument('--n_experiments', '-n', type=int, default=-1,
help="number of seeds to use. If not -1, overrides seed ")
# experiment name
parser.add_argument('--exp_name', '-name', type=str, default=None)
parser.add_argument('--extra', '-x', type=str, default=None)
parser.add_argument('--test', '-test', action='store_true')
parser.add_argument('--epochs', type=int, default=50, help="number of training epochs")
parser.add_argument('--save_videos', action='store_true')
# for reacher
parser.add_argument('--safetyfn', '-safety', type=str, default='newlog') # newlog, linear, inverse
parser.add_argument('--energyfn', '-energy', type=str, default='velocity') # work, kinetic, velocity
parser.add_argument('--energyfactor', type=float, default=1.0, help="how much to multiply energy by")
# for fetch reacher
parser.add_argument('--truncate_obs', action='store_true', help='only return end_effector loc')
# for ant
parser.add_argument('--use_xy', action='store_true')
parser.add_argument('--contact_forces', action='store_true')
parser.add_argument('--directiontype', type=str, default='360')
args = parser.parse_args()
if args.n_experiments != -1:
seeds = list(range(10, 10 + 10 * args.n_experiments, 10))
else:
seeds = [args.seed]
assert args.n_to_take <= args.n_sampled_latents
variant = dict(
algorithm=args.alg,
env_name=args.env,
algo_kwargs=dict(
batch_size=256,
num_epochs=args.epochs,
num_eval_steps_per_epoch=5000,
num_expl_steps_per_train_loop=75,
num_trains_per_train_loop=args.ngradsteps,
min_num_steps_before_training=1000,
max_path_length=15,
),
trainer_kwargs=dict(
discount=0.90, # 0.99
soft_target_tau=args.tau,
target_update_period=1,
policy_lr=3E-3, # 3e-4
qf_lr=3E-3, # 3e-4
reward_scale=1,
use_automatic_entropy_tuning=True,
),
replay_buffer_kwargs=dict(
max_replay_buffer_size=100000,
latent_dim=3,
approx_irl=args.irl,
plot=args.plot,
),
relabeler_kwargs=dict(
relabel=args.relabel,
use_adv=args.use_advantages,
n_sampled_latents=args.n_sampled_latents,
n_to_take=args.n_to_take,
cache=args.cache,
),
qf_kwargs=dict(
hidden_sizes=[300, 300, 300],
latent_shape_multiplier=args.latent_shape_multiplier,
latent_to_all_layers=args.latent_to_all_layers,
),
policy_kwargs=dict(
hidden_sizes=[300, 300, 300],
latent_shape_multiplier=args.latent_shape_multiplier,
latent_to_all_layers=args.latent_to_all_layers,
),
path_collector_kwargs=dict(
save_videos=args.save_videos
),
use_advantages=args.use_advantages,
proper_advantages=True,
plot=args.plot,
test=args.test,
gpu=args.gpu,
mode='ec2' if args.ec2 else 'here_no_doodad',
local_docker=args.local_docker,
insert_time=args.insert_time,
latent_shape_multiplier=args.latent_shape_multiplier
)
logger_kwargs = dict(snapshot_mode='gap_and_last', snapshot_gap=min(50, args.epochs - 1))
if args.env == 'pointmass2':
variant['relabeler_kwargs']['power'] = 1
variant['env_kwargs'] = dict(horizon=variant['algo_kwargs']['max_path_length'])
exp_postfix = ''
variant['algo_kwargs']['batch_size'] = 128
variant['qf_kwargs']['hidden_sizes'] = [400, 300]
variant['policy_kwargs']['hidden_sizes'] = [400, 300]
elif args.env in {'antdirectionnewsparse'}:
variant['replay_buffer_kwargs']['latent_dim'] = 1
if args.env in {'antdirectionnewsparse'}:
assert args.directiontype in {'90', '180', '360'}
variant['relabeler_kwargs']['type'] = args.directiontype
variant['algo_kwargs']['max_path_length'] = 1000
variant['trainer_kwargs']['discount'] = 0.99
variant['algo_kwargs']['num_expl_steps_per_train_loop'] = 1000
variant['algo_kwargs']['num_train_loops_per_epoch'] = 1
variant['algo_kwargs']['num_eval_steps_per_epoch'] = 25000
variant['algo_kwargs']['min_num_steps_before_training'] = 1000
variant['replay_buffer_kwargs']['max_replay_buffer_size'] = int(1E6)
variant['qf_kwargs']['hidden_sizes'] = [256, 256]
variant['policy_kwargs']['hidden_sizes'] = [256, 256]
variant['env_kwargs'] = dict(use_xy=args.use_xy, contact_forces=args.contact_forces)
exp_postfix = 'horizon{}'.format(variant['algo_kwargs']['max_path_length'])
elif args.env in {'halfcheetahhard'}:
variant['replay_buffer_kwargs']['latent_dim'] = 4
variant['algo_kwargs']['max_path_length'] = 1000
variant['trainer_kwargs']['discount'] = 0.99
variant['algo_kwargs']['num_expl_steps_per_train_loop'] = 1000
variant['algo_kwargs']['num_train_loops_per_epoch'] = 1
variant['algo_kwargs']['num_eval_steps_per_epoch'] = 25000
variant['algo_kwargs']['min_num_steps_before_training'] = 1000
variant['replay_buffer_kwargs']['max_replay_buffer_size'] = int(1E6)
variant['qf_kwargs']['hidden_sizes'] = [256, 256]
variant['policy_kwargs']['hidden_sizes'] = [256, 256]
exp_postfix = ''
elif args.env in {'pointreacherobs'}:
variant['algo_kwargs']['max_path_length'] = 20
variant['trainer_kwargs']['discount'] = 0.97
variant['algo_kwargs']['num_expl_steps_per_train_loop'] = 20
variant['algo_kwargs']['num_train_loops_per_epoch'] = 5
variant['algo_kwargs']['num_eval_steps_per_epoch'] = 1000
variant['replay_buffer_kwargs']['max_replay_buffer_size'] = 2000
variant['env_kwargs'] = dict(horizon=20)
exp_postfix = 'horizon{}'.format(variant['algo_kwargs']['max_path_length'])
if args.sparse:
exp_postfix += 'sparse{}'.format(str(args.sparse))
variant['replay_buffer_kwargs']['latent_dim'] = 6
print('using sparse reward if specified')
variant['relabeler_kwargs']['sparse_reward'] = args.sparse
variant['relabeler_kwargs']['fixed_ratio'] = None
elif args.env in {'fetchreach'}:
variant['replay_buffer_kwargs']['latent_dim'] = 8
variant['env_kwargs'] = dict(truncate_obs=args.truncate_obs)
variant['algo_kwargs']['max_path_length'] = 50
variant['trainer_kwargs']['discount'] = 0.98
variant['algo_kwargs']['num_expl_steps_per_train_loop'] = 250
variant['algo_kwargs']['num_train_loops_per_epoch'] = 1
variant['algo_kwargs']['num_eval_steps_per_epoch'] = 25 * 50
variant['replay_buffer_kwargs']['max_replay_buffer_size'] = 250000
variant['qf_kwargs']['hidden_sizes'] = [256, 256]
variant['policy_kwargs']['hidden_sizes'] = [256, 256]
exp_postfix = 'horizon{}'.format(variant['algo_kwargs']['max_path_length'])
variant['relabeler_kwargs']['sparse_reward'] = args.sparse
if args.sparse:
exp_postfix += 'sparse{}'.format(str(args.sparse))
else:
raise NotImplementedError
# various command line argument changing
if args.nexpl is not None:
variant['algo_kwargs']['num_expl_steps_per_train_loop'] = args.nexpl
if args.discount is not None:
variant['trainer_kwargs']['discount'] = args.discount
if args.lr is not None:
variant['trainer_kwargs']['policy_lr'] = args.lr
variant['trainer_kwargs']['qf_lr'] = args.lr
if args.buffer_size is not None:
variant['replay_buffer_kwargs']['max_replay_buffer_size'] = args.buffer_size
if args.reward_scale is not None and args.reward_scale > 0:
variant['trainer_kwargs']['reward_scale'] = args.reward_scale
variant['trainer_kwargs']['use_automatic_entropy_tuning'] = False
if args.exp_name is not None:
exp_dir = args.exp_name
else:
exp_dir = 'gher-{}-{}-{}e-{}s-disc{}'.format(args.env,
variant['algorithm'],
str(args.epochs),
str(variant['algo_kwargs']['num_expl_steps_per_train_loop']),
str(variant['trainer_kwargs']['discount']))
if len(exp_postfix) > 0:
exp_dir += '-' + exp_postfix
if args.extra is not None:
exp_dir += '-' + args.extra
if args.test:
exp_dir += '-test'
sweeper = DeterministicHyperparameterSweeper(dict(seed=seeds), variant)
all_variants = sweeper.iterate_hyperparameters()
for i, variant in enumerate(all_variants):
variant['gpu_id'] = i % NUM_GPUS_AVAILABLE
for variant in all_variants:
if args.ec2:
run_experiment(experiment, mode='ec2', exp_prefix=exp_dir, variant=variant,
seed=variant['seed'], **logger_kwargs, use_gpu=False,
instance_type=None,
spot_price=None,
verbose=False,
region='us-west-1',
num_exps_per_instance=1)
elif args.local_docker:
run_experiment(experiment, mode='local_docker', exp_prefix=exp_dir, variant=variant,
seed=variant['seed'], **logger_kwargs, use_gpu=False,
instance_type=None,
spot_price=None,
verbose=False,
region='us-west-1',
num_exps_per_instance=1)
else:
setup_logger(exp_dir, variant=variant, seed=variant['seed'], **logger_kwargs)
experiment(variant)
|
the-stack_0_22162 | from typing import FrozenSet, Type, Optional
import torch
from hypergraph_nets.hypergraphs import HypergraphsTuple
from strips_hgn.features import (
GlobalFeatureMapper,
HyperedgeFeatureMapper,
NodeFeatureMapper,
)
from strips_hgn.features.node_features import PropositionInStateAndGoal
from strips_hgn.hypergraph.hypergraph_view import HypergraphView
from strips_hgn.models.hypergraph_nets_adaptor import (
hypergraph_view_to_hypergraphs_tuple,
)
from strips_hgn.planning import Proposition, STRIPSProblem
class BaseFeatureMappingWorkflow(object):
""" Base Workflow which maps features """
def __init__(
self,
global_feature_mapper_cls: Type[GlobalFeatureMapper],
node_feature_mapper_cls: Type[NodeFeatureMapper],
hyperedge_feature_mapper_cls: Type[HyperedgeFeatureMapper],
max_receivers: int,
max_senders: int,
):
# Feature mappers
self._global_feature_mapper_cls = global_feature_mapper_cls
self._node_feature_mapper_cls = node_feature_mapper_cls
self._hyperedge_feature_mapper_cls = hyperedge_feature_mapper_cls
# Global feature mappers do not require any context, so we can use a
# static object
self._static_global_feature_mapper = global_feature_mapper_cls()
# Hyperedge feature mappers do not require context to the current state
# (at least for now), so we can use the same mapper
self._static_hyperedge_feature_mapper = hyperedge_feature_mapper_cls()
# Max receivers and senders
self.max_receivers = max_receivers
self.max_senders = max_senders
def _get_global_feature_mapper(self) -> GlobalFeatureMapper:
""" Get the Global feature mapper """
return self._static_global_feature_mapper
def _get_hyperedge_feature_mapper(
self, problem: STRIPSProblem
) -> HyperedgeFeatureMapper:
"""
Get the Hyperedge feature mapper for a STRIPS problem. For now, they do
not require context to the current state, so we can just use the same
static mapper.
Parameters
----------
problem: the STRIPS problem
Returns
-------
HyperedgeFeatureMapper
"""
return self._static_hyperedge_feature_mapper
def _get_node_feature_mapper(self, current_state: FrozenSet[Proposition], problem: STRIPSProblem, target: Optional[FrozenSet[Proposition]] = None) -> NodeFeatureMapper:
"""
The node feature mappers need to be instantiated based on the current
state and goal states. Hence, a separate one is needed for each
state and planning problem
Parameters
----------
current_state: the current state
problem: the STRIPS problem
Returns
-------
NodeFeatureMapper
"""
if self._node_feature_mapper_cls == PropositionInStateAndGoal:
# Create node feature mapper for current state and the goal
self._node_feature_mapper_cls: Type[PropositionInStateAndGoal]
# if target is not given, set to goals by default
if not target:
target=problem.goals
return self._node_feature_mapper_cls(
current_state=current_state, goal_state=target
)
else:
raise RuntimeError(
f"Unsupported node feature mapper "
f"{self._node_feature_mapper_cls}"
)
def _get_input_hypergraphs_tuple(
self, current_state: FrozenSet[Proposition], hypergraph: HypergraphView, target: Optional[FrozenSet[Proposition]] = None
) -> HypergraphsTuple:
"""
Computes and returns the input HypergraphsTuple for a state and a
hypergraph view of the planning problem with its:
- Node features
- Hyperedge features
- *NO* global features as we don't support them at the moment
Parameters
----------
current_state: the current state
hypergraph: view of the hypergraph
Returns
-------
HypergraphsTuple
"""
# Get the global features and reshape so its shape is 1 x n
global_features = hypergraph.global_features(
self._get_global_feature_mapper()
)
global_features = (
torch.tensor(global_features, dtype=torch.float32).reshape(1, -1)
if global_features
else None
)
return hypergraph_view_to_hypergraphs_tuple(
hypergraph=hypergraph,
receiver_k=self.max_receivers,
sender_k=self.max_senders,
# Map the nodes to their features
node_features=torch.tensor(
hypergraph.node_features(
self._get_node_feature_mapper(
current_state, hypergraph.problem, target
)
),
dtype=torch.float32,
),
# Map the hyperedges to their features
edge_features=torch.tensor(
hypergraph.hyperedge_features(
self._get_hyperedge_feature_mapper(hypergraph.problem)
),
dtype=torch.float32,
),
# Map the hypergraph to its global features
global_features=global_features,
)
the-stack_0_22163
"""base
Revision ID: ae12a0763d41
Revises:
Create Date: 2020-06-07 14:39:24.381306
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ae12a0763d41'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('guilds',
sa.Column('id', sa.BigInteger(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('youtube_playlists',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('channel', sa.String(length=100), nullable=True),
sa.Column('playlist_id', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('playlist_id')
)
op.create_table('youtube_videos',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('video_id', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('video_id')
)
op.create_table('guild_playlists',
sa.Column('guild_id', sa.BigInteger(), nullable=True),
sa.Column('playlist_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['guild_id'], ['guilds.id'], ),
sa.ForeignKeyConstraint(['playlist_id'], ['youtube_playlists.id'], )
)
op.create_table('playlist_videos',
sa.Column('playlist_id', sa.BigInteger(), nullable=True),
sa.Column('video_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['playlist_id'], ['youtube_playlists.id'], ),
sa.ForeignKeyConstraint(['video_id'], ['youtube_videos.id'], )
)
op.create_table('settings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.Column('value', sa.String(length=500), nullable=False),
sa.Column('guild_id', sa.BigInteger(), nullable=True),
sa.ForeignKeyConstraint(['guild_id'], ['guilds.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('settings')
op.drop_table('playlist_videos')
op.drop_table('guild_playlists')
op.drop_table('youtube_videos')
op.drop_table('youtube_playlists')
op.drop_table('guilds')
# ### end Alembic commands ###
|
the-stack_0_22164 | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import contextlib
import subprocess
import sys
from io import StringIO
import zmq
from .controller import close_socket, create_socket, send_message_to_controller
from .eval import SoS_exec
from .executor_utils import (clear_output, create_task, get_traceback_msg,
kill_all_subprocesses, prepare_env,
reevaluate_output, statementMD5, validate_step_sig,
verify_input)
from .targets import RemovedTarget, RuntimeInfo, UnavailableLock, sos_targets
from .utils import ArgumentError, StopInputGroup, TerminateExecution, ProcessKilled, env
@contextlib.contextmanager
def stdoutIO():
oldout = sys.stdout
olderr = sys.stderr
stdout = StringIO()
stderr = StringIO()
sys.stdout = stdout
sys.stderr = stderr
yield stdout, stderr
sys.stdout = oldout
sys.stderr = olderr
def execute_substep(stmt,
global_def,
global_vars,
task='',
task_params='',
proc_vars={},
shared_vars=[],
config={}):
'''Execute a substep with specific input etc
Substep executed by this function should be self-contained. It can contain
tasks (which will be sent to the master process) but not nested workflows.
The executor checks step signatures and might skip the substep if it has
been executed and the signature matches.
The executor accepts connections to the controller, and a socket through
which the results will be returned. However, the calling process should
take care of connecting and disconnecting the controller sockets; this
function only takes care of connecting and disconnecting the result socket.
stmt:
Main statement of the substep
global_def:
Global definitions, might define functions useful to the substep
task:
External task
proc_vars:
Environmental variables, signature variables etc
shared_vars:
Variables that should be returned after the execution
config:
Runmode, signature mode, verbosity, etc.
The return value should be a dictionary with the following keys:
index: index of the substep within the step
ret_code: (all) return code, 0 for successful
sig_skipped: (optional) returned if the step is skipped due to signature
shared: (optional) shared variables as specified by 'shared_vars'
stdout: (optional) if in interactive mode
stderr: (optional) if in interactive mode
exception: (optional) if an exception occurs
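For example, a successful substep without a task might return something like
{'index': 0, 'ret_code': 0, 'output': sos_targets(...), 'shared': {...}}.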
'''
assert not env.zmq_context.closed
assert 'workflow_id' in proc_vars
assert 'step_id' in proc_vars
assert '_input' in proc_vars
assert '_output' in proc_vars
assert '_depends' in proc_vars
assert 'step_output' in proc_vars
assert '_index' in proc_vars
assert 'result_push_socket' in config["sockets"]
# this should not happen but check nevertheless
if env.result_socket_port is not None and env.result_socket_port != config[
"sockets"]["result_push_socket"]:
close_socket(env.result_socket)
env.result_socket = None
if env.result_socket is None:
env.result_socket = create_socket(env.zmq_context, zmq.PUSH)
env.result_socket_port = config["sockets"]["result_push_socket"]
env.result_socket.connect(f'tcp://127.0.0.1:{env.result_socket_port}')
res = _execute_substep(
stmt=stmt,
global_def=global_def,
global_vars=global_vars,
task=task,
task_params=task_params,
proc_vars=proc_vars,
shared_vars=shared_vars,
config=config)
env.result_socket.send_pyobj(res)
def _execute_substep(stmt, global_def, global_vars, task, task_params,
proc_vars, shared_vars, config):
# passing configuration and port numbers to the subprocess
env.config.update(config)
# prepare a working environment with sos symbols and functions
prepare_env(global_def, global_vars)
# update it with variables passed from master process
env.sos_dict.quick_update(proc_vars)
if env.config['sig_mode'] == 'ignore' or env.sos_dict[
'_output'].unspecified():
sig = None
else:
sig = RuntimeInfo(
statementMD5([stmt, task]),
env.sos_dict['_input'],
env.sos_dict['_output'],
env.sos_dict['_depends'],
env.sos_dict['__signature_vars__'],
shared_vars=shared_vars)
outmsg = ''
errmsg = ''
capture_output = env.config['run_mode'] == 'interactive'
try:
if sig:
matched = validate_step_sig(sig)
if matched:
# avoid sig being released in the final statement
sig = None
# complete case: concurrent ignore without task
send_message_to_controller(
['progress', 'substep_ignored', env.sos_dict['step_id']])
res = {
'index': env.sos_dict['_index'],
'ret_code': 0,
'sig_skipped': 1,
'output': matched['output'],
'shared': matched['vars']
}
if task:
# if there is task, let the master know that the task is
# skipped
res['task_id'] = None
return res
sig.lock()
# check if input and depends targets actually exist
#
# if depends on a sos_variable but the variable is not actually used in
# the substep, it is ok to ignore it. If the variable is used in the substep
# it should have been included as part of the signature variables.
verify_input(ignore_internal_targets=True)
if stmt:
# statement can be empty for task only substep
if capture_output:
with stdoutIO() as (out, err):
SoS_exec(stmt, return_result=False)
outmsg = out.getvalue()
errmsg = err.getvalue()
else:
SoS_exec(stmt, return_result=False)
if task:
task_id, taskdef, task_vars = create_task(global_def, global_vars,
task, task_params)
res = {
'index': env.sos_dict['_index'],
'task_id': task_id,
'task_def': taskdef,
'task_vars': task_vars
}
else:
if env.sos_dict['step_output'].undetermined():
env.sos_dict.set('_output', reevaluate_output())
res = {'index': env.sos_dict['_index'], 'ret_code': 0}
if sig:
sig.set_output(env.sos_dict['_output'])
# sig.write will use env.master_push_socket
if sig.write():
res['shared'] = sig.content['end_context']
if 'output_obj' in sig.content:
res['output'] = sig.content['output_obj']
else:
res['output'] = env.sos_dict['_output']
if capture_output:
res.update({'stdout': outmsg, 'stderr': errmsg})
# complete case: concurrent execution without task
send_message_to_controller(
['progress', 'substep_completed', env.sos_dict['step_id']])
return res
except (StopInputGroup, TerminateExecution, RemovedTarget,
UnavailableLock) as e:
# stop_if is not considered as an error
if isinstance(e, StopInputGroup):
if e.message:
env.logger.info(e.message)
# we do not really treat this as an exception
if env.sos_dict['step_output'].undetermined():
env.sos_dict.set('_output', reevaluate_output())
res = {'index': env.sos_dict['_index'], 'ret_code': 0}
if task:
res['task_id'] = None
if not e.keep_output:
# treat as an error
clear_output()
res['output'] = sos_targets([])
elif sig:
sig.set_output(env.sos_dict['_output'])
# sig.write will use env.master_push_socket
if sig.write():
res['shared'] = sig.content['end_context']
if 'output_obj' in sig.content:
res['output'] = sig.content['output_obj']
else:
res['output'] = env.sos_dict['_output']
else:
clear_output()
res = {
'index': env.sos_dict['_index'],
'ret_code': 1,
'exception': e
}
if capture_output:
res.update({'stdout': outmsg, 'stderr': errmsg})
return res
except (KeyboardInterrupt, SystemExit) as e:
clear_output()
kill_all_subprocesses()
raise e
except subprocess.CalledProcessError as e:
clear_output()
# cannot pass CalledProcessError back because it is not pickleable
res = {
'index': env.sos_dict['_index'],
'ret_code': e.returncode,
'exception': RuntimeError(e.stderr)
}
if capture_output:
res.update({'stdout': outmsg, 'stderr': errmsg})
return res
except ArgumentError as e:
clear_output()
return {'index': env.sos_dict['_index'], 'ret_code': 1, 'exception': e}
except ProcessKilled as e:
clear_output()
res = {'index': env.sos_dict['_index'], 'ret_code': 1, 'exception': e}
return res
except Exception as e:
clear_output()
res = {
'index': env.sos_dict['_index'],
'ret_code': 1,
'exception': RuntimeError(get_traceback_msg(e))
}
if capture_output:
res.update({'stdout': outmsg, 'stderr': errmsg})
return res
finally:
# release the lock even if the process becomes zombie? #871
if sig:
sig.release(quiet=True)
the-stack_0_22165
import cv2
import numpy as np
import os
from FaceDetector import FaceDetector
from KalmanFilter2D import KalmanFilter2D
def closest_pairs(array1, array2):
"""Greedily match entries of array1 and array2 by smallest absolute difference.
Returns the matched (i, j) index pairs and the indices of array1 left unmatched."""
array1 = np.asarray(array1)
array2 = np.asarray(array2)
diff = np.abs(array1[:, None] - array2[None, :])
pairs = []
found = []
for _ in range(np.min(diff.shape)):
idx = np.argmin(diff)
x,y = np.unravel_index(idx, diff.shape)
diff[x,:] = 10000
diff[:,y] = 10000
pairs.append((x,y))
found.append(x)
unassigned = np.setdiff1d(np.arange(len(array1)), found)
return pairs, unassigned
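# Worked example (illustrative values): closest_pairs([1.0, 5.0], [4.8, 1.2, 9.0])
# matches the globally closest values first, giving
#   pairs      -> [(0, 1), (1, 0)]   (array1[0] ~ array2[1], array1[1] ~ array2[0])
#   unassigned -> []                 (every index of array1 was matched)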
class FaceTracking():
def __init__(self, lifetime):
self.fd = FaceDetector()
self.Ts = 1/30  # filter sample period (assumes roughly 30 fps input)
self.R = 507  # measurement noise variance
self.Qp = 10  # process noise, position
self.Qv = 0.01  # process noise, velocity
self.faces = []
self.lifetime = lifetime
self.focus = 0
def __del__(self):
self.fd.__del__()
def runDetection(self, img):
boxes, centers, labels, probs = self.fd.process(img)
center_list = [p[0] for p in centers]
face_list = [p.get_position()[0] for p in self.faces]
if len(self.faces) < len(boxes):
self.faces.append(KalmanFilter2D(self.Ts, self.R, self.Qp, self.Qv))
pairs, unassigned = closest_pairs(face_list, center_list)
for face_index, center_index in pairs:
if self.faces[face_index].lifetime < self.lifetime:
self.faces[face_index].inc()
z_k = np.asarray(centers[center_index])
x_k = self.faces[face_index].get_position()
dist = np.linalg.norm(z_k - x_k)
mah_dist = np.sqrt(((z_k - x_k)[None] @ np.linalg.inv(self.faces[face_index].P[:2, :2]) @ (z_k - x_k)[:, None])[0,0])
R = mah_dist*10
self.faces[face_index].run_filter(z_k, R)
pop_index = -1
for face_index in unassigned:
self.faces[face_index].dec()
self.faces[face_index].run_filter(np.zeros((2,)), 10000000)
if(self.faces[face_index].lifetime < 0):
pop_index = face_index
if pop_index != -1:
self.faces.pop(pop_index)
if self.focus + 1 > len(self.faces):
self.focus = 0
for i, fac in enumerate(self.faces):
if np.max(np.linalg.eig(fac.P[:2, :2])[0]) < 1000:
if i == self.focus and fac.lifetime >= self.lifetime / 2:
cv2.circle(img, tuple([int(x) for x in fac.get_position()]), radius=0, color=(255, 0, 0), thickness=20)
elif fac.lifetime >= self.lifetime / 2:
cv2.circle(img, tuple([int(x) for x in fac.get_position()]), radius=0, color=(0, 0, 255), thickness=20)
for box in boxes:
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
return img
def getDetectionCount(self):
return len(self.faces)
def setFocus(self, focus):
if focus + 1 > len(self.faces):
self.focus = 0
else:
self.focus = focus
def getFocus(self):
return self.focus
def getFocusLocation(self):
if len(self.faces) > 0:
return self.faces[self.focus].get_position()
if __name__ == "__main__":
dir_path = os.path.dirname(os.path.realpath(__file__))
filename = "dance3.mp4"
cap = cv2.VideoCapture(os.path.join(dir_path, filename))
tracker = FaceTracking(lifetime=50)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
img = tracker.runDetection(frame)
print(tracker.getDetectionCount())
print(tracker.getFocusLocation())
cv2.imshow("frame", img)
key = cv2.waitKey(1)
if key == ord('s'):
tracker.setFocus(tracker.getFocus() + 1)
cap.release()
cv2.destroyAllWindows()
the-stack_0_22167
#
# abstract_net.py
#
# Author(s):
# Matteo Spallanzani <[email protected]>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import torch
from torch import nn
class QuantProperties:
def __init__(self, dtype : str = "float32", step_size : float = None, n_steps : int = None):
assert dtype[0:5] == "float" or dtype[0:3] == "int", "Invalid dtype supplied to QuantProperties: {}".format(dtype)
bits_str = dtype[5:] if dtype[0:5] == "float" else dtype[3:]
assert bits_str.isdigit(), "Invalid dtype supplied to QuantProperties: {}".format(dtype)
self.n_bits = int(bits_str)
# if dtype is float, it's OK for n_steps and step_size to be None.
self.n_steps = n_steps
self.step_size = step_size
#if dtype is int, fall back to defaults
if dtype[0:3] == "int":
if not n_steps:
self.n_steps = 2**(self.n_bits)-1
assert self.n_steps % 2 == 1, "n_steps in QuantProperties must be odd, but it's {}!".format(self.n_steps)
# by default, treat integers as ordinary integers (step size 1)
if not step_size:
self.step_size = 1.0
self.dtype = dtype
@classmethod
def from_numpy(cls, data : np.ndarray = None):
# default: float32
if data is None:
return cls()
# otherwise, return QP according to data's dtype with default settings
return cls(data.dtype.name)
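# Example (illustrative of the defaults above):
#   QuantProperties("int8")    -> n_bits=8, n_steps=2**8 - 1 = 255, step_size=1.0
#   QuantProperties("float32") -> n_bits=32, n_steps=None, step_size=None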
class AbstractTensor:
def __init__(self, data : torch.Tensor, name : str, shape : tuple = None, is_param : bool = False, qp : QuantProperties = None):
assert isinstance(name, str), "AbstractTensor name must be string, not {}".format(type(name).__name__)
self.name = name
if isinstance(data, torch.Tensor):
data = data.clone().detach().numpy()
if is_param:
assert data is not None, "Parameter tensor must have data!"
if shape is not None:
assert data.shape == shape, "Mismatching tensor and shape specified: Tensor has shape {}, specified was {}".format(data.shape, shape)
shape = data.shape
self.shape = shape
self.data = data
if qp is None:
qp = QuantProperties.from_numpy(data)
self.qp = qp
def __getattribute__(self, item):
# overload this to provide direct access to quantProperties attributes
try:
return super(AbstractTensor, self).__getattribute__(item)
except AttributeError as e_orig:
try:
return self.qp.__getattribute__(item)
except AttributeError:
raise e_orig
@property
def numel(self):
if self.shape is None or None in self.shape:
return None
n = 1
for el in self.shape:
n *= el
return n
# operators
class AbstractOperator:
# any operator keeps lists of tensors it operates on:
# - parameters contains tensors of fixed size and with fixed values
# which the operator uses as parameters
# - inputs contains tensors it uses as inputs
# - outputs contains tensors it uses as outputs
# - all_tensors contains the concatenation of the above
# Tensors are stored in dicts but are accessible as lists
def __init__(self):
self.parameter_dict = {}
self.input_dict = {}
self.output_dict = {}
# helper function to add an unnamed tensor
def _add_tensor(self, t : AbstractTensor, d : dict, base_key : str):
key = "{}_{}".format(base_key, len(d)+1)
done = False
cnt = 1
while not done:
try:
# if the key already exists in the inputs dict, modify it until
# we find one that does not
cur_val = d[key]
if cnt > 1:
key = key[:-2]
key += "_{}".format(cnt)
cnt += 1
except KeyError:
d[key] = t
done = True
def add_input(self, inp):
self._add_tensor(inp, self.input_dict, "input")
def add_output(self, outp):
self._add_tensor(outp, self.output_dict, "output")
def add_param(self, param):
self._add_tensor(param, self.parameter_dict, "param")
# make inputs/outputs/parameters easy to access as lists
@property
def inputs(self):
return list(self.input_dict.values())
@property
def parameters(self):
return list(self.parameter_dict.values())
@property
def outputs(self):
return list(self.output_dict.values())
@property
def all_tensors(self):
return self.inputs + self.outputs + self.parameters
class AbstractNet:
# interface for complete (sequential) network
def __init__(self, name : str):
self.name = name
self.layers = []
def add_layer(self, l : AbstractOperator):
# add a layer to the network
# returns nothing (to be overridden in child classes)
self.layers.append(l)
def _get_tensors(self, name):
tensors = []
for l in self.layers:
tensor_list = l.__getattribute__(name)
tensors.extend([t for t in tensor_list if t not in tensors])
return tensors
@property
def all_tensors(self):
return self._get_tensors("all_tensors")
@property
def parameters(self):
return self._get_tensors("parameters")
@property
def data_tensors(self):
# all tensor except parameters
all_t = self.all_tensors
p = self.parameters
return [t for t in all_t if t not in p]
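# --- Illustrative usage sketch (added; not part of the original module). It wires one
# --- operator with a parameter, an input and an output tensor into an AbstractNet and
# --- inspects the derived tensor lists. Shapes and names below are invented.
if __name__ == "__main__":
    op = AbstractOperator()
    op.add_param(AbstractTensor(torch.zeros(3, 4), name="weight", is_param=True))
    op.add_input(AbstractTensor(torch.zeros(1, 4), name="x"))
    op.add_output(AbstractTensor(torch.zeros(1, 3), name="y"))
    net = AbstractNet("toy_net")
    net.add_layer(op)
    print([t.name for t in net.parameters])    # ['weight']
    print([t.name for t in net.data_tensors])  # ['x', 'y']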
|
the-stack_0_22169 | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name="blank_index"),
path('index/', views.index, name="index"),
path('about/', views.about, name="about"),
path('register/', views.register_page, name="register"),
path('login/', views.login_page, name="login"),
path('logout/', views.logout_user, name="logout")
] |
the-stack_0_22170 | import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name='yanchor',
parent_name='scatterpolargl.marker.colorbar',
**kwargs
):
super(YanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='style',
values=['top', 'middle', 'bottom'],
**kwargs
)
|
the-stack_0_22171 | import os
import ujson
from functools import partial
from colbert.utils.utils import print_message
from colbert.modeling.tokenization import QueryTokenizer, DocTokenizer, tensorize_triples
from colbert.utils.runs import Run
class EagerBatcher():
def __init__(self, args, rank=0, nranks=1):
self.rank, self.nranks = rank, nranks
self.bsize, self.accumsteps = args.bsize, args.accumsteps
self.query_tokenizer = QueryTokenizer(args.query_maxlen, args.pretrained_tokenizer)
self.doc_tokenizer = DocTokenizer(args.doc_maxlen, args.pretrained_tokenizer)
self.tensorize_triples = partial(tensorize_triples, self.query_tokenizer, self.doc_tokenizer)
self.triples_path = args.triples
self._reset_triples()
def _reset_triples(self):
self.reader = open(self.triples_path, mode='r', encoding="utf-8")
self.position = 0
def __iter__(self):
return self
def __next__(self):
queries, positives, negatives = [], [], []
for line_idx, line in zip(range(self.bsize * self.nranks), self.reader):
if (self.position + line_idx) % self.nranks != self.rank:
continue
query, pos, neg = line.strip().split('\t')
queries.append(query)
positives.append(pos)
negatives.append(neg)
self.position += line_idx + 1
if len(queries) < self.bsize:
raise StopIteration
return self.collate(queries, positives, negatives)
def collate(self, queries, positives, negatives):
assert len(queries) == len(positives) == len(negatives) == self.bsize
return self.tensorize_triples(queries, positives, negatives, self.bsize // self.accumsteps)
def skip_to_batch(self, batch_idx, intended_batch_size):
self._reset_triples()
Run.warn(f'Skipping to batch #{batch_idx} (with intended_batch_size = {intended_batch_size}) for training.')
_ = [self.reader.readline() for _ in range(batch_idx * intended_batch_size)]
return None
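# Illustrative sketch (added; not from the original ColBERT code). The batcher expects an
# args object exposing bsize, accumsteps, query_maxlen, doc_maxlen, pretrained_tokenizer and
# triples (a TSV file with one "query \t positive \t negative" line per triple), e.g.
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(bsize=32, accumsteps=2, query_maxlen=32, doc_maxlen=180,
#                          pretrained_tokenizer="bert-base-uncased",
#                          triples="triples.train.tsv")
#   for batch in EagerBatcher(args, rank=0, nranks=1):
#       ...  # each batch is pre-split into bsize // accumsteps sized sub-batches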
|
the-stack_0_22172 | import os
from work.unet import unet_model_functions
from keras.callbacks import ReduceLROnPlateau
from work.auxiliary import data_functions
from work.auxiliary.logger_settings import configure_logger
import logging
LOG_PATH = os.path.abspath('logs')
DATA_PATH = os.path.abspath('data')
log_path = data_functions.create_path(LOG_PATH, 'unet_logs')
configure_logger(name="cherry_stem",
console_level='INFO',
file_level='INFO',
out_path=log_path)
logger = logging.getLogger(__name__)
def main():
"""
for model debugging
:return:
"""
# train_path = os.path.join(DATA_PATH, r'raw_data\with_maskes')
train_path = os.path.join(DATA_PATH, r'segmentation\augmented')
dest_path = os.path.join(DATA_PATH, r'unet_data\training')
test_path = os.path.join(DATA_PATH, r'raw_data\images_orig')
src_path = os.path.join(DATA_PATH,
r'unet_data\training\2019-09-30_07-19-46')
pretrained_weights = os.path.join(DATA_PATH,
r'unet_data\training\2019-09-30_07-19-46\unet_cherry_stem.hdf5')
steps = None
params_dict = dict(
train_path=train_path,
save_path=dest_path,
x_folder_name='image',
y_folder_name='label',
save_name='cherry',
pretrained_weights=pretrained_weights,
checkpoint=None,
data_gen_args=dict(rotation_range=180,
brightness_range=[0.2, 1.],
shear_range=5,
zoom_range=0.5,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest'),
seed=78,
optimizer='Adam',
optimizer_params=dict(lr=1e-5,
amsgrad=False),
loss='binary_crossentropy',
metrics=[],
target_size=[256, 256],
color_mode='grayscale',
        batch_size=10,  # my GPU can't handle any more
epochs=15,
steps_per_epoch=100,
validation_split=0.2,
validation_steps=200,
tensorboard_update_freq=1000,
weights_update_freq='epoch',
save_weights_only=True,
ontop_display_threshold=0.4,
callbacks=[ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2,
verbose=1, mode='auto', min_delta=0.0001,
cooldown=0, min_lr=1e-6)])
update_dict = dict(
callbacks=[ReduceLROnPlateau(monitor='loss', factor=0.2, patience=2,
verbose=1, mode='auto', min_delta=0.0001,
cooldown=0, min_lr=1e-6)])
model = unet_model_functions.ClarifruitUnet(**params_dict)
#model = unet_model_functions.ClarifruitUnet.load_model(src_path,update_dict,steps)
keras_logs_path=model.set_model_for_train()
logger.info(f"for tensorboard, use: \ntensorboard --logdir={keras_logs_path}")
model.fit_unet()
#model.prediction(test_path=test_path,dest_path=dest_path)
if __name__ == '__main__':
main()
|
the-stack_0_22174 | from setuptools import setup, find_packages
# io.open is needed for projects that support Python 2.7
# Python 3 only projects can skip this import
from io import open
# Get the long description from the README file
with open('README.md', encoding='utf-8', errors="ignore") as f:
long_description = f.read()
setup(
name='codechecker_api_shared',
version='6.27.0',
description='Shared API stub types package for the CodeChecker API.',
long_description_content_type='text/markdown',
long_description=long_description,
url='https://github.com/Ericsson/codechecker',
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
# These classifiers are *not* checked by 'pip install'. See instead
# 'python_requires' below.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
keywords='codechecker thrift api library',
packages=find_packages(where='.'), # Required
    python_requires='>=2.7',
project_urls={ # Optional
'Bug Reports': 'https://github.com/Ericsson/codechecker/issues',
'Source': 'https://github.com/Ericsson/codechecker/tree/master/web/api',
},
)
|
the-stack_0_22175 | # -*- coding: utf-8 -*-
import os
import numpy as np
import sys
import struct
from torch.utils.data import Dataset
import scipy.io as scio
def pixel2world(x, y, z, img_width, img_height, fx, fy):
w_x = (x - img_width / 2) * z / fx
w_y = (img_height / 2 - y) * z / fy
w_z = z
return w_x, w_y, w_z
def world2pixel(x, y, z, img_width, img_height, fx, fy):
p_x = x * fx / z + img_width / 2
p_y = img_height / 2 - y * fy / z
return p_x, p_y
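# Worked example (added, not in the original file): with the NYU intrinsics used below
# (fx=588.03, fy=587.07, 640x480 frames), the image centre (x=320, y=240) at depth z maps
# to world coordinates (0, 0, z), and world2pixel(*pixel2world(x, y, z, w, h, fx, fy), w, h, fx, fy)
# recovers the original (x, y) exactly, since the two transforms are algebraic inverses.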
def depthmap2points(image, fx, fy):
h, w = image.shape
x, y = np.meshgrid(np.arange(w) + 1, np.arange(h) + 1)
points = np.zeros((h, w, 3), dtype=np.float32)
points[:,:,0], points[:,:,1], points[:,:,2] = pixel2world(x, y, image, w, h, fx, fy)
return points
def points2pixels(points, img_width, img_height, fx, fy):
pixels = np.zeros((points.shape[0], 2))
pixels[:, 0], pixels[:, 1] = \
world2pixel(points[:,0], points[:, 1], points[:, 2], img_width, img_height, fx, fy)
return pixels
def load_depthmap(filename, img_width, img_height, max_depth):
with open(filename, mode='rb') as f:
data = f.read()
num_pixel = (img_width*img_height)
cropped_image = struct.unpack('f'*num_pixel, data[:num_pixel * 4 ])
cropped_image = np.asarray(cropped_image).reshape(img_height, -1)
depth_image = np.zeros((img_height, img_width), dtype=np.float32)
depth_image[0:img_height, 0:img_width ] = cropped_image
depth_image[depth_image == 0] = max_depth
return depth_image
class NYUDataset(Dataset):
def __init__(self, root, center_dir, transform=None):
self.img_width = 640
self.img_height = 480
self.max_depth = 1200
self.fx = 588.03
self.fy = 587.07
self.joint_num = 21
self.world_dim = 3
self.root = root
self.center_dir = center_dir
self.transform = transform
self._load()
def __getitem__(self, index):
depthmap_img = load_depthmap(self.names[index], self.img_width, self.img_height, self.max_depth)
points = depthmap2points(depthmap_img, self.fx, self.fy)
points = points.reshape((-1, 3))
sample = {
'name': self.names[index],
'points': points,
'refpoint': self.ref_pts[index]
}
if self.transform: sample = self.transform(sample)
return sample
def __len__(self):
return self.num_samples
def _load(self):
self.num_samples = 8252
self.ref_pts = np.zeros((self.num_samples, self.world_dim))
self.names = []
ref_pt_file = 'center_test_refined.txt'
with open(os.path.join(self.center_dir, ref_pt_file)) as f:
ref_pt_str = [l.rstrip() for l in f]
#
file_id = 0
frame_id = 0
for i in range(0, 8252):
# referece point
splitted = ref_pt_str[file_id].split()
if splitted[0] == 'invalid':
print('Warning: found invalid reference frame')
file_id += 1
continue
else:
self.ref_pts[frame_id, 0] = float(splitted[0])
self.ref_pts[frame_id, 1] = float(splitted[1])
self.ref_pts[frame_id, 2] = float(splitted[2])
filename = os.path.join(self.root, str(i) + '.bin')
self.names.append(filename)
frame_id += 1
file_id += 1
class NYUDataset_train(Dataset):
def __init__(self, root, center_dir, transform=None):
self.img_width = 640
self.img_height = 480
self.max_depth = 1200
self.fx = 588.03
self.fy = 587.07
self.joint_num = 21
self.world_dim = 3
self.root = root
self.center_dir = center_dir
self.transform = transform
self._load()
def __getitem__(self, index):
depthmap_img = load_depthmap(self.names[index], self.img_width, self.img_height, self.max_depth)
points = depthmap2points(depthmap_img, self.fx, self.fy)
points = points.reshape((-1, 3))
sample = {
'name': self.names[index],
'points': points,
'joints': self.joints_world[index],
'refpoint': self.ref_pts[index]
}
if self.transform: sample = self.transform(sample)
return sample
def __len__(self):
return self.num_samples
def _load(self):
self.num_samples = 72757
self.ref_pts = np.zeros((self.num_samples, self.world_dim))
self.joints_world = np.zeros((self.num_samples, self.joint_num, self.world_dim))
self.names = []
keypoint_file = self.root + 'joint_data.mat'
ref_pt_file = 'center_train_refined.txt'
with open(os.path.join(self.center_dir, ref_pt_file)) as f:
ref_pt_str = [l.rstrip() for l in f]
keypointsXYZ_test = scio.loadmat(keypoint_file)["joint_xyz"].astype(np.float32)[0]
EVAL_JOINTS = np.array([
0, 6, 12, 18, 24,
1, 7, 13, 19, 25,
4, 10, 15, 21, 26,
5, 11, 17, 23, 28,
29 ])
        keypointsXYZ_test = keypointsXYZ_test[:, EVAL_JOINTS, :]
file_id = 0
frame_id = 0
for i in range(0, 72757):
# referece point
splitted = ref_pt_str[file_id].split()
if splitted[0] == 'invalid':
print('Warning: found invalid reference frame')
file_id += 1
continue
else:
self.ref_pts[frame_id, 0] = float(splitted[0])
self.ref_pts[frame_id, 1] = float(splitted[1])
self.ref_pts[frame_id, 2] = float(splitted[2])
self.joints_world[i] = keypointsXYZ_test[i]
filename = os.path.join(self.root, 'depth_1_{:0>7d}.bin'.format(i+1))
self.names.append(filename)
frame_id += 1
file_id += 1
|
the-stack_0_22178 | from collections import Counter
def nonDivisibleSubset(k, s):
    # Write your code here
    # Count how many values fall into each remainder class modulo k; two numbers sum to a
    # multiple of k exactly when their remainders are 0 and 0, or r and k - r.
    counts = Counter(x % k for x in s)
    result = min(counts[0], 1)           # at most one element with remainder 0
    for r in range(1, k // 2 + 1):
        if r == k - r:
            result += min(counts[r], 1)  # middle class when k is even: keep at most one
        else:
            result += max(counts[r], counts[k - r])  # keep the larger of the paired classes
    return result
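# Worked example (added): for the HackerRank sample S = [19, 10, 12, 24, 25, 22], k = 4 the
# remainder counts are {0: 2, 1: 1, 2: 2, 3: 1}; keeping one element from class 0, the larger
# of classes 1/3 (one element) and one element from class 2 gives the expected answer 3.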
if __name__ == '__main__':
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
k = int(first_multiple_input[1])
s = list(map(int, input().rstrip().split()))
result = nonDivisibleSubset(k, s)
print(result)
|
the-stack_0_22179 | """Check consistency of project."""
import re
MAX_CODE_LINES = 120
MAX_LINE_LENGTH = 120
def check(options, config, xref, seen):
"""Check various aspects of project."""
if "bib" in options.check:
_check_bib(config, seen)
if "code" in options.check:
_check_code(config)
if "gloss" in options.check:
_check_gloss(config, seen)
# ----------------------------------------------------------------------
def _check_bib(config, seen):
"""Check consistency of bibliography."""
    _show_unused("bibliography", config.bib_keys - seen.cite)
key = re.compile(r"^[A-Z][A-Za-z]+\d{4}[a-z]?$")
previous = None
for entry in config.bib_data:
# Alphabetic order by key (ignoring case).
if previous and (previous["ID"].lower() > entry["ID"].lower()):
print(f"Bibliography entry {entry['ID']} out of order.")
previous = entry
# Keys are Name and 4-digit year.
if not key.match(entry["ID"]):
print(f"Badly-formatted bibliography key {entry['ID']}.")
def _check_code(config):
"""Check code inclusions."""
pre = re.compile(
r'<pre\s+title="(.*?)"><code\s+class="(.*?)">(.*?)</code></pre>', re.DOTALL
)
lang = re.compile(r"^language-.+$")
for info in config.pages:
for (title, cls, body) in pre.findall(info.html):
body = body.split("\n")
# Code class.
if not lang.search(cls):
print(
f"Code block {title} in {info.src} has unrecognized class {cls}."
)
# Number of lines.
if len(body) > MAX_CODE_LINES:
print(
f"Code block {title} in {info.src} has {len(body)} lines (> {MAX_CODE_LINES})."
)
# Line length.
long_lines = [x for x in body if len(x) > MAX_LINE_LENGTH]
if long_lines:
print(
f"Code block {title} in {info.src} has {len(long_lines)} long lines (> {MAX_LINE_LENGTH})."
)
def _check_gloss(config, seen):
"""Check consistency of glossary."""
_show_unused("glossary", config.gloss_keys - seen.gloss_ref)
previous = None
lang = config.lang
for entry in config.gloss_data:
# Alphabetic order by key (ignoring case).
if previous and (previous[lang]["term"].lower() > entry[lang]["term"].lower()):
print(f"Glossary entry {entry[lang]['key']} out of order.")
previous = entry
def _show_unused(kind, unused):
if not unused:
return
print(f"Unused {kind} keys:")
for key in sorted(unused):
print(f"- {key}")
|
the-stack_0_22180 | """
Reference: https://github.com/openai/imitation
I follow the architecture from the official repository
"""
import gym
import tensorflow as tf
import numpy as np
from stable_baselines.common.mpi_running_mean_std import RunningMeanStd
from stable_baselines.common import tf_util as tf_util
def logsigmoid(input_tensor):
"""
Equivalent to tf.log(tf.sigmoid(a))
:param input_tensor: (tf.Tensor)
:return: (tf.Tensor)
"""
return -tf.nn.softplus(-input_tensor)
def logit_bernoulli_entropy(logits):
"""
Reference:
https://github.com/openai/imitation/blob/99fbccf3e060b6e6c739bdf209758620fcdefd3c/policyopt/thutil.py#L48-L51
:param logits: (tf.Tensor) the logits
:return: (tf.Tensor) the Bernoulli entropy
"""
ent = (1. - tf.nn.sigmoid(logits)) * logits - logsigmoid(logits)
return ent
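# Note (added): the closed form above follows from log(1 - sigmoid(x)) = -x + log(sigmoid(x)),
# so the Bernoulli entropy -p*log(p) - (1 - p)*log(1 - p) with p = sigmoid(x) simplifies to
# (1 - sigmoid(x)) * x - log(sigmoid(x)), which is exactly what logit_bernoulli_entropy returns.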
class TransitionClassifier(object):
def __init__(self, observation_space, action_space, hidden_size,
entcoeff=0.001, scope="adversary", normalize=True):
"""
Reward regression from observations and transitions
:param observation_space: (gym.spaces)
:param action_space: (gym.spaces)
:param hidden_size: ([int]) the hidden dimension for the MLP
:param entcoeff: (float) the entropy loss weight
:param scope: (str) tensorflow variable scope
:param normalize: (bool) Whether to normalize the reward or not
"""
# TODO: support images properly (using a CNN)
self.scope = scope
self.observation_shape = observation_space.shape
self.actions_shape = action_space.shape
if isinstance(action_space, gym.spaces.Box):
# Continuous action space
self.discrete_actions = False
self.n_actions = action_space.shape[0]
elif isinstance(action_space, gym.spaces.Discrete):
self.n_actions = action_space.n
self.discrete_actions = True
else:
raise ValueError('Action space not supported: {}'.format(action_space))
self.hidden_size = hidden_size
self.normalize = normalize
self.obs_rms = None
# Placeholders
self.generator_obs_ph = tf.placeholder(observation_space.dtype, (None,) + self.observation_shape,
name="observations_ph")
self.generator_acs_ph = tf.placeholder(action_space.dtype, (None,) + self.actions_shape,
name="actions_ph")
self.expert_obs_ph = tf.placeholder(observation_space.dtype, (None,) + self.observation_shape,
name="expert_observations_ph")
self.expert_acs_ph = tf.placeholder(action_space.dtype, (None,) + self.actions_shape,
name="expert_actions_ph")
# Build graph
generator_logits = self.build_graph(self.generator_obs_ph, self.generator_acs_ph, reuse=False)
expert_logits = self.build_graph(self.expert_obs_ph, self.expert_acs_ph, reuse=True)
# Build accuracy
generator_acc = tf.reduce_mean(tf.cast(tf.nn.sigmoid(generator_logits) < 0.5, tf.float32))
expert_acc = tf.reduce_mean(tf.cast(tf.nn.sigmoid(expert_logits) > 0.5, tf.float32))
# Build regression loss
# let x = logits, z = targets.
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
generator_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=generator_logits,
labels=tf.zeros_like(generator_logits))
generator_loss = tf.reduce_mean(generator_loss)
expert_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=expert_logits, labels=tf.ones_like(expert_logits))
expert_loss = tf.reduce_mean(expert_loss)
# Build entropy loss
logits = tf.concat([generator_logits, expert_logits], 0)
entropy = tf.reduce_mean(logit_bernoulli_entropy(logits))
entropy_loss = -entcoeff * entropy
# Loss + Accuracy terms
self.losses = [generator_loss, expert_loss, entropy, entropy_loss, generator_acc, expert_acc]
self.loss_name = ["generator_loss", "expert_loss", "entropy", "entropy_loss", "generator_acc", "expert_acc"]
self.total_loss = generator_loss + expert_loss + entropy_loss
# Build Reward for policy
self.reward_op = -tf.log(1 - tf.nn.sigmoid(generator_logits) + 1e-8)
var_list = self.get_trainable_variables()
self.lossandgrad = tf_util.function(
[self.generator_obs_ph, self.generator_acs_ph, self.expert_obs_ph, self.expert_acs_ph],
self.losses + [tf_util.flatgrad(self.total_loss, var_list)])
def build_graph(self, obs_ph, acs_ph, reuse=False):
"""
build the graph
:param obs_ph: (tf.Tensor) the observation placeholder
:param acs_ph: (tf.Tensor) the action placeholder
:param reuse: (bool)
:return: (tf.Tensor) the graph output
"""
with tf.variable_scope(self.scope):
if reuse:
tf.get_variable_scope().reuse_variables()
if self.normalize:
with tf.variable_scope("obfilter"):
self.obs_rms = RunningMeanStd(shape=self.observation_shape)
obs = (obs_ph - self.obs_rms.mean) / self.obs_rms.std
else:
obs = obs_ph
if self.discrete_actions:
one_hot_actions = tf.one_hot(acs_ph, self.n_actions)
actions_ph = tf.cast(one_hot_actions, tf.float32)
else:
actions_ph = acs_ph
_input = tf.concat([obs, actions_ph], axis=1) # concatenate the two input -> form a transition
p_h1 = tf.contrib.layers.fully_connected(_input, self.hidden_size, activation_fn=tf.nn.tanh)
p_h2 = tf.contrib.layers.fully_connected(p_h1, self.hidden_size, activation_fn=tf.nn.tanh)
logits = tf.contrib.layers.fully_connected(p_h2, 1, activation_fn=tf.identity)
return logits
def get_trainable_variables(self):
"""
Get all the trainable variables from the graph
:return: ([tf.Tensor]) the variables
"""
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def get_reward(self, obs, actions):
"""
Predict the reward using the observation and action
:param obs: (tf.Tensor or np.ndarray) the observation
:param actions: (tf.Tensor or np.ndarray) the action
:return: (np.ndarray) the reward
"""
sess = tf.get_default_session()
if len(obs.shape) == 1:
obs = np.expand_dims(obs, 0)
if len(actions.shape) == 1:
actions = np.expand_dims(actions, 0)
elif len(actions.shape) == 0:
# one discrete action
actions = np.expand_dims(actions, 0)
feed_dict = {self.generator_obs_ph: obs, self.generator_acs_ph: actions}
reward = sess.run(self.reward_op, feed_dict)
return reward
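# --- Illustrative usage sketch (added; not part of the original stable-baselines code).
# --- It builds the classifier for small, invented Box spaces and queries a reward inside a
# --- default TF1 session; shapes and hidden size are arbitrary.
if __name__ == "__main__":
    obs_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)
    act_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
    classifier = TransitionClassifier(obs_space, act_space, hidden_size=32)
    with tf.Session() as sess:  # becomes the default session used by get_reward
        sess.run(tf.global_variables_initializer())
        reward = classifier.get_reward(np.zeros(4, dtype=np.float32),
                                       np.zeros(2, dtype=np.float32))
        print(reward.shape)  # (1, 1)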
|
the-stack_0_22182 | from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.global_variables_initializer().run()
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g" % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
batch = mnist.test.next_batch(2000)
print("test accuracy %g" % accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}))
|
the-stack_0_22184 | from typing import Any, Dict, List, Optional
from .mock_server_exception import MockServerException
class MockServerRequestNotFoundException(MockServerException):
"""
Exception when we found no expectation for a request
"""
def __init__(
self,
method: Optional[str],
url: Optional[str],
json_list: Optional[List[Dict[str, Any]]],
) -> None:
"""
Exception when we found no expectation for a request
:param method: method of request
:param url: url of request
:param json_list: json body
"""
self.method: Optional[str] = method
self.url: Optional[str] = url
self.json_dict: Optional[List[Dict[str, Any]]] = json_list
assert (
not json_list or isinstance(json_list, dict) or isinstance(json_list, list)
), type(json_list)
super().__init__(f"Request was not expected: {url} {json_list}")
|
the-stack_0_22186 | from tensorflow.python.framework import dtypes
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer, training_ops
# Needed by the GradientTape / nest.flatten code paths inside compute_gradients below
from tensorflow.python.eager import backprop
from tensorflow.python.util import nest
import tensorflow as tf
import numpy as np
class DFA(optimizer.Optimizer): # A lot copy-pasted from the optimizer base class, with anything that gave errors taken out...
GATE_NONE = 0
GATE_OP = 1
GATE_GRAPH = 2
def __init__(self, learning_rate=0.001, stddev=0.5, use_locking=False, name="DFA"):
super(DFA, self).__init__(use_locking, name)
self._lr = learning_rate
self._stddev = stddev
self._B = []
self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate") # Normally these are in _prepare but it seems that doesn't get called before compute_gradients for this implementation?
self._stddev_t = ops.convert_to_tensor(self._stddev, name="standard_deviation")
def _prepare(self): # idk
pass
def compute_gradients(self, loss, var_list=None, # Copy-paste and black magic
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
if callable(loss):
with backprop.GradientTape() as tape:
if var_list is not None:
tape.watch(var_list)
loss_value = loss()
if var_list is None:
var_list = tape.watched_variables()
grads = tape.gradient(loss_value, var_list, grad_loss)
return list(zip(grads, var_list))
if gate_gradients not in [optimizer.Optimizer.GATE_NONE, optimizer.Optimizer.GATE_OP,
optimizer.Optimizer.GATE_GRAPH]:
raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
"Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" %
gate_gradients)
self._assert_valid_dtypes([loss])
if grad_loss is not None:
self._assert_valid_dtypes([grad_loss])
if var_list is None:
var_list = (
tf.trainable_variables() +
ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
else:
var_list = nest.flatten(var_list)
# pylint: disable=protected-access
var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)
# pylint: enable=protected-access
processors = [optimizer._get_processor(v) for v in var_list]
if not var_list:
raise ValueError("No variables to optimize.")
var_refs = [p.target() for p in processors]
act_refs = []
if(self._B == []):
stddev = math_ops.cast(self._stddev_t, tf.float32)
for v in var_refs:
shape = [10]+v.get_shape().as_list()
shape.reverse()
if(self._stddev == 0):
stddev = tf.rsqrt(math_ops.cast(v.shape[-1], tf.float32))
self._B.append(tf.Variable(tf.random_uniform(shape, minval=tf.multiply(-1., stddev), maxval=stddev), trainable=False, name=v.op.name+'/B')) # random matrix, uniform distribution between [-stddev, stddev]
act_name = "/".join(v.op.name.split("/")[:-1])+"/activations"
act_refs.append(tf.get_default_graph().get_operation_by_name(act_name).outputs[0])
f_grad = tf.reduce_mean(tf.gradients(loss, act_refs[-1])[0], axis=0)
grads = [tf.multiply(tf.reduce_sum(tf.transpose(tf.multiply(f_grad, self._B[i])),axis=0),
tf.gradients(act_refs[i], var_refs[i], grad_ys=grad_loss, gate_gradients=(gate_gradients == optimizer.Optimizer.GATE_OP), aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops)[0])
for i in range(0,len(var_refs)-2)]
grads.append(tf.gradients(loss, var_refs[-2],
grad_ys=grad_loss,
gate_gradients=(gate_gradients == optimizer.Optimizer.GATE_OP),
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops)[0])
grads.append(tf.gradients(loss, var_refs[-1],
gate_gradients=(gate_gradients == optimizer.Optimizer.GATE_OP),
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops)[0]) # Get the gradients for the final layer as tensorflow does normally(?)
if gate_gradients == optimizer.Optimizer.GATE_GRAPH:
grads = control_flow_ops.tuple(grads)
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes(
[v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource])
return grads_and_vars
def _apply_dense(self, grad, var):
return training_ops.apply_gradient_descent(
var,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
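# --- Hypothetical usage sketch (added; not from the original code). compute_gradients above
# --- looks up, for every trainable variable, a sibling op named "<variable scope>/activations",
# --- so layers are expected to be built roughly like this:
#
#   with tf.variable_scope("fc1"):
#       W = tf.get_variable("W", [784, 256])
#       b = tf.get_variable("b", [256])
#       h = tf.nn.relu(tf.matmul(x, W) + b, name="activations")
#
# The optimizer is then used like any other tf.train optimizer; the fixed random feedback
# matrices B are created lazily on the first compute_gradients call, and the hard-coded 10 in
# their shape suggests a 10-class output layer:
#
#   opt = DFA(learning_rate=1e-3, stddev=0.5)
#   train_op = opt.minimize(loss)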
|
the-stack_0_22187 | # Ex054.2
"""Create a program that reads the year of birth of seven people, at the end,
Show how many people are not yet of age and how many are already older"""
from datetime import date
current_year = date.today().year
adulthood = 0
minority = 0
for y in range(0, 7):
year_of_birth = int(input(f'What is the birth year of the {y + 1}° person?: '))
if year_of_birth + 18 <= current_year:
adulthood += 1
else:
minority += 1
print(f'Of the 07 registered people, {adulthood} are of legal age, and {minority} are minors')
|
the-stack_0_22188 | ###
# Copyright Notice:
# Copyright 2016 Distributed Management Task Force, Inc. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/python-redfish-utility/blob/master/LICENSE.md
###
""" List Command for RDMC """
import redfish.ris
from optparse import OptionParser
from rdmc_base_classes import RdmcCommandBase
from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS,\
NoContentsFoundForOperationError
class ListCommand(RdmcCommandBase):
""" Constructor """
def __init__(self, rdmcObj):
RdmcCommandBase.__init__(self,\
name='list',\
usage='list [OPTIONS]\n\n\tDisplays the current values of the ' \
'properties within\n\ta selected type including'\
' reserved properties\n\texample: list\n\n\tNOTE: If ' \
'you wish not to get all the reserved properties\n\t ' \
' run the get command instead',\
summary='Displays the current value(s) of a' \
' property(ies) within a selected type including'\
' reserved properties.',\
aliases=['ls'],\
optparser=OptionParser())
self.definearguments(self.parser)
self._rdmc = rdmcObj
self.lobobj = rdmcObj.commandsDict["LoginCommand"](rdmcObj)
self.selobj = rdmcObj.commandsDict["SelectCommand"](rdmcObj)
self.getobj = rdmcObj.commandsDict["GetCommand"](rdmcObj)
def run(self, line):
""" Wrapper function for main list function
:param line: command line input
:type line: string.
"""
try:
(options, args) = self._parse_arglist(line)
except:
if ("-h" in line) or ("--help" in line):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
self.listvalidation(options)
if args:
for arg in args:
newargs = list()
if "/" in arg:
newargs = arg.split("/")
arg = newargs[0]
if not self.getobj.getworkerfunction(arg, options, line,\
newargs=newargs, uselist=True):
raise NoContentsFoundForOperationError('No contents found '\
'for entry: %s\n' % arg)
else:
if not self.getobj.getworkerfunction(args, options, line, \
uselist=True):
raise NoContentsFoundForOperationError('No contents found.')
#Return code
return ReturnCodes.SUCCESS
def listvalidation(self, options):
""" List data validation function
:param options: command line options
:type options: list.
"""
inputline = list()
if self._rdmc.app.config._ac__format.lower() == 'json':
options.json = True
try:
self._rdmc.app.get_current_client()
except:
if options.user or options.password or options.url:
if options.url:
inputline.extend([options.url])
if options.user:
inputline.extend(["-u", options.user])
if options.password:
inputline.extend(["-p", options.password])
else:
if self._rdmc.app.config.get_url():
inputline.extend([self._rdmc.app.config.get_url()])
if self._rdmc.app.config.get_username():
inputline.extend(["-u", \
self._rdmc.app.config.get_username()])
if self._rdmc.app.config.get_password():
inputline.extend(["-p", \
self._rdmc.app.config.get_password()])
if len(inputline) and options.selector:
if options.filter:
inputline.extend(["--filter", options.filter])
if options.includelogs:
inputline.extend(["--includelogs"])
if options.path:
inputline.extend(["--path", options.path])
inputline.extend(["--selector", options.selector])
self.lobobj.loginfunction(inputline)
elif options.selector:
if options.filter:
inputline.extend(["--filter", options.filter])
if options.includelogs:
inputline.extend(["--includelogs"])
if options.path:
inputline.extend(["--path", options.path])
inputline.extend([options.selector])
self.selobj.selectfunction(inputline)
else:
try:
inputline = list()
selector = self._rdmc.app.get_selector()
if options.filter:
inputline.extend(["--filter", options.filter])
if options.includelogs:
inputline.extend(["--includelogs"])
if options.path:
inputline.extend(["--path", options.path])
inputline.extend([selector])
self.selobj.selectfunction(inputline)
except:
raise redfish.ris.NothingSelectedError
def definearguments(self, customparser):
""" Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
customparser.add_option(
'--url',
dest='url',
help="Use the provided URL to login.",
default=None,
)
customparser.add_option(
'-u',
'--user',
dest='user',
help="If you are not logged in yet, including this flag along"\
" with the password and URL flags can be used to log into a"\
" server in the same command.""",
default=None,
)
customparser.add_option(
'-p',
'--password',
dest='password',
help="""Use the provided password to log in.""",
default=None,
)
customparser.add_option(
'--includelogs',
dest='includelogs',
action="store_true",
help="Optionally include logs in the data retrieval process.",
default=False,
)
customparser.add_option(
'--selector',
dest='selector',
help="Optionally include this flag to select a type to run"\
" the current command on. Use this flag when you wish to"\
" select a type without entering another command, or if you"\
" wish to work with a type that is different from the one"\
" you currently have selected.",
default=None,
)
customparser.add_option(
'--filter',
dest='filter',
help="Optionally set a filter value for a filter attribute."\
" This uses the provided filter for the currently selected"\
" type. Note: Use this flag to narrow down your results. For"\
" example, selecting a common type might return multiple"\
" objects that are all of that type. If you want to modify"\
" the properties of only one of those objects, use the filter"\
" flag to narrow down results based on properties."\
"\t\t\t\t\t Usage: --filter [ATTRIBUTE]=[VALUE]",
default=None,
)
customparser.add_option(
'-j',
'--json',
dest='json',
action="store_true",
help="Optionally include this flag if you wish to change the"\
" displayed output to JSON format. Preserving the JSON data"\
" structure makes the information easier to parse.",
default=False
)
customparser.add_option(
'--logout',
dest='logout',
action="store_true",
help="Optionally include the logout flag to log out of the"\
" server after this command is completed. Using this flag when"\
" not logged in will have no effect",
default=None,
)
customparser.add_option(
'--path',
dest='path',
help="Optionally set a starting point for data collection."\
" If you do not specify a starting point, the default path"\
" will be /redfish/v1/. Note: The path flag can only be specified"\
" at the time of login, so if you are already logged into the"\
" server, the path flag will not change the path. If you are"\
" entering a command that isn't the login command, but include"\
" your login information, you can still specify the path flag"\
" there. ",
default=None,
)
|
the-stack_0_22189 | # @author SATO Kentaro
# @license BSD 2-Clause License
import struct
import sys
import urllib.robotparser
rp = urllib.robotparser.RobotFileParser('http://www.example.com')
while True:
try:
header = struct.unpack('>3l', sys.stdin.buffer.read(4 * 3))
text = ua = path = ''
# urllib.robotparser internally treats external robots.txt as UTF-8
if header[0]:
text = sys.stdin.buffer.read(header[0]).decode('utf-8')
if header[1]:
ua = sys.stdin.buffer.read(header[1]).decode('utf-8')
if header[2]:
path = sys.stdin.buffer.read(header[2]).decode('utf-8')
rp.parse(text.splitlines())
print('1' if rp.can_fetch(ua, 'http://www.example.com' + path) else '0')
print("<<<END>>>", file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
except EOFError:
break
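# Illustrative sketch of the wire format this loop expects (added; not part of the original
# script, and the script/file names below are invented). A parent process writes three
# big-endian int32 byte lengths followed by the UTF-8 payloads, then reads one line back:
#
#   import struct, subprocess
#   proc = subprocess.Popen(["python", "robots_check.py"],
#                           stdin=subprocess.PIPE, stdout=subprocess.PIPE)
#   robots = b"User-agent: *\nDisallow: /private/"
#   ua, path = b"MyBot", b"/private/page"
#   proc.stdin.write(struct.pack(">3l", len(robots), len(ua), len(path)))
#   proc.stdin.write(robots + ua + path)
#   proc.stdin.flush()
#   print(proc.stdout.readline())  # b"0\n" -> disallowed, b"1\n" -> allowed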
|
the-stack_0_22190 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils import data
from torch import optim
import torchvision.models as models
from torch.autograd import Variable
import torchvision as tv
import random
import math
import time
from datetime import datetime
import os
import argparse
import subprocess
from util.LFUtil import *
import numpy as np
from networks.LFMNet import LFMNet
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
# Arguments
parser = argparse.ArgumentParser()
# Image indices to use for training and validation
parser.add_argument('--imagesToUse', nargs='+', type=int, default=list(range(0,15,1)))
# GPUs to use
parser.add_argument('--GPUs', nargs='+', type=int, default=[0])
# Path to dataset
parser.add_argument('--datasetPath', nargs='?', default="BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5")
# Path to directory where models and tensorboard logs are stored
parser.add_argument('--outputPath', nargs='?', default="eval/runsMouse/")
# Path to model to use for testing
parser.add_argument('--checkpointPath', nargs='?', default="runs/2020_10_11__14:23:21_TrueB_0.1bias_5I_128BS_FalseSk_9FOV_3nT_0.03ths_a8d9a2c_commit_")
# File to use
parser.add_argument('--checkpointFileName', nargs='?', default="model_130")
# Write volumes to H5 file
parser.add_argument('--writeVolsToH5', type=str2bool, default=False)
# Write output to tensorboard
parser.add_argument('--writeToTB', type=str2bool, default=True)
argsTest = parser.parse_args()
nImgs = len(argsTest.imagesToUse)
# Setup multithreading
num_workers = 0
if not torch.cuda.is_available():
print("GPU initialization error")
exit(-1)
# Select GPUs to use
argsTest.GPUs = list(range(torch.cuda.device_count())) if argsTest.GPUs is None else argsTest.GPUs
print('Using GPUs: ' + str(argsTest.GPUs))
# Load checkpoint if provided
if argsTest.checkpointPath is not None:
checkpointPath = argsTest.checkpointPath + "/" + argsTest.checkpointFileName
checkpoint = torch.load(checkpointPath)
# overwrite args
argsModel = checkpoint['args']
argsModel.checkpointPath = checkpointPath
# set Device to use
device = torch.device("cuda:"+str(argsTest.GPUs[0]) if torch.cuda.is_available() else "cpu")
# Create output folder
save_folder = argsTest.outputPath + argsTest.checkpointPath[:-1].split('/')[1] + "_eval_" + datetime.now().strftime('%Y_%m_%d__%H:%M:%S')
print(save_folder)
# Create summary writer to log stuff
if argsTest.writeToTB:
writer = SummaryWriter(log_dir=save_folder)
# Load dataset
all_data = Dataset(argsTest.datasetPath, argsModel.randomSeed, \
fov=argsModel.fovInput, neighShape=argsModel.neighShape, img_indices=argsTest.imagesToUse, get_full_imgs=True, center_region=None)
# Create data loader
test_dataset = data.DataLoader(all_data, batch_size=1,
shuffle=False, num_workers=num_workers, pin_memory=True)
# Get Dataset information
nDepths = all_data.get_n_depths()
volShape, LFshape = all_data.__shape__()
LFshape = LFshape[0:4]
lateralTile = int(math.sqrt(nDepths))
# Find normalization values
maxInputTrain, maxVolumeTrain = all_data.get_max()
maxInputTest, maxVolumeTest = all_data.get_max()
# Create network
net = LFMNet(nDepths, argsModel.useBias, argsModel.useSkipCon, LFshape, LFfov=argsModel.fovInput, use_small_unet=argsModel.useShallowUnet).to(device)
lossFunction = nn.L1Loss()
lossFunction.eval()
# Create SSIM criteria
ssim = SSIM()
ssim.eval()
# Start distributed data parallel, as it's faster than DataParallel
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '1234'+str(argsTest.GPUs[0])
torch.distributed.init_process_group(backend="nccl", rank=0, world_size=1) #initialize torch.distributed
# Move network to distributed data parallel
net = nn.parallel.DistributedDataParallel(net, device_ids=argsTest.GPUs, output_device=argsTest.GPUs[0]).to(device)
# Load network from checkpoint
net.load_state_dict(checkpoint['model_state_dict'])
# Move net to single GPU
net = net.module.to("cuda:1")
device = "cuda:1"
# timers
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
print('Testing')
net.eval()
avg_psnr = 0
avg_ssim = 0
avg_loss = 0
avg_time = 0
with torch.no_grad():
# Evaluate images
for nBatch,(inputs,labels) in enumerate(test_dataset):
inputGPU = inputs.float().to(device) / maxInputTest
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
outputsGT = imadjust(outputsGT,argsModel.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
start.record()
outputsVol = net(inputGPU)
end.record()
torch.cuda.synchronize()
curr_time = start.elapsed_time(end)
curr_loss = lossFunction(outputsGT,outputsVol).item()
avg_loss += curr_loss / len(test_dataset)
# Compute PSNR
lossMSE = nn.functional.mse_loss(outputsVol.detach(), outputsGT.to(device).detach())
curr_psnr = 10 * math.log10(1 / lossMSE.item())
avg_psnr += curr_psnr / len(test_dataset)
curr_ssim = ssim(outputsVol[:,0,:,:,:].permute(0,3,1,2).contiguous().detach(), outputsGT[:,0,:,:,:].permute(0,3,1,2).contiguous().to(device).detach()).sum().item()
avg_ssim += curr_ssim / len(test_dataset)
avg_time += curr_time / len(test_dataset)
if argsTest.writeVolsToH5:
h5file = h5py.File(save_folder+"/ReconVol_"+argsTest.checkpointFileName+'_'+str(nBatch+min(argsTest.imagesToUse))+".h5", 'w')
h5file.create_dataset("LF4D", data=inputGPU.detach().cpu().squeeze().numpy())
h5file.create_dataset("LFimg", data=LF2Spatial(inputGPU, inputGPU.shape[2:]).squeeze().cpu().detach().numpy())
h5file.create_dataset("GT", data=outputsGT.detach().cpu().squeeze().numpy())
h5file.create_dataset("reconFull", data=outputsVol.detach().cpu().squeeze().numpy())
h5file.close()
if argsTest.writeToTB:
curr_it = nBatch
lastBatchSize = 1
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
LFImage = LF2Spatial(inputGPU, inputGPU.shape[2:])
writer.add_image('images_val_YZ_projection', gridOut2, curr_it)
z_proj = outputsGT[0,:,:,:,:].sum(3)
writer.add_image('z_proj_GT',(z_proj/z_proj.max()).detach().cpu(),curr_it)
z_proj = outputsVol[0,:,:,:,:].sum(3)
writer.add_image('z_proj_prediction',(z_proj/z_proj.max()).detach().cpu(),curr_it)
writer.add_image('LFImage_in', LFImage[0,:,:,:], curr_it)
writer.add_scalar('Loss/test', curr_loss, curr_it)
writer.add_scalar('Loss/psnr', curr_psnr, curr_it)
writer.add_scalar('Loss/ssim', curr_ssim, curr_it)
writer.add_scalar('times/val', curr_time, curr_it)
print('Img: ' + str(nBatch) + '/' + str(len(test_dataset)) + " L1: " + str(curr_loss) + " psnr: " + str(curr_psnr) + " SSIM: " + str(curr_ssim) + " recon_time: " + str(curr_time))
print("avg_loss: " + str(avg_loss) + " avg_psnr: " + str(avg_psnr) + " avg_ssim: " + str(avg_ssim) + " avg_time: " + str(avg_time) + "ms")
writer.close()
|
the-stack_0_22191 | import tensorflow as tf
import numpy as np
class OutputChecks:
model_output = None
state_object = None
accuracy_over_time = None
bot_data_over_time = None
requires_input = False
requires_bot_output = False
controls = None
def __init__(self, tf_session, action_handler, batch_size, model_output,
state_object=None,
bot=None,
model_placeholder=None,
batch_size_placeholder=None):
self.sess = tf_session
self.batch_size = batch_size
self.state_object = state_object
self.tutorial_bot = bot
self.model_output = model_output
self.model_input = model_placeholder
self.actionHandler = action_handler
self.batch_size_placeholder = batch_size_placeholder
if self.tutorial_bot is None:
self.requires_bot_output = True
if self.model_input is not None:
self.requires_input = True
def create_model(self):
# clear history
self.accuracy_over_time = []
self.bot_data_over_time = []
self.controls = tf.transpose(
self.actionHandler.create_tensorflow_controller_from_selection(self.model_output,
self.batch_size))
def get_amounts(self, input_array=None, bot_output=None):
if not self.requires_bot_output:
bot_output = self.sess.run(self.tutorial_bot.get_output_vector_model(self.state_object))
else:
if bot_output is None:
print("Missing correct output")
return
if not self.requires_input:
output = self.sess.run(self.controls)
else:
output = self.sess.run(self.controls, feed_dict={self.model_input: input_array,
self.batch_size_placeholder: np.array([self.batch_size])})
accuracy = np.sum(np.isclose(output, bot_output, 0.2), 1) / np.size(output[1])
self.accuracy_over_time.append(accuracy)
self.bot_data_over_time.append((output, bot_output))
analog_buckets = [-1.0001, -0.50001, -0.1000, 0.1000, 0.50001, 1.0001]
boolean_buckets = [-0.001, 0.50001, 1.0001]
np.set_printoptions(formatter={'int': '{0:5}'.format})
names = ["Throttle", "Steer", "Pitch", "Yaw", "Roll", "Jump", "Boost", "Handbrake"]
print("Splitting up everything in ranges: [-1, -0.5>, [-0.5, -0.1>, [0], <0.1+, 0.5], <0.5, 1]")
print("Real is model output, Expt is tutorialbot output and Acc. is accuracy")
for i in range(8):
            if i == 5:
                print("From here the ranges are [0.0, 0.5>, [0.5, 1.0]")
print(names[i] + ":")
buckets = analog_buckets if i < 5 else boolean_buckets
print(" Real: ", np.histogram(output[i], buckets)[0])
print(" Expt: ", np.histogram(bot_output[i], buckets)[0])
print(" Acc.: ", accuracy[i])
print("Overall accuracy: ", np.sum(accuracy) / 8.0)
def get_final_stats(self):
def average(numbers):
return sum(numbers) / len(numbers)
number_prints = len(self.accuracy_over_time)
accuracy = np.transpose(self.accuracy_over_time)
np.set_printoptions(formatter={'float': '{: 0.2f}'.format})
percentages = [10, 25, 50]
names = ["Throttle", "Steer", "Pitch", "Yaw", "Roll", "Jump", "Boost", "Handbrake"]
print("Every action is printed multiple times, once all values and then averages over percentages")
for n in range(8):
print(names[n] + ":")
print("All: ", accuracy[n])
for p in percentages:
r = int(100 / p)
step = int(number_prints * p / 100)
print(str(p) + "%:", np.array([average(accuracy[n][int(i * step):int(i * step + step) if not int(i * step + step) is int(i * step) else int(i * step) + 1]) for i in range(r)]))
|
the-stack_0_22192 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ReservationOrderOperations(object):
"""ReservationOrderOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.reservations.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def calculate(
self,
body, # type: "_models.PurchaseRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.CalculatePriceResponse"
"""Calculate price for a ``ReservationOrder``.
Calculate price for placing a ``ReservationOrder``.
:param body: Information needed for calculate or purchase reservation.
:type body: ~azure.mgmt.reservations.models.PurchaseRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CalculatePriceResponse, or the result of cls(response)
:rtype: ~azure.mgmt.reservations.models.CalculatePriceResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CalculatePriceResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.calculate.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'PurchaseRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CalculatePriceResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
calculate.metadata = {'url': '/providers/Microsoft.Capacity/calculatePrice'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ReservationOrderList"]
"""Get all ``ReservationOrder``\ s.
List of all the ``ReservationOrder``\ s that the user has access to in the current tenant.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReservationOrderList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.reservations.models.ReservationOrderList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReservationOrderList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ReservationOrderList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Capacity/reservationOrders'} # type: ignore
def _purchase_initial(
self,
reservation_order_id, # type: str
body, # type: "_models.PurchaseRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.ReservationOrderResponse"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReservationOrderResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._purchase_initial.metadata['url'] # type: ignore
path_format_arguments = {
'reservationOrderId': self._serialize.url("reservation_order_id", reservation_order_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'PurchaseRequest')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ReservationOrderResponse', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ReservationOrderResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_purchase_initial.metadata = {'url': '/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}'} # type: ignore
def begin_purchase(
self,
reservation_order_id, # type: str
body, # type: "_models.PurchaseRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReservationOrderResponse"]
"""Purchase ``ReservationOrder``.
Purchase ``ReservationOrder`` and create resource under the specified URI.
:param reservation_order_id: Order Id of the reservation.
:type reservation_order_id: str
:param body: Information needed for calculate or purchase reservation.
:type body: ~azure.mgmt.reservations.models.PurchaseRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReservationOrderResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.reservations.models.ReservationOrderResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReservationOrderResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._purchase_initial(
reservation_order_id=reservation_order_id,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReservationOrderResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'reservationOrderId': self._serialize.url("reservation_order_id", reservation_order_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_purchase.metadata = {'url': '/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}'} # type: ignore
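    # Usage sketch (assumption: these operations are exposed on a client as
    # `client.reservation_order`; the GUID and variable names are illustrative):
    #
    #   poller = client.reservation_order.begin_purchase(
    #       reservation_order_id="11111111-1111-1111-1111-111111111111",
    #       body=purchase_request,            # a PurchaseRequest model instance
    #   )
    #   reservation_order = poller.result()   # block until the LRO finishes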
def get(
self,
reservation_order_id, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.ReservationOrderResponse"
"""Get a specific ``ReservationOrder``.
Get the details of the ``ReservationOrder``.
:param reservation_order_id: Order Id of the reservation.
:type reservation_order_id: str
:param expand: May be used to expand the planInformation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReservationOrderResponse, or the result of cls(response)
:rtype: ~azure.mgmt.reservations.models.ReservationOrderResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReservationOrderResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'reservationOrderId': self._serialize.url("reservation_order_id", reservation_order_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ReservationOrderResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}'} # type: ignore
def change_directory(
self,
reservation_order_id, # type: str
body, # type: "_models.ChangeDirectoryRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.ChangeDirectoryResponse"
"""Change directory of ``ReservationOrder``.
Change directory (tenant) of ``ReservationOrder`` and all ``Reservation`` under it to specified
tenant id.
:param reservation_order_id: Order Id of the reservation.
:type reservation_order_id: str
:param body: Information needed to change directory of reservation order.
:type body: ~azure.mgmt.reservations.models.ChangeDirectoryRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ChangeDirectoryResponse, or the result of cls(response)
:rtype: ~azure.mgmt.reservations.models.ChangeDirectoryResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ChangeDirectoryResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-15-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.change_directory.metadata['url'] # type: ignore
path_format_arguments = {
'reservationOrderId': self._serialize.url("reservation_order_id", reservation_order_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'ChangeDirectoryRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ChangeDirectoryResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
change_directory.metadata = {'url': '/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/changeDirectory'} # type: ignore
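    # Usage sketch for the synchronous operations (illustrative only; assumes the
    # same `client.reservation_order` wrapper as above):
    #
    #   order = client.reservation_order.get(reservation_order_id, expand="planInformation")
    #   moved = client.reservation_order.change_directory(
    #       reservation_order_id,
    #       body=change_directory_request,    # a ChangeDirectoryRequest model instance
    #   )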
|
the-stack_0_22193 | import socket
def invert_dict(dict_of_lists):
    rv = {}
    for key, values in dict_of_lists.items():
for value in values:
if value in rv:
raise ValueError
rv[value] = key
return rv
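# Example: invert_dict({"text": ["txt", "md"]}) returns {"txt": "text", "md": "text"};
# a value appearing under two keys raises ValueError.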
class HTTPException(Exception):
def __init__(self, code, message=""):
self.code = code
self.message = message
def _open_socket(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if port != 0:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(5)
return sock
def is_bad_port(port):
"""
Bad port as per https://fetch.spec.whatwg.org/#port-blocking
"""
return port in [
1, # tcpmux
7, # echo
9, # discard
11, # systat
13, # daytime
15, # netstat
17, # qotd
19, # chargen
20, # ftp-data
21, # ftp
22, # ssh
23, # telnet
25, # smtp
37, # time
42, # name
43, # nicname
53, # domain
77, # priv-rjs
79, # finger
87, # ttylink
95, # supdup
        101, # hostname
102, # iso-tsap
103, # gppitnp
104, # acr-nema
109, # pop2
110, # pop3
111, # sunrpc
113, # auth
115, # sftp
117, # uucp-path
119, # nntp
123, # ntp
135, # loc-srv / epmap
139, # netbios
143, # imap2
179, # bgp
389, # ldap
427, # afp (alternate)
465, # smtp (alternate)
512, # print / exec
513, # login
514, # shell
515, # printer
526, # tempo
530, # courier
531, # chat
532, # netnews
540, # uucp
548, # afp
556, # remotefs
563, # nntp+ssl
587, # smtp (outgoing)
601, # syslog-conn
636, # ldap+ssl
        993, # imap+ssl
995, # pop3+ssl
2049, # nfs
3659, # apple-sasl
4045, # lockd
6000, # x11
6665, # irc (alternate)
6666, # irc (alternate)
6667, # irc (default)
6668, # irc (alternate)
6669, # irc (alternate)
6697, # irc+tls
]
def get_port(host):
port = 0
while True:
free_socket = _open_socket(host, 0)
port = free_socket.getsockname()[1]
free_socket.close()
if not is_bad_port(port):
break
return port
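if __name__ == "__main__":
    # Minimal self-check sketch: grab a free, non-blocked port on localhost and
    # open a listening socket on it (assumes the port stays free in the short
    # window between probing and binding).
    port = get_port("127.0.0.1")
    assert not is_bad_port(port)
    sock = _open_socket("127.0.0.1", port)
    print("listening on %s:%d" % sock.getsockname())
    sock.close()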
|
the-stack_0_22194 | """
Tests of the XBlock-family functionality mixins
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime
from unittest import TestCase
import ddt as ddt
from lxml import etree
import mock
import pytz
import six
from xblock.core import XBlock, XBlockAside
from xblock.fields import List, Scope, Integer, String, ScopeIds, UNIQUE_ID, DateTime
from xblock.field_data import DictFieldData
from xblock.mixins import ScopedStorageMixin, HierarchyMixin, IndexInfoMixin, ViewsMixin, XML_NAMESPACES
from xblock.runtime import Runtime
class AttrAssertionMixin(TestCase):
"""
A mixin to add attribute assertion methods to TestCases.
"""
def assertHasAttr(self, obj, attr):
"Assert that `obj` has the attribute named `attr`."
self.assertTrue(hasattr(obj, attr), "{!r} doesn't have attribute {!r}".format(obj, attr))
def assertNotHasAttr(self, obj, attr):
"Assert that `obj` doesn't have the attribute named `attr`."
self.assertFalse(hasattr(obj, attr), "{!r} has attribute {!r}".format(obj, attr))
class TestScopedStorageMixin(AttrAssertionMixin, TestCase):
"Tests of the ScopedStorageMixin."
class ScopedStorageMixinTester(ScopedStorageMixin):
"""Toy class for ScopedStorageMixin testing"""
field_a = Integer(scope=Scope.settings)
field_b = Integer(scope=Scope.content)
class ChildClass(ScopedStorageMixinTester):
"""Toy class for ModelMetaclass testing"""
pass
class FieldsMixin(object):
"""Toy mixin for field testing"""
field_c = Integer(scope=Scope.settings)
class MixinChildClass(FieldsMixin, ScopedStorageMixinTester):
"""Toy class for ScopedStorageMixin testing with mixed-in fields"""
pass
class MixinGrandchildClass(MixinChildClass):
"""Toy class for ScopedStorageMixin testing with inherited mixed-in fields"""
pass
def test_scoped_storage_mixin(self):
# `ModelMetaclassTester` and `ChildClass` both obtain the `fields` attribute
# from the `ModelMetaclass`. Since this is not understood by static analysis,
# silence this error for the duration of this test.
# pylint: disable=E1101
self.assertIsNot(self.ScopedStorageMixinTester.fields, self.ChildClass.fields)
self.assertHasAttr(self.ScopedStorageMixinTester, 'field_a')
self.assertHasAttr(self.ScopedStorageMixinTester, 'field_b')
self.assertIs(self.ScopedStorageMixinTester.field_a, self.ScopedStorageMixinTester.fields['field_a'])
self.assertIs(self.ScopedStorageMixinTester.field_b, self.ScopedStorageMixinTester.fields['field_b'])
self.assertHasAttr(self.ChildClass, 'field_a')
self.assertHasAttr(self.ChildClass, 'field_b')
self.assertIs(self.ChildClass.field_a, self.ChildClass.fields['field_a'])
self.assertIs(self.ChildClass.field_b, self.ChildClass.fields['field_b'])
def test_with_mixins(self):
# Testing model metaclass with mixins
# `MixinChildClass` and `MixinGrandchildClass` both obtain the `fields` attribute
# from the `ScopedStorageMixin`. Since this is not understood by static analysis,
# silence this error for the duration of this test.
# pylint: disable=E1101
self.assertHasAttr(self.MixinChildClass, 'field_a')
self.assertHasAttr(self.MixinChildClass, 'field_c')
self.assertIs(self.MixinChildClass.field_a, self.MixinChildClass.fields['field_a'])
self.assertIs(self.FieldsMixin.field_c, self.MixinChildClass.fields['field_c'])
self.assertHasAttr(self.MixinGrandchildClass, 'field_a')
self.assertHasAttr(self.MixinGrandchildClass, 'field_c')
self.assertIs(self.MixinGrandchildClass.field_a, self.MixinGrandchildClass.fields['field_a'])
self.assertIs(self.MixinGrandchildClass.field_c, self.MixinGrandchildClass.fields['field_c'])
class TestHierarchyMixin(AttrAssertionMixin, TestCase):
"Tests of the HierarchyMixin."
class HasChildren(HierarchyMixin):
"""Toy class for ChildrenModelMetaclass testing"""
has_children = True
class WithoutChildren(HierarchyMixin):
"""Toy class for ChildrenModelMetaclass testing"""
pass
class InheritedChildren(HasChildren):
"""Toy class for ChildrenModelMetaclass testing"""
pass
def test_children_metaclass(self):
# `HasChildren` and `WithoutChildren` both obtain the `children` attribute and
# the `has_children` method from the `ChildrenModelMetaclass`. Since this is not
# understood by static analysis, silence this error for the duration of this test.
# pylint: disable=E1101
self.assertTrue(self.HasChildren.has_children)
self.assertFalse(self.WithoutChildren.has_children)
self.assertTrue(self.InheritedChildren.has_children)
self.assertHasAttr(self.HasChildren, 'children')
self.assertNotHasAttr(self.WithoutChildren, 'children')
self.assertHasAttr(self.InheritedChildren, 'children')
self.assertIsInstance(self.HasChildren.children, List)
self.assertEqual(Scope.children, self.HasChildren.children.scope)
self.assertIsInstance(self.InheritedChildren.children, List)
self.assertEqual(Scope.children, self.InheritedChildren.children.scope)
class TestIndexInfoMixin(AttrAssertionMixin):
"""
Tests for Index
"""
class IndexInfoMixinTester(IndexInfoMixin):
"""Test class for index mixin"""
pass
def test_index_info(self):
self.assertHasAttr(self.IndexInfoMixinTester, 'index_dictionary')
with_index_info = self.IndexInfoMixinTester().index_dictionary()
self.assertFalse(with_index_info)
self.assertTrue(isinstance(with_index_info, dict))
class TestViewsMixin(TestCase):
"""
Tests for ViewsMixin
"""
def test_supports_view_decorator(self):
"""
Tests the @supports decorator for xBlock view methods
"""
class SupportsDecoratorTester(ViewsMixin):
"""
Test class for @supports decorator
"""
@ViewsMixin.supports("a_functionality")
def functionality_supported_view(self):
"""
A view that supports a functionality
"""
pass # pragma: no cover
@ViewsMixin.supports("functionality1", "functionality2")
def multi_featured_view(self):
"""
A view that supports multiple functionalities
"""
pass # pragma: no cover
def an_unsupported_view(self):
"""
A view that does not support any functionality
"""
pass # pragma: no cover
test_xblock = SupportsDecoratorTester()
for view_name, functionality, expected_result in (
("functionality_supported_view", "a_functionality", True),
("functionality_supported_view", "bogus_functionality", False),
("functionality_supported_view", None, False),
("an_unsupported_view", "a_functionality", False),
("multi_featured_view", "functionality1", True),
("multi_featured_view", "functionality2", True),
("multi_featured_view", "bogus_functionality", False),
):
self.assertEqual(
test_xblock.has_support(getattr(test_xblock, view_name), functionality),
expected_result
)
def test_has_support_override(self):
"""
Tests overriding has_support
"""
class HasSupportOverrideTester(ViewsMixin):
"""
Test class for overriding has_support
"""
def has_support(self, view, functionality):
"""
Overrides implementation of has_support
"""
return functionality == "a_functionality"
test_xblock = HasSupportOverrideTester()
for view_name, functionality, expected_result in (
("functionality_supported_view", "a_functionality", True),
("functionality_supported_view", "bogus_functionality", False),
):
self.assertEqual(
test_xblock.has_support(getattr(test_xblock, view_name, None), functionality),
expected_result
)
@ddt.ddt
class TestXmlSerializationMixin(TestCase):
""" Tests for XmlSerialization Mixin """
# pylint:disable=invalid-name
class TestXBlock(XBlock):
""" XBlock for XML export test """
etree_node_tag = 'test_xblock'
str_field = String()
str_str_default = String(default="default")
str_str_default_force_export = String(default="default", force_export=True)
str_uid_default = String(default=UNIQUE_ID)
str_uid_default_force_export = String(default=UNIQUE_ID, force_export=True)
str_none_default = String(default=None)
str_none_default_force_export = String(default=None, force_export=True)
# pylint:disable=invalid-name
class TestXBlockAside(XBlockAside):
""" XBlockAside for XML export test """
etree_node_tag = 'test_xblock_aside'
str_field = String()
str_str_default = String(default="default")
class TestXBlockWithDateTime(XBlock):
""" XBlock for DateTime fields export """
etree_node_tag = 'test_xblock_with_datetime'
datetime = DateTime(default=None)
def setUp(self):
"""
Construct test XBlocks.
"""
self.test_xblock = self._make_block(self.TestXBlock)
self.test_xblock_tag = self.TestXBlock.etree_node_tag
self.test_xblock_datetime = self._make_block(self.TestXBlockWithDateTime)
self.test_xblock_datetime_tag = self.TestXBlockWithDateTime.etree_node_tag
self.test_xblock_aside = self._make_block(self.TestXBlockAside)
self.test_xblock_aside_tag = self.TestXBlockAside.etree_node_tag
def _make_block(self, block_type=None):
""" Creates a test block """
block_type = block_type if block_type else self.TestXBlock
runtime_mock = mock.Mock(spec=Runtime)
scope_ids = ScopeIds("user_id", block_type.etree_node_tag, "def_id", "usage_id")
return block_type(runtime=runtime_mock, field_data=DictFieldData({}), scope_ids=scope_ids)
def _assert_node_attributes(self, node, expected_attributes, entry_point=None):
""" Checks XML node attributes to match expected_attributes"""
node_attributes = list(node.keys())
node_attributes.remove('xblock-family')
self.assertEqual(node.get('xblock-family'), entry_point if entry_point else self.TestXBlock.entry_point)
self.assertEqual(set(node_attributes), set(expected_attributes.keys()))
for key, value in six.iteritems(expected_attributes):
if value != UNIQUE_ID:
self.assertEqual(node.get(key), value)
else:
self.assertIsNotNone(node.get(key))
def _assert_node_elements(self, node, expected_elements):
"""
Checks XML node elements to match expected elements.
"""
node_elements = list(node)
self.assertEqual(set([elem.tag for elem in node_elements]), set(expected_elements.keys()))
# All elements on the node are expected to have a "none"="true" attribute.
for elem in node:
self.assertEqual(elem.get('none'), 'true')
def test_no_fields_set_add_xml_to_node(self):
"""
Tests that no fields are set on a TestXBlock when initially made
and no fields are present in the XML (besides force-exported defaults).
"""
node = etree.Element(self.test_xblock_tag)
# Precondition check: no fields are set.
for field_name in six.iterkeys(self.test_xblock.fields):
self.assertFalse(self.test_xblock.fields[field_name].is_set_on(self.test_xblock))
self.test_xblock.add_xml_to_node(node)
self._assert_node_attributes(
node,
{
'str_str_default_force_export': 'default',
'str_uid_default_force_export': UNIQUE_ID
}
)
self._assert_node_elements(
node,
{
# The tag is prefixed with {namespace}.
'{{{}}}{}'.format(
XML_NAMESPACES["option"],
'str_none_default_force_export'
): None
}
)
def test_set_fields_add_xml_to_node(self):
"""
Tests that set fields appear in XML after add_xml_to_node.
"""
node = etree.Element(self.test_xblock_tag)
self.test_xblock.str_field = 'str_field_val'
self.test_xblock.str_str_default = 'str_str_default_val'
self.test_xblock.str_str_default_force_export = 'str_str_default_force_export_val'
self.test_xblock.str_uid_default = 'str_uid_default_val'
self.test_xblock.str_uid_default_force_export = 'str_uid_default_force_export_val'
self.test_xblock.str_none_default = 'str_none_default_val'
self.test_xblock.str_none_default_force_export = 'str_none_default_force_export_val'
self.test_xblock.add_xml_to_node(node)
self._assert_node_attributes(
node,
{
'str_field': 'str_field_val',
'str_str_default': 'str_str_default_val',
'str_str_default_force_export': 'str_str_default_force_export_val',
'str_uid_default': 'str_uid_default_val',
'str_uid_default_force_export': 'str_uid_default_force_export_val',
'str_none_default': 'str_none_default_val',
'str_none_default_force_export': 'str_none_default_force_export_val',
}
)
self._assert_node_elements(node, {})
def test_set_field_to_none_add_xml_to_node(self):
"""
Tests add_xml_to_node with String field value set to None.
"""
node = etree.Element(self.test_xblock_tag)
# Now set all fields to None.
self.test_xblock.str_field = None
self.test_xblock.str_str_default = None
self.test_xblock.str_str_default_force_export = None
self.test_xblock.str_uid_default = None
self.test_xblock.str_uid_default_force_export = None
self.test_xblock.str_none_default = None
self.test_xblock.str_none_default_force_export = None
self.test_xblock.add_xml_to_node(node)
self._assert_node_attributes(node, {})
self._assert_node_elements(
node,
{
# The tags are prefixed with {namespace}.
'{{{}}}{}'.format(XML_NAMESPACES["option"], tag): None
for tag in [
'str_field',
'str_str_default',
'str_str_default_force_export',
'str_uid_default',
'str_uid_default_force_export',
'str_none_default',
'str_none_default_force_export'
]
}
)
def test_set_unset_then_add_xml_to_node(self):
"""
Tests add_xml_to_node with non-UNIQUE_ID String field value unset after being set.
"""
node = etree.Element(self.test_xblock_tag)
# Now set some fields to values.
self.test_xblock.str_field = None
self.test_xblock.str_str_default = 'water is wet'
self.test_xblock.str_str_default_force_export = ''
self.test_xblock.str_uid_default = 'smart'
self.test_xblock.str_uid_default_force_export = '47'
self.test_xblock.str_none_default = ''
self.test_xblock.str_none_default_force_export = None
# Now unset those same fields.
del self.test_xblock.str_field
del self.test_xblock.str_str_default
del self.test_xblock.str_str_default_force_export
del self.test_xblock.str_uid_default
del self.test_xblock.str_uid_default_force_export
del self.test_xblock.str_none_default
del self.test_xblock.str_none_default_force_export
self.test_xblock.add_xml_to_node(node)
# The fields should no longer be present in the XML representation.
self._assert_node_attributes(
node,
{
'str_str_default_force_export': 'default',
'str_uid_default_force_export': UNIQUE_ID
}
)
self._assert_node_elements(
node,
{
# The tag is prefixed with {namespace}.
'{{{}}}{}'.format(
XML_NAMESPACES["option"],
'str_none_default_force_export'
): None
}
)
def test_xblock_aside_add_xml_to_node(self):
"""
Tests that add_xml_to_node works proper for xblock aside.
"""
node = etree.Element(self.test_xblock_aside_tag)
self.test_xblock_aside.str_field = 'str_field_val_aside'
self.test_xblock_aside.str_str_default = 'str_str_default_val'
self.test_xblock_aside.add_xml_to_node(node)
self._assert_node_attributes(
node,
{
'str_field': 'str_field_val_aside',
'str_str_default': 'str_str_default_val',
},
self.TestXBlockAside.entry_point
)
self._assert_node_elements(node, {})
@ddt.data(
(None, {'datetime': ''}),
(datetime(2014, 4, 1, 2, 3, 4, 567890).replace(tzinfo=pytz.utc), {'datetime': '2014-04-01T02:03:04.567890'})
)
@ddt.unpack
def test_datetime_serialization(self, value, expected_attributes):
"""
Tests exporting DateTime fields to XML
"""
node = etree.Element(self.test_xblock_datetime_tag)
self.test_xblock_datetime.datetime = value
self.test_xblock_datetime.add_xml_to_node(node)
self._assert_node_attributes(node, expected_attributes)
|
the-stack_0_22196 | import inspect
from collections import OrderedDict
from typing import List, Callable, Tuple
import torch
from pytorch_toolbelt.modules import ACT_RELU, get_activation_block
from pytorch_toolbelt.modules.encoders import EncoderModule, make_n_channel_input
from torch import nn, Tensor
__all__ = ["StackedHGEncoder", "StackedSupervisedHGEncoder"]
def conv1x1_bn_act(in_channels, out_channels, activation=nn.ReLU):
return nn.Sequential(
OrderedDict(
[
("conv", nn.Conv2d(in_channels, out_channels, kernel_size=1)),
("bn", nn.BatchNorm2d(out_channels)),
("act", activation(inplace=True)),
]
)
)
class HGResidualBlock(nn.Module):
def __init__(self, input_channels: int, output_channels: int, reduction=2, activation: Callable = nn.ReLU):
super(HGResidualBlock, self).__init__()
mid_channels = input_channels // reduction
self.bn1 = nn.BatchNorm2d(input_channels)
self.act1 = activation(inplace=True)
self.conv1 = nn.Conv2d(input_channels, mid_channels, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(mid_channels)
self.act2 = activation(inplace=True)
self.conv2 = nn.Conv2d(mid_channels, mid_channels, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(mid_channels)
self.act3 = activation(inplace=True)
self.conv3 = nn.Conv2d(mid_channels, output_channels, kernel_size=1, bias=True)
if input_channels == output_channels:
self.skip_layer = nn.Identity()
else:
self.skip_layer = nn.Conv2d(input_channels, output_channels, kernel_size=1)
torch.nn.init.zeros_(self.skip_layer.bias)
torch.nn.init.zeros_(self.conv3.bias)
def forward(self, x: Tensor) -> Tensor: # skipcq: PYL-W0221
residual = self.skip_layer(x)
out = self.bn1(x)
out = self.act1(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.act2(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.act3(out)
out = self.conv3(out)
out += residual
return out
class HGStemBlock(nn.Module):
def __init__(self, input_channels, output_channels, activation: Callable = nn.ReLU):
super().__init__()
self.conv1 = nn.Conv2d(input_channels, 16, kernel_size=3, padding=1, stride=2, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.act1 = activation(inplace=True)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1, stride=1, bias=False)
self.bn2 = nn.BatchNorm2d(32)
self.act2 = activation(inplace=True)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=2, bias=False)
self.bn3 = nn.BatchNorm2d(64)
self.act3 = activation(inplace=True)
self.residual1 = HGResidualBlock(64, 128)
self.residual2 = HGResidualBlock(128, output_channels)
def forward(self, x: Tensor) -> Tensor: # skipcq: PYL-W0221
x = self.act1(self.bn1(self.conv1(x)))
x = self.act2(self.bn2(self.conv2(x)))
x = self.act3(self.bn3(self.conv3(x)))
x = self.residual1(x)
x = self.residual2(x)
return x
class HGBlock(nn.Module):
"""
A single Hourglass model block.
"""
def __init__(
self,
depth: int,
input_features: int,
features,
increase=0,
activation=nn.ReLU,
repeats=1,
pooling_block=nn.MaxPool2d,
):
super(HGBlock, self).__init__()
nf = features + increase
if inspect.isclass(pooling_block) and issubclass(pooling_block, (nn.MaxPool2d, nn.AvgPool2d)):
self.down = pooling_block(kernel_size=2, padding=0, stride=2)
else:
self.down = pooling_block(input_features)
if repeats == 1:
self.up1 = HGResidualBlock(input_features, features, activation=activation)
self.low1 = HGResidualBlock(input_features, nf, activation=activation)
else:
up_blocks = []
up_input_features = input_features
for _ in range(repeats):
up_blocks.append(HGResidualBlock(up_input_features, features))
up_input_features = features
self.up1 = nn.Sequential(*up_blocks)
down_blocks = []
down_input_features = input_features
for _ in range(repeats):
                down_blocks.append(HGResidualBlock(down_input_features, nf))
down_input_features = nf
self.low1 = nn.Sequential(*down_blocks)
self.depth = depth
# Recursive hourglass
if self.depth > 1:
self.low2 = HGBlock(
depth - 1,
nf,
nf,
increase=increase,
pooling_block=pooling_block,
activation=activation,
repeats=repeats,
)
else:
self.low2 = HGResidualBlock(nf, nf, activation=activation)
self.low3 = HGResidualBlock(nf, features, activation=activation)
self.up = nn.Upsample(scale_factor=2, mode="nearest")
def forward(self, x: Tensor) -> Tensor: # skipcq: PYL-W0221
up1 = self.up1(x)
pool1 = self.down(x)
low1 = self.low1(pool1)
low2 = self.low2(low1)
low3 = self.low3(low2)
up2 = self.up(low3)
hg = up1 + up2
return hg
class HGFeaturesBlock(nn.Module):
def __init__(self, features: int, activation: Callable, blocks=1):
super().__init__()
residual_blocks = [HGResidualBlock(features, features, activation=activation) for _ in range(blocks)]
self.residuals = nn.Sequential(*residual_blocks)
self.linear = conv1x1_bn_act(features, features, activation=activation)
def forward(self, x: Tensor) -> Tensor: # skipcq: PYL-W0221
x = self.residuals(x)
x = self.linear(x)
return x
class HGSupervisionBlock(nn.Module):
def __init__(self, features, supervision_channels: int):
super().__init__()
self.squeeze = nn.Conv2d(features, supervision_channels, kernel_size=1)
self.expand = nn.Conv2d(supervision_channels, features, kernel_size=1)
def forward(self, x: Tensor) -> Tuple[Tensor, Tensor]: # skipcq: PYL-W0221
sup_mask = self.squeeze(x)
sup_features = self.expand(sup_mask)
return sup_mask, sup_features
class StackedHGEncoder(EncoderModule):
"""
Original implementation: https://github.com/princeton-vl/pytorch_stacked_hourglass/blob/master/models/layers.py
"""
def __init__(
self,
input_channels: int = 3,
stack_level: int = 8,
depth: int = 4,
features: int = 256,
activation=ACT_RELU,
repeats=1,
pooling_block=nn.MaxPool2d,
):
super().__init__(
channels=[features] + [features] * stack_level,
strides=[4] + [4] * stack_level,
layers=list(range(0, stack_level + 1)),
)
self.stack_level = stack_level
self.depth_level = depth
self.num_features = features
act = get_activation_block(activation)
self.stem = HGStemBlock(input_channels, features, activation=act)
input_features = features
modules = []
for _ in range(stack_level):
modules.append(
HGBlock(
depth,
input_features,
features,
increase=0,
activation=act,
repeats=repeats,
pooling_block=pooling_block,
)
)
input_features = features
self.num_blocks = len(modules)
self.blocks = nn.ModuleList(modules)
self.features = nn.ModuleList(
[HGFeaturesBlock(features, blocks=4, activation=act) for _ in range(stack_level)]
)
self.merge_features = nn.ModuleList(
[nn.Conv2d(features, features, kernel_size=1) for _ in range(stack_level - 1)]
)
def __str__(self):
return f"hg_s{self.stack_level}_d{self.depth_level}_f{self.num_features}"
def forward(self, x: Tensor) -> List[Tensor]: # skipcq: PYL-W0221
x = self.stem(x)
outputs = [x]
for i, hourglass in enumerate(self.blocks):
features = self.features[i](hourglass(x))
outputs.append(features)
if i < self.num_blocks - 1:
x = x + self.merge_features[i](features)
return outputs
def change_input_channels(self, input_channels: int, mode="auto"):
self.stem.conv1 = make_n_channel_input(self.stem.conv1, input_channels, mode)
return self
@property
def encoder_layers(self) -> List[nn.Module]:
return [self.stem] + list(self.blocks)
class StackedSupervisedHGEncoder(StackedHGEncoder):
def __init__(
self,
supervision_channels: int,
input_channels: int = 3,
stack_level: int = 8,
depth: int = 4,
features: int = 256,
activation=ACT_RELU,
repeats=1,
pooling_block=nn.MaxPool2d,
supervision_block=HGSupervisionBlock,
):
super().__init__(
input_channels=input_channels,
stack_level=stack_level,
depth=depth,
features=features,
activation=activation,
repeats=repeats,
pooling_block=pooling_block,
)
self.supervision_blocks = nn.ModuleList(
[supervision_block(features, supervision_channels) for _ in range(stack_level - 1)]
)
def forward(self, x: Tensor) -> Tuple[List[Tensor], List[Tensor]]: # skipcq: PYL-W0221
x = self.stem(x)
outputs = [x]
supervision = []
for i, hourglass in enumerate(self.blocks):
features = self.features[i](hourglass(x))
outputs.append(features)
if i < self.num_blocks - 1:
sup_mask, sup_features = self.supervision_blocks[i](features)
supervision.append(sup_mask)
x = x + self.merge_features[i](features) + sup_features
return outputs, supervision
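if __name__ == "__main__":
    # Smoke-test sketch (assumption: a small CPU-only configuration).
    encoder = StackedHGEncoder(input_channels=3, stack_level=2, depth=2, features=64)
    feature_maps = encoder(torch.randn(1, 3, 256, 256))
    # Expect one stem output plus one map per hourglass stack, all at stride 4.
    print([tuple(f.shape) for f in feature_maps])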
|
the-stack_0_22197 | from datetime import date, datetime
from decimal import Decimal
import logging
import re
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.urlresolvers import get_callable
from django.db import models
from django.db.models import F, ObjectDoesNotExist, Sum, Q
from django.utils.translation import ugettext_lazy as _
import plata
from plata.fields import CurrencyField, JSONField
from plata.shop.countries import countries
logger = logging.getLogger('plata.shop.order')
#modified by fruitschen
try:
from custom_site.custom_order import OrderMinxin
except ImportError:
    class OrderMinxin(object):
        pass
class TaxClass(models.Model):
"""
Tax class, storing a tax rate
TODO informational / advisory currency or country fields?
"""
name = models.CharField(_('name'), max_length=100)
rate = models.DecimalField(_('rate'), max_digits=10, decimal_places=2,
help_text=_('Tax rate in percent.'))
priority = models.PositiveIntegerField(_('priority'), default=0,
help_text = _('Used to order the tax classes in the administration interface.'))
class Meta:
ordering = ['-priority']
verbose_name = _('tax class')
verbose_name_plural = _('tax classes')
def __unicode__(self):
return self.name
class BillingShippingAddress(models.Model):
"""
Abstract base class for all models storing a billing and a shipping address
"""
ADDRESS_FIELDS = ['company', 'first_name', 'last_name', 'address',
'zip_code', 'city', 'country', 'call_prefix', 'phone']
billing_company = models.CharField(_('company'), max_length=100, blank=True)
billing_first_name = models.CharField(_('first name'), max_length=100)
billing_last_name = models.CharField(_('last name'), max_length=100)
billing_call_prefix = models.CharField(_('phone prefix'), max_length=100)
billing_phone = models.CharField(_('Phone'), max_length=50)
billing_address = models.TextField(_('address'))
billing_zip_code = models.CharField(_('ZIP code'), max_length=50)
billing_city = models.CharField(_('city'), max_length=100)
billing_country = models.CharField(_('country'), max_length=3, choices=countries)
shipping_same_as_billing = models.BooleanField(_('shipping address equals billing address'),
default=True)
shipping_company = models.CharField(_('company'), max_length=100, blank=True)
shipping_first_name = models.CharField(_('first name'), max_length=100, blank=True)
shipping_last_name = models.CharField(_('last name'), max_length=100, blank=True)
shipping_address = models.TextField(_('address'), blank=True)
shipping_call_prefix = models.CharField(_('phone prefix'), max_length=100, blank=True)
shipping_phone = models.CharField(_('Phone'), max_length=50, blank=True)
shipping_zip_code = models.CharField(_('ZIP code'), max_length=50, blank=True)
shipping_city = models.CharField(_('city'), max_length=100, blank=True)
shipping_country = models.CharField(_('country'), max_length=3, blank=True, choices=countries)
class Meta:
abstract = True
def addresses(self):
"""
Return a ``dict`` containing a billing and a shipping address, taking
into account the value of the ``shipping_same_as_billing`` flag
"""
billing = dict((f, getattr(self, 'billing_%s' % f)) for f in self.ADDRESS_FIELDS)
if 'country' in billing:
billing['country'] = self.get_billing_country_display()
if self.shipping_same_as_billing:
shipping = billing
else:
shipping = dict((f, getattr(self, 'shipping_%s' % f)) for f in self.ADDRESS_FIELDS)
if 'country' in shipping:
shipping['country'] = self.get_shipping_country_display() or self.get_billing_country_display()
return {'billing': billing, 'shipping': shipping}
@classmethod
def address_fields(cls, prefix=''):
return ['%s%s' % (prefix, f) for f in cls.ADDRESS_FIELDS]
class Order(BillingShippingAddress, OrderMinxin):
"""The main order model. Used for carts and orders alike."""
#: Order object is a cart.
CART = 10
#: Checkout process has started.
CHECKOUT = 20
#: Order has been confirmed, but it not (completely) paid for yet.
CONFIRMED = 30
#: Order has been completely paid for.
PAID = 40
#: Order has been completed. Plata itself never sets this state,
#: it is only meant for use by the shop owners.
COMPLETED = 50
STATUS_CHOICES = (
(CART, _('Is a cart')),
(CHECKOUT, _('Checkout process started')),
(CONFIRMED, _('Order has been confirmed')),
(PAID, _('Order has been paid')),
(COMPLETED, _('Order has been completed')),
)
created = models.DateTimeField(_('created'), default=datetime.now)
confirmed = models.DateTimeField(_('confirmed'), blank=True, null=True)
user = models.ForeignKey(User, blank=True, null=True,
verbose_name=_('user'), related_name='orders')
language_code = models.CharField(_('language'), max_length=10,
default='', blank=True)
status = models.PositiveIntegerField(_('status'), choices=STATUS_CHOICES,
default=CART)
_order_id = models.CharField(_('order ID'), max_length=20, blank=True)
email = models.EmailField(_('e-mail address'))
currency = CurrencyField()
items_subtotal = models.DecimalField(_('subtotal'),
max_digits=18, decimal_places=4, default=Decimal('0.00'))
items_discount = models.DecimalField(_('items discount'),
max_digits=18, decimal_places=4, default=Decimal('0.00'))
items_tax = models.DecimalField(_('items tax'),
max_digits=18, decimal_places=4, default=Decimal('0.00'))
shipping_method = models.CharField(_('shipping method'),
max_length=100, blank=True)
shipping_cost = models.DecimalField(_('shipping cost'),
max_digits=18, decimal_places=4, blank=True, null=True)
shipping_discount = models.DecimalField(_('shipping discount'),
max_digits=18, decimal_places=4, blank=True, null=True)
shipping_tax = models.DecimalField(_('shipping tax'),
max_digits=18, decimal_places=4, default=Decimal('0.00'))
total = models.DecimalField(_('total'),
max_digits=18, decimal_places=4, default=Decimal('0.00'))
paid = models.DecimalField(_('paid'),
max_digits=18, decimal_places=4, default=Decimal('0.00'),
help_text=_('This much has been paid already.'))
notes = models.TextField(_('notes'), blank=True)
data = JSONField(_('data'), blank=True,
help_text=_('JSON-encoded additional data about the order payment.'))
class Meta:
verbose_name = _('order')
verbose_name_plural = _('orders')
def __unicode__(self):
return self.order_id
def save(self, *args, **kwargs):
"""Sequential order IDs for completed orders."""
if not self._order_id and self.status >= self.PAID:
try:
order = Order.objects.exclude(_order_id='').order_by('-_order_id')[0]
latest = int(re.sub(r'[^0-9]', '', order._order_id))
except (IndexError, ValueError):
latest = 0
self._order_id = 'O-%09d' % (latest + 1)
super(Order, self).save(*args, **kwargs)
@property
def order_id(self):
"""
Returns ``_order_id`` (if it has been set) or a generic ID for this order.
"""
if self._order_id:
return self._order_id
return u'No. %d' % self.id
def recalculate_total(self, save=True):
"""
Recalculates totals, discounts, taxes.
"""
items = list(self.items.all())
shared_state = {}
processor_classes = [get_callable(processor) for processor\
in plata.settings.PLATA_ORDER_PROCESSORS]
for p in (cls(shared_state) for cls in processor_classes):
p.process(self, items)
if save:
self.save()
[item.save() for item in items]
@property
def subtotal(self):
"""
Returns the order subtotal.
"""
# TODO: What about shipping?
return sum((item.subtotal for item in self.items.all()), Decimal('0.00')).quantize(Decimal('0.00'))
@property
def discount(self):
"""
Returns the discount total.
"""
# TODO: What about shipping?
return (sum((item.subtotal for item in self.items.all()), Decimal('0.00')) -
sum((item.discounted_subtotal for item in self.items.all()), Decimal('0.00'))).quantize(Decimal('0.00'))
@property
def shipping(self):
"""
Returns the shipping cost, with or without tax depending on the
``PLATA_PRICE_INCLUDES_TAX`` setting.
"""
if plata.settings.PLATA_PRICE_INCLUDES_TAX:
if self.shipping_cost is None:
return None
return self.shipping_cost - self.shipping_discount + self.shipping_tax
else:
logger.error('Shipping calculation with PLATA_PRICE_INCLUDES_TAX=False is not implemented yet')
raise NotImplementedError
@property
def tax(self):
"""
Returns the tax total for this order, meaning tax on order items and tax
on shipping.
"""
return (self.items_tax + self.shipping_tax).quantize(Decimal('0.00'))
@property
def balance_remaining(self):
"""
Returns the balance which needs to be paid by the customer to fully pay
this order. This value is not necessarily the same as the order total,
because there can be more than one order payment in principle.
"""
return (self.total - self.paid).quantize(Decimal('0.00'))
def is_paid(self):
import warnings
warnings.warn(
'Order.is_paid() has been deprecated because its name is misleading. '
            'Test for `order.status >= order.PAID` or `not order.balance_remaining` '
            'yourself.',
DeprecationWarning)
return self.balance_remaining <= 0
#: This validator is always called; basic consistency checks such as whether the
#: currencies in the order match should be added here.
VALIDATE_BASE = 10
#: A cart which fails the criteria added to the ``VALIDATE_CART`` group isn't
#: considered a valid cart and the user cannot proceed to the checkout form.
#: Stuff such as stock checking, minimal order total checking, or maximal items
#: checking might be added here.
VALIDATE_CART = 20
#: This should not be used while registering a validator, it's mostly useful as
#: an argument to :meth:`~plata.shop.models.Order.validate` when you want to
#: run all validators.
VALIDATE_ALL = 100
VALIDATORS = {}
@classmethod
def register_validator(cls, validator, group):
"""
Registers another order validator in a validation group
A validator is a callable accepting an order (and only an order).
There are several types of order validators:
- Base validators are always called
- Cart validators: Need to validate for a valid cart
- Checkout validators: Need to validate in the checkout process
"""
cls.VALIDATORS.setdefault(group, []).append(validator)
def validate(self, group):
"""
Validates this order
The argument determines which order validators are called:
- ``Order.VALIDATE_BASE``
- ``Order.VALIDATE_CART``
- ``Order.VALIDATE_CHECKOUT``
- ``Order.VALIDATE_ALL``
"""
for g in sorted(g for g in self.VALIDATORS.keys() if g<=group):
for validator in self.VALIDATORS[g]:
validator(self)
def modify_item(self, product, relative=None, absolute=None, recalculate=True):
"""
Updates order with the given product
- ``relative`` or ``absolute``: Add/subtract or define order item amount exactly
- ``recalculate``: Recalculate order after cart modification (defaults to ``True``)
Returns the ``OrderItem`` instance; if quantity is zero, the order item instance
is deleted, the ``pk`` attribute set to ``None`` but the order item is returned
anway.
"""
assert (relative is None) != (absolute is None), 'One of relative or absolute must be provided.'
if self.status >= self.CONFIRMED:
raise ValidationError(_('Cannot modify order once it has been confirmed.'),
code='order_sealed')
try:
item = self.items.get(product=product)
except self.items.model.DoesNotExist:
item = self.items.model(
order=self,
product=product,
quantity=0,
currency=self.currency,
)
if relative is not None:
item.quantity += relative
else:
item.quantity = absolute
if item.quantity > 0:
try:
price = product.get_price(currency=self.currency, orderitem=item)
except ObjectDoesNotExist:
logger.error('No price could be found for %s with currency %s' % (
product, self.currency))
raise
price.handle_order_item(item)
product.handle_order_item(item)
item.save()
else:
if item.pk:
item.delete()
item.pk = None
if recalculate:
self.recalculate_total()
# Reload item instance from DB to preserve field values
# changed in recalculate_total
if item.pk:
item = self.items.get(pk=item.pk)
try:
self.validate(self.VALIDATE_BASE)
except ValidationError:
if item.pk:
item.delete()
raise
return item
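    # Usage sketch (the `product` variable is illustrative):
    #
    #   order.modify_item(product, relative=2)   # put two units in the cart
    #   order.modify_item(product, absolute=0)   # remove the line item again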
@property
def discount_remaining(self):
"""Remaining discount amount excl. tax"""
return self.applied_discounts.remaining()
def update_status(self, status, notes):
"""
Update the order status
"""
if status >= Order.CHECKOUT:
if not self.items.count():
raise ValidationError(_('Cannot proceed to checkout without order items.'),
code='order_empty')
logger.info('Promoting %s to status %s' % (self, status))
instance = OrderStatus(
order=self,
status=status,
notes=notes)
instance.save()
def reload(self):
"""
Return this order instance, reloaded from the database
Used f.e. inside the payment processors when adding new payment records etc.
"""
return self.__class__._default_manager.get(pk=self.id)
def validate_order_currencies(order):
"""Check whether order contains more than one or an invalid currency"""
currencies = set(order.items.values_list('currency', flat=True))
if currencies and (len(currencies) > 1 or order.currency not in currencies):
raise ValidationError(_('Order contains more than one currency.'),
code='multiple_currency')
Order.register_validator(validate_order_currencies, Order.VALIDATE_BASE)
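# Example of a project-specific cart validator (illustrative sketch only, not
# registered here; the threshold is an assumption):
#
#   def validate_minimum_total(order):
#       if order.total < Decimal('10.00'):
#           raise ValidationError(_('Minimum order total is 10.00.'),
#               code='total_too_low')
#
#   Order.register_validator(validate_minimum_total, Order.VALIDATE_CART)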
class OrderItem(models.Model):
"""Single order line item"""
order = models.ForeignKey(Order, related_name='items')
product = models.ForeignKey(plata.settings.PLATA_SHOP_PRODUCT, verbose_name=_('product'),
blank=True, null=True, on_delete=models.SET_NULL)
name = models.CharField(_('name'), max_length=100, blank=True)
sku = models.CharField(_('SKU'), max_length=100, blank=True)
quantity = models.IntegerField(_('quantity'))
currency = CurrencyField()
_unit_price = models.DecimalField(_('unit price'),
max_digits=18, decimal_places=4,
help_text=_('Unit price excl. tax'))
_unit_tax = models.DecimalField(_('unit tax'),
max_digits=18, decimal_places=4)
tax_rate = models.DecimalField(_('tax rate'), max_digits=10, decimal_places=2)
tax_class = models.ForeignKey(TaxClass, verbose_name=_('tax class'),
blank=True, null=True, on_delete=models.SET_NULL)
is_sale = models.BooleanField(_('is sale'))
_line_item_price = models.DecimalField(_('line item price'),
max_digits=18, decimal_places=4, default=0,
help_text=_('Line item price excl. tax'))
_line_item_discount = models.DecimalField(_('line item discount'),
max_digits=18, decimal_places=4,
blank=True, null=True,
help_text=_('Discount excl. tax'))
_line_item_tax = models.DecimalField(_('line item tax'),
max_digits=18, decimal_places=4, default=0)
data = JSONField(_('data'), blank=True,
help_text=_('JSON-encoded additional data about the order payment.'))
class Meta:
ordering = ('product',)
unique_together = (('order', 'product'),)
verbose_name = _('order item')
verbose_name_plural = _('order items')
def __unicode__(self):
return _(u'%(quantity)s of %(product)s') % {'quantity': self.quantity,
'product': self.product}
@property
def unit_price(self):
if plata.settings.PLATA_PRICE_INCLUDES_TAX:
return self._unit_price + self._unit_tax
return self._unit_price
@property
def line_item_discount_excl_tax(self):
return self._line_item_discount or 0
@property
def line_item_discount_incl_tax(self):
return self.line_item_discount_excl_tax * (1+self.tax_rate/100)
@property
def line_item_discount(self):
if plata.settings.PLATA_PRICE_INCLUDES_TAX:
return self.line_item_discount_incl_tax
else:
return self.line_item_discount_excl_tax
@property
def subtotal(self):
return self.unit_price * self.quantity
@property
def discounted_subtotal_excl_tax(self):
return self._line_item_price - (self._line_item_discount or 0)
@property
def discounted_subtotal_incl_tax(self):
return self.discounted_subtotal_excl_tax + self._line_item_tax
@property
def discounted_subtotal(self):
if plata.settings.PLATA_PRICE_INCLUDES_TAX:
return self.discounted_subtotal_incl_tax
else:
return self.discounted_subtotal_excl_tax
class OrderStatus(models.Model):
"""
Order status
Stored in separate model so that the order status changes stay
visible for analysis after the fact.
"""
order = models.ForeignKey(Order, related_name='statuses')
created = models.DateTimeField(_('created'), default=datetime.now)
    status = models.PositiveIntegerField(_('status'), choices=Order.STATUS_CHOICES)
notes = models.TextField(_('notes'), blank=True)
class Meta:
ordering = ('created', 'id')
verbose_name = _('order status')
verbose_name_plural = _('order statuses')
def __unicode__(self):
return (_(u'Status %(status)s for %(order)s') % {
'status': self.get_status_display(),
'order': self.order})
def save(self, *args, **kwargs):
super(OrderStatus, self).save(*args, **kwargs)
self.order.status = self.status
if self.status >= Order.CONFIRMED and not self.order.confirmed:
self.order.confirmed = datetime.now()
elif self.status < Order.CONFIRMED:
# Ensure that the confirmed date is not set
self.order.confirmed = None
self.order.save()
class OrderPaymentManager(models.Manager):
def pending(self):
return self.filter(status=self.model.PENDING)
def authorized(self):
return self.filter(authorized__isnull=False)
class OrderPayment(models.Model):
"""
Order payment
Stores additional data from the payment interface for analysis
and accountability.
"""
PENDING = 10
PROCESSED = 20
AUTHORIZED = 30
STATUS_CHOICES = (
(PENDING, _('pending')),
(PROCESSED, _('processed')),
(AUTHORIZED, _('authorized')),
)
order = models.ForeignKey(Order, verbose_name=_('order'), related_name='payments')
timestamp = models.DateTimeField(_('timestamp'), default=datetime.now)
status = models.PositiveIntegerField(_('status'), choices=STATUS_CHOICES,
default=PENDING)
currency = CurrencyField()
amount = models.DecimalField(_('amount'), max_digits=10, decimal_places=2)
payment_module_key = models.CharField(_('payment module key'), max_length=20,
help_text=_('Machine-readable identifier for the payment module used.'))
payment_module = models.CharField(_('payment module'), max_length=50, blank=True,
help_text=_('For example \'Cash on delivery\', \'PayPal\', ...'))
payment_method = models.CharField(_('payment method'), max_length=50, blank=True,
help_text=_('For example \'MasterCard\', \'VISA\' or some other card.'))
transaction_id = models.CharField(_('transaction ID'), max_length=50, blank=True,
help_text=_('Unique ID identifying this payment in the foreign system.'))
authorized = models.DateTimeField(_('authorized'), blank=True, null=True,
help_text=_('Point in time when payment has been authorized.'))
notes = models.TextField(_('notes'), blank=True)
data = JSONField(_('data'), blank=True,
help_text=_('JSON-encoded additional data about the order payment.'))
class Meta:
ordering = ('-timestamp',)
verbose_name = _('order payment')
verbose_name_plural = _('order payments')
objects = OrderPaymentManager()
def __unicode__(self):
return (_(u'%(authorized)s of %(currency)s %(amount).2f for %(order)s')
% {'authorized': self.authorized and _(u'Authorized')
or _(u'Not authorized'),
'currency': self.currency,
'amount': self.amount,
'order': self.order})
def _recalculate_paid(self):
paid = OrderPayment.objects.authorized().filter(
order=self.order_id,
currency=F('order__currency'),
).aggregate(total=Sum('amount'))['total'] or 0
Order.objects.filter(id=self.order_id).update(paid=paid)
def save(self, *args, **kwargs):
super(OrderPayment, self).save(*args, **kwargs)
self._recalculate_paid()
if self.currency != self.order.currency:
self.order.notes += u'\n' + _('Currency of payment %s does not match.') % self
self.order.save()
def delete(self, *args, **kwargs):
super(OrderPayment, self).delete(*args, **kwargs)
self._recalculate_paid()
class PriceBase(models.Model):
"""
Price for a given product, currency, tax class and time period
Prices should not be changed or deleted but replaced by more recent prices.
(Deleting old prices does not hurt, but the price history cannot be
reconstructed anymore if you'd need it.)
The concrete implementation needs to provide a foreign key to the
product model.
"""
class Meta:
abstract = True
ordering = ['-id']
verbose_name = _('price')
verbose_name_plural = _('prices')
currency = CurrencyField()
_unit_price = models.DecimalField(_('unit price'), max_digits=18, decimal_places=4)
tax_included = models.BooleanField(_('tax included'),
help_text=_('Is tax included in given unit price?'),
default=plata.settings.PLATA_PRICE_INCLUDES_TAX)
tax_class = models.ForeignKey(TaxClass, verbose_name=_('tax class'),
related_name='+')
def __unicode__(self):
return u'%s %.2f' % (self.currency, self.unit_price)
def handle_order_item(self, item):
"""
Set price data on the ``OrderItem`` passed
"""
item._unit_price = self.unit_price_excl_tax
item._unit_tax = self.unit_tax
item.tax_rate = self.tax_class.rate
item.tax_class = self.tax_class
item.is_sale = False # Hardcoded; add an is_sale like in the ``Price`` class below
@property
def unit_tax(self):
return self.unit_price_excl_tax * (self.tax_class.rate/100)
@property
def unit_price_incl_tax(self):
if self.tax_included:
return self._unit_price
return self._unit_price * (1+self.tax_class.rate/100)
@property
def unit_price_excl_tax(self):
if not self.tax_included:
return self._unit_price
return self._unit_price / (1+self.tax_class.rate/100)
@property
def unit_price(self):
if plata.settings.PLATA_PRICE_INCLUDES_TAX:
return self.unit_price_incl_tax
else:
return self.unit_price_excl_tax
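# A concrete price model only needs to add the product foreign key. Illustrative
# sketch (model and app names are assumptions; this would live in the project's
# own product app):
#
#   class ProductPrice(PriceBase):
#       product = models.ForeignKey('myshop.Product', verbose_name=_('product'),
#           related_name='prices')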
|
the-stack_0_22199 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from numpy.testing import assert_allclose
from ...datasets import (
load_arf_fits_table,
load_poisson_stats_image,
load_psf_fits_table,
)
def test_load_arf_fits_table():
data = load_arf_fits_table()
assert len(data) == 2
def test_load_poisson_stats_image():
data = load_poisson_stats_image()
assert data.sum() == 40896
images = load_poisson_stats_image(extra_info=True)
refs = dict(counts=40896, model=41000, source=1000, background=40000)
for name, expected in refs.items():
assert_allclose(images[name].sum(), expected)
def test_load_psf_fits_table():
data = load_psf_fits_table()
assert len(data) == 2
|
the-stack_0_22200 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 28 16:58:46 2017
@author: kaihong
"""
from __future__ import print_function
import numpy as np
import scipy
from optimizer import SolveGaussHelmertProblem
from numpy.random import randn
from pycppad import independent,adfun
def GenerateAutoDiffFunction(g, x0, l0):
dim_x = x0.shape[0]
a_var = independent(np.hstack([x0, l0]))
jacobian = adfun(a_var, g(a_var[:dim_x], a_var[dim_x:])).jacobian
def g_autodiff(x, l):
err = g(x, l)
J = jacobian(np.hstack([x, l]))
return err, J[:, :dim_x], J[:, dim_x:]
return g_autodiff
def Rot2ax(R):
"""Rotation matrix to angle-axis vector"""
tr = np.trace(R)
a = np.array( [R[2,1]-R[1,2], R[0,2]-R[2,0], R[1,0]-R[0,1]] )
an = np.linalg.norm(a)
phi= np.arctan2(an, tr-1)
if np.abs(phi) < 1e-12:
return np.zeros(3,'d')
else:
return phi/an*a
def skew(v):
return np.array([[ 0, -v[2], v[1]],
[ v[2], 0, -v[0]],
[-v[1], v[0], 0 ]])
def ax2Rot(r):
"""Angle-axis vector to rotation matrix"""
p = np.linalg.norm(r)
if np.abs(p) < 1e-12:
return np.eye(3)
else:
S = skew(r/p)
return np.eye(3) + np.sin(p)*S + (1.0-np.cos(p))*S.dot(S)
def MfromRT(r,t):
T = np.eye(4)
T[:3,:3] = ax2Rot(r)
T[:3, 3] = t
return T
def RTfromM(mat):
return Rot2ax(mat[:3,:3]), mat[:3,3]
#%%
def ExtrinsicCalibration3D(trajectories_list, trajectories_cov_list=None, *args):
""" Motion-base sensor calibration
Constrain equations:
( I3 - R(r_b) )*xi_b = t_b - R(eta_b)*t_a
and
R(eta_b)*r_a = r_b
Input
-----------
trajectories_list: list( list( 4x4 pose matrices ) )
Output
-----------
calibration result as a list of matrix
"""
num_pose_list = list(map(len, trajectories_list))
if len(set(num_pose_list))!=1:
raise ValueError("each trajectory should have the same number of poses")
num_pose = num_pose_list[0]
num_sensor = len(trajectories_list)
num_solution = num_sensor-1
print("Input: %d sensors, each has %d poses" % (num_sensor, num_pose))
'''Assemble observation matrix lm, each row l = [ra,ta, ..., rm, tm]'''
stacked_r_list = [ np.vstack( [ Rot2ax(pose_mat[:3,:3]) for pose_mat in trajectory] )
for trajectory in trajectories_list ]
stacked_t_list = [ np.vstack( [ pose_mat[:3, 3] for pose_mat in trajectory] )
for trajectory in trajectories_list ]
r_t_interleaved = map(np.hstack, zip(stacked_r_list, stacked_t_list))
lm = np.hstack( r_t_interleaved ) # lm.shape = (num_pose, 6*num_sensor)
'''Assemble covariance matrix '''
if trajectories_cov_list is None:
Cov_ll = np.tile(np.eye(6*num_sensor), (num_pose, 1, 1))
else:
Cov_ll = np.zeros((num_pose, 6*num_sensor, 6*num_sensor))
cov_list_time_majored = list(zip(*trajectories_cov_list)) # list[sensor_idx][pose_idx] -> list[pose_idx][sensor_idx]
for pose_idx in range(num_pose):
Cov_ll[pose_idx, :, :] = scipy.linalg.block_diag(*cov_list_time_majored[pose_idx])
'''Calculate close form solution as initial guess'''
x0_list = []
I3 = np.eye(3)
for s in range(1, num_sensor):
# rotation first
H = stacked_r_list[0].T.dot(stacked_r_list[s])
U, d, Vt = np.linalg.svd(H)
R = Vt.T.dot(U.T)
# then translation
A = np.vstack([ I3 - ax2Rot(r_) for r_ in stacked_r_list[s]])
b = np.hstack( stacked_t_list[s] - ( R.dot(stacked_t_list[0].T) ).T )
t = np.linalg.lstsq(A, b)[0]
x0_list.append([Rot2ax(R), t])
x0 = np.array(x0_list).flatten()
print('Initial guess:')
map(lambda rt: print(MfromRT(*rt)), x0_list)
'''Assemble constraint functions '''
def g(x, l):
x = np.reshape(x, (num_solution, 6))
l = np.reshape(l, (num_sensor, 6))
r,t = np.split(l, 2, axis=1)
e = []
for x_s, s in zip(x, range(1, num_sensor)):
Rq = ax2Rot(x_s[0:3])
Rs = ax2Rot(r[s])
e.append(x_s[3:] - Rs.dot(x_s[3:]) + Rq.dot(t[0]) - t[s]) # g1 constraints
e.append( Rq.dot(r[0]) - r[s] ) # full-constraints
return np.hstack(e)
g_diff = GenerateAutoDiffFunction(g, x0, lm[0,:])
'''solve'''
xnu, Cov_xx, sigma_0, vv, w = SolveGaussHelmertProblem(g_diff, x0, lm, Cov_ll, *args)
return [MfromRT(x[:3], x[3:]) for x in np.split(xnu, num_solution) ]
#%%
def demo_and_test():
def randsp(n=3):
v = np.random.uniform(-1, 1, size=n)
return v/np.linalg.norm(v)
''' ground truth transformation between sensors '''
num_sensor = 3
num_solution = num_sensor-1
x_true = np.array([randsp() for _ in range(num_solution*2) ]).ravel() # x= [r1,t1,...,rn,tn]
Hm = [MfromRT(x[:3], x[3:]) for x in np.split(x_true, num_solution)]
''' generate ground truth trajectories '''
num_pose = 500
dM = []
Hm_inv = [np.linalg.inv(h) for h in Hm]
for t in range(num_pose):
dm = [MfromRT(randsp(),randsp())] # base sensor
for h, h_inv in zip(Hm, Hm_inv): # other sensor
dm.append( h.dot(dm[0]).dot(h_inv) )
dM.append(dm)
trajectories_list = zip(*dM)
''' add measurement noise'''
sigma_r = 1e-3
sigma_t = 1e-2
noisy_trajectories = []
for trajectory in trajectories_list:
one_trajectory = []
for pose in trajectory:
r,t = RTfromM(pose)
new_pose = MfromRT( r+sigma_r*randn(3), t+sigma_t*randn(3))
one_trajectory.append(new_pose)
noisy_trajectories.append(one_trajectory)
trajectory_covs = [ [np.diag([sigma_r]*3 + [sigma_t]*3)**2] * num_pose ] * num_sensor
H_est = ExtrinsicCalibration3D(noisy_trajectories, trajectory_covs)
print("After refinement:")
list(map(print, H_est))
print("Ground truth:")
list(map(print, Hm))
if __name__ =='__main__':
demo_and_test() |
the-stack_0_22201 | from . import api
from flask import jsonify, request, g
from ..models import Costs, Permission, WhoOwesWhom
from main_app import db
from .errors import forbidden
from main_app.costs.cost_handler import cost_handle
@api.route('/costs/<int:id>')
def get_cost(id):
cost = Costs.query.get_or_404(id)
return jsonify(cost.to_json()), 200
@api.route('/costs/self')
def view_user_costs():
costs = Costs.query.filter_by(who_spent=g.current_user.id).all()
return jsonify({'costs': [cost.to_json() for cost in costs]}), 200
@api.route('/costs/group/<int:id>')
def group_costs(id):
costs = Costs.query.filter_by(group_id=id).all()
return jsonify({'costs': [cost.to_json() for cost in costs]}), 200
@api.route('/costs/create', methods=['POST'])
def create_cost():
cost = Costs.from_json(request.json)
cost.who_spent = g.current_user.id
db.session.add(cost)
db.session.commit()
return jsonify(cost.to_json()), 201
@api.route('/costs/<int:id>', methods=['DELETE'])
def delete_cost(id):
cost = Costs.query.get_or_404(id)
if g.current_user.id == cost.who_spent or \
g.current_user.can(Permission.MODERATE):
db.session.delete(cost)
db.session.commit()
        return jsonify({'message': 'Cost was deleted'})
else:
return forbidden('Access denied')
@api.route('/costs/<int:id>', methods=['PUT'])
def update_cost(id):
cost = Costs.query.get_or_404(id)
    if g.current_user.id == cost.who_spent or \
g.current_user.can(Permission.MODERATE):
cost.cost_title = request.json.get('cost_title')
cost.spent_money = request.json.get('spent_money')
cost.group_id = request.json.get('group_id')
db.session.commit()
        return jsonify({'cost': cost.to_json(), 'message': 'successfully updated'})
return forbidden('Access denied')
@api.route('/costs/calculate/<int:id>')
def calculate_cost_group(id):
cost_handle(id)
    return jsonify({'message': 'All calculations are done'})
@api.route('/costs/debt_table/<int:id>')
def debt_table(id):
who_to_whom = WhoOwesWhom.query.filter_by(group_id=id).all()
for w in who_to_whom:
if w.debt_amount == 0:
db.session.delete(w)
db.session.commit()
continue
return jsonify({"debt_row": [debt_row.to_json() for debt_row in who_to_whom]})
|
the-stack_0_22202 | from functools import partial
from .async_utils import fetch_schedules, fetch_episode_metadata
from .remote import RemoteMixIn
from .schedule import ChannelSchedule
from ..search import ScheduleSearchMixIn
from ..channel_ids import ChannelPicker
from ...api.json_helpers import EpisodeMetadataPidJson
from ...share import batch_multiprocess_with_return, async_errors
from ...share.time import parse_abs_from_rel_date, parse_date_range
__all__ = ["ChannelListings"]
class ChannelListings(ScheduleSearchMixIn, RemoteMixIn):
"""
Listings for a given channel
"""
episode_reader_func = EpisodeMetadataPidJson.reader_func
fetch_episode_metadata = fetch_episode_metadata # bind as method
def __init__(self, channel_id, from_date=None, to_date=None, n_days=None):
self.channel_id = channel_id
from_date, to_date, n_days = parse_date_range(from_date, to_date, n_days)
self.from_date, self.to_date, self.n_days = from_date, to_date, n_days
self.schedules = self.make_schedules()
self.fetch_schedules()
@property
def urlset(self):
"Generator of URLs for async fetching"
return (s.sched_url for s in self.schedules)
def make_schedule(self, date, defer_pull=True):
"Create schedule object; includes channel ID error handling"
return ChannelSchedule(self.channel_id, date=date, defer_pull=defer_pull)
def make_schedules(self, defer_pull=True):
"""
Make and return a list of ChannelSchedule objects and pull their
        URLs collectively in an efficient async procedure (not serially).
"""
return [
self.make_schedule(
parse_abs_from_rel_date(self.from_date, ymd_ago=(0, 0, i)),
defer_pull=defer_pull,
)
for i in range(self.n_days)
]
def fetch_schedules(self, verbose=False, n_retries=3):
# (Due to httpx client bug documented in issue 6 of beeb issue tracker)
for i in range(n_retries):
try:
fetch_schedules(self.urlset, self.schedules)
except async_errors as e:
if verbose:
print(f"Error occurred {e}, retrying")
if i == n_retries - 1:
                    raise e  # Persisted after all retries, so throw it, don't proceed
# Otherwise retry, connection was terminated due to httpx bug (see #6)
else:
break # exit the for loop if it succeeds
self.boil_all_schedules(verbose=verbose)
def boil_all_schedules(self, verbose=False):
recipe_list = [
partial(s.boil_broadcasts, return_broadcasts=True) for s in self.schedules
]
# Batch the soup parsing on all cores then sort to regain chronological order
all_scheduled_broadcasts = sorted(
batch_multiprocess_with_return(
recipe_list, show_progress=verbose, tqdm_desc="Boiling schedules..."
),
key=lambda b: b[0].time,
)
for s, b in zip(self.schedules, all_scheduled_broadcasts):
s.broadcasts = b
@classmethod
def from_channel_name(cls, name, from_date=None, to_date=None, n_days=None):
ch = ChannelPicker.by_name(name, must_exist=True)
return cls(ch.channel_id, from_date=from_date, to_date=to_date, n_days=n_days)
@property
def date_repr(self):
return self.time.strftime("%d/%m/%Y")
@property
def time_repr(self):
return self.time.strftime("%H:%M")
def __repr__(self):
return (
f"ChannelListings for {self.channel.title} "
f"from {self.from_date} to {self.to_date} ({self.n_days} days)"
)
@property
def all_broadcasts(self):
"Presuming the schedules are already boiled (i.e. parsed), enumerate broadcasts"
return [b for s in self.schedules for b in s.broadcasts]
@property
def broadcasts_urlset(self):
"Make a generator to produce the URLs for the broadcasts from all_broadcasts"
return (
EpisodeMetadataPidJson(episode.pid, defer_pull=True).url
for episode in self.all_broadcasts
)
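# Illustrative usage sketch (assumption, not part of the original module):
#
#   listings = ChannelListings.from_channel_name("r4", n_days=7)
#   print(listings)                      # "ChannelListings for <channel> from ... to ..."
#   episodes = listings.all_broadcasts   # chronological broadcasts for the week
#
# "r4" is a hypothetical channel name; ChannelPicker.by_name() resolves
# whatever names the channel_ids module actually defines.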
|
the-stack_0_22204 | # -*- coding: utf-8 -*-
'''
Connection module for Amazon Elasticsearch Service
.. versionadded:: 2016.11.0
:configuration: This module accepts explicit AWS credentials but can also
    utilize IAM roles assigned to the instance through Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
lambda.keyid: GKTADJGHEIQSXMKKRBJ08H
lambda.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
lambda.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
Create and delete methods return:
.. code-block:: yaml
created: true
or
.. code-block:: yaml
created: false
error:
message: error message
Request methods (e.g., `describe_function`) return:
.. code-block:: yaml
domain:
- {...}
- {...}
or
.. code-block:: yaml
error:
message: error message
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
import logging
import json
from distutils.version import LooseVersion as _LooseVersion # pylint: disable=import-error,no-name-in-module
# Import Salt libs
import salt.ext.six as six
import salt.utils.boto3
import salt.utils.compat
import salt.utils
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
# Import third party libs
# pylint: disable=import-error
try:
#pylint: disable=unused-import
import boto
import boto3
#pylint: enable=unused-import
from botocore.exceptions import ClientError
logging.getLogger('boto').setLevel(logging.CRITICAL)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=import-error
def __virtual__():
'''
Only load if boto libraries exist and if boto libraries are greater than
a given version.
'''
required_boto_version = '2.8.0'
required_boto3_version = '1.4.0'
# the boto_lambda execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
if not HAS_BOTO:
return (False, 'The boto_lambda module could not be loaded: '
'boto libraries not found')
elif _LooseVersion(boto.__version__) < _LooseVersion(required_boto_version):
return (False, 'The boto_lambda module could not be loaded: '
'boto version {0} or later must be installed.'.format(required_boto_version))
elif _LooseVersion(boto3.__version__) < _LooseVersion(required_boto3_version):
return (False, 'The boto_lambda module could not be loaded: '
'boto version {0} or later must be installed.'.format(required_boto3_version))
else:
return True
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
__utils__['boto3.assign_funcs'](__name__, 'es')
def exists(DomainName,
region=None, key=None, keyid=None, profile=None):
'''
Given a domain name, check to see if the given domain exists.
Returns True if the given domain exists and returns False if the given
function does not exist.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.exists mydomain
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
domain = conn.describe_elasticsearch_domain(DomainName=DomainName)
return {'exists': True}
except ClientError as e:
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'exists': False}
return {'error': salt.utils.boto3.get_error(e)}
def status(DomainName,
region=None, key=None, keyid=None, profile=None):
'''
Given a domain name describe its status.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.status mydomain
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
domain = conn.describe_elasticsearch_domain(DomainName=DomainName)
if domain and 'DomainStatus' in domain:
domain = domain.get('DomainStatus', {})
keys = ('Endpoint', 'Created', 'Deleted',
'DomainName', 'DomainId', 'EBSOptions', 'SnapshotOptions',
'AccessPolicies', 'Processing', 'AdvancedOptions', 'ARN',
'ElasticsearchVersion')
return {'domain': dict([(k, domain.get(k)) for k in keys if k in domain])}
else:
return {'domain': None}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
def describe(DomainName,
region=None, key=None, keyid=None, profile=None):
'''
Given a domain name describe its properties.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.describe mydomain
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
domain = conn.describe_elasticsearch_domain_config(DomainName=DomainName)
if domain and 'DomainConfig' in domain:
domain = domain['DomainConfig']
keys = ('ElasticsearchClusterConfig', 'EBSOptions', 'AccessPolicies',
'SnapshotOptions', 'AdvancedOptions')
return {'domain': dict([(k, domain.get(k, {}).get('Options')) for k in keys if k in domain])}
else:
return {'domain': None}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
def create(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
AccessPolicies=None, SnapshotOptions=None, AdvancedOptions=None,
region=None, key=None, keyid=None, profile=None,
ElasticsearchVersion=None):
'''
Given a valid config, create a domain.
Returns {created: true} if the domain was created and returns
{created: False} if the domain was not created.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.create mydomain \\
{'InstanceType': 't2.micro.elasticsearch', 'InstanceCount': 1, \\
'DedicatedMasterEnabled': false, 'ZoneAwarenessEnabled': false} \\
{'EBSEnabled': true, 'VolumeType': 'gp2', 'VolumeSize': 10, \\
'Iops': 0} \\
{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "es:*", \\
"Resource": "arn:aws:es:us-east-1:111111111111:domain/mydomain/*", \\
"Condition": {"IpAddress": {"aws:SourceIp": ["127.0.0.1"]}}}]} \\
{"AutomatedSnapshotStartHour": 0} \\
{"rest.action.multi.allow_explicit_index": "true"}
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
for k in ('ElasticsearchClusterConfig', 'EBSOptions',
'AccessPolicies', 'SnapshotOptions', 'AdvancedOptions',
'ElasticsearchVersion'):
if locals()[k] is not None:
val = locals()[k]
if isinstance(val, six.string_types):
try:
val = json.loads(val)
except ValueError as e:
return {'updated': False, 'error': 'Error parsing {0}: {1}'.format(k, e.message)}
kwargs[k] = val
if 'AccessPolicies' in kwargs:
kwargs['AccessPolicies'] = json.dumps(kwargs['AccessPolicies'])
if 'ElasticsearchVersion' in kwargs:
kwargs['ElasticsearchVersion'] = str(kwargs['ElasticsearchVersion'])
domain = conn.create_elasticsearch_domain(DomainName=DomainName, **kwargs)
if domain and 'DomainStatus' in domain:
return {'created': True}
else:
log.warning('Domain was not created')
return {'created': False}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
def delete(DomainName, region=None, key=None, keyid=None, profile=None):
'''
Given a domain name, delete it.
Returns {deleted: true} if the domain was deleted and returns
{deleted: false} if the domain was not deleted.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.delete mydomain
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_elasticsearch_domain(DomainName=DomainName)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
def update(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
AccessPolicies=None, SnapshotOptions=None, AdvancedOptions=None,
region=None, key=None, keyid=None, profile=None):
'''
Update the named domain to the configuration.
Returns {updated: true} if the domain was updated and returns
{updated: False} if the domain was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.update mydomain \\
{'InstanceType': 't2.micro.elasticsearch', 'InstanceCount': 1, \\
'DedicatedMasterEnabled': false, 'ZoneAwarenessEnabled': false} \\
{'EBSEnabled': true, 'VolumeType': 'gp2', 'VolumeSize': 10, \\
'Iops': 0} \\
{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "es:*", \\
"Resource": "arn:aws:es:us-east-1:111111111111:domain/mydomain/*", \\
"Condition": {"IpAddress": {"aws:SourceIp": ["127.0.0.1"]}}}]} \\
{"AutomatedSnapshotStartHour": 0} \\
{"rest.action.multi.allow_explicit_index": "true"}
'''
call_args = {}
for k in ('ElasticsearchClusterConfig', 'EBSOptions',
'AccessPolicies', 'SnapshotOptions', 'AdvancedOptions'):
if locals()[k] is not None:
val = locals()[k]
if isinstance(val, six.string_types):
try:
val = json.loads(val)
except ValueError as e:
return {'updated': False, 'error': 'Error parsing {0}: {1}'.format(k, e.message)}
call_args[k] = val
if 'AccessPolicies' in call_args:
call_args['AccessPolicies'] = json.dumps(call_args['AccessPolicies'])
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
domain = conn.update_elasticsearch_domain_config(DomainName=DomainName, **call_args)
if not domain or 'DomainConfig' not in domain:
log.warning('Domain was not updated')
return {'updated': False}
return {'updated': True}
except ClientError as e:
return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
def add_tags(DomainName=None, ARN=None,
region=None, key=None, keyid=None, profile=None, **kwargs):
'''
Add tags to a domain
Returns {tagged: true} if the domain was tagged and returns
{tagged: False} if the domain was not tagged.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.add_tags mydomain tag_a=tag_value tag_b=tag_value
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for k, v in six.iteritems(kwargs):
if str(k).startswith('__'):
continue
tagslist.append({'Key': str(k), 'Value': str(v)})
if ARN is None:
if DomainName is None:
raise SaltInvocationError('One (but not both) of ARN or '
'domain must be specified.')
domaindata = status(DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)
if not domaindata or 'domain' not in domaindata:
log.warning('Domain tags not updated')
return {'tagged': False}
ARN = domaindata.get('domain', {}).get('ARN')
elif DomainName is not None:
raise SaltInvocationError('One (but not both) of ARN or '
'domain must be specified.')
conn.add_tags(ARN=ARN, TagList=tagslist)
return {'tagged': True}
except ClientError as e:
return {'tagged': False, 'error': salt.utils.boto3.get_error(e)}
def remove_tags(TagKeys, DomainName=None, ARN=None,
region=None, key=None, keyid=None, profile=None):
'''
    Remove tags from an Elasticsearch domain
    Returns {tagged: true} if the domain was tagged and returns
    {tagged: False} if the domain was not tagged.
CLI Example:
.. code-block:: bash
        salt myminion boto_elasticsearch_domain.remove_tags '["tag_a", "tag_b"]' DomainName=mydomain
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if ARN is None:
if DomainName is None:
raise SaltInvocationError('One (but not both) of ARN or '
'domain must be specified.')
domaindata = status(DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)
if not domaindata or 'domain' not in domaindata:
log.warning('Domain tags not updated')
return {'tagged': False}
ARN = domaindata.get('domain', {}).get('ARN')
elif DomainName is not None:
raise SaltInvocationError('One (but not both) of ARN or '
'domain must be specified.')
        conn.remove_tags(ARN=ARN, TagKeys=TagKeys)
return {'tagged': True}
except ClientError as e:
return {'tagged': False, 'error': salt.utils.boto3.get_error(e)}
def list_tags(DomainName=None, ARN=None,
region=None, key=None, keyid=None, profile=None):
'''
    List tags of an Elasticsearch domain
Returns:
tags:
- {...}
- {...}
CLI Example:
.. code-block:: bash
        salt myminion boto_elasticsearch_domain.list_tags mydomain
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if ARN is None:
if DomainName is None:
raise SaltInvocationError('One (but not both) of ARN or '
'domain must be specified.')
domaindata = status(DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)
if not domaindata or 'domain' not in domaindata:
log.warning('Domain tags not updated')
return {'tagged': False}
ARN = domaindata.get('domain', {}).get('ARN')
elif DomainName is not None:
raise SaltInvocationError('One (but not both) of ARN or '
'domain must be specified.')
ret = conn.list_tags(ARN=ARN)
log.warning(ret)
tlist = ret.get('TagList', [])
tagdict = {}
for tag in tlist:
tagdict[tag.get('Key')] = tag.get('Value')
return {'tags': tagdict}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
|
the-stack_0_22205 | from dataclasses import dataclass, field
from typing import List
from enum import Enum
class TaskType(str, Enum):
text_classification = "text-classification"
named_entity_recognition = "named-entity-recognition"
extractive_qa_squad = "extractive-qa-squad"
summarization = "summarization"
machine_translation = "machine-translation"
text_pair_classification = "text-pair-classification"
hellaswag = "hellaswag"
aspect_based_sentiment_classification = "aspect-based-sentiment-classification"
kg_link_tail_prediction = "kg-link-tail-prediction"
qa_multiple_choice = "qa-multiple-choice"
@staticmethod
def list():
return list(map(lambda c: c.value, TaskType))
@dataclass
class Task:
name: str
description: str = "task description"
supported: bool = field(default=False)
supported_metrics: List[str] = field(default_factory=list)
supported_formats: List[str] = field(default_factory=list)
supported_datasets: List[str] = field(default_factory=list)
@dataclass
class TaskCategory:
name: str
description: str
tasks: List[Task]
_task_categories: List[TaskCategory] = [
TaskCategory(
"conditional-text-generation",
"data-to-text and text transduction tasks such as translation or summarization",
[
Task(
name=TaskType.machine_translation,
description="The process of using AI to automatically translate text from one language to another.",
supported=True,
supported_metrics=[
"bleu",
"bart_score_summ",
"bart_score_mt",
"bart_score_cnn_hypo_ref",
"rouge1",
"rouge2",
"rougeL",
"bert_score_f",
"bert_score_p",
"bert_score_r",
"chrf",
"comet",
"mover_score",
"prism",
],
supported_formats=["tsv"],
supported_datasets=[],
),
Task(
name=TaskType.summarization,
description="Summarize long documents into short texts. See more details about the format of upload files: https://github.com/neulab/ExplainaBoard/blob/main/docs/task_summarization.md",
supported=True,
supported_metrics=[
"bleu",
"bart_score_summ",
"bart_score_mt",
"bart_score_cnn_hypo_ref",
"rouge1",
"rouge2",
"rougeL",
"bert_score_f",
"bert_score_p",
"bert_score_r",
"chrf",
"comet",
"mover_score",
"prism",
],
supported_formats=["tsv"],
supported_datasets=[],
),
],
),
TaskCategory(
"text-classification",
"predicting a class index or boolean value. ",
[
Task(
name=TaskType.text_classification,
description="Classify a text into one or multiple predefined categories. See more details about the format of upload files: https://github.com/neulab/ExplainaBoard/blob/main/docs/task_text_classification.md",
supported=True,
supported_metrics=["F1score", "Accuracy"],
supported_formats=["tsv"],
supported_datasets=[],
)
],
),
TaskCategory(
"structure-prediction",
"predicting structural properties of the text, such as syntax",
[
Task(
name=TaskType.named_entity_recognition,
description="Recognize named entities from a given text. See one example of the uploaded file: https://github.com/neulab/ExplainaBoard/blob/main/data/system_outputs/conll2003/conll2003.elmo",
supported=True,
supported_metrics=["f1_score_seqeval"],
supported_formats=["conll"],
supported_datasets=[],
)
],
),
TaskCategory(
"question-answering",
"question answering tasks",
[
Task(
name=TaskType.extractive_qa_squad,
description="A task of extracting an answer from a text given a question on the SQuAD dataset. See more details about the format of upload files: https://github.com/neulab/ExplainaBoard/blob/main/docs/task_extractive_qa_squad.md",
supported=True,
supported_metrics=["f1_score_qa", "exact_match_qa"],
supported_formats=["json"],
supported_datasets=["squad"],
),
],
),
TaskCategory(
"span-text-prediction",
"prediction based on span and text",
[
Task(
name=TaskType.aspect_based_sentiment_classification,
description="Predict the sentiment of a text based on a specific aspect. See more details about the format of upload files: https://github.com/neulab/ExplainaBoard/blob/main/data/system_outputs/absa/test-aspect.tsv",
supported=True,
supported_metrics=["F1score", "Accuracy"],
supported_formats=["tsv"],
supported_datasets=[],
),
],
),
TaskCategory(
"text-pair-classification",
"predicting a class of two texts",
[
Task(
name=TaskType.text_pair_classification,
description="predict the relationship of two texts. See more details about the format of upload files: https://github.com/neulab/ExplainaBoard/blob/main/docs/task_text_pair_classification.md",
supported=True,
supported_metrics=["F1score", "Accuracy"],
supported_formats=["tsv"],
supported_datasets=[],
),
],
),
TaskCategory(
"kg-link-tail-prediction",
"predicting the tail entity of missing links in knowledge graphs",
[
Task(
name=TaskType.kg_link_tail_prediction,
description="predicting the tail entity of missing links in knowledge graphs. See more details about the format of upload files: https://github.com/neulab/ExplainaBoard/blob/main/docs/task_kg_link_tail_prediction.md",
supported=True,
supported_metrics=["Hits"],
supported_formats=["json"],
supported_datasets=[],
),
],
),
TaskCategory(
"qa-multiple-choice",
"Answer a question from multiple options",
[
Task(
name=TaskType.qa_multiple_choice,
description="Answer a question from multiple options",
supported=True,
supported_metrics=["F1score", "Accuracy"],
supported_formats=["json"],
supported_datasets=[],
)
],
),
]
def get_task_categories():
"""getter for task categories data"""
return _task_categories
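# Illustrative helper (not part of the original module): flatten the registry
# above into the names of the tasks currently marked as supported.
def list_supported_tasks():
    return [
        task.name
        for category in get_task_categories()
        for task in category.tasks
        if task.supported
    ]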
|
the-stack_0_22207 | # Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
import neutron.plugins.openvswitch.common.constants as ovs_const
from neutron.tests.unit.plugins.openvswitch.agent.openflow.ovs_ofctl \
import ovs_bridge_test_base
call = mock.call # short hand
class OVSTunnelBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase,
ovs_bridge_test_base.OVSDVRProcessTestMixin):
dvr_process_table_id = ovs_const.DVR_PROCESS
dvr_process_next_table_id = ovs_const.PATCH_LV_TO_TUN
def setUp(self):
super(OVSTunnelBridgeTest, self).setUp()
self.setup_bridge_mock('br-tun', self.br_tun_cls)
def test_setup_default_table(self):
patch_int_ofport = 5555
arp_responder_enabled = False
self.br.setup_default_table(patch_int_ofport=patch_int_ofport,
arp_responder_enabled=arp_responder_enabled)
expected = [
call.add_flow(priority=1, in_port=patch_int_ofport,
actions='resubmit(,2)'),
call.add_flow(priority=0, actions='drop'),
call.add_flow(priority=0, table=2,
dl_dst='00:00:00:00:00:00/01:00:00:00:00:00',
actions='resubmit(,20)'),
call.add_flow(priority=0, table=2,
dl_dst='01:00:00:00:00:00/01:00:00:00:00:00',
actions='resubmit(,22)'),
call.add_flow(priority=0, table=3, actions='drop'),
call.add_flow(priority=0, table=4, actions='drop'),
call.add_flow(priority=1, table=10,
actions='learn(table=20,priority=1,'
'hard_timeout=300,NXM_OF_VLAN_TCI[0..11],'
'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],'
'load:0->NXM_OF_VLAN_TCI[],'
'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],'
'output:NXM_OF_IN_PORT[]),'
'output:%s' % patch_int_ofport),
call.add_flow(priority=0, table=20, actions='resubmit(,22)'),
call.add_flow(priority=0, table=22, actions='drop'),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_setup_default_table_arp_responder_enabled(self):
patch_int_ofport = 5555
arp_responder_enabled = True
self.br.setup_default_table(patch_int_ofport=patch_int_ofport,
arp_responder_enabled=arp_responder_enabled)
expected = [
call.add_flow(priority=1, in_port=patch_int_ofport,
actions='resubmit(,2)'),
call.add_flow(priority=0, actions='drop'),
call.add_flow(priority=1, table=2, dl_dst='ff:ff:ff:ff:ff:ff',
actions='resubmit(,21)', proto='arp'),
call.add_flow(priority=0, table=2,
dl_dst='00:00:00:00:00:00/01:00:00:00:00:00',
actions='resubmit(,20)'),
call.add_flow(priority=0, table=2,
dl_dst='01:00:00:00:00:00/01:00:00:00:00:00',
actions='resubmit(,22)'),
call.add_flow(priority=0, table=3, actions='drop'),
call.add_flow(priority=0, table=4, actions='drop'),
call.add_flow(priority=1, table=10,
actions='learn(table=20,priority=1,'
'hard_timeout=300,NXM_OF_VLAN_TCI[0..11],'
'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],'
'load:0->NXM_OF_VLAN_TCI[],'
'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],'
'output:NXM_OF_IN_PORT[]),'
'output:%s' % patch_int_ofport),
call.add_flow(priority=0, table=20, actions='resubmit(,22)'),
call.add_flow(priority=0, table=21, actions='resubmit(,22)'),
call.add_flow(priority=0, table=22, actions='drop'),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_provision_local_vlan(self):
network_type = 'vxlan'
lvid = 888
segmentation_id = 777
distributed = False
self.br.provision_local_vlan(network_type=network_type, lvid=lvid,
segmentation_id=segmentation_id,
distributed=distributed)
expected = [
call.add_flow(priority=1, tun_id=segmentation_id,
actions='mod_vlan_vid:%s,resubmit(,10)' % lvid,
table=4),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_reclaim_local_vlan(self):
network_type = 'vxlan'
segmentation_id = 777
self.br.reclaim_local_vlan(network_type=network_type,
segmentation_id=segmentation_id)
expected = [
call.delete_flows(tun_id=segmentation_id, table=4),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_flood_to_tun(self):
vlan = 3333
tun_id = 2222
ports = [11, 44, 22, 33]
self.br.install_flood_to_tun(vlan=vlan,
tun_id=tun_id,
ports=ports)
expected = [
call.mod_flow(table=22, dl_vlan=vlan,
actions='strip_vlan,set_tunnel:%(tun)s,'
'output:%(ports)s' % {
'tun': tun_id,
'ports': ','.join(map(str, ports)),
}),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_flood_to_tun(self):
vlan = 3333
self.br.delete_flood_to_tun(vlan=vlan)
expected = [
call.delete_flows(table=22, dl_vlan=vlan),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_unicast_to_tun(self):
vlan = 3333
port = 55
mac = '08:60:6e:7f:74:e7'
tun_id = 2222
self.br.install_unicast_to_tun(vlan=vlan,
tun_id=tun_id,
port=port,
mac=mac)
expected = [
call.add_flow(priority=2, table=20, dl_dst=mac, dl_vlan=vlan,
actions='strip_vlan,set_tunnel:%(tun)s,'
'output:%(port)s' % {
'tun': tun_id,
'port': port,
}),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_unicast_to_tun(self):
vlan = 3333
mac = '08:60:6e:7f:74:e7'
self.br.delete_unicast_to_tun(vlan=vlan, mac=mac)
expected = [
call.delete_flows(table=20, dl_dst=mac, dl_vlan=vlan),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_unicast_to_tun_without_mac(self):
vlan = 3333
mac = None
self.br.delete_unicast_to_tun(vlan=vlan, mac=mac)
expected = [
call.delete_flows(table=20, dl_vlan=vlan),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_arp_responder(self):
vlan = 3333
ip = '192.0.2.1'
mac = '08:60:6e:7f:74:e7'
self.br.install_arp_responder(vlan=vlan, ip=ip, mac=mac)
expected = [
call.add_flow(proto='arp', nw_dst=ip,
actions='move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],'
'mod_dl_src:%(mac)s,load:0x2->NXM_OF_ARP_OP[],'
'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],'
'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],'
'load:%(mac)#x->NXM_NX_ARP_SHA[],'
'load:%(ip)#x->NXM_OF_ARP_SPA[],in_port' % {
'mac': netaddr.EUI(mac,
dialect=netaddr.mac_unix),
'ip': netaddr.IPAddress(ip),
},
priority=1, table=21, dl_vlan=vlan),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_arp_responder(self):
vlan = 3333
ip = '192.0.2.1'
self.br.delete_arp_responder(vlan=vlan, ip=ip)
expected = [
call.delete_flows(table=21, dl_vlan=vlan, proto='arp', nw_dst=ip),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_arp_responder_without_ip(self):
vlan = 3333
ip = None
self.br.delete_arp_responder(vlan=vlan, ip=ip)
expected = [
call.delete_flows(table=21, dl_vlan=vlan, proto='arp'),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_setup_tunnel_port(self):
network_type = 'vxlan'
port = 11111
self.br.setup_tunnel_port(network_type=network_type, port=port)
expected = [
call.add_flow(priority=1, in_port=port, actions='resubmit(,4)'),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_cleanup_tunnel_port(self):
port = 11111
self.br.cleanup_tunnel_port(port=port)
expected = [
call.delete_flows(in_port=port),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_add_dvr_mac_tun(self):
mac = '00:02:b3:13:fe:3d'
port = 8888
self.br.add_dvr_mac_tun(mac=mac, port=port)
expected = [
call.add_flow(priority=1, table=9, dl_src=mac,
actions='output:%s' % port),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_remove_dvr_mac_tun(self):
mac = '00:02:b3:13:fe:3d'
self.br.remove_dvr_mac_tun(mac=mac)
expected = [
call.delete_flows(eth_src=mac, table_id=9),
]
self.assertEqual(expected, self.mock.mock_calls)
def _mock_add_tunnel_port(self, deferred_br=False):
port_name = 'fake_port'
remote_ip = '192.168.1.3'
local_ip = '192.168.1.2'
tunnel_type = 'vxlan'
vxlan_udp_port = '4789'
dont_fragment = True
if deferred_br:
with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.add_port',
return_value=9999) as add_port, \
self.br.deferred() as deferred_br:
ofport = deferred_br.add_tunnel_port(port_name, remote_ip,
local_ip, tunnel_type,
vxlan_udp_port,
dont_fragment)
else:
with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.add_port',
return_value=9999) as add_port:
ofport = self.br.add_tunnel_port(port_name, remote_ip,
local_ip, tunnel_type,
vxlan_udp_port,
dont_fragment)
self.assertEqual(9999, ofport)
self.assertEqual(1, add_port.call_count)
self.assertEqual(port_name, add_port.call_args[0][0])
def _mock_delete_port(self, deferred_br=False):
port_name = 'fake_port'
if deferred_br:
with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
'delete_port') as delete_port, \
self.br.deferred() as deferred_br:
deferred_br.delete_port(port_name)
else:
with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
'delete_port') as delete_port:
self.br.delete_port(port_name)
self.assertEqual([call(port_name)], delete_port.mock_calls)
def test_add_tunnel_port(self):
self._mock_add_tunnel_port()
def test_delete_port(self):
self._mock_delete_port()
def test_deferred_br_add_tunnel_port(self):
self._mock_add_tunnel_port(True)
def test_deferred_br_delete_port(self):
self._mock_delete_port(True)
|
the-stack_0_22208 | from concurrent import futures
from functools import partial
import logging
import typing as ty
import psutil
import os
import signal
import sys
import time
from concurrent.futures import ProcessPoolExecutor
try:
from npshmex import ProcessPoolExecutor as SHMExecutor
except ImportError:
# This is allowed to fail, it only crashes if allow_shm = True
SHMExecutor = None
pass
import numpy as np
import strax
export, __all__ = strax.exporter()
@export
class ProcessorComponents(ty.NamedTuple):
"""Specification to assemble a processor"""
plugins: ty.Dict[str, strax.Plugin]
loaders: ty.Dict[str, callable]
savers: ty.Dict[str, ty.List[strax.Saver]]
targets: ty.Tuple[str]
class MailboxDict(dict):
def __missing__(self, key):
res = self[key] = strax.Mailbox(name=key + '_mailbox')
return res
@export
class ThreadedMailboxProcessor:
mailboxes: ty.Dict[str, strax.Mailbox]
def __init__(self,
components: ProcessorComponents,
allow_rechunk=True, allow_shm=False,
allow_multiprocess=False,
max_workers=None):
self.log = logging.getLogger(self.__class__.__name__)
self.components = components
self.mailboxes = MailboxDict()
self.log.debug("Processor components are: " + str(components))
if max_workers in [None, 1]:
# Disable the executors: work in one process.
# Each plugin works completely in its own thread.
self.process_executor = self.thread_executor = None
else:
# Use executors for parallelization of computations.
self.thread_executor = futures.ThreadPoolExecutor(
max_workers=max_workers)
mp_plugins = {d: p for d, p in components.plugins.items()
if p.parallel == 'process'}
if (allow_multiprocess and len(mp_plugins)):
_proc_ex = ProcessPoolExecutor
if allow_shm:
if SHMExecutor is None:
raise RuntimeError(
"You must install npshmex to enable shm"
" transfer of numpy arrays.")
_proc_ex = SHMExecutor
self.process_executor = _proc_ex(max_workers=max_workers)
# Combine as many plugins /savers as possible in one process
# TODO: more intelligent start determination, multiple starts
start_from = list(mp_plugins.keys())[
int(np.argmin([len(p.depends_on)
for p in mp_plugins.values()]))]
components = strax.ParallelSourcePlugin.inline_plugins(
components, start_from, log=self.log)
self.components = components
self.log.debug("Altered components for multiprocessing: "
+ str(components))
else:
self.process_executor = self.thread_executor
for d, loader in components.loaders.items():
assert d not in components.plugins
# If paralellizing, use threads for loading
# the decompressor releases the gil, and we have a lot
# of data transfer to do
self.mailboxes[d].add_sender(
loader(executor=self.thread_executor),
name=f'load:{d}')
multi_output_seen = []
for d, p in components.plugins.items():
if p in multi_output_seen:
continue
executor = None
if p.parallel == 'process':
executor = self.process_executor
elif p.parallel:
executor = self.thread_executor
if p.multi_output:
multi_output_seen.append(p)
# Create temp mailbox that receives multi-output dicts
# and sends them forth to other mailboxes
mname = p.__class__.__name__ + '_divide_outputs'
self.mailboxes[mname].add_sender(
p.iter(
iters={dep: self.mailboxes[dep].subscribe()
for dep in p.depends_on},
executor=executor),
name=f'divide_outputs:{d}')
for d in p.provides:
self.mailboxes[d] # creates mailbox d if doesn't exist
self.mailboxes[mname].add_reader(
partial(strax.divide_outputs,
mailboxes=self.mailboxes,
outputs=p.provides))
else:
self.mailboxes[d].add_sender(
p.iter(
iters={dep: self.mailboxes[dep].subscribe()
for dep in p.depends_on},
executor=executor),
name=f'build:{d}')
for d, savers in components.savers.items():
for s_i, saver in enumerate(savers):
if d in components.plugins:
rechunk = components.plugins[d].rechunk_on_save
else:
# This is storage conversion mode
# TODO: Don't know how to get this info, for now,
# be conservative and don't rechunk
rechunk = False
if not allow_rechunk:
rechunk = False
self.mailboxes[d].add_reader(
partial(saver.save_from,
rechunk=rechunk,
# If paralellizing, use threads for saving
# the compressor releases the gil,
# and we have a lot of data transfer to do
executor=self.thread_executor),
name=f'save_{s_i}:{d}')
def iter(self):
target = self.components.targets[0]
final_generator = self.mailboxes[target].subscribe()
self.log.debug("Starting threads")
for m in self.mailboxes.values():
m.start()
self.log.debug(f"Yielding {target}")
traceback = None
exc = None
try:
yield from final_generator
except Exception as e:
self.log.debug(f"Target Mailbox ({target}) killed, exception {e}")
for m in self.mailboxes.values():
if m != target:
self.log.debug(f"Killing {m}")
if isinstance(e, strax.MailboxKilled):
_, exc, traceback = reason = e.args[0]
else:
exc = e
reason = (e.__class__, e, sys.exc_info()[2])
traceback = reason[2]
m.kill(upstream=True, reason=reason)
# We will reraise it in just a moment...
finally:
self.log.debug("Closing threads")
for m in self.mailboxes.values():
m.cleanup()
self.log.debug("Closing threads completed")
self.log.debug("Closing executors")
if self.thread_executor is not None:
self.thread_executor.shutdown(wait=False)
if self.process_executor not in [None, self.thread_executor]:
# Unfortunately there is no wait=timeout option, so we have to
# roll our own
pids = self.process_executor._processes.keys()
self.process_executor.shutdown(wait=False)
t0 = time.time()
while time.time() < t0 + 20:
if all([not psutil.pid_exists(pid) for pid in pids]):
break
self.log.info("Waiting for subprocesses to end")
time.sleep(2)
else:
self.log.warning("Subprocesses failed to terminate, "
"resorting to brute force killing")
for pid in pids:
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Didn't exist
pass
self.log.info("Sent SIGTERM to all subprocesses")
self.log.debug("Closing executors completed")
if traceback is not None:
# Reraise exception. This is outside the except block
# to avoid the 'during handling of this exception, another
# exception occurred' stuff from confusing the traceback
# which is printed for the user
self.log.debug("Reraising exception")
raise exc.with_traceback(traceback)
# Check the savers for any exception that occurred during saving
# These are thrown back to the mailbox, but if that has already closed
# it doesn't trigger a crash...
# TODO: add savers inlined by parallelsourceplugin
# TODO: need to look at plugins too if we ever implement true
# multi-target mode
for k, saver_list in self.components.savers.items():
for s in saver_list:
if s.got_exception:
self.log.fatal(f"Caught error while saving {k}!")
raise s.got_exception
self.log.debug("Processing finished")
|
the-stack_0_22210 | """
API operations on a jobs.
.. seealso:: :class:`galaxy.model.Jobs`
"""
import json
import logging
from six import string_types
from sqlalchemy import and_, false, or_
from sqlalchemy.orm import aliased
from galaxy import exceptions
from galaxy import managers
from galaxy import model
from galaxy import util
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
from galaxy.web.base.controller import BaseAPIController
from galaxy.web.base.controller import UsesLibraryMixinItems
log = logging.getLogger( __name__ )
class JobController( BaseAPIController, UsesLibraryMixinItems ):
def __init__( self, app ):
super( JobController, self ).__init__( app )
self.hda_manager = managers.hdas.HDAManager( app )
self.dataset_manager = managers.datasets.DatasetManager( app )
@expose_api
def index( self, trans, **kwd ):
"""
index( trans, state=None, tool_id=None, history_id=None, date_range_min=None, date_range_max=None, user_details=False )
* GET /api/jobs:
return jobs for current user
!! if user is admin and user_details is True, then
return jobs for all galaxy users based on filtering - this is an extended service
:type state: string or list
:param state: limit listing of jobs to those that match one of the included states. If none, all are returned.
Valid Galaxy job states include:
'new', 'upload', 'waiting', 'queued', 'running', 'ok', 'error', 'paused', 'deleted', 'deleted_new'
:type tool_id: string or list
:param tool_id: limit listing of jobs to those that match one of the included tool_ids. If none, all are returned.
:type user_details: boolean
:param user_details: if true, and requestor is an admin, will return external job id and user email.
:type date_range_min: string '2014-01-01'
:param date_range_min: limit the listing of jobs to those updated on or after requested date
:type date_range_max: string '2014-12-31'
:param date_range_max: limit the listing of jobs to those updated on or before requested date
:type history_id: string
:param history_id: limit listing of jobs to those that match the history_id. If none, all are returned.
:rtype: list
:returns: list of dictionaries containing summary job information
"""
state = kwd.get( 'state', None )
is_admin = trans.user_is_admin()
user_details = kwd.get('user_details', False)
if is_admin:
query = trans.sa_session.query( trans.app.model.Job )
else:
query = trans.sa_session.query( trans.app.model.Job ).filter(trans.app.model.Job.user == trans.user)
def build_and_apply_filters( query, objects, filter_func ):
if objects is not None:
if isinstance( objects, string_types ):
query = query.filter( filter_func( objects ) )
elif isinstance( objects, list ):
t = []
for obj in objects:
t.append( filter_func( obj ) )
query = query.filter( or_( *t ) )
return query
query = build_and_apply_filters( query, state, lambda s: trans.app.model.Job.state == s )
query = build_and_apply_filters( query, kwd.get( 'tool_id', None ), lambda t: trans.app.model.Job.tool_id == t )
query = build_and_apply_filters( query, kwd.get( 'tool_id_like', None ), lambda t: trans.app.model.Job.tool_id.like(t) )
query = build_and_apply_filters( query, kwd.get( 'date_range_min', None ), lambda dmin: trans.app.model.Job.table.c.update_time >= dmin )
query = build_and_apply_filters( query, kwd.get( 'date_range_max', None ), lambda dmax: trans.app.model.Job.table.c.update_time <= dmax )
history_id = kwd.get( 'history_id', None )
if history_id is not None:
try:
decoded_history_id = self.decode_id(history_id)
query = query.filter( trans.app.model.Job.history_id == decoded_history_id )
except:
raise exceptions.ObjectAttributeInvalidException()
out = []
if kwd.get( 'order_by' ) == 'create_time':
order_by = trans.app.model.Job.create_time.desc()
else:
order_by = trans.app.model.Job.update_time.desc()
for job in query.order_by( order_by ).all():
job_dict = job.to_dict( 'collection', system_details=is_admin )
j = self.encode_all_ids( trans, job_dict, True )
if user_details:
j['user_email'] = job.user.email
out.append(j)
return out
@expose_api
def show( self, trans, id, **kwd ):
"""
show( trans, id )
* GET /api/jobs/{id}:
return jobs for current user
:type id: string
:param id: Specific job id
:type full: boolean
:param full: whether to return extra information
:rtype: dictionary
:returns: dictionary containing full description of job data
"""
job = self.__get_job( trans, id )
is_admin = trans.user_is_admin()
job_dict = self.encode_all_ids( trans, job.to_dict( 'element', system_details=is_admin ), True )
full_output = util.asbool( kwd.get( 'full', 'false' ) )
if full_output:
job_dict.update( dict( stderr=job.stderr, stdout=job.stdout ) )
if is_admin:
job_dict['user_email'] = job.user.email
def metric_to_dict(metric):
metric_name = metric.metric_name
metric_value = metric.metric_value
metric_plugin = metric.plugin
title, value = trans.app.job_metrics.format(metric_plugin, metric_name, metric_value)
return dict(
title=title,
value=value,
plugin=metric_plugin,
name=metric_name,
raw_value=str(metric_value),
)
job_dict['job_metrics'] = [metric_to_dict(metric) for metric in job.metrics]
return job_dict
@expose_api
def inputs( self, trans, id, **kwd ):
"""
show( trans, id )
* GET /api/jobs/{id}/inputs
returns input datasets created by job
:type id: string
:param id: Encoded job id
:rtype: dictionary
:returns: dictionary containing input dataset associations
"""
job = self.__get_job( trans, id )
return self.__dictify_associations( trans, job.input_datasets, job.input_library_datasets )
@expose_api
def outputs( self, trans, id, **kwd ):
"""
show( trans, id )
* GET /api/jobs/{id}/outputs
returns output datasets created by job
:type id: string
:param id: Encoded job id
:rtype: dictionary
:returns: dictionary containing output dataset associations
"""
job = self.__get_job( trans, id )
return self.__dictify_associations( trans, job.output_datasets, job.output_library_datasets )
@expose_api_anonymous
def build_for_rerun( self, trans, id, **kwd ):
"""
* GET /api/jobs/{id}/build_for_rerun
returns a tool input/param template prepopulated with this job's
information, suitable for rerunning or rendering parameters of the
job.
:type id: string
:param id: Encoded job id
:rtype: dictionary
:returns: dictionary containing output dataset associations
"""
job = self.__get_job(trans, id)
if not job:
raise exceptions.ObjectNotFound("Could not access job with id '%s'" % id)
tool = self.app.toolbox.get_tool( job.tool_id, job.tool_version )
if not tool.is_workflow_compatible:
raise exceptions.ConfigDoesNotAllowException( "Tool '%s' cannot be rerun." % ( job.tool_id ) )
return tool.to_json(trans, {}, job=job)
def __dictify_associations( self, trans, *association_lists ):
rval = []
for association_list in association_lists:
rval.extend( map( lambda a: self.__dictify_association( trans, a ), association_list ) )
return rval
def __dictify_association( self, trans, job_dataset_association ):
dataset_dict = None
dataset = job_dataset_association.dataset
if dataset:
if isinstance( dataset, model.HistoryDatasetAssociation ):
dataset_dict = dict( src="hda", id=trans.security.encode_id( dataset.id ) )
else:
dataset_dict = dict( src="ldda", id=trans.security.encode_id( dataset.id ) )
return dict( name=job_dataset_association.name, dataset=dataset_dict )
def __get_job( self, trans, id ):
try:
decoded_job_id = self.decode_id( id )
except Exception:
raise exceptions.MalformedId()
job = trans.sa_session.query( trans.app.model.Job ).filter( trans.app.model.Job.id == decoded_job_id ).first()
if job is None:
raise exceptions.ObjectNotFound()
if not trans.user_is_admin() and job.user != trans.user:
if not job.output_datasets:
raise exceptions.ItemAccessibilityException( "Job has no output datasets." )
for data_assoc in job.output_datasets:
if not self.dataset_manager.is_accessible( data_assoc.dataset.dataset, trans.user ):
raise exceptions.ItemAccessibilityException( "You are not allowed to rerun this job." )
return job
@expose_api
def create( self, trans, payload, **kwd ):
""" See the create method in tools.py in order to submit a job. """
raise NotImplementedError( 'Please POST to /api/tools instead.' )
@expose_api
def search( self, trans, payload, **kwd ):
"""
search( trans, payload )
* POST /api/jobs/search:
return jobs for current user
:type payload: dict
:param payload: Dictionary containing description of requested job. This is in the same format as
a request to POST /apt/tools would take to initiate a job
:rtype: list
:returns: list of dictionaries containing summary job information of the jobs that match the requested job run
This method is designed to scan the list of previously run jobs and find records of jobs that had
        the exact same input parameters and datasets. This can be used to minimize the amount of repeated work, and simply
recycle the old results.
"""
tool_id = None
if 'tool_id' in payload:
tool_id = payload.get( 'tool_id' )
if tool_id is None:
raise exceptions.ObjectAttributeMissingException( "No tool id" )
tool = trans.app.toolbox.get_tool( tool_id )
if tool is None:
raise exceptions.ObjectNotFound( "Requested tool not found" )
if 'inputs' not in payload:
raise exceptions.ObjectAttributeMissingException( "No inputs defined" )
inputs = payload[ 'inputs' ]
input_data = {}
input_param = {}
for k, v in inputs.items():
if isinstance( v, dict ):
if 'id' in v:
if 'src' not in v or v[ 'src' ] == 'hda':
hda_id = self.decode_id( v['id'] )
dataset = self.hda_manager.get_accessible( hda_id, trans.user )
else:
dataset = self.get_library_dataset_dataset_association( trans, v['id'] )
if dataset is None:
raise exceptions.ObjectNotFound( "Dataset %s not found" % ( v[ 'id' ] ) )
input_data[k] = dataset.dataset_id
else:
input_param[k] = json.dumps( str(v) )
query = trans.sa_session.query( trans.app.model.Job ).filter(
trans.app.model.Job.tool_id == tool_id,
trans.app.model.Job.user == trans.user
)
if 'state' not in payload:
query = query.filter(
or_(
trans.app.model.Job.state == 'running',
trans.app.model.Job.state == 'queued',
trans.app.model.Job.state == 'waiting',
trans.app.model.Job.state == 'ok',
)
)
else:
if isinstance( payload[ 'state' ], string_types ):
query = query.filter( trans.app.model.Job.state == payload[ 'state' ] )
elif isinstance( payload[ 'state' ], list ):
o = []
for s in payload[ 'state' ]:
o.append( trans.app.model.Job.state == s )
query = query.filter(
or_( *o )
)
for k, v in input_param.items():
a = aliased( trans.app.model.JobParameter )
query = query.filter( and_(
trans.app.model.Job.id == a.job_id,
a.name == k,
a.value == v
) )
for k, v in input_data.items():
# Here we are attempting to link the inputs to the underlying
# dataset (not the dataset association).
# This way, if the calculation was done using a copied HDA
# (copied from the library or another history), the search will
# still find the job
a = aliased( trans.app.model.JobToInputDatasetAssociation )
b = aliased( trans.app.model.HistoryDatasetAssociation )
query = query.filter( and_(
trans.app.model.Job.id == a.job_id,
a.dataset_id == b.id,
b.deleted == false(),
b.dataset_id == v
) )
out = []
for job in query.all():
# check to make sure none of the output files have been deleted
if all( list( a.dataset.deleted is False for a in job.output_datasets ) ):
out.append( self.encode_all_ids( trans, job.to_dict( 'element' ), True ) )
return out
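# Illustrative request sketch (assumption, not part of the original controller):
# the routes follow the docstrings above and all ids are encoded Galaxy ids;
# "cat1" is a hypothetical tool id.
#
#   GET  /api/jobs?state=error&tool_id=cat1   -> index(): summary job dicts
#   GET  /api/jobs/<id>?full=true             -> show(): adds stdout/stderr
#   GET  /api/jobs/<id>/inputs                -> inputs(): dataset associations
#   POST /api/jobs/search  {"tool_id": "cat1", "inputs": {...}, "state": "ok"}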
|
the-stack_0_22213 |
from gna.bindings import patchROOTClass, DataType, provided_precisions
import ROOT as R
from printing import printl, nextlevel
import types
import numpy as np
classes = tuple(R.OutputDescriptorT(ft, ft) for ft in provided_precisions)
classes_input = tuple(R.InputDescriptorT(ft, ft) for ft in provided_precisions)
classes_object = tuple(R.GNAObjectT(ft, ft) for ft in provided_precisions)
classes_td = tuple(R.TransformationDescriptorT(ft, ft) for ft in provided_precisions)
@patchROOTClass(classes, '__str__')
def OutputDescriptor____str__(self, **kwargs):
ret = '[out] {}: {}'.format(self.name(), self.check() and self.datatype() or 'invalid')
data, sl = kwargs.pop('data', False), kwargs.pop('slice', slice(None))
if data and self.check() and self.datatype():
values = str(self.data()[sl])
ret = ret+': '+values+'\n'
return ret
@patchROOTClass(classes, 'print')
def OutputDescriptor__print(self, **kwargs):
printl(OutputDescriptor____str__(self, **kwargs))
@patchROOTClass(classes, 'single')
def OutputDescriptor__single(self):
return self
@patchROOTClass(classes, '__rshift__')
def OutputDescriptor______rshift__(output, inputs):
if isinstance(inputs, classes_input):
inputs.connect(output)
elif isinstance(inputs, (list, tuple, types.GeneratorType)):
for inp in inputs:
OutputDescriptor______rshift__(output, inp)
elif isinstance(inputs, classes_object+classes_td):
OutputDescriptor______rshift__(output, inputs.single_input())
else:
raise Exception('Failed to connect {} to {}'.format(output.name(), inputs))
@patchROOTClass(classes, '__rlshift__')
def OutputDescriptor______rlshift__(output, inputs):
OutputDescriptor______rshift__(output, inputs)
@patchROOTClass(classes, '__gt__')
@patchROOTClass(classes, '__lt__')
def OutputDescriptor______cmp__(a, b):
    raise Exception('Someone tried to use >/< operators. Perhaps you meant >>/<< instead?')
@patchROOTClass(classes, 'data')
@patchROOTClass(classes, '__call__')
def OutputDescriptor__data(self):
buf = self.__data_orig()
datatype = self.datatype()
return np.frombuffer(buf, count=datatype.size(), dtype=buf.typecode).reshape(datatype.shape, order='F')
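# Illustrative sketch (assumption, not part of the original bindings): with the
# patches above, outputs of two hypothetical GNA objects a and b can be wired
# with the shift operators and read back as numpy views.
#
#   a.single() >> b.single_input()   # connect a single output to a single input
#   a.single() >> [inp1, inp2]       # fan the same output out to several inputs
#   arr = a.single().data()          # Fortran-ordered numpy view of the data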
|
the-stack_0_22218 | import json
from psycopg2.extras import Json
from django.contrib.postgres import forms, lookups
from django.core import exceptions
from django.db.models import Field, Transform
from django.utils.translation import ugettext_lazy as _
__all__ = ['JSONField']
class JsonAdapter(Json):
"""
Customized psycopg2.extras.Json to allow for a custom encoder.
"""
def __init__(self, adapted, dumps=None, encoder=None):
self.encoder = encoder
super(JsonAdapter, self).__init__(adapted, dumps=dumps)
def dumps(self, obj):
options = {'cls': self.encoder} if self.encoder else {}
return json.dumps(obj, **options)
class JSONField(Field):
empty_strings_allowed = False
description = _('A JSON object')
default_error_messages = {
'invalid': _("Value must be valid JSON."),
}
def __init__(self, verbose_name=None, name=None, encoder=None, **kwargs):
if encoder and not callable(encoder):
raise ValueError("The encoder parameter must be a callable object.")
self.encoder = encoder
super(JSONField, self).__init__(verbose_name, name, **kwargs)
def db_type(self, connection):
return 'jsonb'
def deconstruct(self):
name, path, args, kwargs = super(JSONField, self).deconstruct()
if self.encoder is not None:
kwargs['encoder'] = self.encoder
return name, path, args, kwargs
def get_transform(self, name):
transform = super(JSONField, self).get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def get_prep_value(self, value):
if value is not None:
return JsonAdapter(value, encoder=self.encoder)
return value
def validate(self, value, model_instance):
super(JSONField, self).validate(value, model_instance)
options = {'cls': self.encoder} if self.encoder else {}
try:
json.dumps(value, **options)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def value_to_string(self, obj):
value = self.value_from_object(obj)
return value
def formfield(self, **kwargs):
defaults = {'form_class': forms.JSONField}
defaults.update(kwargs)
return super(JSONField, self).formfield(**defaults)
JSONField.register_lookup(lookups.DataContains)
JSONField.register_lookup(lookups.ContainedBy)
JSONField.register_lookup(lookups.HasKey)
JSONField.register_lookup(lookups.HasKeys)
JSONField.register_lookup(lookups.HasAnyKeys)
class KeyTransform(Transform):
def __init__(self, key_name, *args, **kwargs):
super(KeyTransform, self).__init__(*args, **kwargs)
self.key_name = key_name
def as_sql(self, compiler, connection):
key_transforms = [self.key_name]
previous = self.lhs
while isinstance(previous, KeyTransform):
key_transforms.insert(0, previous.key_name)
previous = previous.lhs
lhs, params = compiler.compile(previous)
if len(key_transforms) > 1:
return "{} #> %s".format(lhs), [key_transforms] + params
try:
int(self.key_name)
except ValueError:
lookup = "'%s'" % self.key_name
else:
lookup = "%s" % self.key_name
return "(%s -> %s)" % (lhs, lookup), params
class KeyTransformFactory(object):
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
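# Illustrative usage sketch (the model and field names are hypothetical and
# require a configured Django project with django.contrib.postgres installed):
#
#     from django.contrib.postgres.fields import JSONField
#     from django.core.serializers.json import DjangoJSONEncoder
#     from django.db import models
#
#     class Event(models.Model):
#         payload = JSONField(encoder=DjangoJSONEncoder, default=dict)
#
#     Event.objects.filter(payload__contains={'status': 'open'})  # DataContains lookup
#     Event.objects.filter(payload__has_key='tags')                # HasKey lookup
#     Event.objects.filter(payload__meta__source='web')            # chained KeyTransforms (#> path)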
|
the-stack_0_22220 | #!/usr/bin/env python3
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A top level harness to run all unit-tests in a specific engine build.
"""
import argparse
import glob
import multiprocessing
import os
import re
import subprocess
import sys
import time
buildroot_dir = os.path.abspath(
os.path.join(os.path.realpath(__file__), '..', '..', '..')
)
out_dir = os.path.join(buildroot_dir, 'out')
golden_dir = os.path.join(buildroot_dir, 'flutter', 'testing', 'resources')
fonts_dir = os.path.join(
buildroot_dir, 'flutter', 'third_party', 'txt', 'third_party', 'fonts'
)
roboto_font_path = os.path.join(fonts_dir, 'Roboto-Regular.ttf')
font_subset_dir = os.path.join(buildroot_dir, 'flutter', 'tools', 'font-subset')
fml_unittests_filter = '--gtest_filter=-*TimeSensitiveTest*'
def PrintDivider(char='='):
print('\n')
for _ in range(4):
print(''.join([char for _ in range(80)]))
print('\n')
def RunCmd(cmd, forbidden_output=[], expect_failure=False, env=None, **kwargs):
command_string = ' '.join(cmd)
PrintDivider('>')
print('Running command "%s"' % command_string)
start_time = time.time()
stdout_pipe = sys.stdout if not forbidden_output else subprocess.PIPE
stderr_pipe = sys.stderr if not forbidden_output else subprocess.PIPE
process = subprocess.Popen(
cmd,
stdout=stdout_pipe,
stderr=stderr_pipe,
env=env,
universal_newlines=True,
**kwargs
)
stdout, stderr = process.communicate()
end_time = time.time()
if process.returncode != 0 and not expect_failure:
PrintDivider('!')
print(
'Failed Command:\n\n%s\n\nExit Code: %d\n' %
(command_string, process.returncode)
)
if stdout:
print('STDOUT: \n%s' % stdout)
if stderr:
print('STDERR: \n%s' % stderr)
PrintDivider('!')
raise Exception(
'Command "%s" exited with code %d.' %
(command_string, process.returncode)
)
if stdout or stderr:
print(stdout)
print(stderr)
for forbidden_string in forbidden_output:
if (stdout and forbidden_string in stdout) or (stderr and
forbidden_string in stderr):
raise Exception(
'command "%s" contained forbidden string %s' %
(command_string, forbidden_string)
)
PrintDivider('<')
print(
'Command run successfully in %.2f seconds: %s' %
(end_time - start_time, command_string)
)
def IsMac():
return sys.platform == 'darwin'
def IsLinux():
return sys.platform.startswith('linux')
def IsWindows():
return sys.platform.startswith(('cygwin', 'win'))
def ExecutableSuffix():
return '.exe' if IsWindows() else ''
def FindExecutablePath(path):
if os.path.exists(path):
return path
if IsWindows():
exe_path = path + '.exe'
if os.path.exists(exe_path):
return exe_path
bat_path = path + '.bat'
if os.path.exists(bat_path):
return bat_path
raise Exception('Executable %s does not exist!' % path)
def BuildEngineExecutableCommand(
build_dir, executable_name, flags=[], coverage=False, gtest=False
):
unstripped_exe = os.path.join(build_dir, 'exe.unstripped', executable_name)
# We cannot run the unstripped binaries directly when coverage is enabled.
if IsLinux() and os.path.exists(unstripped_exe) and not coverage:
# Use unstripped executables in order to get better symbolized crash
# stack traces on Linux.
executable = unstripped_exe
else:
executable = FindExecutablePath(os.path.join(build_dir, executable_name))
coverage_script = os.path.join(
buildroot_dir, 'flutter', 'build', 'generate_coverage.py'
)
if coverage:
coverage_flags = [
'-t', executable, '-o',
os.path.join(build_dir, 'coverage', executable_name), '-f', 'html'
]
updated_flags = ['--args=%s' % ' '.join(flags)]
test_command = [coverage_script] + coverage_flags + updated_flags
else:
test_command = [executable] + flags
if gtest:
gtest_parallel = os.path.join(
buildroot_dir, 'third_party', 'gtest-parallel', 'gtest-parallel'
)
test_command = ['python3', gtest_parallel] + test_command
return test_command
def RunEngineExecutable(
build_dir,
executable_name,
filter,
flags=[],
cwd=buildroot_dir,
forbidden_output=[],
expect_failure=False,
coverage=False,
extra_env={},
gtest=False
):
if filter is not None and executable_name not in filter:
print('Skipping %s due to filter.' % executable_name)
return
unstripped_exe = os.path.join(build_dir, 'exe.unstripped', executable_name)
env = os.environ.copy()
# We cannot run the unstripped binaries directly when coverage is enabled.
if IsLinux() and os.path.exists(unstripped_exe) and not coverage:
# Some tests depend on the EGL/GLES libraries placed in the build directory.
env['LD_LIBRARY_PATH'] = os.path.join(build_dir, 'lib.unstripped')
elif IsMac():
env['DYLD_LIBRARY_PATH'] = build_dir
else:
env['PATH'] = build_dir + ":" + env['PATH']
print('Running %s in %s' % (executable_name, cwd))
test_command = BuildEngineExecutableCommand(
build_dir,
executable_name,
flags=flags,
coverage=coverage,
gtest=gtest,
)
env['FLUTTER_BUILD_DIRECTORY'] = build_dir
for key, value in extra_env.items():
env[key] = value
try:
RunCmd(
test_command,
cwd=cwd,
forbidden_output=forbidden_output,
expect_failure=expect_failure,
env=env
)
except:
# The LUCI environment may provide a variable containing a directory path
# for additional output files that will be uploaded to cloud storage.
# If the command generated a core dump, then run a script to analyze
# the dump and output a report that will be uploaded.
luci_test_outputs_path = os.environ.get('FLUTTER_TEST_OUTPUTS_DIR')
core_path = os.path.join(cwd, 'core')
if luci_test_outputs_path and os.path.exists(core_path) and os.path.exists(
unstripped_exe):
dump_path = os.path.join(
luci_test_outputs_path, '%s_%s.txt' % (executable_name, sys.platform)
)
print('Writing core dump analysis to %s' % dump_path)
subprocess.call([
os.path.join(
buildroot_dir, 'flutter', 'testing', 'analyze_core_dump.sh'
),
buildroot_dir,
unstripped_exe,
core_path,
dump_path,
])
os.unlink(core_path)
raise
class EngineExecutableTask(object):
def __init__(
self,
build_dir,
executable_name,
filter,
flags=[],
cwd=buildroot_dir,
forbidden_output=[],
expect_failure=False,
coverage=False,
extra_env={}
):
self.build_dir = build_dir
self.executable_name = executable_name
self.filter = filter
self.flags = flags
self.cwd = cwd
self.forbidden_output = forbidden_output
self.expect_failure = expect_failure
self.coverage = coverage
self.extra_env = extra_env
def __call__(self, *args):
RunEngineExecutable(
self.build_dir,
self.executable_name,
self.filter,
flags=self.flags,
cwd=self.cwd,
forbidden_output=self.forbidden_output,
expect_failure=self.expect_failure,
coverage=self.coverage,
extra_env=self.extra_env,
)
def __str__(self):
command = BuildEngineExecutableCommand(
self.build_dir,
self.executable_name,
flags=self.flags,
coverage=self.coverage
)
return " ".join(command)
def RunCCTests(build_dir, filter, coverage, capture_core_dump):
print("Running Engine Unit-tests.")
if capture_core_dump and IsLinux():
import resource
resource.setrlimit(
resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
)
shuffle_flags = [
"--gtest_repeat=2",
"--gtest_shuffle",
]
repeat_flags = [
"--repeat=2",
]
def make_test(name, flags=repeat_flags, extra_env={}):
return (name, flags, extra_env)
unittests = [
make_test('client_wrapper_glfw_unittests'),
make_test('client_wrapper_unittests'),
make_test('common_cpp_core_unittests'),
make_test('common_cpp_unittests'),
make_test('dart_plugin_registrant_unittests'),
make_test('display_list_rendertests'),
make_test('display_list_unittests'),
make_test('embedder_proctable_unittests'),
make_test('embedder_unittests'),
make_test('fml_unittests', flags=[fml_unittests_filter] + repeat_flags),
make_test('no_dart_plugin_registrant_unittests'),
make_test('runtime_unittests'),
make_test('testing_unittests'),
make_test('tonic_unittests'),
# The image release unit test can take a while on slow machines.
make_test('ui_unittests', flags=repeat_flags + ['--timeout=90']),
]
if not IsWindows():
unittests += [
# https://github.com/google/googletest/issues/2490
make_test('android_external_view_embedder_unittests'),
make_test('jni_unittests'),
make_test('platform_view_android_delegate_unittests'),
# https://github.com/flutter/flutter/issues/36295
make_test('shell_unittests'),
]
if IsWindows():
unittests += [
# The accessibility library only supports Mac and Windows.
make_test('accessibility_unittests'),
make_test('client_wrapper_windows_unittests'),
make_test('flutter_windows_unittests'),
]
# These unit-tests are Objective-C and can only run on Darwin.
if IsMac():
unittests += [
# The accessibility library only supports Mac and Windows.
make_test('accessibility_unittests'),
make_test('flutter_channels_unittests'),
]
if IsLinux():
flow_flags = [
'--golden-dir=%s' % golden_dir,
'--font-file=%s' % roboto_font_path,
]
icu_flags = [
'--icu-data-file-path=%s' % os.path.join(build_dir, 'icudtl.dat')
]
unittests += [
make_test('flow_unittests', flags=repeat_flags + ['--'] + flow_flags),
make_test('flutter_glfw_unittests'),
make_test(
'flutter_linux_unittests', extra_env={'G_DEBUG': 'fatal-criticals'}
),
# https://github.com/flutter/flutter/issues/36296
make_test('txt_unittests', flags=repeat_flags + ['--'] + icu_flags),
]
else:
flow_flags = ['--gtest_filter=-PerformanceOverlayLayer.Gold']
unittests += [
make_test('flow_unittests', flags=repeat_flags + flow_flags),
]
for test, flags, extra_env in unittests:
RunEngineExecutable(
build_dir,
test,
filter,
flags,
coverage=coverage,
extra_env=extra_env,
gtest=True
)
if IsMac():
# flutter_desktop_darwin_unittests uses global state that isn't handled
# correctly by gtest-parallel.
# https://github.com/flutter/flutter/issues/104789
RunEngineExecutable(
build_dir,
'flutter_desktop_darwin_unittests',
filter,
shuffle_flags,
coverage=coverage
)
# Impeller tests are only supported on macOS for now.
RunEngineExecutable(
build_dir,
'impeller_unittests',
filter,
shuffle_flags,
coverage=coverage
)
def RunEngineBenchmarks(build_dir, filter):
print("Running Engine Benchmarks.")
icu_flags = [
'--icu-data-file-path=%s' % os.path.join(build_dir, 'icudtl.dat')
]
RunEngineExecutable(build_dir, 'shell_benchmarks', filter, icu_flags)
RunEngineExecutable(build_dir, 'fml_benchmarks', filter, icu_flags)
RunEngineExecutable(build_dir, 'ui_benchmarks', filter, icu_flags)
if IsLinux():
RunEngineExecutable(build_dir, 'txt_benchmarks', filter, icu_flags)
def GatherDartTest(
build_dir,
test_packages,
dart_file,
verbose_dart_snapshot,
multithreaded,
enable_observatory=False,
expect_failure=False,
alternative_tester=False
):
kernel_file_name = os.path.basename(dart_file) + '.dill'
kernel_file_output = os.path.join(build_dir, 'gen', kernel_file_name)
error_message = "%s doesn't exist. Please run the build that populates %s" % (
kernel_file_output, build_dir
)
assert os.path.isfile(kernel_file_output), error_message
command_args = []
if not enable_observatory:
command_args.append('--disable-observatory')
dart_file_contents = open(dart_file, 'r')
custom_options = re.findall(
"// FlutterTesterOptions=(.*)", dart_file_contents.read()
)
dart_file_contents.close()
command_args.extend(custom_options)
command_args += [
'--use-test-fonts',
'--icu-data-file-path=%s' % os.path.join(build_dir, 'icudtl.dat'),
'--flutter-assets-dir=%s' %
os.path.join(build_dir, 'gen', 'flutter', 'lib', 'ui', 'assets'),
'--disable-asset-fonts',
kernel_file_output,
]
if multithreaded:
threading = 'multithreaded'
command_args.insert(0, '--force-multithreading')
else:
threading = 'single-threaded'
tester_name = 'flutter_tester'
if alternative_tester:
tester_name = 'flutter_tester_fractional_translation'
print(
"Running test '%s' using '%s' (%s)" %
(kernel_file_name, tester_name, threading)
)
forbidden_output = [] if 'unopt' in build_dir or expect_failure else [
'[ERROR'
]
return EngineExecutableTask(
build_dir,
tester_name,
None,
command_args,
forbidden_output=forbidden_output,
expect_failure=expect_failure,
)
def EnsureDebugUnoptSkyPackagesAreBuilt():
variant_out_dir = os.path.join(out_dir, 'host_debug_unopt')
message = []
message.append('gn --runtime-mode debug --unopt --no-lto')
message.append('ninja -C %s flutter/sky/packages' % variant_out_dir)
final_message = '%s doesn\'t exist. Please run the following commands: \n%s' % (
variant_out_dir, '\n'.join(message)
)
assert os.path.exists(variant_out_dir), final_message
def EnsureIosTestsAreBuilt(ios_out_dir):
"""Builds the engine variant and the test dylib containing the XCTests"""
tmp_out_dir = os.path.join(out_dir, ios_out_dir)
ios_test_lib = os.path.join(tmp_out_dir, 'libios_test_flutter.dylib')
message = []
message.append(
'gn --ios --unoptimized --runtime-mode=debug --no-lto --simulator'
)
message.append('autoninja -C %s ios_test_flutter' % ios_out_dir)
final_message = '%s or %s doesn\'t exist. Please run the following commands: \n%s' % (
ios_out_dir, ios_test_lib, '\n'.join(message)
)
assert os.path.exists(tmp_out_dir
) and os.path.exists(ios_test_lib), final_message
def AssertExpectedXcodeVersion():
"""Checks that the user has a version of Xcode installed"""
version_output = subprocess.check_output(['xcodebuild', '-version'])
match = re.match(b"Xcode (\d+)", version_output)
message = "Xcode must be installed to run the iOS embedding unit tests"
assert match, message
def JavaHome():
script_path = os.path.dirname(os.path.realpath(__file__))
if IsMac():
return os.path.join(
script_path, '..', '..', 'third_party', 'java', 'openjdk', 'Contents',
'Home'
)
else:
return os.path.join(
script_path, '..', '..', 'third_party', 'java', 'openjdk'
)
def JavaBin():
return os.path.join(JavaHome(), 'bin', 'java.exe' if IsWindows() else 'java')
def RunJavaTests(filter, android_variant='android_debug_unopt'):
"""Runs the Java JUnit unit tests for the Android embedding"""
test_runner_dir = os.path.join(
buildroot_dir, 'flutter', 'shell', 'platform', 'android', 'test_runner'
)
gradle_bin = os.path.join(
buildroot_dir, 'third_party', 'gradle', 'bin',
'gradle.bat' if IsWindows() else 'gradle'
)
flutter_jar = os.path.join(out_dir, android_variant, 'flutter.jar')
android_home = os.path.join(
buildroot_dir, 'third_party', 'android_tools', 'sdk'
)
build_dir = os.path.join(
out_dir, android_variant, 'robolectric_tests', 'build'
)
gradle_cache_dir = os.path.join(
out_dir, android_variant, 'robolectric_tests', '.gradle'
)
test_class = filter if filter else '*'
command = [
gradle_bin,
'-Pflutter_jar=%s' % flutter_jar,
'-Pbuild_dir=%s' % build_dir,
'testDebugUnitTest',
'--tests=%s' % test_class,
'--rerun-tasks',
'--no-daemon',
'--project-cache-dir=%s' % gradle_cache_dir,
'--gradle-user-home=%s' % gradle_cache_dir,
]
env = dict(os.environ, ANDROID_HOME=android_home, JAVA_HOME=JavaHome())
RunCmd(command, cwd=test_runner_dir, env=env)
def RunAndroidTests(android_variant='android_debug_unopt', adb_path=None):
test_runner_name = 'flutter_shell_native_unittests'
tests_path = os.path.join(out_dir, android_variant, test_runner_name)
remote_path = '/data/local/tmp'
remote_tests_path = os.path.join(remote_path, test_runner_name)
  if adb_path is None:
adb_path = 'adb'
RunCmd([adb_path, 'push', tests_path, remote_path], cwd=buildroot_dir)
RunCmd([adb_path, 'shell', remote_tests_path])
systrace_test = os.path.join(
buildroot_dir, 'flutter', 'testing', 'android_systrace_test.py'
)
scenario_apk = os.path.join(
out_dir, android_variant, 'firebase_apks', 'scenario_app.apk'
)
RunCmd([
systrace_test, '--adb-path', adb_path, '--apk-path', scenario_apk,
'--package-name', 'dev.flutter.scenarios', '--activity-name',
'.TextPlatformViewActivity'
])
def RunObjcTests(ios_variant='ios_debug_sim_unopt', test_filter=None):
"""Runs Objective-C XCTest unit tests for the iOS embedding"""
AssertExpectedXcodeVersion()
ios_out_dir = os.path.join(out_dir, ios_variant)
EnsureIosTestsAreBuilt(ios_out_dir)
ios_unit_test_dir = os.path.join(
buildroot_dir, 'flutter', 'testing', 'ios', 'IosUnitTests'
)
# Avoid using xcpretty unless the following can be addressed:
# - Make sure all relevant failure output is printed on a failure.
# - Make sure that a failing exit code is set for CI.
# See https://github.com/flutter/flutter/issues/63742
command = [
'xcodebuild '
'-sdk iphonesimulator '
'-scheme IosUnitTests '
"-destination platform='iOS Simulator,name=iPhone 11' "
'test '
'FLUTTER_ENGINE=' + ios_variant
]
  if test_filter is not None:
command[0] = command[0] + " -only-testing:%s" % test_filter
RunCmd(command, cwd=ios_unit_test_dir, shell=True)
def GatherDartTests(build_dir, filter, verbose_dart_snapshot):
dart_tests_dir = os.path.join(
buildroot_dir,
'flutter',
'testing',
'dart',
)
# This one is a bit messy. The pubspec.yaml at flutter/testing/dart/pubspec.yaml
# has dependencies that are hardcoded to point to the sky packages at host_debug_unopt/
# Before running Dart tests, make sure to run just that target (NOT the whole engine)
EnsureDebugUnoptSkyPackagesAreBuilt()
# Now that we have the Sky packages at the hardcoded location, run `dart pub get`.
RunEngineExecutable(
build_dir,
os.path.join('dart-sdk', 'bin', 'dart'),
None,
flags=['pub', 'get', '--offline'],
cwd=dart_tests_dir,
)
dart_observatory_tests = glob.glob(
'%s/observatory/*_test.dart' % dart_tests_dir
)
dart_tests = glob.glob('%s/*_test.dart' % dart_tests_dir)
test_packages = os.path.join(dart_tests_dir, '.packages')
if 'release' not in build_dir:
for dart_test_file in dart_observatory_tests:
if filter is not None and os.path.basename(dart_test_file) not in filter:
print("Skipping '%s' due to filter." % dart_test_file)
else:
print(
"Gathering dart test '%s' with observatory enabled" % dart_test_file
)
yield GatherDartTest(
build_dir, test_packages, dart_test_file, verbose_dart_snapshot,
True, True
)
yield GatherDartTest(
build_dir, test_packages, dart_test_file, verbose_dart_snapshot,
False, True
)
# Smoke test with tester variant that has no raster cache and enabled fractional translation
yield GatherDartTest(
build_dir, test_packages, dart_test_file, verbose_dart_snapshot,
False, True, True
)
for dart_test_file in dart_tests:
if filter is not None and os.path.basename(dart_test_file) not in filter:
print("Skipping '%s' due to filter." % dart_test_file)
else:
print("Gathering dart test '%s'" % dart_test_file)
yield GatherDartTest(
build_dir, test_packages, dart_test_file, verbose_dart_snapshot, True
)
yield GatherDartTest(
build_dir, test_packages, dart_test_file, verbose_dart_snapshot, False
)
def GatherDartSmokeTest(build_dir, verbose_dart_snapshot):
smoke_test = os.path.join(
buildroot_dir, "flutter", "testing", "smoke_test_failure",
"fail_test.dart"
)
test_packages = os.path.join(
buildroot_dir, "flutter", "testing", "smoke_test_failure", ".packages"
)
yield GatherDartTest(
build_dir,
test_packages,
smoke_test,
verbose_dart_snapshot,
True,
expect_failure=True
)
yield GatherDartTest(
build_dir,
test_packages,
smoke_test,
verbose_dart_snapshot,
False,
expect_failure=True
)
def GatherFrontEndServerTests(build_dir):
test_dir = os.path.join(buildroot_dir, 'flutter', 'flutter_frontend_server')
dart_tests = glob.glob('%s/test/*_test.dart' % test_dir)
for dart_test_file in dart_tests:
opts = [
'--disable-dart-dev', dart_test_file, build_dir,
os.path.join(build_dir, 'gen', 'frontend_server.dart.snapshot'),
os.path.join(build_dir, 'flutter_patched_sdk')
]
yield EngineExecutableTask(
build_dir,
os.path.join('dart-sdk', 'bin', 'dart'),
None,
flags=opts,
cwd=test_dir
)
def GatherConstFinderTests(build_dir):
test_dir = os.path.join(
buildroot_dir, 'flutter', 'tools', 'const_finder', 'test'
)
opts = [
'--disable-dart-dev',
os.path.join(test_dir, 'const_finder_test.dart'),
os.path.join(build_dir, 'gen', 'frontend_server.dart.snapshot'),
os.path.join(build_dir, 'flutter_patched_sdk')
]
yield EngineExecutableTask(
build_dir,
os.path.join('dart-sdk', 'bin', 'dart'),
None,
flags=opts,
cwd=test_dir
)
def GatherLitetestTests(build_dir):
test_dir = os.path.join(buildroot_dir, 'flutter', 'testing', 'litetest')
dart_tests = glob.glob('%s/test/*_test.dart' % test_dir)
for dart_test_file in dart_tests:
opts = ['--disable-dart-dev', dart_test_file]
yield EngineExecutableTask(
build_dir,
os.path.join('dart-sdk', 'bin', 'dart'),
None,
flags=opts,
cwd=test_dir
)
def RunBenchmarkTests(build_dir):
test_dir = os.path.join(buildroot_dir, 'flutter', 'testing', 'benchmark')
dart_tests = glob.glob('%s/test/*_test.dart' % test_dir)
for dart_test_file in dart_tests:
opts = ['--disable-dart-dev', dart_test_file]
RunEngineExecutable(
build_dir,
os.path.join('dart-sdk', 'bin', 'dart'),
None,
flags=opts,
cwd=test_dir
)
def GatherGithooksTests(build_dir):
test_dir = os.path.join(buildroot_dir, 'flutter', 'tools', 'githooks')
dart_tests = glob.glob('%s/test/*_test.dart' % test_dir)
for dart_test_file in dart_tests:
opts = ['--disable-dart-dev', dart_test_file]
yield EngineExecutableTask(
build_dir,
os.path.join('dart-sdk', 'bin', 'dart'),
None,
flags=opts,
cwd=test_dir
)
def GatherClangTidyTests(build_dir):
test_dir = os.path.join(buildroot_dir, 'flutter', 'tools', 'clang_tidy')
dart_tests = glob.glob('%s/test/*_test.dart' % test_dir)
for dart_test_file in dart_tests:
opts = [
'--disable-dart-dev', dart_test_file,
os.path.join(build_dir, 'compile_commands.json'),
os.path.join(buildroot_dir, 'flutter')
]
yield EngineExecutableTask(
build_dir,
os.path.join('dart-sdk', 'bin', 'dart'),
None,
flags=opts,
cwd=test_dir
)
def GatherApiConsistencyTests(build_dir):
test_dir = os.path.join(buildroot_dir, 'flutter', 'tools', 'api_check')
dart_tests = glob.glob('%s/test/*_test.dart' % test_dir)
for dart_test_file in dart_tests:
opts = [
'--disable-dart-dev', dart_test_file,
os.path.join(buildroot_dir, 'flutter')
]
yield EngineExecutableTask(
build_dir,
os.path.join('dart-sdk', 'bin', 'dart'),
None,
flags=opts,
cwd=test_dir
)
def RunEngineTasksInParallel(tasks):
# Work around a bug in Python.
#
# The multiprocessing package relies on the win32 WaitForMultipleObjects()
# call, which supports waiting on a maximum of MAXIMUM_WAIT_OBJECTS (defined
# by Windows to be 64) handles, processes in this case. To avoid hitting
# this, we limit ourselves to 60 handles (since there are a couple extra
# processes launched for the queue reader and thread wakeup reader).
#
# See: https://bugs.python.org/issue26903
max_processes = multiprocessing.cpu_count()
if sys.platform.startswith(('cygwin', 'win')) and max_processes > 60:
max_processes = 60
pool = multiprocessing.Pool(processes=max_processes)
async_results = [(t, pool.apply_async(t, ())) for t in tasks]
failures = []
for task, async_result in async_results:
try:
async_result.get()
except Exception as exn:
failures += [(task, exn)]
if len(failures) > 0:
print("The following commands failed:")
for task, exn in failures:
print("%s\n" % str(task))
raise Exception()
def main():
parser = argparse.ArgumentParser()
all_types = [
'engine', 'dart', 'benchmarks', 'java', 'android', 'objc', 'font-subset'
]
parser.add_argument(
'--variant',
dest='variant',
action='store',
default='host_debug_unopt',
help='The engine build variant to run the tests for.'
)
parser.add_argument(
'--type',
type=str,
default='all',
help='A list of test types, default is "all" (equivalent to "%s")' %
(','.join(all_types))
)
parser.add_argument(
'--engine-filter',
type=str,
default='',
help='A list of engine test executables to run.'
)
parser.add_argument(
'--dart-filter',
type=str,
default='',
help='A list of Dart test scripts to run.'
)
parser.add_argument(
'--java-filter',
type=str,
default='',
help='A single Java test class to run (example: "io.flutter.SmokeTest")'
)
parser.add_argument(
'--android-variant',
dest='android_variant',
action='store',
default='android_debug_unopt',
help='The engine build variant to run java or android tests for'
)
parser.add_argument(
'--ios-variant',
dest='ios_variant',
action='store',
default='ios_debug_sim_unopt',
help='The engine build variant to run objective-c tests for'
)
parser.add_argument(
'--verbose-dart-snapshot',
dest='verbose_dart_snapshot',
action='store_true',
default=False,
help='Show extra dart snapshot logging.'
)
parser.add_argument(
'--objc-filter',
type=str,
default=None,
help='Filter parameter for which objc tests to run (example: "IosUnitTestsTests/SemanticsObjectTest/testShouldTriggerAnnouncement")'
)
parser.add_argument(
'--coverage',
action='store_true',
default=None,
help='Generate coverage reports for each unit test framework run.'
)
parser.add_argument(
'--engine-capture-core-dump',
dest='engine_capture_core_dump',
action='store_true',
default=False,
help='Capture core dumps from crashes of engine tests.'
)
parser.add_argument(
'--use-sanitizer-suppressions',
dest='sanitizer_suppressions',
action='store_true',
default=False,
      help='Provide the sanitizer suppressions lists to the tests via the environment.'
)
parser.add_argument(
'--adb-path',
dest='adb_path',
action='store',
default=None,
help='Provide the path of adb used for android tests. By default it looks on $PATH.'
)
args = parser.parse_args()
if args.type == 'all':
types = all_types
else:
types = args.type.split(',')
build_dir = os.path.join(out_dir, args.variant)
if args.type != 'java' and args.type != 'android':
assert os.path.exists(
build_dir
), 'Build variant directory %s does not exist!' % build_dir
if args.sanitizer_suppressions:
assert IsLinux() or IsMac(
), "The sanitizer suppressions flag is only supported on Linux and Mac."
file_dir = os.path.dirname(os.path.abspath(__file__))
command = [
"env", "-i", "bash", "-c",
"source {}/sanitizer_suppressions.sh >/dev/null && env"
.format(file_dir)
]
process = subprocess.Popen(command, stdout=subprocess.PIPE)
for line in process.stdout:
key, _, value = line.decode('ascii').strip().partition("=")
os.environ[key] = value
process.communicate() # Avoid pipe deadlock while waiting for termination.
engine_filter = args.engine_filter.split(',') if args.engine_filter else None
if 'engine' in types:
RunCCTests(
build_dir, engine_filter, args.coverage, args.engine_capture_core_dump
)
if 'dart' in types:
assert not IsWindows(
), "Dart tests can't be run on windows. https://github.com/flutter/flutter/issues/36301."
dart_filter = args.dart_filter.split(',') if args.dart_filter else None
tasks = list(GatherDartSmokeTest(build_dir, args.verbose_dart_snapshot))
tasks += list(GatherLitetestTests(build_dir))
tasks += list(GatherGithooksTests(build_dir))
tasks += list(GatherClangTidyTests(build_dir))
tasks += list(GatherApiConsistencyTests(build_dir))
tasks += list(GatherConstFinderTests(build_dir))
tasks += list(GatherFrontEndServerTests(build_dir))
tasks += list(
GatherDartTests(build_dir, dart_filter, args.verbose_dart_snapshot)
)
RunEngineTasksInParallel(tasks)
if 'java' in types:
assert not IsWindows(), "Android engine files can't be compiled on Windows."
java_filter = args.java_filter
if ',' in java_filter or '*' in java_filter:
print(
          'Can only filter JUnit4 tests by a single, complete class name, eg "io.flutter.SmokeTest". Ignoring filter='
+ java_filter
)
java_filter = None
RunJavaTests(java_filter, args.android_variant)
if 'android' in types:
assert not IsWindows(), "Android engine files can't be compiled on Windows."
RunAndroidTests(args.android_variant, args.adb_path)
if 'objc' in types:
assert IsMac(), "iOS embedding tests can only be run on macOS."
RunObjcTests(args.ios_variant, args.objc_filter)
# https://github.com/flutter/flutter/issues/36300
if 'benchmarks' in types and not IsWindows():
RunBenchmarkTests(build_dir)
RunEngineBenchmarks(build_dir, engine_filter)
variants_to_skip = ['host_release', 'host_profile']
if ('engine' in types or
'font-subset' in types) and args.variant not in variants_to_skip:
RunCmd(['python3', 'test.py'], cwd=font_subset_dir)
if __name__ == '__main__':
sys.exit(main())
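# Example invocations (illustrative; the script name and filter values are
# placeholders -- see the argparse options defined in main() above):
#
#     python3 run_tests.py --variant host_debug_unopt --type engine
#     python3 run_tests.py --type dart --dart-filter observatory_test.dart
#     python3 run_tests.py --type objc --ios-variant ios_debug_sim_unopt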
|
the-stack_0_22221 | # qubit number=4
# total number=50
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.x(input_qubit[3]) # number=27
prog.h(input_qubit[3]) # number=34
prog.cz(input_qubit[0],input_qubit[3]) # number=35
prog.h(input_qubit[3]) # number=36
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.cx(input_qubit[3],input_qubit[0]) # number=38
prog.z(input_qubit[3]) # number=39
prog.cx(input_qubit[3],input_qubit[0]) # number=40
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=41
prog.cz(input_qubit[2],input_qubit[0]) # number=42
prog.h(input_qubit[0]) # number=43
prog.y(input_qubit[3]) # number=37
prog.h(input_qubit[0]) # number=14
prog.h(input_qubit[1]) # number=30
prog.cz(input_qubit[2],input_qubit[0]) # number=15
prog.h(input_qubit[0]) # number=16
prog.h(input_qubit[2]) # number=44
prog.cz(input_qubit[0],input_qubit[2]) # number=45
prog.h(input_qubit[2]) # number=46
prog.x(input_qubit[2]) # number=21
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.h(input_qubit[2]) # number=47
prog.cz(input_qubit[0],input_qubit[2]) # number=48
prog.h(input_qubit[2]) # number=49
prog.cx(input_qubit[0],input_qubit[2]) # number=23
prog.x(input_qubit[2]) # number=24
prog.cx(input_qubit[0],input_qubit[2]) # number=25
prog.cx(input_qubit[0],input_qubit[2]) # number=19
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit3424.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_0_22225 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-return-doc
"""Sampler decorator module for sampling of continuous pulses to discrete pulses to be
exposed to user.
Some atypical boilerplate has been added to solve the problem of decorators not preserving
their wrapped function signatures. Below we explain the problem that samplers solve and how
we implement this.
A sampler is a function that takes a continuous pulse function with signature:
```python
def f(times: np.ndarray, *args, **kwargs) -> np.ndarray:
...
```
and returns a new function:
def f(duration: int, *args, **kwargs) -> SamplePulse:
...
Samplers are used to build up pulse commands from continuous pulse functions.
In Python the creation of a dynamic function that wraps another function will cause
the underlying signature and documentation of the underlying function to be overwritten.
In order to circumvent this issue the Python standard library provides the decorator
`functools.wraps` which allows the programmer to expose the names and signature of the
wrapped function as those of the dynamic function.
Samplers are implemented by creating a function with signature
@sampler
def left(continuous_pulse: Callable, duration: int, *args, **kwargs)
...
This will create a sampler function for `left`. Since it is a dynamic function it would not
have the docstring of `left` available to `help`. This could be fixed by wrapping with
`functools.wraps` in the `sampler`, but this would then cause the signature to be that of the
sampler function which is called on the continuous pulse, below:
`(continuous_pulse: Callable, duration: int, *args, **kwargs)``
This is not correct for the sampler as the output sampled functions accept only a function.
For the standard sampler we get around this by not using `functools.wraps` and
explicitly defining our samplers such as `left`, `right` and `midpoint` and
calling `sampler` internally on the function that implements the sampling schemes such as
`left_sample`, `right_sample` and `midpoint_sample` respectively. See `left` for an example of this.
In this way our standard samplers will expose the proper help signature, but a user can
still create their own sampler with
@sampler
def custom_sampler(time, *args, **kwargs):
...
However, in this case it will be missing documentation of the underlying sampling methods.
We believe that the definition of custom samplers will be rather infrequent.
However, users will frequently apply sampler instances to continuous pulses. Therefore, a different
approach was required for sampled continuous functions (the output of an continuous pulse function
decorated by a sampler instance).
A sampler instance is a decorator that may be used to wrap continuous pulse functions such as
linear below:
```python
@left
def linear(times: np.ndarray, m: float, b: float) -> np.ndarray:
```Linear test function
Args:
times: Input times.
m: Slope.
b: Intercept
Returns:
np.ndarray
```
return m*times+b
```
Which after decoration may be called with a duration rather than an array of times
```python
duration = 10
pulse_command = linear(10, 0.1, 0.1)
```
If one calls help on `linear` they will find
```
linear(duration:int, *args, **kwargs) -> numpy.ndarray
Discretized continuous pulse function: `linear` using
sampler: `_left`.
The first argument (time) of the continuous pulse function has been replaced with
a discretized `duration` of type (int).
Args:
duration (int)
*args: Remaining arguments of continuous pulse function.
See continuous pulse function documentation below.
**kwargs: Remaining kwargs of continuous pulse function.
See continuous pulse function documentation below.
Sampled continuous function:
function linear in module test.python.pulse.test_samplers
linear(x:numpy.ndarray, m:float, b:float) -> numpy.ndarray
Linear test function
Args:
x: Input times.
m: Slope.
b: Intercept
Returns:
np.ndarray
```
This is partly because `functools.wraps` has been used on the underlying function.
This in itself is not sufficient as the signature of the sampled function has
`duration`, whereas the signature of the continuous function is `time`.
This is achieved by removing `__wrapped__` set by `functools.wraps` in order to preserve
the correct signature and also applying `_update_annotations` and `_update_docstring`
to the generated function which corrects the function annotations and adds an informative
docstring respectively.
The user therefore has access to the correct sampled function docstring in its entirety, while
still seeing the signature for the continuous pulse function and all of its arguments.
"""
import functools
from typing import Callable
import textwrap
import pydoc
import numpy as np
from qiskit.pulse.commands.sample_pulse import SamplePulse
from qiskit.pulse.commands.pulse_decorators import functional_pulse
from . import strategies
def _update_annotations(discretized_pulse: Callable) -> Callable:
"""Update annotations of discretized continuous pulse function with duration.
Args:
discretized_pulse: Discretized decorated continuous pulse.
"""
undecorated_annotations = list(discretized_pulse.__annotations__.items())
decorated_annotations = undecorated_annotations[1:]
decorated_annotations.insert(0, ('duration', int))
discretized_pulse.__annotations__ = dict(decorated_annotations)
return discretized_pulse
def _update_docstring(discretized_pulse: Callable, sampler_inst: Callable) -> Callable:
"""Update annotations of discretized continuous pulse function.
Args:
discretized_pulse: Discretized decorated continuous pulse.
sampler_inst: Applied sampler.
"""
wrapped_docstring = pydoc.render_doc(discretized_pulse, '%s')
header, body = wrapped_docstring.split('\n', 1)
body = textwrap.indent(body, ' ')
wrapped_docstring = header+body
updated_ds = """
Discretized continuous pulse function: `{continuous_name}` using
sampler: `{sampler_name}`.
The first argument (time) of the continuous pulse function has been replaced with
a discretized `duration` of type (int).
Args:
duration (int)
*args: Remaining arguments of continuous pulse function.
See continuous pulse function documentation below.
**kwargs: Remaining kwargs of continuous pulse function.
See continuous pulse function documentation below.
Sampled continuous function:
{continuous_doc}
""".format(continuous_name=discretized_pulse.__name__,
sampler_name=sampler_inst.__name__,
continuous_doc=wrapped_docstring)
discretized_pulse.__doc__ = updated_ds
return discretized_pulse
def sampler(sample_function: Callable) -> Callable:
"""Sampler decorator base method.
    Samplers are used for converting a continuous function to a discretized pulse.
They operate on a function with the signature:
`def f(times: np.ndarray, *args, **kwargs) -> np.ndarray`
Where `times` is a numpy array of floats with length n_times and the output array
is a complex numpy array with length n_times. The output of the decorator is an
instance of `FunctionalPulse` with signature:
`def g(duration: int, *args, **kwargs) -> SamplePulse`
Note if your continuous pulse function outputs a `complex` scalar rather than a
`np.ndarray`, you should first vectorize it before applying a sampler.
    This function implements the boilerplate common to all samplers.
Args:
sample_function: A sampler function to be decorated.
"""
def generate_sampler(continuous_pulse: Callable) -> Callable:
"""Return a decorated sampler function."""
@functools.wraps(continuous_pulse)
def call_sampler(duration: int, *args, **kwargs) -> SamplePulse:
"""Replace the call to the continuous function with a call to the sampler applied
to the analytic pulse function."""
sampled_pulse = sample_function(continuous_pulse, duration, *args, **kwargs)
return np.asarray(sampled_pulse, dtype=np.complex_)
# Update type annotations for wrapped continuous function to be discrete
call_sampler = _update_annotations(call_sampler)
# Update docstring with that of the sampler and include sampled function documentation.
call_sampler = _update_docstring(call_sampler, sample_function)
# Unset wrapped to return base sampler signature
# but still get rest of benefits of wraps
# such as __name__, __qualname__
call_sampler.__dict__.pop('__wrapped__')
# wrap with functional pulse
return functional_pulse(call_sampler)
return generate_sampler
def left(continuous_pulse: Callable) -> Callable:
r"""Left sampling strategy decorator.
See `pulse.samplers.sampler` for more information.
For `duration`, return:
$$\{f(t) \in \mathbb{C} | t \in \mathbb{Z} \wedge 0<=t<\texttt{duration}\}$$
Args:
continuous_pulse: To sample.
"""
return sampler(strategies.left_sample)(continuous_pulse)
def right(continuous_pulse: Callable) -> Callable:
r"""Right sampling strategy decorator.
See `pulse.samplers.sampler` for more information.
For `duration`, return:
$$\{f(t) \in \mathbb{C} | t \in \mathbb{Z} \wedge 0<t<=\texttt{duration}\}$$
Args:
continuous_pulse: To sample.
"""
return sampler(strategies.right_sample)(continuous_pulse)
def midpoint(continuous_pulse: Callable) -> Callable:
r"""Midpoint sampling strategy decorator.
See `pulse.samplers.sampler` for more information.
For `duration`, return:
$$\{f(t+0.5) \in \mathbb{C} | t \in \mathbb{Z} \wedge 0<=t<\texttt{duration}\}$$
Args:
continuous_pulse: To sample.
"""
return sampler(strategies.midpoint_sample)(continuous_pulse)
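# Minimal usage sketch mirroring the module docstring above (the function name
# `linear` and its arguments are illustrative only):
#
#     @left
#     def linear(times: np.ndarray, m: float, b: float) -> np.ndarray:
#         return m * times + b
#
#     pulse_command = linear(10, 0.1, 0.1)   # SamplePulse with 10 samples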
|
the-stack_0_22229 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thread based executor
"""
import Queue
import threading
from aria.utils import imports
from .base import BaseExecutor
class ThreadExecutor(BaseExecutor):
"""
Executor which runs tasks in a separate thread. It's easier writing tests
using this executor rather than the full blown subprocess executor.
Note: This executor is not capable of running plugin operations.
"""
def __init__(self, pool_size=1, *args, **kwargs):
super(ThreadExecutor, self).__init__(*args, **kwargs)
self._stopped = False
self._queue = Queue.Queue()
self._pool = []
for i in range(pool_size):
name = 'ThreadExecutor-{index}'.format(index=i+1)
thread = threading.Thread(target=self._processor, name=name)
thread.daemon = True
thread.start()
self._pool.append(thread)
def execute(self, task):
self._queue.put(task)
def close(self):
self._stopped = True
for thread in self._pool:
thread.join()
def _processor(self):
while not self._stopped:
try:
task = self._queue.get(timeout=1)
self._task_started(task)
try:
task_func = imports.load_attribute(task.operation_mapping)
task_func(ctx=task.context, **task.inputs)
self._task_succeeded(task)
except BaseException as e:
self._task_failed(task, exception=e)
# Daemon threads
except BaseException:
pass
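# Illustrative usage sketch (the `task` objects are produced elsewhere by the
# workflow engine and must expose `operation_mapping`, `context` and `inputs`,
# as consumed by _processor above):
#
#     executor = ThreadExecutor(pool_size=2)
#     try:
#         for task in tasks:
#             executor.execute(task)
#     finally:
#         executor.close()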
|
the-stack_0_22231 | import matplotlib.pyplot as plt
import numpy as np
def bars(predictions, plt_name="graph"):
prediction = predictions
plt.style.use('ggplot')
fig, axes = plt.subplots(ncols=1, nrows=1)
ax3= axes
x = np.arange(3)
y1, y2, y3 = prediction
width = 0.20
plt.title('%s\n Polarity Score for POS, NEG, NEU' % plt_name)
plt.xlabel('Parameters')
plt.ylabel('Score')
ax3.bar(x, y1, width, label="Correct Positives")
ax3.bar(x + width, y2, width, color=list(plt.rcParams['axes.prop_cycle'])[2]['color'], label="Correct Negatives")
ax3.bar(x + width + width, y3, width, color=list(plt.rcParams['axes.prop_cycle'])[3]['color'], label="Correct Neutrals")
ax3.set_xticks(x + width)
ax3.set_xticklabels(['Positive', 'Negative', 'Neutral'])
plt.legend()
plt.show()
def biplt(groundTruth, predictedValues, plt_name='<name>'):
gt = groundTruth
pr = predictedValues
x = np.arange(3)
y1, y2 = gt.values, pr.values
fig, axes = plt.subplots(ncols=1, nrows=1)
width = 0.20
plt.title('Accuracy with \n %s' % plt_name)
plt.xlabel('Parameters')
plt.ylabel('Score')
axes.bar(x, y1, width, label="Ground Truth")
axes.bar(x + width, y2, width, color=list(plt.rcParams['axes.prop_cycle'])[2]['color'], label="Predicted")
axes.set_xticks(x + width/2)
axes.set_xticklabels(['Positive', 'Negative', 'Neutral'])
plt.legend()
plt.show()
# y, z, k = prediction
#
# ax = plt.subplot(111)
# ax.bar(x - 0.2, y, width=0.2, color='b', align='center')
# ax.bar(x, z, width=0.2, color='g', align='center')
# ax.bar(x + 0.2, k, width=0.2, color='r', align='center')
# ax.xaxis_date()
#
# plt.show()
def uslplt(groundTruth, predictedValues, plt_name="<name>"):
# data to plot
n_groups = 3
gt = groundTruth.values
pr = predictedValues.values
# create plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.8
rects1 = plt.bar(index, gt, bar_width,
alpha=opacity,
color='b',
label='Frank')
rects2 = plt.bar(index + bar_width, pr, bar_width,
alpha=opacity,
color='g',
label='Guido')
plt.xlabel('Person')
plt.ylabel('Scores')
plt.title('%s\nScores by person'%plt_name)
plt.xticks(index + bar_width, ('A', 'B', 'C'))
plt.legend()
plt.tight_layout()
plt.show()
def stackplotter(HighlyNEG, ModeratelyNEG, NEG, HighlyPOS, ModeratelyPOS, POS, text):
H_NG, M_NG, NG, H_PS, M_PS, PS = HighlyNEG, ModeratelyNEG, NEG, HighlyPOS, ModeratelyPOS, POS
Polarities = [H_NG, M_NG, NG, H_PS, M_PS, PS]
N = len(Polarities) # Number of Bars
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
p1 = plt.bar(ind, Polarities, width)
plt.xlabel('Polarities')
plt.ylabel('Scores')
plt.title(text)
plt.xticks(ind, (
'Highly Negative', 'Moderately Negative', 'Negative', 'Highly Positive', 'Moderately Positive', 'Positive'))
# plt.yticks(np.arange(0, 200, 10))
# plt.legend(p1[0], 'Polarity')
plt.show()
def simple_plot(dataframe):
dataframe.plot.barh(stacked=True)
plt.show()
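# Illustrative calls (shapes are assumptions inferred from the code above):
# `bars` expects three length-3 sequences of scores, while `biplt`/`uslplt`
# expect objects with a length-3 `.values` attribute such as pandas Series.
#
#     bars(([0.7, 0.1, 0.2], [0.1, 0.8, 0.1], [0.2, 0.1, 0.7]), plt_name="demo")
#     biplt(pd.Series([0.5, 0.3, 0.2]), pd.Series([0.4, 0.4, 0.2]), plt_name="demo")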
# from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.pyplot as plt
# import numpy as np
# fig = plt.figure()
# ax = fig.add_subplot(111, projection="3d")
# ax.set_xlabel("x")
# ax.set_ylabel("y")
# ax.set_zlabel("z")
# ax.set_xlim3d(0, 10)
# ax.set_ylim3d(0, 10)
# xpos = [2, 5, 8, 2, 5, 8, 2, 5, 8]
# ypos = [1, 1, 1, 5, 5, 5, 9, 9, 9]
# zpos = np.zeros(9)
# dx = np.ones(9)
# dy = np.ones(9)
# dz = [np.random.random(9) for i in range(4)] # the heights of the 4 bar sets
# print(dz)
# _zpos = zpos # the starting zpos for each bar
# colors = ['r', 'b', 'g', 'y', 'r', 'b', 'y']
# for i in range(4):
# ax.bar3d(xpos, ypos, _zpos, dx, dy, dz[i], color=colors[i])
# _zpos += dz[i] # add the height of each bar to know where to start the next
# print(_zpos)
# plt.gca().invert_xaxis()
# plt.legend()
# plt.show()
|
the-stack_0_22233 | """Provides device automations for Device tracker."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
STATE_HOME,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from .const import DOMAIN
CONDITION_TYPES = {"is_home", "is_not_home"}
CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(CONDITION_TYPES),
}
)
async def async_get_conditions(
hass: HomeAssistant, device_id: str
) -> list[dict[str, str]]:
"""List device conditions for Device tracker devices."""
registry = entity_registry.async_get(hass)
conditions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
# Add conditions for each entity that belongs to this integration
base_condition = {
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
}
conditions += [{**base_condition, CONF_TYPE: cond} for cond in CONDITION_TYPES]
return conditions
@callback
def async_condition_from_config(
hass: HomeAssistant, config: ConfigType
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
reverse = config[CONF_TYPE] == "is_not_home"
@callback
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
result = condition.state(hass, config[ATTR_ENTITY_ID], STATE_HOME)
if reverse:
result = not result
return result
return test_is_state
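# Illustrative automation snippet (YAML) that this module's condition schema
# validates; the device_id and entity_id values are placeholders:
#
#     condition:
#       - condition: device
#         device_id: abcdef1234567890
#         domain: device_tracker
#         entity_id: device_tracker.my_phone
#         type: is_home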
|
the-stack_0_22234 | import sys
from configparser import ConfigParser
from typing import List
class IniParserSingleton:
config_path: str = "config.ini"
global_section: str = "global"
environment: str
available_environments: List[str]
configparser_obj: ConfigParser
class IniParserError(Exception):
pass
def _init_configparser(self):
self.configparser_obj = ConfigParser()
self.configparser_obj.read(self.config_path)
self.available_environments = self.configparser_obj.sections()
self.available_environments.remove(self.global_section)
def __init__(self, config_path: str = "", global_section: str = ""):
if config_path:
self.config_path = config_path
if global_section:
self.global_section = global_section
self._init_configparser()
def _resolve_value(self, key: str):
environment_config = self.configparser_obj[self.environment]
if key in environment_config:
return environment_config[key]
global_config = self.configparser_obj[self.global_section]
if key in global_config:
return global_config[key]
raise self.IniParserError(
f"Key `{key}` is not found in `global` section neither `{self.environment}`"
)
@property
def _available_environments_text(self):
return f" | Available enviroments: {self.available_environments}"
def get_environment_as_cmd_arg(self):
args = sys.argv
if len(args) != 2 or args[1] not in self.available_environments:
raise self.IniParserError(
"Usage: python3 [file.py] [enviroment]"
+ self._available_environments_text
)
self.environment = args[1]
return self
def get_environment_from_file(self, file_path: str):
try:
with open(file_path, "r") as f:
environment = f.read().replace("\n", "")
except FileNotFoundError:
raise self.IniParserError(
f"Please, create file `{file_path}` with available environment"
+ self._available_environments_text
) from FileNotFoundError
        if environment not in self.available_environments:
raise self.IniParserError(
f"Environment `{environment}` in file `{file_path}` is incorrect"
+ self._available_environments_text
)
self.environment = environment
return self
def config(self, key: str, converter=None):
"""gets key from config.ini and if converter, returns converter(value) else just value"""
value = self._resolve_value(key)
if converter:
return converter(value)
return value
if __name__ == "__main__":
ini_parser_singleton = IniParserSingleton().get_environment_as_cmd_arg()
config = ini_parser_singleton.config
...
PORT = config("PORT", int)
|
the-stack_0_22235 | import pdb
import json
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
log_path = '/home/li.baol/GIT/dlrm/inference_data/colocation/64_bkgd/half/'
batch_list = ['64', '128', '256', '512']
median_ls1 = []
mean_ls1 = []
tail_ls1 = []
median_ls2 = []
mean_ls2 = []
tail_ls2 = []
for batch in batch_list:
with open(log_path+batch+'_diff_1.json') as f:
read = json.load(f)
# first inference is extremely slow
# read = read[1:]
median_ls1.append(round(np.median(read),3))
mean_ls1.append((round(np.mean(read),3)))
tail_ls1.append(round(np.percentile(read,95),3))
with open(log_path+batch+'_diff_2.json') as f:
read = json.load(f)
# first inference is extremely slow
# read = read[1:]
median_ls2.append(round(np.median(read),3))
mean_ls2.append((round(np.mean(read),3)))
tail_ls2.append(round(np.percentile(read,95),3))
log_path = '/home/li.baol/GIT/dlrm/inference_data/colocation/64_bkgd/empty/'
median_base1 = []
mean_base1 = []
tail_base1 = []
median_base2 = []
mean_base2 = []
tail_base2 = []
for batch in batch_list:
with open(log_path+batch+'_diff_1.json') as f:
read = json.load(f)
# first inference is extremely slow
# read = read[1:]
median_base1.append(round(np.median(read),3))
mean_base1.append((round(np.mean(read),3)))
tail_base1.append(round(np.percentile(read,95),3))
with open(log_path+batch+'_diff_2.json') as f:
read = json.load(f)
# first inference is extremely slow
# read = read[1:]
median_base2.append(round(np.median(read),3))
mean_base2.append((round(np.mean(read),3)))
tail_base2.append(round(np.percentile(read,95),3))
median_ls1 = np.asarray(median_ls1)
mean_ls1 = np.asarray(mean_ls1)
tail_ls1 = np.asarray(tail_ls1)
median_base1 = np.asarray(median_base1)
mean_base1 = np.asarray(mean_base1)
tail_base1 = np.asarray(tail_base1)
median_ls2 = np.asarray(median_ls2)
mean_ls2 = np.asarray(mean_ls2)
tail_ls2 = np.asarray(tail_ls2)
median_base2 = np.asarray(median_base2)
mean_base2 = np.asarray(mean_base2)
tail_base2 = np.asarray(tail_base2)
median_norm1 = median_ls1 / median_base1
mean_norm1 = mean_ls1 / mean_base1
tail_norm1 = tail_ls1 / tail_base1
median_norm2 = median_ls2 / median_base2
mean_norm2 = mean_ls2 / mean_base2
tail_norm2 = tail_ls2 / tail_base2
############## plotting ################
x = np.arange(len(batch_list)) # the label locations
width = 0.2 # the width of the bars
fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,5))
ax1.bar(x - width, mean_norm1, width, label='mean')
ax1.bar(x, median_norm1, width, label='median')
ax1.bar(x + width, tail_norm1, width, label='95th tail')
ax1.set_ylabel('Normalized value (lower is better)')
ax1.set_ylim(0,5)
ax1.set_xlabel('batch size')
ax1.set_title('Background process using same cpu')
ax1.set_xticks(x)
ax1.set_xticklabels(batch_list)
#ax1.legend()
ax1.grid(axis='y', linestyle=':', color='black')
ax2.bar(x - width, mean_norm2, width, label='mean')
ax2.bar(x, median_norm2, width, label='median')
ax2.bar(x + width, tail_norm2, width, label='95th tail')
ax2.set_ylabel('Normalized value (lower is better)')
ax2.set_ylim(0,5)
ax2.set_xlabel('batch size')
ax2.set_title('Background process using diff cpu')
ax2.set_xticks(x)
ax2.set_xticklabels(batch_list)
ax2.legend()
ax2.grid(axis='y', linestyle=':', color='black')
#plt.show()
plt.savefig('plot2.png')
|
the-stack_0_22236 | # Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""
import copy
import datetime
import uuid
from oslo.config import cfg
from nova import exception
import nova.image.glance
from nova.openstack.common import log as logging
CONF = cfg.CONF
CONF.import_opt('null_kernel', 'nova.compute.api')
LOG = logging.getLogger(__name__)
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'size': '25165824',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '58145823',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'size': '83594576',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '84035174',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '26360814',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '49163826',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '74185822',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
super(_FakeImageService, self).__init__()
# TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def download(self, context, image_id, dst_path=None, data=None):
self.show(context, image_id)
if data:
data.write(self._imagedata.get(image_id, ''))
elif dst_path:
with open(dst_path, 'wb') as data:
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id, include_locations=False):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
:raises: Duplicate if the image already exist.
"""
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.CouldNotUploadImage(image_id=image_id)
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except KeyError:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
def get_valid_image_id():
return _fakeImageService.images.keys()[0]
def stub_out_image_service(stubs):
image_service = FakeImageService()
stubs.Set(nova.image.glance, 'get_remote_image_service',
lambda x, y: (image_service, y))
stubs.Set(nova.image.glance, 'get_default_image_service',
lambda: image_service)
return image_service
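# Illustrative usage sketch (not part of the original module; the ``stubs``
# argument is assumed to be a stubout-style fixture as used by the Nova test
# suite):
#
#     image_service = stub_out_image_service(self.stubs)
#     image = image_service.show(context, get_valid_image_id())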
|
the-stack_0_22237 | """Model-Based BPTT Agent."""
from itertools import chain
import torch.nn.modules.loss as loss
from torch.optim import Adam
from rllib.algorithms.bptt import BPTT
from rllib.policy import NNPolicy
from rllib.value_function import NNEnsembleQFunction
from .model_based_agent import ModelBasedAgent
class BPTTAgent(ModelBasedAgent):
"""Implementation of a Back-Propagation Through Time Agent."""
def __init__(
self,
policy,
critic,
dynamical_model,
reward_model,
criterion=loss.MSELoss,
termination_model=None,
num_steps=1,
num_samples=15,
*args,
**kwargs,
):
algorithm = BPTT(
policy=policy,
critic=critic,
dynamical_model=dynamical_model,
reward_model=reward_model,
termination_model=termination_model,
criterion=criterion(reduction="mean"),
num_steps=num_steps,
num_samples=num_samples,
*args,
**kwargs,
)
super().__init__(
policy_learning_algorithm=algorithm,
dynamical_model=dynamical_model,
reward_model=reward_model,
termination_model=termination_model,
*args,
**kwargs,
)
self.optimizer = type(self.optimizer)(
[
p
for name, p in self.algorithm.named_parameters()
if ("model" not in name and "target" not in name and p.requires_grad)
],
**self.optimizer.defaults,
)
@classmethod
def default(cls, environment, critic=None, policy=None, lr=3e-4, *args, **kwargs):
"""See `AbstractAgent.default'."""
if critic is None:
critic = NNEnsembleQFunction.default(environment)
if policy is None:
policy = NNPolicy.default(environment)
optimizer = Adam(chain(policy.parameters(), critic.parameters()), lr=lr)
return super().default(
environment=environment,
policy=policy,
critic=critic,
optimizer=optimizer,
*args,
**kwargs,
)
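# Illustrative construction sketch (not part of the original module; it assumes
# that ``ModelBasedAgent.default`` wires up the dynamical and reward models,
# which is not verified here):
#
#     policy = NNPolicy.default(environment)
#     critic = NNEnsembleQFunction.default(environment)
#     agent = BPTTAgent.default(environment, policy=policy, critic=critic)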
|
the-stack_0_22240 | #!/usr/bin/env python
import types
def fn(fn):
def ansi(SEQ):
def ANSIseq(ESC='\033', SEQ='{SEQ}', FN='{FN}'):
return '{ESC}[{SEQ}{FN}'.format(ESC=ESC,SEQ=SEQ,FN=FN)
return ANSIseq(SEQ=SEQ,FN=fn)
return ansi
def fn_m():
return fn('m')
def m():
markup= fn_m()
def rgb(t):
def fn(rgb,*a):
if a:
return markup('{t};2;{r};{g};{b}'.format(t=t,r=rgb,g=a[0],b=a[1]))
else:
return markup('{t};2;{r};{g};{b}'.format(t=t,r=rgb[1:2],g=rgb[3:4],b=rgb[5:6]))
return fn
m = types.SimpleNamespace() # 0 - 9
m.reset = markup(0)
m.bold = markup(1)
m.faint = markup(2)
m.italic = markup(3)
m.uline = markup(4)
m.sblink = markup(5)
m.fblink = markup(6)
m.inv = markup(7)
m.hide = markup(8)
m.strike = markup(9)
m.font = types.SimpleNamespace() # 10 - 19 # A - J
m.font.A = markup(10)
m.font.B = markup(11)
m.font.C = markup(12)
m.font.D = markup(13)
m.font.E = markup(14)
m.font.F = markup(15)
m.font.G = markup(16)
m.font.H = markup(17)
m.font.I = markup(18)
m.font.J = markup(19)
m.duline = markup(21)
m.no = types.SimpleNamespace()
m.no.uline = markup(24)
m.no.blink = markup(25)
m.no.strike = markup(29)
m.fg = types.SimpleNamespace()
m.fg.black = markup(30)
m.fg.red = markup(31)
m.fg.green = markup(32)
m.fg.yellow = markup(33)
m.fg.blue = markup(34)
m.fg.magenta = markup(35)
m.fg.cyan = markup(36)
m.fg.white = markup(37)
m.fg.rgb = rgb(38)
m.bg = types.SimpleNamespace()
m.bg.black = markup(40)
m.bg.red = markup(41)
m.bg.green = markup(42)
m.bg.yellow = markup(43)
m.bg.blue = markup(44)
m.bg.magenta = markup(45)
m.bg.cyan = markup(46)
m.bg.white = markup(47)
m.bg.rgb = rgb(48)
m.black = markup(30)
m.red = markup(31)
m.green = markup(32)
m.yellow = markup(33)
m.blue = markup(34)
m.magenta = markup(35)
m.cyan = markup(36)
m.white = markup(37)
m.rgb = rgb(38)
m.bblack = markup(40)
m.bred = markup(41)
m.bgreen = markup(42)
m.byellow = markup(43)
m.bblue = markup(44)
m.bmagenta = markup(45)
m.bcyan = markup(46)
m.bwhite = markup(47)
m.brgb = rgb(48)
m.dct = m.__dict__
return m
def markup(*a):
    # Build the style namespace returned by m() and look the requested names
    # up on it; unknown style names resolve to an empty string.
    ns = m()
    return ''.join([ns.dct.get(arg, '') for arg in a])
def txt(**k) -> str:
"""
examples:
print(str(txt(txt='test',markup=[0,'green','line',0])))
print(str(txt(txt='ikkel',markup=['blue'])))
print(str(txt(txt='ikkel',markup=['strike',0])))
:param k:
:return:
"""
    ns = m()  # style namespace built by m() above
str_styles='{placeholder}'
style_chain=k.get('markup')
if style_chain[0]==0: #=start with reset
style_chain=style_chain[1:]
        str_styles=str_styles.format(placeholder='{reset}{placeholder}'.format(reset=ns.reset,placeholder='{placeholder}'))
if style_chain[-1]==0: #=stop with reset
style_chain=style_chain[0:-1]
        str_styles=str_styles.format(placeholder='{placeholder}{reset}'.format(reset=ns.reset,placeholder='{placeholder}'))
for style in style_chain:
str_styles=str_styles.format(placeholder='{style}{placeholder}'.format(style=markup(style),placeholder='{placeholder}'))
    reset=ns.reset
text=str(k.get('txt'))
return str_styles.format(placeholder=text,reset=reset)
def settxt(*a, **k):
text= k.get('txt') if k.get('txt') else str().join(a) if a else ''
styles=[style for style in k.get('style') if k.get('style')]
    txt_styled=txt(txt=text, markup=styles)
def stdout_text():
return str(txt_styled)
return stdout_text()
def setstyle(**k):
text='{placeholder}'
styles=[style for style in k.get('style') if k.get('style')]
txt_styled=txt(txt=text, markup=styles)
def stdout_text(text):
return str(txt_styled.format(placeholder=text))
return stdout_text
def markup_test(**k):
    ns = m()
    for style in ns.dct.keys():
        print(ns.dct[style], f'{style}', ns.reset)
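if __name__ == '__main__':
    # Minimal usage sketch (assumes an ANSI-capable terminal; the style names
    # are the attribute names built inside m() above).
    warn = setstyle(style=['bold', 'red', 0])
    print(warn('something went wrong'))
    print(txt(txt='hello', markup=[0, 'green', 'uline', 0]))
    markup_test()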
|
the-stack_0_22241 | # -*- coding: utf-8 -*-
"""
Breneman Corresponding Chromaticities Dataset
=============================================
Defines *Breneman (1987)* results for corresponding chromaticities experiments.
See Also
--------
`Corresponding Chromaticities Prediction Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/corresponding/prediction.ipynb>`_
References
----------
- :cite:`Breneman1987b` : Breneman, E. J. (1987). Corresponding
chromaticities for different states of adaptation to complex visual fields.
Journal of the Optical Society of America A, 4(6), 1115.
doi:10.1364/JOSAA.4.001115
"""
from __future__ import division, unicode_literals
import numpy as np
from collections import namedtuple
from colour.utilities.documentation import DocstringDict
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'BrenemanExperimentResult', 'PrimariesChromaticityCoordinates',
'BRENEMAN_EXPERIMENT_1_RESULTS', 'BRENEMAN_EXPERIMENT_2_RESULTS',
'BRENEMAN_EXPERIMENT_3_RESULTS', 'BRENEMAN_EXPERIMENT_4_RESULTS',
'BRENEMAN_EXPERIMENT_5_RESULTS', 'BRENEMAN_EXPERIMENT_6_RESULTS',
'BRENEMAN_EXPERIMENT_7_RESULTS', 'BRENEMAN_EXPERIMENT_10_RESULTS',
'BRENEMAN_EXPERIMENT_8_RESULTS', 'BRENEMAN_EXPERIMENT_9_RESULTS',
'BRENEMAN_EXPERIMENT_11_RESULTS', 'BRENEMAN_EXPERIMENT_12_RESULTS',
'BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES', 'BRENEMAN_EXPERIMENTS'
]
class BrenemanExperimentResult(
namedtuple('BrenemanExperimentResult',
('name', 'uv_t', 'uv_m', 's_uv', 'd_uv_i', 'd_uv_g'))):
"""
Experiment result.
Parameters
----------
name : unicode
Test colour name.
uv_t : numeric
Chromaticity coordinates :math:`uv_t^p` of test colour.
uv_m : array_like, (2,)
Chromaticity coordinates :math:`uv_m^p` of matching colour.
s_uv : array_like, (2,), optional
Interobserver variation (:math:`x10^3`) :math:`\\sigma_uv^p`.
d_uv_i : array_like, (2,), optional
Deviation of individual linear transformation (:math:`x10^3`)
:math:`\\delta_uv_i^p`.
d_uv_g : array_like, (2,), optional
Deviation of individual linear transformation (:math:`x10^3`)
:math:`\\delta_uv_g^p`.
"""
def __new__(cls, name, uv_t, uv_m, s_uv=None, d_uv_i=None, d_uv_g=None):
"""
Returns a new instance of the
:class:`colour.corresponding.datasets.corresponding_chromaticities.\
BrenemanExperimentResult` class.
"""
return super(BrenemanExperimentResult, cls).__new__(
cls, name, np.array(uv_t), np.array(uv_m), np.array(s_uv),
np.array(d_uv_i), np.array(d_uv_g))
class PrimariesChromaticityCoordinates(
namedtuple(
'PrimariesChromaticityCoordinates',
('experiment', 'illuminants', 'Y', 'P_uvp', 'D_uvp', 'T_uvp'))):
"""
Chromaticity coordinates of primaries.
Parameters
----------
experiment : integer
Experiment.
illuminants : array_like, (2,)
Chromaticity coordinates :math:`uv_t^p` of test colour.
Y : numeric
White luminance :math:`Y` in :math:`cd/m^2`.
P_uvp : numeric
Chromaticity coordinates :math:`uv^p` of primary :math:`P`.
D_uvp : numeric
Chromaticity coordinates :math:`uv^p` of primary :math:`D`.
T_uvp : numeric
Chromaticity coordinates :math:`uv^p` of primary :math:`T`.
"""
def __new__(cls,
experiment,
illuminants,
Y,
P_uvp=None,
D_uvp=None,
T_uvp=None):
"""
Returns a new instance of the
:class:`colour.corresponding.datasets.corresponding_chromaticities.\
PrimariesChromaticityCoordinates` class.
"""
return super(PrimariesChromaticityCoordinates, cls).__new__(
cls, experiment, np.array(illuminants), np.array(Y),
np.array(P_uvp), np.array(D_uvp), np.array(T_uvp))
# yapf: disable
BRENEMAN_EXPERIMENT_1_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.259, 0.526), (0.200, 0.475)),
BrenemanExperimentResult(
'Gray',
(0.259, 0.524), (0.199, 0.487), (4, 4), (2, 3), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.459, 0.522), (0.420, 0.509), (19, 4), (-10, -7), (-19, -3)),
BrenemanExperimentResult(
'Skin',
(0.307, 0.526), (0.249, 0.497), (7, 4), (-1, 1), (-6, -1)),
BrenemanExperimentResult(
'Orange',
(0.360, 0.544), (0.302, 0.548), (12, 1), (1, -2), (-7, -6)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.290, 0.537), (11, 4), (3, 0), (-5, -3)),
BrenemanExperimentResult(
'Yellow',
(0.318, 0.550), (0.257, 0.554), (8, 2), (0, 2), (-5, -5)),
BrenemanExperimentResult(
'Foliage',
(0.258, 0.542), (0.192, 0.529), (4, 6), (3, 2), (3, -6)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.129, 0.521), (7, 5), (3, 2), (9, -7)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.133, 0.469), (4, 6), (-3, -2), (2, -5)),
BrenemanExperimentResult(
'Blue',
(0.186, 0.445), (0.158, 0.340), (13, 33), (2, 7), (1, 13)),
BrenemanExperimentResult(
'Sky',
(0.226, 0.491), (0.178, 0.426), (3, 14), (1, -3), (0, -1)),
BrenemanExperimentResult(
'Purple',
(0.278, 0.456), (0.231, 0.365), (4, 25), (0, 2), (-5, 7)))
# yapf: enable
"""
*Breneman (1987)* experiment 1 results.
BRENEMAN_EXPERIMENT_1_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 1500 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_2_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.222, 0.521), (0.204, 0.479)),
BrenemanExperimentResult(
'Gray',
(0.227, 0.517), (0.207, 0.486), (2, 5), (-1, 0), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.464, 0.520), (0.449, 0.511), (22, 3), (-8, -8), (-7, -2)),
BrenemanExperimentResult(
'Skin',
(0.286, 0.526), (0.263, 0.505), (7, 2), (0, -1), (0, -1)),
BrenemanExperimentResult(
'Orange',
(0.348, 0.546), (0.322, 0.545), (13, 3), (3, -1), (3, -2)),
BrenemanExperimentResult(
'Brown',
(0.340, 0.543), (0.316, 0.537), (11, 3), (1, 1), (0, 0)),
BrenemanExperimentResult(
'Yellow',
(0.288, 0.554), (0.265, 0.553), (5, 2), (-2, 2), (-1, -2)),
BrenemanExperimentResult(
'Foliage',
(0.244, 0.547), (0.221, 0.538), (4, 3), (-2, 1), (0, -3)),
BrenemanExperimentResult(
'Green',
(0.156, 0.548), (0.135, 0.532), (4, 3), (-1, 3), (3, -4)),
BrenemanExperimentResult(
'Blue-green',
(0.159, 0.511), (0.145, 0.472), (9, 7), (-1, 2), (2, 1)),
BrenemanExperimentResult(
'Blue',
(0.160, 0.406), (0.163, 0.331), (23, 31), (2, -3), (-1, 3)),
BrenemanExperimentResult(
'Sky',
(0.190, 0.481), (0.176, 0.431), (5, 24), (2, -2), (2, 0)),
BrenemanExperimentResult(
'Purple',
(0.258, 0.431), (0.244, 0.349), (4, 19), (-3, 13), (-4, 19)))
# yapf: enable
"""
*Breneman (1987)* experiment 2 results.
BRENEMAN_EXPERIMENT_2_RESULTS : tuple
Notes
-----
- Illuminants : *Projector*, *D55*
- White Luminance : 1500 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_3_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.223, 0.521), (0.206, 0.478)),
BrenemanExperimentResult(
'Gray',
(0.228, 0.517), (0.211, 0.494), (1, 3), (0, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.462, 0.519), (0.448, 0.505), (11, 4), (-3, 6), (-4, 6)),
BrenemanExperimentResult(
'Skin',
(0.285, 0.524), (0.267, 0.507), (6, 3), (-1, 1), (-2, 1)),
BrenemanExperimentResult(
'Orange',
(0.346, 0.546), (0.325, 0.541), (11, 3), (1, -2), (2, 3)),
BrenemanExperimentResult(
'Brown',
(0.338, 0.543), (0.321, 0.532), (9, 6), (-3, 2), (-3, 7)),
BrenemanExperimentResult(
'Yellow',
(0.287, 0.554), (0.267, 0.548), (4, 5), (1, -2), (0, 5)),
BrenemanExperimentResult(
'Foliage',
(0.244, 0.547), (0.226, 0.531), (3, 6), (-1, 3), (-2, 8)),
BrenemanExperimentResult(
'Green',
(0.157, 0.548), (0.141, 0.528), (9, 6), (2, 2), (0, 6)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.510), (0.151, 0.486), (8, 5), (-2, -1), (-2, -5)),
BrenemanExperimentResult(
'Blue',
(0.162, 0.407), (0.158, 0.375), (6, 7), (1, -6), (4, -23)),
BrenemanExperimentResult(
'Sky',
(0.191, 0.482), (0.179, 0.452), (4, 5), (0, 1), (1, -7)),
BrenemanExperimentResult(
'Purple',
(0.258, 0.432), (0.238, 0.396), (4, 8), (5, 3), (4, -11)))
# yapf: enable
"""
*Breneman (1987)* experiment 3 results.
BRENEMAN_EXPERIMENT_3_RESULTS : tuple
Notes
-----
- Illuminants : *Projector*, *D55*
- White Luminance : 75 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_4_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.258, 0.523), (0.199, 0.467)),
BrenemanExperimentResult(
'Gray',
(0.257, 0.524), (0.205, 0.495), (2, 2), (0, 4), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.460, 0.521), (0.416, 0.501), (11, 6), (-6, 4), (-6, 9)),
BrenemanExperimentResult(
'Skin',
(0.308, 0.526), (0.253, 0.503), (7, 3), (-1, 1), (-1, 0)),
BrenemanExperimentResult(
'Orange',
(0.360, 0.544), (0.303, 0.541), (14, 5), (1, -4), (1, 2)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.296, 0.527), (11, 7), (-2, 4), (-3, 9)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.260, 0.547), (9, 5), (1, -3), (0, 3)),
BrenemanExperimentResult(
'Foliage',
(0.258, 0.543), (0.203, 0.520), (4, 6), (0, 8), (0, 9)),
BrenemanExperimentResult(
'Green',
(0.193, 0.543), (0.142, 0.516), (6, 9), (3, 8), (2, 6)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.140, 0.484), (9, 5), (-2, -1), (-1, -9)),
BrenemanExperimentResult(
'Blue',
(0.185, 0.445), (0.151, 0.394), (8, 10), (2, -8), (8, -24)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.490), (0.180, 0.448), (4, 8), (1, -1), (3, -11)),
BrenemanExperimentResult(
'Purple',
(0.278, 0.455), (0.229, 0.388), (6, 14), (1, 12), (3, 0)))
# yapf: enable
"""
*Breneman (1987)* experiment 4 results.
BRENEMAN_EXPERIMENT_4_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 75 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_5_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.028, 0.480), (0.212, 0.491), (2, 2)),
BrenemanExperimentResult(
'Red',
(0.449, 0.512), (0.408, 0.514), (11, 5)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.262, 0.511), (4, 2)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.548), (0.303, 0.545), (4, 3)),
BrenemanExperimentResult(
'Brown',
(0.322, 0.541), (0.303, 0.538), (4, 4)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.555), (0.264, 0.550), (3, 2)),
BrenemanExperimentResult(
'Foliage',
(0.224, 0.538), (0.227, 0.535), (3, 3)),
BrenemanExperimentResult(
'Green',
(0.134, 0.531), (0.159, 0.530), (9, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.474), (0.165, 0.490), (8, 3)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.329), (0.173, 0.378), (7, 12)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.189, 0.462), (5, 4)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.364), (0.239, 0.401), (4, 16)))
# yapf: enable
"""
*Breneman (1987)* experiment 5 results.
BRENEMAN_EXPERIMENT_5_RESULTS : tuple
Notes
-----
- Effective White Levels : 130 and 2120 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_6_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.257, 0.525), (0.201, 0.482)),
BrenemanExperimentResult(
'Gray',
(0.267, 0.521), (0.207, 0.485), (5, 3), (-1, 0), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.457, 0.521), (0.398, 0.516), (9, 4), (-2, -5), (1, -9)),
BrenemanExperimentResult(
'Skin',
(0.316, 0.526), (0.253, 0.503), (5, 3), (-3, -2), (-1, -3)),
BrenemanExperimentResult(
'Orange',
(0.358, 0.545), (0.287, 0.550), (7, 3), (3, 0), (7, -6)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.282, 0.540), (6, 3), (-1, 0), (2, -5)),
BrenemanExperimentResult(
'Yellow',
(0.318, 0.551), (0.249, 0.556), (7, 2), (-1, 1), (2, -5)),
BrenemanExperimentResult(
'Foliage',
(0.256, 0.547), (0.188, 0.537), (5, 4), (3, 1), (4, -2)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.133, 0.520), (13, 3), (5, -2), (5, -4)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.137, 0.466), (12, 10), (0, 0), (-2, 2)),
BrenemanExperimentResult(
'Blue',
(0.186, 0.445), (0.156, 0.353), (12, 45), (6, 1), (2, 6)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.492), (0.178, 0.428), (6, 14), (1, -1), (-1, 3)),
BrenemanExperimentResult(
'Purple',
(0.276, 0.456), (0.227, 0.369), (6, 27), (-2, 4), (-3, 9)))
# yapf: enable
"""
*Breneman (1987)* experiment 6 results.
BRENEMAN_EXPERIMENT_6_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D55*
- White Luminance : 11100 :math:`cd/m^2`
- Observers Count : 8
"""
# yapf: disable
BRENEMAN_EXPERIMENT_7_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.208, 0.481), (0.211, 0.486), (2, 3)),
BrenemanExperimentResult(
'Red',
(0.448, 0.512), (0.409, 0.516), (9, 2)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.256, 0.506), (4, 3)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.549), (0.305, 0.547), (5, 4)),
BrenemanExperimentResult(
'Brown',
(0.322, 0.541), (0.301, 0.539), (5, 2)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.555), (0.257, 0.552), (3, 4)),
BrenemanExperimentResult(
'Foliage',
(0.225, 0.538), (0.222, 0.536), (3, 2)),
BrenemanExperimentResult(
'Green',
(0.135, 0.531), (0.153, 0.529), (8, 2)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.475), (0.160, 0.484), (3, 5)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.331), (0.171, 0.379), (4, 11)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.187, 0.452), (4, 7)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.365), (0.240, 0.398), (4, 10)))
# yapf: enable
"""
*Breneman (1987)* experiment 7 results.
BRENEMAN_EXPERIMENT_7_RESULTS : tuple
Notes
-----
- Effective White Levels : 850 and 11100 :math:`cd/m^2`
- Observers Count : 8
"""
# yapf: disable
BRENEMAN_EXPERIMENT_8_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.258, 0.524), (0.195, 0.469)),
BrenemanExperimentResult(
'Gray',
(0.257, 0.525), (0.200, 0.494), (2, 3), (1, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.458, 0.522), (0.410, 0.508), (12, 4), (-3, 5), (-7, 2)),
BrenemanExperimentResult(
'Skin',
(0.308, 0.526), (0.249, 0.502), (6, 2), (-1, 1), (-3, -1)),
BrenemanExperimentResult(
'Orange',
(0.359, 0.545), (0.299, 0.545), (12, 4), (0, -2), (-3, 0)),
BrenemanExperimentResult(
'Brown',
(0.349, 0.540), (0.289, 0.532), (10, 4), (0, 1), (-2, 2)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.256, 0.549), (9, 5), (0, -3), (-3, 1)),
BrenemanExperimentResult(
'Foliage',
(0.260, 0.545), (0.198, 0.529), (5, 5), (3, 1), (0, 3)),
BrenemanExperimentResult(
'Green',
(0.193, 0.543), (0.137, 0.520), (9, 5), (3, 0), (2, 1)),
BrenemanExperimentResult(
'Blue-green',
(0.182, 0.516), (0.139, 0.477), (9, 4), (-3, 0), (-2, -4)),
BrenemanExperimentResult(
'Blue',
(0.184, 0.444), (0.150, 0.387), (5, 11), (3, -10), (6, -22)),
BrenemanExperimentResult(
'Sky',
(0.224, 0.489), (0.177, 0.439), (5, 6), (1, 1), (1, -7)),
BrenemanExperimentResult(
'Purple',
(0.277, 0.454), (0.226, 0.389), (4, 10), (1, 4), (1, -8)))
# yapf: enable
"""
*Breneman (1987)* experiment 8 results.
BRENEMAN_EXPERIMENT_8_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 350 :math:`cd/m^2`
- Observers Count : 8
"""
# yapf: disable
BRENEMAN_EXPERIMENT_9_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.254, 0.525), (0.195, 0.465)),
BrenemanExperimentResult(
'Gray',
(0.256, 0.524), (0.207, 0.496), (4, 6), (3, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.459, 0.521), (0.415, 0.489), (20, 14), (2, 12), (-2, 21)),
BrenemanExperimentResult(
'Skin',
(0.307, 0.525), (0.261, 0.500), (7, 7), (0, 1), (-5, 2)),
BrenemanExperimentResult(
'Orange',
(0.359, 0.545), (0.313, 0.532), (7, 5), (-2, -3), (-6, 13)),
BrenemanExperimentResult(
'Brown',
(0.349, 0.540), (0.302, 0.510), (11, 15), (0, 12), (-5, 24)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.268, 0.538), (7, 10), (1, -4), (-4, 12)),
BrenemanExperimentResult(
'Foliage',
(0.259, 0.544), (0.212, 0.510), (10, 11), (0, 14), (-4, 22)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.150, 0.506), (6, 10), (-1, 13), (-2, 15)),
BrenemanExperimentResult(
'Blue-green',
(0.181, 0.517), (0.144, 0.487), (9, 6), (-3, 0), (-1, -9)),
BrenemanExperimentResult(
'Blue',
(0.184, 0.444), (0.155, 0.407), (4, 11), (-2, -6), (6, -36)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.490), (0.183, 0.458), (5, 8), (1, -3), (2, -19)),
BrenemanExperimentResult(
'Purple',
(0.276, 0.454), (0.233, 0.404), (7, 12), (2, 9), (0, -16)),
BrenemanExperimentResult(
'(Gray)h',
(0.256, 0.525), (0.208, 0.498)),
BrenemanExperimentResult(
'(Red)h',
(0.456, 0.521), (0.416, 0.501), (15, 7), None, (-6, -9)),
BrenemanExperimentResult(
'(Brown)h',
(0.349, 0.539), (0.306, 0.526), (11, 8), None, (-8, 7)),
BrenemanExperimentResult(
'(Foliage)h',
(0.260, 0.545), (0.213, 0.528), (7, 9), None, (-4, 5)),
BrenemanExperimentResult(
'(Green)h',
(0.193, 0.543), (0.149, 0.525), (10, 8), None, (-1, -1)),
BrenemanExperimentResult(
'(Blue)h',
(0.184, 0.444), (0.156, 0.419), (7, 8), None, (4, -45)),
BrenemanExperimentResult(
'(Purple)h',
(0.277, 0.456), (0.236, 0.422), (6, 11), None, (-2, -29)))
# yapf: enable
"""
*Breneman (1987)* experiment 9 results.
BRENEMAN_EXPERIMENT_9_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 15 :math:`cd/m^2`
- Observers Count : 8
- The colors indicated by (.)h are the darker colors presented at the higher
luminescence level of the lighter colors.
"""
# yapf: disable
BRENEMAN_EXPERIMENT_10_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.208, 0.482), (0.213, 0.494), (3, 3)),
BrenemanExperimentResult(
'Red',
(0.447, 0.512), (0.411, 0.506), (15, 7)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.269, 0.511), (4, 3)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.549), (0.315, 0.536), (7, 8)),
BrenemanExperimentResult(
'Brown',
(0.323, 0.542), (0.310, 0.526), (6, 8)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.556), (0.268, 0.541), (3, 6)),
BrenemanExperimentResult(
'Foliage',
(0.226, 0.538), (0.230, 0.525), (4, 8)),
BrenemanExperimentResult(
'Green',
(0.135, 0.531), (0.158, 0.524), (6, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.476), (0.161, 0.491), (4, 4)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.330), (0.171, 0.377), (6, 19)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.439), (0.187, 0.465), (5, 5)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.366), (0.240, 0.402), (3, 12)))
# yapf: enable
"""
*Breneman (1987)* experiment 10 results.
BRENEMAN_EXPERIMENT_10_RESULTS : tuple
Notes
-----
- Effective White Levels : 15 and 270 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_11_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.208, 0.482), (0.174, 0.520)),
BrenemanExperimentResult(
'Gray',
(0.209, 0.483), (0.176, 0.513), (3, 4), (2, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.450, 0.512), (0.419, 0.524), (10, 2), (3, 2), (8, -1)),
BrenemanExperimentResult(
'Skin',
(0.268, 0.506), (0.240, 0.528), (6, 2), (-4, 0), (-3, 0)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.547), (0.293, 0.553), (6, 2), (3, -1), (5, 1)),
BrenemanExperimentResult(
'Brown',
(0.323, 0.542), (0.290, 0.552), (5, 2), (-1, -3), (0, -1)),
BrenemanExperimentResult(
'Yellow',
(0.266, 0.549), (0.236, 0.557), (4, 2), (-3, -2), (-4, 2)),
BrenemanExperimentResult(
'Foliage',
(0.227, 0.538), (0.194, 0.552), (4, 2), (2, -3), (-1, 1)),
BrenemanExperimentResult(
'Green',
(0.146, 0.534), (0.118, 0.551), (8, 3), (4, -2), (-6, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.475), (0.130, 0.513), (9, 4), (1, -1), (-4, -3)),
BrenemanExperimentResult(
'Blue',
(0.177, 0.340), (0.133, 0.427), (6, 14), (4, -17), (11, -29)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.146, 0.482), (6, 10), (1, 4), (0, -1)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.366), (0.216, 0.419), (4, 13), (-3, 8), (4, -2)))
# yapf: enable
"""
*Breneman (1987)* experiment 11 results.
BRENEMAN_EXPERIMENT_11_RESULTS : tuple
Notes
-----
- Illuminants : *green*, *D65*
- White Luminance : 1560 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_12_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.205, 0.482), (0.174, 0.519)),
BrenemanExperimentResult(
'Gray',
(0.208, 0.482), (0.181, 0.507), (4, 3), (0, 1), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.451, 0.512), (0.422, 0.526), (20, 3), (0, -5), (10, -5)),
BrenemanExperimentResult(
'Skin',
(0.268, 0.506), (0.244, 0.525), (5, 2), (-6, 0), (-2, -1)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.548), (0.292, 0.553), (10, 2), (5, 2), (11, 1)),
BrenemanExperimentResult(
'Brown',
(0.324, 0.542), (0.286, 0.554), (8, 1), (5, -3), (10, -4)),
BrenemanExperimentResult(
'Yellow',
(0.266, 0.548), (0.238, 0.558), (6, 2), (-3, -1), (-1, -2)),
BrenemanExperimentResult(
'Foliage',
(0.227, 0.538), (0.196, 0.555), (6, 3), (3, -4), (2, -5)),
BrenemanExperimentResult(
'Green',
(0.145, 0.534), (0.124, 0.551), (8, 6), (1, -1), (-8, -1)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.474), (0.135, 0.505), (5, 2), (1, -1), (-4, -3)),
BrenemanExperimentResult(
'Blue',
(0.178, 0.339), (0.149, 0.392), (4, 20), (-1, -5), (3, -7)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.440), (0.150, 0.473), (4, 8), (3, 2), (2, 0)),
BrenemanExperimentResult(
'Purple',
(0.246, 0.366), (0.222, 0.404), (5, 15), (-4, 2), (4, 2)))
# yapf: enable
"""
*Breneman (1987)* experiment 12 results.
BRENEMAN_EXPERIMENT_12_RESULTS : tuple
Notes
-----
- Illuminants : *D55*, *green*
- White Luminance : 75 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES = DocstringDict({
1: PrimariesChromaticityCoordinates(
1, ('A', 'D65'), 1500,
(0.671, 0.519), (-0.586, 0.627), (0.253, 0.016)),
2: PrimariesChromaticityCoordinates(
2, ('Projector', 'D55'), 1500,
(0.675, 0.523), (-0.466, 0.617), (0.255, 0.018)),
3: PrimariesChromaticityCoordinates(
3, ('Projector', 'D55'), 75,
(0.664, 0.510), (-0.256, 0.729), (0.244, 0.003)),
4: PrimariesChromaticityCoordinates(
4, ('A', 'D65'), 75,
(0.674, 0.524), (-0.172, 0.628), (0.218, -0.026)),
6: PrimariesChromaticityCoordinates(
6, ('A', 'D55'), 11100,
(0.659, 0.506), (-0.141, 0.615), (0.249, 0.009)),
8: PrimariesChromaticityCoordinates(
8, ('A', 'D65'), 350,
(0.659, 0.505), (-0.246, 0.672), (0.235, -0.006)),
9: PrimariesChromaticityCoordinates(
9, ('A', 'D65'), 15,
(0.693, 0.546), (-0.446, 0.773), (0.221, -0.023)),
11: PrimariesChromaticityCoordinates(
11, ('D55', 'green'), 1560,
(0.680, 0.529), (0.018, 0.576), (0.307, 0.080)),
12: PrimariesChromaticityCoordinates(
12, ('D55', 'green'), 75,
(0.661, 0.505), (0.039, 0.598), (0.345, 0.127))})
# yapf: enable
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES.__doc__ = """
*Breneman (1987)* experiments primaries chromaticities.
References
----------
:cite:`Breneman1987b`
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES : dict
"""
BRENEMAN_EXPERIMENTS = DocstringDict({
1: BRENEMAN_EXPERIMENT_1_RESULTS,
2: BRENEMAN_EXPERIMENT_2_RESULTS,
3: BRENEMAN_EXPERIMENT_3_RESULTS,
4: BRENEMAN_EXPERIMENT_4_RESULTS,
5: BRENEMAN_EXPERIMENT_5_RESULTS,
6: BRENEMAN_EXPERIMENT_6_RESULTS,
7: BRENEMAN_EXPERIMENT_7_RESULTS,
8: BRENEMAN_EXPERIMENT_8_RESULTS,
9: BRENEMAN_EXPERIMENT_9_RESULTS,
10: BRENEMAN_EXPERIMENT_10_RESULTS,
11: BRENEMAN_EXPERIMENT_11_RESULTS,
12: BRENEMAN_EXPERIMENT_12_RESULTS
})
BRENEMAN_EXPERIMENTS.__doc__ = """
*Breneman (1987)* experiments.
References
----------
:cite:`Breneman1987b`
BRENEMAN_EXPERIMENTS : dict
"""
|
the-stack_0_22242 | from utils import *
from generator import *
from discriminator import *
## Hyperparameters
criterion = nn.BCEWithLogitsLoss()
n_epochs = 200
z_dim = 64
display_step = 500
batch_size = 128
lr = 0.00001
device = 'cpu'
# Load MNIST with DataLoader
dataloader = DataLoader(
MNIST('.',
download=True,
transform=transforms.ToTensor()),
batch_size=batch_size,
shuffle=True
)
gen = Generator(z_dim).to(device)
gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)
disc = Discriminator().to(device)
disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
# States
cur_step = 0
mean_generator_loss = 0
mean_discriminator_loss = 0
test_generator = True
gen_loss = False
error = False
for epoch in range(n_epochs):
# Dataloader returns the batches
for real, _ in tqdm(dataloader):
cur_batch_size = len(real)
# Flatten batch of real images from dataset
real = real.view(cur_batch_size, -1).to(device)
# Zero out gradients before backprop
disc_opt.zero_grad()
# Discriminator loss
disc_loss = get_disc_loss(gen, disc, criterion, real, cur_batch_size, z_dim, device)
# Update gradients
disc_loss.backward(retain_graph=True)
# Update optimizer
disc_opt.step()
# Zero out gradients before backprop
gen_opt.zero_grad()
# Generator loss
gen_loss = get_gen_loss(gen, disc, criterion, cur_batch_size, z_dim, device)
# Update gradients
gen_loss.backward()
# Update optimizer
gen_opt.step()
        # Keep track of average generator and discriminator losses
        mean_generator_loss += gen_loss.item() / display_step
        mean_discriminator_loss += disc_loss.item() / display_step
# Monitoring progress
if cur_step % display_step == 0 and cur_step > 0:
print(f"Epoch {epoch}, step {cur_step} -> Generator loss: {mean_generator_loss}, discriminator_loss: {mean_discriminator_loss}")
fake_noise = get_noise(cur_batch_size, z_dim, device=device)
fake = gen(fake_noise)
show_tensor_images(fake)
show_tensor_images(real)
mean_generator_loss = 0
mean_discriminator_loss = 0
cur_step += 1
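# Optional post-training sketch (assumes ``torch`` itself is re-exported by the
# star imports above, as the torch.optim calls suggest): draw one final batch
# of samples from the trained generator for a visual check.
fake_noise = get_noise(batch_size, z_dim, device=device)
with torch.no_grad():
    show_tensor_images(gen(fake_noise))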
|
the-stack_0_22244 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, [email protected]
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'autofilter04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.txt_filename = test_dir + 'xlsx_files/' + 'autofilter_data.txt'
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with an autofilter.
This test corresponds to the following examples/autofilter.py example:
Example 4. Autofilter with filter conditions in two columns.
"""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet = workbook.add_worksheet()
# Set the autofilter.
worksheet.autofilter('A1:D51')
# Add filter criteria.
worksheet.filter_column('A', 'x == East')
worksheet.filter_column('C', 'x > 3000 and x < 8000')
# Open a text file with autofilter example data.
textfile = open(self.txt_filename)
# Read the headers from the first line of the input file.
headers = textfile.readline().strip("\n").split()
# Write out the headers.
worksheet.write_row('A1', headers)
# Start writing data after the headers.
row = 1
# Read the rest of the text file and write it to the worksheet.
for line in textfile:
# Split the input data based on whitespace.
data = line.strip("\n").split()
# Convert the number data from the text file.
for i, item in enumerate(data):
try:
data[i] = float(item)
except ValueError:
pass
# Get some of the field data.
region = data[0]
volume = int(data[2])
# Check for rows that match the filter.
if region == 'East' and volume > 3000 and volume < 8000:
# Row matches the filter, no further action required.
pass
else:
# We need to hide rows that don't match the filter.
worksheet.set_row(row, options={'hidden': True})
# Write out the row data.
worksheet.write_row(row, 0, data)
# Move on to the next worksheet row.
row += 1
textfile.close()
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_22245 | """Main entry point for running discord-hero
discord-hero: Discord Application Framework for humans
:copyright: (c) 2019-2020 monospacedmagic et al.
:license: Apache-2.0 OR MIT
"""
import time
import discord
from discord import RawReactionActionEvent
import hero
from hero import checks, models, strings
from hero.errors import InactiveUser, UserDoesNotExist
class Essentials(hero.Cog):
core: hero.Core
@hero.command()
@checks.is_owner()
async def set_prefixes(self, ctx, *prefixes: str):
await self.core.set_prefixes(prefixes)
await ctx.send("Done.")
@hero.command()
@checks.is_owner()
async def set_description(self, ctx, *, description: str):
await self.core.set_description(description)
await ctx.send("Done.")
@hero.command()
@checks.is_owner()
async def set_status(self, ctx, *, status: str):
await self.core.set_status(status)
await ctx.send("Done.")
@hero.command()
async def ping(self, ctx):
"""Calculates the ping time."""
t_1 = time.perf_counter()
await ctx.trigger_typing()
t_2 = time.perf_counter()
time_delta = round((t_2-t_1)*1000)
await ctx.send("Pong.\nTime: {}ms".format(time_delta))
# GDPR
@hero.command()
async def register(self, ctx: hero.Context):
"""Registers you in my system."""
if ctx.guild:
user = ctx.author._user
else:
user = ctx.author
try:
user = await self.db.wrap_user(user)
print(user, user.is_active)
await ctx.send("You are already registered in my system!")
return
except UserDoesNotExist:
user = models.User(user.id)
await user.async_save()
except InactiveUser:
user = models.User(user.id)
user.is_active = True
await user.async_save()
await ctx.send("You are now registered. Thank you for using my commands and functions!\n\n"
"If you ever change your mind, just use `{ctx.prefix}unregister` to "
"remove yourself from my system, which will irreversibly and immediately "
"delete all data related to your Discord ID from my system.")
# GDPR
@hero.command()
async def unregister(self, ctx):
"""Removes you from my system."""
if ctx.guild:
user = ctx.author._user
else:
user = ctx.author
try:
user = await self.db.wrap_user(user)
print(user, user.is_active)
await user.async_delete()
except UserDoesNotExist:
user = models.User(user.id)
user.is_active = False
await user.async_save()
except InactiveUser:
await ctx.send("You are already unregistered!")
return
await ctx.send(f"You have been successfully removed from my system! You will have to use "
f"`{ctx.prefix}register` if you change your mind to enable storing data "
f"related to your Discord user ID again.")
@hero.listener()
async def on_raw_reaction_add(self, payload: RawReactionActionEvent):
user_id = payload.user_id
emoji: discord.PartialEmoji = payload.emoji
message_id = payload.message_id
guild_id = payload.guild_id
channel_id = payload.channel_id
# obligatory checks for efficiency
if user_id == self.core.user.id or emoji.is_custom_emoji():
return
# check if message is user's register_message
user = models.User(id=user_id)
await user.async_load()
register_message = user.register_message
if register_message is None or register_message.id != message_id or emoji.name != self.core.YES_EMOJI:
return
# register the user
user.is_active = True
        await user.register_message.async_delete()
await user.async_save()
channel = None
if guild_id is not None:
guild = await self.core.get_guild(guild_id)
if guild is None:
                guild = await self.core.fetch_guild(guild_id)
if guild is not None:
channel = guild.get_channel(channel_id)
message_text = "{user}, you have now been registered! Remember, you can use " \
"`{prefix}unregister` to immediately delete all data related to " \
"your Discord ID from my system."
try:
await user.send(message_text.format(user=user.name, prefix=self.core.default_prefix))
except discord.Forbidden:
if channel is not None:
await channel.send(message_text.format(user=user.mention, prefix=self.core.default_prefix))
|
the-stack_0_22247 | from hir.Specifier import Specifier
class PointerSpecifierException(Exception):
def __init__(self, value):
self.value = value
class PointerSpecifier(Specifier):
'Pointer Specifier'
__slots__ = ('qualifiers')
def __init__(self, qual=None):
self.qualifiers = []
if qual is not None:
for k in qual:
self.qualifiers.append(k)
def __str__(self):
retval = '*'
jj = ' '.join([str(k) for k in self.qualifiers])
if jj != '':
retval += ' ' + jj
return retval
__repr__ = __str__
def items(self):
"""Returns the 'dimensions' list of ints"""
items = {}
items['qualifiers'] = self.qualifiers
for k in PointerSpecifier.__bases__:
if hasattr(k, 'items'):
supitems = k.items(self)
for k, v in list(supitems.items()):
items[k] = v
return dict(items)
def __getstate__(self):
"""Returns the 'qualifier' list of ints. Calls items directly"""
return dict(self.items())
def __setstate__(self, statedict):
"""Blindly sets the state of this object, using a statedict"""
for k, v in list(statedict.items()):
setattr(self, k, v)
def PointerSpecifierTest():
from hir.Keyword import CONST, VOLATILE, RESTRICT
k = PointerSpecifier([CONST])
print(k)
k = PointerSpecifier([CONST, VOLATILE])
print(k)
k = PointerSpecifier([CONST, RESTRICT])
print(k)
k = PointerSpecifier([CONST, RESTRICT, VOLATILE])
print(k)
return k
if __name__ == '__main__':
PointerSpecifierTest()
|
the-stack_0_22248 | #!/usr/bin/python
"""
Script to check language links for general pages.
Uses existing translations of a page, plus hints from the command line, to
download the equivalent pages from other languages. All of such pages are
downloaded as well and checked for interwiki links recursively until there are
no more links that are encountered. A rationalization process then selects the
right interwiki links, and if this is unambiguous, the interwiki links in the
original page will be automatically updated and the modified page uploaded.
These command-line arguments can be used to specify which pages to work on:
-days: Like -years, but runs through all date pages. Stops at
Dec 31. If the argument is given in the form -days:X,
it will start at month no. X through Dec 31. If the
argument is simply given as -days, it will run from
Jan 1 through Dec 31. E.g. for -days:9 it will run
from Sep 1 through Dec 31.
-years: run on all year pages in numerical order. Stop at year 2050.
If the argument is given in the form -years:XYZ, it
will run from [[XYZ]] through [[2050]]. If XYZ is a
negative value, it is interpreted as a year BC. If the
argument is simply given as -years, it will run from 1
through 2050.
This implies -noredirect.
-new: Work on the 100 newest pages. If given as -new:x, will work
on the x newest pages.
When multiple -namespace parameters are given, x pages are
inspected, and only the ones in the selected name spaces are
processed. Use -namespace:all for all namespaces. Without
-namespace, only article pages are processed.
This implies -noredirect.
-restore: restore a set of "dumped" pages the bot was working on
when it terminated. The dump file will be subsequently
removed.
-restore:all restore a set of "dumped" pages of all dumpfiles to a given
family remaining in the "interwiki-dumps" directory. All
these dump files will be subsequently removed. If restoring
process interrupts again, it saves all unprocessed pages in
one new dump file of the given site.
-continue: like restore, but after having gone through the dumped
pages, continue alphabetically starting at the last of the
dumped pages. The dump file will be subsequently removed.
¶ms;
Additionally, these arguments can be used to restrict the bot to certain pages:
-namespace:n Number or name of namespace to process. The parameter can be
used multiple times. It works in combination with all other
parameters, except for the -start parameter. If you e.g.
want to iterate over all categories starting at M, use
-start:Category:M.
-number: used as -number:#, specifies that the bot should process
that amount of pages and then stop. This is only useful in
combination with -start. The default is not to stop.
-until: used as -until:title, specifies that the bot should
process pages in wiki default sort order up to, and
including, "title" and then stop. This is only useful in
combination with -start. The default is not to stop.
Note: do not specify a namespace, even if -start has one.
-bracket only work on pages that have (in the home language)
parenthesis in their title. All other pages are skipped.
(note: without ending colon)
-skipfile: used as -skipfile:filename, skip all links mentioned in
the given file. This does not work with -number!
-skipauto use to skip all pages that can be translated automatically,
like dates, centuries, months, etc.
(note: without ending colon)
-lack: used as -lack:xx with xx a language code: only work on pages
without links to language xx. You can also add a number nn
like -lack:xx:nn, so that the bot only works on pages with
at least nn interwiki links (the default value for nn is 1).
These arguments control miscellaneous bot behaviour:
-quiet Use this option to get less output
(note: without ending colon)
-async Put page on queue to be saved to wiki asynchronously. This
enables loading pages during saving throtteling and gives a
better performance.
NOTE: For post-processing it always assumes that saving the
the pages was successful.
(note: without ending colon)
-summary: Set an additional action summary message for the edit. This
could be used for further explainings of the bot action.
This will only be used in non-autonomous mode.
-hintsonly The bot does not ask for a page to work on, even if none of
the above page sources was specified. This will make the
first existing page of -hint or -hinfile slip in as start
page, determining properties like namespace, disambiguation
state, and so on. When no existing page is found in the
hints, the bot does nothing.
Hitting return without input on the "Which page to check:"
prompt has the same effect as using -hintsonly.
Options like -back, -same or -wiktionary are in effect only
after a page has been found to work on.
(note: without ending colon)
These arguments are useful to provide hints to the bot:
-hint: used as -hint:de:Anweisung to give the bot a hint
where to start looking for translations. If no text
is given after the second ':', the name of the page
itself is used as the title for the hint, unless the
-hintnobracket command line option (see there) is also
selected.
There are some special hints, trying a number of languages
at once:
* all: All languages with at least ca. 100 articles
* 10: The 10 largest languages (sites with most
articles). Analogous for any other natural
number
* arab: All languages using the Arabic alphabet
* cyril: All languages that use the Cyrillic alphabet
* chinese: All Chinese dialects
* latin: All languages using the Latin script
* scand: All Scandinavian languages
Names of families that forward their interlanguage links
to the wiki family being worked upon can be used, they are:
* commons: Interlanguage links of Wikimedia Commons
* incubator: Links in pages on the Wikimedia Incubator
* meta: Interlanguage links of named pages on Meta
* species: Interlanguage links of the Wikispecies wiki
* strategy: Links in pages on Wikimedia Strategy wiki
* test: Take interwiki links from Test Wikipedia
* wikimania: Interwiki links of Wikimania
Languages, groups and families having the same page title
can be combined, as -hint:5,scand,sr,pt,commons:New_York
-hintfile: similar to -hint, except that hints are taken from the given
file, enclosed in [[]] each, instead of the command line.
-askhints: for each page one or more hints are asked. See hint: above
for the format, one can for example give "en:something" or
"20:" as hint.
-repository Include data repository
-same looks over all 'serious' languages for the same title.
-same is equivalent to -hint:all:
(note: without ending colon)
-wiktionary: similar to -same, but will ONLY accept names that are
identical to the original. Also, if the title is not
capitalized, it will only go through other wikis without
automatic capitalization.
-untranslated: works normally on pages with at least one interlanguage
link; asks for hints for pages that have none.
-untranslatedonly: same as -untranslated, but pages which already have a
translation are skipped. Hint: do NOT use this in
combination with -start without a -number limit, because
you will go through the whole alphabet before any queries
are performed!
-showpage when asking for hints, show the first bit of the text
of the page always, rather than doing so only when being
asked for (by typing '?'). Only useful in combination
with a hint-asking option like -untranslated, -askhints
or -untranslatedonly.
(note: without ending colon)
-noauto Do not use the automatic translation feature for years and
dates, only use found links and hints.
(note: without ending colon)
-hintnobracket used to make the bot strip everything in last brackets,
and surrounding spaces from the page name, before it is
used in a -hint:xy: where the page name has been left out,
or -hint:all:, -hint:10:, etc. without a name, or
an -askhint reply, where only a language is given.
These arguments define how much user confirmation is required:
-autonomous run automatically, do not ask any questions. If a question
-auto to an operator is needed, write the name of the page
to autonomous_problems.dat and continue on the next page.
(note: without ending colon)
-confirm ask for confirmation before any page is changed on the
live wiki. Without this argument, additions and
unambiguous modifications are made without confirmation.
(note: without ending colon)
-force do not ask permission to make "controversial" changes,
like removing a language because none of the found
alternatives actually exists.
(note: without ending colon)
-cleanup like -force but only removes interwiki links to non-existent
or empty pages.
-select ask for each link whether it should be included before
changing any page. This is useful if you want to remove
invalid interwiki links and if you do multiple hints of
which some might be correct and others incorrect. Combining
-select and -confirm is possible, but seems like overkill.
(note: without ending colon)
These arguments specify in which way the bot should follow interwiki links:
-noredirect do not follow redirects nor category redirects.
(note: without ending colon)
-initialredirect work on its target if a redirect or category redirect is
entered on the command line or by a generator (note: without
ending colon). It is recommended to use this option with the
-movelog pagegenerator.
-neverlink: used as -neverlink:xx where xx is a language code:
Disregard any links found to language xx. You can also
specify a list of languages to disregard, separated by
commas.
-ignore: used as -ignore:xx:aaa where xx is a language code, and
aaa is a page title to be ignored.
-ignorefile: similar to -ignore, except that the pages are taken from
the given file instead of the command line.
-localright do not follow interwiki links from other pages than the
starting page. (Warning! Should be used very sparingly,
only when you are sure you have first gotten the interwiki
links on the starting page exactly right).
(note: without ending colon)
-hintsareright do not follow interwiki links to sites for which hints
on existing pages are given. Note that, hints given
interactively, via the -askhint command line option,
are only effective once they have been entered, thus
interwiki links on the starting page are followed
regardless of hints given when prompted.
(Warning! Should be used with caution!)
(note: without ending colon)
-back only work on pages that have no backlink from any other
language; if a backlink is found, all work on the page
will be halted. (note: without ending colon)
The following arguments are only important for users who have accounts for
multiple languages, and specify on which sites the bot should modify pages:
-localonly only work on the local wiki, not on other wikis in the
family I have a login at. (note: without ending colon)
-limittwo only update two pages - one in the local wiki (if logged-in)
and one in the top available one.
For example, if the local page has links to de and fr,
this option will make sure that only the local site and
the de: (larger) sites are updated. This option is useful
to quickly set two way links without updating all of the
wiki families sites.
(note: without ending colon)
-whenneeded works like limittwo, but other languages are changed in the
following cases:
* If there are no interwiki links at all on the page
* If an interwiki link must be removed
* If an interwiki link must be changed and there has been
a conflict for this page
Optionally, -whenneeded can be given an additional number
(for example -whenneeded:3), in which case other languages
will be changed if there are that number or more links to
change or add. (note: without ending colon)
The following arguments influence how many pages the bot works on at once:
-array: The number of pages the bot tries to be working on at once.
If the number of pages loaded is lower than this number,
a new set of pages is loaded from the starting wiki. The
default is 100, but can be changed in the config variable
interwiki_min_subjects
-query: The maximum number of pages that the bot will load at once.
Default value is 50.
Some configuration option can be used to change the working of this bot:
interwiki_min_subjects: the minimum amount of subjects that should be
processed at the same time.
interwiki_backlink: if set to True, all problems in foreign wikis will
be reported
interwiki_shownew: should interwiki.py display every new link it discovers?
interwiki_graph: output a graph PNG file on conflicts? You need pydot for
this: https://pypi.org/project/pydot/
interwiki_graph_format: the file format for interwiki graphs
without_interwiki: save file with local articles without interwikis
All these options can be changed through the user-config.py configuration file.
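For example, a user-config.py might contain (illustrative values only):
    interwiki_min_subjects = 100
    interwiki_backlink = True
    interwiki_shownew = True
    interwiki_graph = False
    without_interwiki = True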
If interwiki.py is terminated before it is finished, it will write a dump file
to the interwiki-dumps subdirectory. The program will read it if invoked with
the "-restore" or "-continue" option, and finish all the subjects in that list.
After finishing, the dump file will be deleted. To run the interwiki-bot on all
pages on a language, run it with option "-start:!", and if it takes so long
that you have to break it off, use "-continue" next time.
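A typical invocation, assuming the standard pwb.py wrapper script, could be:
    python pwb.py interwiki -start:! -async -array:100 -query:50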
"""
#
# (C) Pywikibot team, 2003-2021
#
# Distributed under the terms of the MIT license.
#
import codecs
import os
import re
import socket
import sys
from collections import Counter, defaultdict
from contextlib import suppress
from textwrap import fill
import pywikibot
from pywikibot import (
config,
i18n,
interwiki_graph,
pagegenerators,
textlib,
titletranslate,
)
from pywikibot.bot import ListOption, OptionHandler, StandardOption
from pywikibot.cosmetic_changes import moved_links
from pywikibot.exceptions import (
EditConflictError,
Error,
InvalidTitleError,
LockedPageError,
NoCreateError,
NoPageError,
NoUsernameError,
PageSaveRelatedError,
ServerError,
SiteDefinitionError,
SpamblacklistError,
UnknownSiteError,
)
from pywikibot.tools import SizedKeyCollection, first_upper
from pywikibot.tools.formatter import color_format
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp
}
class SaveError(Error):
"""An attempt to save a page with changed interwiki has failed."""
class LinkMustBeRemoved(SaveError):
"""An interwiki link has to be removed manually.
An interwiki link has to be removed, but this can't be done because
of user preferences or because the user chose not to change the page.
"""
class GiveUpOnPage(Error):
"""User chose not to work on this page and its linked pages any more."""
# A list of template names in different languages.
# Pages which contain these shouldn't be changed.
ignoreTemplates = {
'_default': ['delete'],
'ar': ['تحرر', 'تحويل لين'],
'ary': ['كاتبدل دابا'],
'arz': ['بتتطور'],
'cs': ['Pracuje_se'],
'de': ['inuse', 'in use', 'in bearbeitung', 'inbearbeitung',
'löschen', 'sla',
'löschantrag', 'löschantragstext',
'falschschreibung',
'obsolete schreibung', 'veraltete schreibweise'],
'en': ['inuse', 'softredirect'],
'fa': ['در دست ویرایش ۲', 'حذف سریع'],
'pdc': ['lösche'],
'zh': ['inuse'],
}
class InterwikiBotConfig:
"""Container class for interwikibot's settings."""
autonomous = False
confirm = False
always = False
select = False
followredirect = True
initialredirect = False
force = False
cleanup = False
remove = []
maxquerysize = 50
same = False
skip = set()
skipauto = False
untranslated = False
untranslatedonly = False
auto = True
neverlink = []
showtextlink = 0
showtextlinkadd = 300
localonly = False
limittwo = False
strictlimittwo = False
needlimit = 0
ignore = []
parenthesesonly = False
rememberno = False
followinterwiki = True
minsubjects = config.interwiki_min_subjects
nobackonly = False
askhints = False
hintnobracket = False
hints = []
hintsareright = False
lacklanguage = None
minlinks = 0
quiet = False
restore_all = False
asynchronous = False
summary = ''
repository = False
def note(self, text):
"""Output a notification message with.
The text will be printed only if conf.quiet isn't set.
@param text: text to be shown
@type text: str
"""
if not self.quiet:
pywikibot.output('NOTE: ' + text)
def readOptions(self, option):
"""Read all commandline parameters for the global container."""
arg, _, value = option.partition(':')
if not arg.startswith('-'):
return False
arg = arg[1:]
if arg == 'noauto':
self.auto = False
elif arg == 'hint':
self.hints.append(value)
elif arg == 'hintfile':
hintfilename = value or pywikibot.input(
'Please enter the hint filename:')
# hint or title ends either before | or before ]]
R = re.compile(r'\[\[(.+?)(?:\]\]|\|)')
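            # e.g. the line '[[de:Beispiel|label]]' yields the hint 'de:Beispiel'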
with codecs.open(hintfilename, 'r', config.textfile_encoding) as f:
self.hints += R.findall(f.read())
elif arg == 'wiktionary':
self.same = 'wiktionary'
# Don't use auto-translation in -wiktionary mode
# where page titles must be the same
self.auto = False
elif arg == 'untranslatedonly':
self.untranslated = True
self.untranslatedonly = True
elif arg == 'askhints':
self.untranslated = True
self.untranslatedonly = False
self.askhints = True
elif arg in ('autonomous', 'auto'):
self.autonomous = True
elif arg == 'noredirect':
self.followredirect = False
elif arg == 'limittwo':
self.limittwo = True
self.strictlimittwo = True
elif arg == 'whenneeded':
self.limittwo = True
self.strictlimittwo = False
if value.isdigit():
self.needlimit = int(value)
elif arg == 'skipfile':
skip_page_gen = pagegenerators.TextfilePageGenerator(value)
self.skip.update(skip_page_gen)
del skip_page_gen
elif arg == 'neverlink':
self.neverlink += value.split(',')
elif arg == 'ignore':
self.ignore += [pywikibot.Page(pywikibot.Site(), p)
for p in value.split(',')]
elif arg == 'ignorefile':
ignore_page_gen = pagegenerators.TextfilePageGenerator(value)
            self.ignore.extend(ignore_page_gen)  # self.ignore is a list
del ignore_page_gen
elif arg == 'showpage':
self.showtextlink += self.showtextlinkadd
elif arg == 'graph':
# override configuration
config.interwiki_graph = True
elif arg == 'bracket':
self.parenthesesonly = True
elif arg == 'localright':
self.followinterwiki = False
elif arg == 'array' and value.isdigit():
self.minsubjects = int(value)
elif arg == 'query' and value.isdigit():
self.maxquerysize = int(value)
elif arg == 'back':
self.nobackonly = True
elif arg == 'async':
self.asynchronous = True
elif arg == 'summary':
self.summary = value or pywikibot.input(
'What summary do you want to use?')
elif arg == 'lack':
self.lacklanguage, sep, minlinks = value.partition(':')
self.minlinks = int(minlinks) if minlinks.isdigit() else 1
elif arg in ('cleanup', 'confirm', 'force', 'hintnobracket',
'hintsareright', 'initialredirect', 'localonly', 'quiet',
'repository', 'same', 'select', 'skipauto',
'untranslated'):
assert hasattr(self, arg)
assert value == ''
setattr(self, arg, True)
else:
return False
return True
class PageTree(SizedKeyCollection):
"""
Structure to manipulate a set of pages.
Allows filtering efficiently by Site.
"""
def __init__(self):
"""Initializer.
While using dict values would be faster for the remove() operation,
keeping list values is important, because the order in which the pages
were found matters: the earlier a page is found, the closer it is to
the Subject.origin. Chances are that pages found within 2 interwiki
distance from the origin are more related to the original topic
than pages found later on, after 3, 4, 5 or more interwiki hops.
Keeping this order is hence important to display an ordered
        list of pages to the user when they are asked to resolve
conflicts.
@ivar data: dictionary with Site as keys and list of page as values.
All pages found within Site are kept in self.data[site].
@type data: defaultdict(list)
"""
super().__init__('site')
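# Illustrative PageTree usage, as done elsewhere in this module (not executed
# here); pages are grouped by their Site:
#     tree = PageTree()
#     tree.append(page)               # stored under page.site
#     for p in tree.filter(site):     # iterate the pages of one Site
#         ...
#     tree.remove_key(site)           # drop all pages of that Site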
class Subject(interwiki_graph.Subject):
"""
Class to follow the progress of a single 'subject'.
(i.e. a page with all its translations)
Subject is a transitive closure of the binary relation on Page:
"has_a_langlink_pointing_to".
A formal way to compute that closure would be:
With P a set of pages, NL ('NextLevel') a function on sets defined as:
`NL(P) = { target | ∃ source ∈ P, target ∈ source.langlinks() }`
pseudocode::
todo <- [origin]
done <- []
while todo != []:
pending <- todo
todo <-NL(pending) / done
done <- NL(pending) U done
return done
There is, however, one limitation that is induced by implementation:
to compute efficiently NL(P), one has to load the page contents of
pages in P.
(Not only the langlinks have to be parsed from each Page, but we also want
to know if the Page is a redirect, a disambiguation, etc...)
Because of this, the pages in pending have to be preloaded.
However, because the pages in pending are likely to be in several sites
we cannot "just" preload them as a batch.
Instead of doing "pending <- todo" at each iteration, we have to elect a
Site, and we put in pending all the pages from todo that belong to that
Site:
Code becomes::
todo <- {origin.site: [origin]}
done <- []
while todo != {}:
site <- electSite()
pending <- todo[site]
preloadpages(site, pending)
todo[site] <- NL(pending) / done
done <- NL(pending) U done
return done
Subject objects only operate on pages that should have been preloaded
before. In fact, at any time:
* todo contains new Pages that have not been loaded yet
* done contains Pages that have been loaded, and that have been treated.
* If batch preloadings are successful, Page._get() is never called from
this Object.
"""
def __init__(self, origin=None, hints=None, conf=None):
"""
Initializer.
Takes as arguments the Page on the home wiki
plus optionally a list of hints for translation
"""
self.conf = conf
super().__init__(origin)
self.repoPage = None
# todo is a list of all pages that still need to be analyzed.
# Mark the origin page as todo.
self.todo = PageTree()
if origin:
self.todo.append(origin)
# done is a list of all pages that have been analyzed and that
# are known to belong to this subject.
self.done = PageTree()
# This is a list of all pages that are currently scheduled for
# download.
self.pending = PageTree()
if self.conf.hintsareright:
# This is a set of sites that we got hints to
self.hintedsites = set()
self.translate(hints, self.conf.hintsareright)
self.confirm = self.conf.confirm
self.problemfound = False
self.untranslated = None
self.hintsAsked = False
self.forcedStop = False
self.workonme = True
def getFoundDisambig(self, site):
"""
Return the first disambiguation found.
If we found a disambiguation on the given site while working on the
subject, this method returns it. If several ones have been found, the
first one will be returned.
Otherwise, None will be returned.
"""
for tree in [self.done, self.pending]:
for page in tree.filter(site):
if page.exists() and page.isDisambig():
return page
return None
def getFoundNonDisambig(self, site):
"""
Return the first non-disambiguation found.
If we found a non-disambiguation on the given site while working on the
subject, this method returns it. If several ones have been found, the
first one will be returned.
Otherwise, None will be returned.
"""
for tree in [self.done, self.pending]:
for page in tree.filter(site):
if page.exists() \
and not page.isDisambig() \
and not page.isRedirectPage() \
and not page.isCategoryRedirect():
return page
return None
def getFoundInCorrectNamespace(self, site):
"""
Return the first page in the extended namespace.
If we found a page that has the expected namespace on the given site
while working on the subject, this method returns it. If several ones
have been found, the first one will be returned.
Otherwise, None will be returned.
"""
for tree in [self.done, self.pending, self.todo]:
for page in tree.filter(site):
# -hintsonly: before we have an origin page, any namespace will
# do.
if self.origin and page.namespace() == self.origin.namespace():
if page.exists() \
and not page.isRedirectPage() \
and not page.isCategoryRedirect():
return page
return None
def translate(self, hints=None, keephintedsites=False):
"""Add the given translation hints to the todo list."""
if self.conf.same and self.origin:
if hints:
hints += ['all:']
else:
hints = ['all:']
site = self.origin.site
else:
site = pywikibot.Site()
links = titletranslate.translate(
self.origin,
hints=hints,
auto=self.conf.auto,
removebrackets=self.conf.hintnobracket,
site=site)
for link in links:
page = pywikibot.Page(link)
self.todo.append(page)
self.found_in[page] = [None]
if keephintedsites:
self.hintedsites.add(page.site)
def openSites(self):
"""
Iterator.
Yields (site, count) pairs:
* site is a site where we still have work to do on
* count is the number of items in that Site that need work on
"""
return self.todo.iter_values_len()
def whatsNextPageBatch(self, site):
"""
Return the next page batch.
By calling this method, you 'promise' this instance that you will
preload all the 'site' Pages that are in the todo list.
This routine will return a list of pages that can be treated.
"""
# Bug-check: Isn't there any work still in progress? We can't work on
# different sites at a time!
if self.pending:
raise RuntimeError(
"BUG: Can't start to work on {}; still working on {}"
.format(site, self.pending))
# Prepare a list of suitable pages
result = []
for page in self.todo.filter(site):
self.pending.append(page)
result.append(page)
self.todo.remove_key(site)
# If there are any, return them. Otherwise, nothing is in progress.
return result
def makeForcedStop(self, counter):
"""End work on the page before the normal end."""
for site, count in self.todo.iter_values_len():
counter.minus(site, count)
self.todo.clear()
self.forcedStop = True
def addIfNew(self, page, counter, linkingPage):
"""
Add the pagelink given to the todo list, if it hasn't been seen yet.
If it is added, update the counter accordingly.
Also remembers where we found the page, regardless of whether it had
already been found before or not.
Returns True if the page is new.
"""
if self.forcedStop:
return False
# cannot check backlink before we have an origin page
if self.conf.nobackonly and self.origin and page == self.origin:
try:
pywikibot.output('{} has a backlink from {}.'
.format(page, linkingPage))
except UnicodeDecodeError:
pywikibot.output('Found a backlink for a page.')
self.makeForcedStop(counter)
return False
if page in self.found_in:
# not new
self.found_in[page].append(linkingPage)
return False
self.found_in[page] = [linkingPage]
self.todo.append(page)
counter.plus(page.site)
return True
def skipPage(self, page, target, counter):
"""Return whether page has to be skipped."""
return self.isIgnored(target) \
or self.namespaceMismatch(page, target, counter) \
or self.wiktionaryMismatch(target)
def namespaceMismatch(self, linkingPage, linkedPage, counter):
"""
Check whether or not the given page has a different namespace.
Returns True if the namespaces are different and the user
has selected not to follow the linked page.
"""
if linkedPage in self.found_in:
# We have seen this page before, don't ask again.
return False
if self.origin and self.origin.namespace() != linkedPage.namespace():
# Allow for a mapping between different namespaces
crossFrom = self.origin.site.family.crossnamespace.get(
self.origin.namespace(), {})
crossTo = crossFrom.get(self.origin.site.lang,
crossFrom.get('_default', {}))
nsmatch = crossTo.get(linkedPage.site.lang,
crossTo.get('_default', []))
if linkedPage.namespace() in nsmatch:
return False
if self.conf.autonomous:
pywikibot.output(
'NOTE: Ignoring link from page {} in namespace'
' {} to page {} in namespace {}.'
.format(linkingPage, linkingPage.namespace(), linkedPage,
linkedPage.namespace()))
# Fill up found_in, so that we will not write this notice
self.found_in[linkedPage] = [linkingPage]
return True
preferredPage = self.getFoundInCorrectNamespace(linkedPage.site)
if preferredPage:
pywikibot.output(
'NOTE: Ignoring link from page {} in namespace {} to '
'page {} in namespace {} because page {} in the '
'correct namespace has already been found.'
.format(linkingPage, linkingPage.namespace(),
linkedPage, linkedPage.namespace(),
preferredPage))
return True
choice = pywikibot.input_choice(
'WARNING: {} is in namespace "{}", but {} is in '
'namespace "{}". Follow it anyway?'
.format(self.origin, self.origin.namespace(),
linkedPage, linkedPage.namespace()),
[('Yes', 'y'), ('No', 'n'),
('Add an alternative', 'a'), ('give up', 'g')],
automatic_quit=False)
if choice != 'y':
# Fill up found_in, so that we will not ask again
self.found_in[linkedPage] = [linkingPage]
if choice == 'g':
self.makeForcedStop(counter)
elif choice == 'a':
newHint = pywikibot.input(
'Give the alternative for language {}, not '
'using a language code:'
.format(linkedPage.site.lang))
if newHint:
alternativePage = pywikibot.Page(
linkedPage.site, newHint)
if alternativePage:
# add the page that was entered by the user
self.addIfNew(alternativePage, counter, None)
else:
pywikibot.output(
'NOTE: ignoring {} and its interwiki links'
.format(linkedPage))
return True
# same namespaces, no problem
# or no origin page yet, also no problem
return False
def wiktionaryMismatch(self, page):
"""Check for ignoring pages."""
if self.origin and self.conf.same == 'wiktionary':
if page.title().lower() != self.origin.title().lower():
pywikibot.output('NOTE: Ignoring {} for {} in wiktionary mode'
.format(page, self.origin))
return True
if (page.title() != self.origin.title()
and self.origin.namespace().case == 'case-sensitive'
and page.namespace().case == 'case-sensitive'):
pywikibot.output(
'NOTE: Ignoring {} for {} in wiktionary mode because both '
'languages are uncapitalized.'
.format(page, self.origin))
return True
return False
def disambigMismatch(self, page, counter):
"""
Check whether the given page has a different disambiguation status.
Returns a tuple (skip, alternativePage).
skip is True if the pages have mismatching statuses and the bot
is either in autonomous mode, or the user chose not to use the
given page.
alternativePage is either None, or a page that the user has
chosen to use instead of the given page.
"""
if not self.origin:
return (False, None) # any page matches til we have an origin page
if self.conf.autonomous:
if self.origin.isDisambig() and not page.isDisambig():
pywikibot.output(
'NOTE: Ignoring link from disambiguation page {} to '
'non-disambiguation {}'.format(self.origin, page))
return (True, None)
if not self.origin.isDisambig() and page.isDisambig():
pywikibot.output(
'NOTE: Ignoring link from non-disambiguation page {} to '
'disambiguation {}'.format(self.origin, page))
return (True, None)
else:
choice = 'y'
if self.origin.isDisambig() and not page.isDisambig():
disambig = self.getFoundDisambig(page.site)
if disambig:
pywikibot.output(
'NOTE: Ignoring non-disambiguation page {} for {} '
'because disambiguation page {} has already been '
'found.'
.format(page, self.origin, disambig))
return (True, None)
choice = pywikibot.input_choice(
"WARNING: {} is a disambiguation page, but {} doesn't "
'seem to be one. Follow it anyway?'
.format(self.origin, page),
[('Yes', 'y'), ('No', 'n'),
('Add an alternative', 'a'), ('give up', 'g')],
automatic_quit=False)
elif not self.origin.isDisambig() and page.isDisambig():
nondisambig = self.getFoundNonDisambig(page.site)
if nondisambig:
pywikibot.output(
'NOTE: Ignoring disambiguation page {} for {} because '
'non-disambiguation page {} has already been found.'
.format(page, self.origin, nondisambig))
return (True, None)
choice = pywikibot.input_choice(
"WARNING: {} doesn't seem to be a disambiguation "
'page, but {} is one. Follow it anyway?'
.format(self.origin, page),
[('Yes', 'y'), ('No', 'n'),
('Add an alternative', 'a'), ('give up', 'g')],
automatic_quit=False)
if choice == 'n':
return (True, None)
if choice == 'a':
newHint = pywikibot.input(
'Give the alternative for language {}, not using a '
'language code:'.format(page.site.lang))
alternativePage = pywikibot.Page(page.site, newHint)
return (True, alternativePage)
if choice == 'g':
self.makeForcedStop(counter)
return (True, None)
# We can follow the page.
return (False, None)
def isIgnored(self, page):
"""Return True if pages is to be ignored."""
if page.site.lang in self.conf.neverlink:
pywikibot.output('Skipping link {} to an ignored language'
.format(page))
return True
if page in self.conf.ignore:
pywikibot.output('Skipping link {} to an ignored page'
.format(page))
return True
return False
def reportInterwikilessPage(self, page):
"""Report interwikiless page."""
self.conf.note('{} does not have any interwiki links'
.format(self.origin))
if config.without_interwiki:
with codecs.open(
pywikibot.config.datafilepath('without_interwiki.txt'),
'a', 'utf-8') as f:
f.write('# {} \n'.format(page))
def askForHints(self, counter):
"""Ask for hints to other sites."""
if (not self.workonme # we don't work on it anyway
or not self.untranslated and not self.conf.askhints
or self.hintsAsked
or not self.origin
or not self.origin.exists()
or self.origin.isRedirectPage()
or self.origin.isCategoryRedirect()):
return
self.hintsAsked = True
if not self.conf.untranslated:
return
t = self.conf.showtextlink
if t:
pywikibot.output(self.origin.get()[:t])
while True:
newhint = pywikibot.input('Give a hint (? to see pagetext):')
if not newhint:
break
if newhint == '?':
t += self.conf.showtextlinkadd
pywikibot.output(self.origin.get()[:t])
elif ':' not in newhint:
pywikibot.output(fill(
'Please enter a hint in the format language:pagename '
'or type nothing if you do not have a hint.'))
else:
links = titletranslate.translate(
self.origin,
hints=[newhint],
auto=self.conf.auto,
removebrackets=self.conf.hintnobracket)
for link in links:
page = pywikibot.Page(link)
self.addIfNew(page, counter, None)
if self.conf.hintsareright:
self.hintedsites.add(page.site)
def batchLoaded(self, counter):
"""
Notify that the promised batch of pages was loaded.
This is called by a worker to tell us that the promised batch of
pages was loaded.
In other words, all the pages in self.pending have already
been preloaded.
The only argument is an instance of a counter class, that has methods
minus() and plus() to keep counts of the total work todo.
"""
# Loop over all the pages that should have been taken care of
for page in self.pending:
# Mark the page as done
self.done.append(page)
# make sure that none of the linked items is an auto item
if self.conf.skipauto:
dictName, year = page.autoFormat()
if dictName is not None:
if self.origin:
pywikibot.warning(
'{}:{} relates to {}:{}, which is an '
'auto entry {}({})'
.format(self.origin.site.lang, self.origin,
page.site.lang, page, dictName, year))
# Abort processing if the bot is running in autonomous mode
if self.conf.autonomous:
self.makeForcedStop(counter)
# Register this fact at the todo-counter.
counter.minus(page.site)
# Now check whether any interwiki links should be added to the
# todo list.
if not page.exists():
self.conf.remove.append(str(page))
self.conf.note('{} does not exist. Skipping.'.format(page))
if page == self.origin:
# The page we are working on is the page that does not
# exist. No use in doing any work on it in that case.
for site, count in self.todo.iter_values_len():
counter.minus(site, count)
self.todo.clear()
                    # In some rare cases we might already have checked some
                    # 'automatic' links
self.done.clear()
continue
if page.isRedirectPage() or page.isCategoryRedirect():
if page.isRedirectPage():
redirectTargetPage = page.getRedirectTarget()
redir = ''
else:
redirectTargetPage = page.getCategoryRedirectTarget()
redir = 'category '
self.conf.note('{} is {}redirect to {}'
.format(page, redir, redirectTargetPage))
if self.origin is None or page == self.origin:
                    # the 1st existing page becomes the origin page, if none was
# supplied
if self.conf.initialredirect:
# don't follow another redirect; it might be a self
# loop
if not redirectTargetPage.isRedirectPage() \
and not redirectTargetPage.isCategoryRedirect():
self.origin = redirectTargetPage
self.todo.append(redirectTargetPage)
counter.plus(redirectTargetPage.site)
else:
# This is a redirect page to the origin. We don't need
# to follow the redirection.
# In this case we can also stop all hints!
for site, count in self.todo.iter_values_len():
counter.minus(site, count)
self.todo.clear()
elif not self.conf.followredirect:
self.conf.note('not following {}redirects.'.format(redir))
elif page.isStaticRedirect():
self.conf.note('not following static {}redirects.'
.format(redir))
elif (page.site.family == redirectTargetPage.site.family
and not self.skipPage(page, redirectTargetPage,
counter)):
if self.addIfNew(redirectTargetPage, counter, page):
if config.interwiki_shownew:
pywikibot.output('{}: {} gives new {}redirect {}'
.format(self.origin,
page, redir,
redirectTargetPage))
continue
# must be behind the page.isRedirectPage() part
# otherwise a redirect error would be raised
if page_empty_check(page):
self.conf.remove.append(str(page))
self.conf.note('{} is empty. Skipping.'.format(page))
if page == self.origin:
for site, count in self.todo.iter_values_len():
counter.minus(site, count)
self.todo.clear()
self.done.clear()
self.origin = None
continue
if page.section():
self.conf.note('{} is a page section. Skipping.'.format(page))
continue
# Page exists, isn't a redirect, and is a plain link (no section)
if self.origin is None:
            # the 1st existing page becomes the origin page, if none was
# supplied
self.origin = page
try:
iw = page.langlinks()
except UnknownSiteError:
self.conf.note('site {} does not exist.'.format(page.site))
continue
(skip, alternativePage) = self.disambigMismatch(page, counter)
if skip:
pywikibot.output('NOTE: ignoring {} and its interwiki links'
.format(page))
self.done.remove(page)
iw = ()
if alternativePage:
# add the page that was entered by the user
self.addIfNew(alternativePage, counter, None)
duplicate = None
for p in self.done.filter(page.site):
if p != page and p.exists() and \
not p.isRedirectPage() and not p.isCategoryRedirect():
duplicate = p
break
if self.origin == page:
self.untranslated = not iw
if self.conf.untranslatedonly:
# Ignore the interwiki links.
iw = ()
if self.conf.lacklanguage:
if self.conf.lacklanguage in (link.site.lang
for link in iw):
iw = ()
self.workonme = False
if len(iw) < self.conf.minlinks:
iw = ()
self.workonme = False
elif self.conf.autonomous and duplicate and not skip:
pywikibot.output('Stopping work on {} because duplicate pages'
' {} and {} are found'
                             .format(self.origin, duplicate, page))
self.makeForcedStop(counter)
try:
with codecs.open(
pywikibot.config.datafilepath(
'autonomous_problems.dat'),
'a', 'utf-8') as f:
f.write('* {} {{Found more than one link for {}}}'
.format(self.origin, page.site))
if config.interwiki_graph \
and config.interwiki_graph_url:
filename = interwiki_graph.getFilename(
self.origin,
extension=config.interwiki_graph_formats[0])
f.write(' [{}{} graph]'
.format(config.interwiki_graph_url,
filename))
f.write('\n')
# FIXME: What errors are we catching here?
except Exception:
# raise
pywikibot.output(
'File autonomous_problems.dat open or corrupted! '
'Try again with -restore.')
sys.exit()
iw = ()
for link in iw:
linkedPage = pywikibot.Page(link)
if self.conf.hintsareright \
and linkedPage.site in self.hintedsites:
pywikibot.output(
'NOTE: {}: {} extra interwiki on hinted site '
'ignored {}'.format(self.origin, page, linkedPage))
break
if not self.skipPage(page, linkedPage, counter):
if self.conf.followinterwiki or page == self.origin:
if self.addIfNew(linkedPage, counter, page):
# It is new. Also verify whether it is the second
# on the same site
lpsite = linkedPage.site
for prevPage in self.found_in:
if prevPage != linkedPage and \
prevPage.site == lpsite:
# Still, this could be "no problem" as
# either may be a redirect to the other.
# No way to find out quickly!
pywikibot.output(
'NOTE: {}: {} gives duplicate '
'interwiki on same site {}'
.format(self.origin, page, linkedPage))
break
else:
if config.interwiki_shownew:
pywikibot.output(
'{}: {} gives new interwiki {}'
.format(self.origin, page, linkedPage))
if self.forcedStop:
break
# These pages are no longer 'in progress'
self.pending.clear()
# Check whether we need hints and the user offered to give them
if self.untranslated and not self.hintsAsked:
self.reportInterwikilessPage(page)
self.askForHints(counter)
def isDone(self):
"""Return True if all the work for this subject has completed."""
return not self.todo
def problem(self, txt, createneed=True):
"""Report a problem with the resolution of this subject."""
pywikibot.error(txt)
self.confirm = True
if createneed:
self.problemfound = True
def whereReport(self, page, indent=4):
"""Report found interlanguage links with conflicts."""
for page2 in sorted(self.found_in[page]):
if page2 is None:
pywikibot.output(' ' * indent + 'Given as a hint.')
else:
pywikibot.output(' ' * indent + str(page2))
def assemble(self):
"""Assemble language links."""
# No errors have been seen so far, except....
errorCount = self.problemfound
# Build up a dictionary of all pages found, with the site as key.
# Each value will be a list of pages.
new = defaultdict(list)
for page in self.done:
if page.exists() and not page.isRedirectPage() \
and not page.isCategoryRedirect():
site = page.site
if site.family.interwiki_forward:
# TODO: allow these cases to be propagated!
# inhibit the forwarding families pages to be updated.
continue
if site != self.origin.site:
new[site].append(page)
elif page != self.origin:
self.problem('Found link to ' + page)
self.whereReport(page)
errorCount += 1
# See if new{} contains any problematic values
result = {}
for site, pages in new.items():
if len(pages) > 1:
errorCount += 1
self.problem('Found more than one link for ' + site)
if not errorCount and not self.conf.select:
# no errors, so all lists have only one item
for site, pages in new.items():
result[site] = pages[0]
return result
# There are any errors.
if config.interwiki_graph:
graphDrawer = interwiki_graph.GraphDrawer(self)
graphDrawer.createGraph()
# We don't need to continue with the rest if we're in autonomous
# mode.
if self.conf.autonomous:
return None
# First loop over the ones that have more solutions
for site, pages in new.items():
if len(pages) <= 1:
continue
pywikibot.output('=' * 30)
pywikibot.output('Links to ' + site)
for i, page2 in enumerate(pages, 1):
pywikibot.output(' ({}) Found link to {} in:'
.format(i, page2))
self.whereReport(page2, indent=8)
# TODO: allow answer to repeat previous or go back after a mistake
answer = pywikibot.input_choice(
'Which variant should be used?',
(ListOption(pages),
StandardOption('none', 'n'),
StandardOption('give up', 'g')))
if answer == 'g':
return None
if answer != 'n':
result[site] = answer[1]
# Loop over the ones that have one solution, so are in principle
# not a problem.
acceptall = False
for site, pages in new.items():
if len(pages) != 1:
continue
if not acceptall:
pywikibot.output('=' * 30)
page2 = pages[0]
pywikibot.output('Found link to {} in:'.format(page2))
self.whereReport(page2, indent=4)
while True:
if acceptall:
answer = 'a'
else:
# TODO: allow answer to repeat previous or go back
# after a mistake
answer = pywikibot.input_choice(
'What should be done?',
[('accept', 'a'), ('reject', 'r'),
('give up', 'g'), ('accept all', 'l')], 'a',
automatic_quit=False)
if answer == 'l': # accept all
acceptall = True
answer = 'a'
if answer == 'a': # accept this one
result[site] = pages[0]
break
if answer == 'g': # give up
return None
if answer == 'r': # reject
# None acceptable
break
return result
def finish(self):
"""
Round up the subject, making any necessary changes.
This should be called exactly once after the todo list has gone empty.
"""
if not self.isDone():
raise Exception('Bugcheck: finish called before done')
if not self.workonme or not self.origin:
return
if self.origin.isRedirectPage() or self.origin.isCategoryRedirect():
return
if not self.untranslated and self.conf.untranslatedonly:
return
if self.forcedStop: # autonomous with problem
pywikibot.output('======Aborted processing {}======'
.format(self.origin))
return
# The following check is not always correct and thus disabled.
# self.done might contain no interwiki links because of the -neverlink
# argument or because of disambiguation conflicts.
# if len(self.done) == 1:
# # No interwiki at all
# return
pywikibot.output('======Post-processing {}======'
.format(self.origin))
# Assemble list of accepted interwiki links
new = self.assemble()
if new is None: # User said give up
pywikibot.output('======Aborted processing {}======'
.format(self.origin))
return
# Make sure new contains every page link, including the page we are
# processing
        # TODO: this should be moved to assemble()
# replaceLinks will skip the site it's working on.
# TODO: make this possible as well.
if self.origin.site not in new \
and not self.origin.site.family.interwiki_forward:
new[self.origin.site] = self.origin
updatedSites = []
notUpdatedSites = []
# Process all languages here
self.conf.always = False
if self.conf.limittwo:
lclSite = self.origin.site
lclSiteDone = False
frgnSiteDone = False
for siteCode in lclSite.family.languages_by_size:
site = pywikibot.Site(siteCode, lclSite.family)
if (not lclSiteDone and site == lclSite) \
or (not frgnSiteDone and site != lclSite and site in new):
if site == lclSite:
lclSiteDone = True # even if we fail the update
if (site.family.name in config.usernames
and site.code in config.usernames[
site.family.name]):
try:
if self.replaceLinks(new[site], new):
updatedSites.append(site)
if site != lclSite:
frgnSiteDone = True
except SaveError:
notUpdatedSites.append(site)
except GiveUpOnPage:
break
elif (not self.conf.strictlimittwo
and site in new and site != lclSite):
old = {}
try:
for link in new[site].iterlanglinks():
page = pywikibot.Page(link)
old[page.site] = page
except NoPageError:
pywikibot.output('BUG>>> {} no longer exists?'
.format(new[site]))
continue
mods, mcomment, adding, removing, modifying \
= compareLanguages(old, new, lclSite,
self.conf.summary)
if (removing and not self.conf.autonomous
or modifying and self.problemfound
or not old
or (self.conf.needlimit
and len(adding) + len(modifying)
>= self.conf.needlimit + 1)):
try:
if self.replaceLinks(new[site], new):
updatedSites.append(site)
except SaveError:
notUpdatedSites.append(site)
except NoUsernameError:
pass
except GiveUpOnPage:
break
else:
for (site, page) in new.items():
# if we have an account for this site
if site.family.name in config.usernames \
and site.code in config.usernames[site.family.name] \
and not site.has_data_repository:
# Try to do the changes
try:
if self.replaceLinks(page, new):
# Page was changed
updatedSites.append(site)
except SaveError:
notUpdatedSites.append(site)
except GiveUpOnPage:
break
# disabled graph drawing for minor problems: it just takes too long
# if notUpdatedSites != [] and config.interwiki_graph:
# # at least one site was not updated, save a conflict graph
# self.createGraph()
# don't report backlinks for pages we already changed
if config.interwiki_backlink:
self.reportBacklinks(new, updatedSites)
def replaceLinks(self, page, newPages):
"""Return True if saving was successful."""
# In this case only continue on the Page we started with
if self.conf.localonly and page != self.origin:
raise SaveError('-localonly and page != origin')
if page.section():
# This is not a page, but a subpage. Do not edit it.
pywikibot.output('Not editing {}: not doing interwiki on subpages'
.format(page))
raise SaveError('Link has a #section')
try:
pagetext = page.get()
except NoPageError:
pywikibot.output('Not editing {}: page does not exist'
.format(page))
raise SaveError("Page doesn't exist")
if page_empty_check(page):
pywikibot.output('Not editing {}: page is empty'.format(page))
raise SaveError('Page is empty.')
# clone original newPages dictionary, so that we can modify it to the
# local page's needs
new = newPages.copy()
interwikis = [pywikibot.Page(link) for link in page.iterlanglinks()]
# remove interwiki links to ignore
for iw in re.finditer(r'<!-- *\[\[(.*?:.*?)\]\] *-->', pagetext):
with suppress(KeyError,
SiteDefinitionError,
InvalidTitleError):
ignorepage = pywikibot.Page(page.site, iw.groups()[0])
if new[ignorepage.site] == ignorepage \
and ignorepage.site != page.site:
param = {'to': ignorepage, 'from': page}
if ignorepage not in interwikis:
pywikibot.output('Ignoring link to {to} for {from}'
.format_map(param))
new.pop(ignorepage.site)
else:
pywikibot.output(
'NOTE: Not removing interwiki from {from} to '
'{to} (exists both commented and non-commented)'
.format_map(param))
# sanity check - the page we are fixing must be the only one for that
# site.
pltmp = new[page.site]
if pltmp != page:
s = 'None'
if pltmp is not None:
s = pltmp
pywikibot.output(
'BUG>>> {} is not in the list of new links! Found {}.'
.format(page, s))
raise SaveError('BUG: sanity check failed')
# Avoid adding an iw link back to itself
del new[page.site]
# Do not add interwiki links to foreign families that page.site() does
# not forward to
for stmp in new.keys():
if stmp.family != page.site.family \
and stmp.family.name != page.site.family.interwiki_forward:
del new[stmp]
# Put interwiki links into a map
old = {}
for page2 in interwikis:
old[page2.site] = page2
# Check what needs to get done
mods, mcomment, adding, removing, modifying = compareLanguages(
old,
new,
page.site,
self.conf.summary
)
# When running in autonomous mode without -force switch, make sure we
# don't remove any items, but allow addition of the new ones
if self.conf.autonomous and not self.conf.force and removing:
for rmsite in removing:
# Sometimes sites have an erroneous link to itself as an
# interwiki
if rmsite == page.site:
continue
rmPage = old[rmsite]
# put it to new means don't delete it
if (not self.conf.cleanup
or str(rmPage) not in self.conf.remove):
new[rmsite] = rmPage
pywikibot.warning(
'{} is either deleted or has a mismatching '
'disambiguation state.'.format(rmPage))
# Re-Check what needs to get done
mods, mcomment, adding, removing, modifying = compareLanguages(
old,
new,
page.site,
self.conf.summary
)
if not mods:
self.conf.note('No changes needed on page {}'.format(page))
return False
# Show a message in purple.
pywikibot.output(color_format(
'{lightpurple}Updating links on page {0}.{default}', page))
pywikibot.output('Changes to be made: {}'.format(mods))
oldtext = page.get()
template = (page.namespace() == 10)
newtext = textlib.replaceLanguageLinks(oldtext, new,
site=page.site,
template=template)
        # This is for now. Later there should be different functions for each
# kind
if not botMayEdit(page):
if template:
pywikibot.output(
'SKIPPING: {} should have interwiki links on subpage.'
.format(page))
else:
pywikibot.output(
'SKIPPING: {} is under construction or to be deleted.'
.format(page))
return False
if newtext == oldtext:
return False
pywikibot.showDiff(oldtext, newtext)
# Determine whether we need permission to submit
ask = False
# Allow for special case of a self-pointing interwiki link
if removing and removing != [page.site]:
self.problem('Found incorrect link to {} in {}'
.format(', '.join(x.code for x in removing), page),
createneed=False)
ask = True
if self.conf.force or self.conf.cleanup:
ask = False
if self.conf.confirm and not self.conf.always:
ask = True
if not ask:
# If we do not need to ask, allow
answer = 'y'
elif self.conf.autonomous:
# If we cannot ask, deny permission
answer = 'n'
else: # If we need to ask, do so
answer = pywikibot.input_choice('Submit?',
[('Yes', 'y'), ('No', 'n'),
('open in Browser', 'b'),
('Give up', 'g'),
('Always', 'a')],
automatic_quit=False)
if answer == 'b':
pywikibot.bot.open_webbrowser(page)
return True
if answer == 'a':
# don't ask for the rest of this subject
self.conf.always = True
answer = 'y'
if answer == 'g':
raise GiveUpOnPage('User asked us to give up')
# If we got permission to submit, do so
if answer != 'y':
raise LinkMustBeRemoved(
'Found incorrect link to {} in {}'
.format(', '.join(x.code for x in removing), page))
self.conf.note('Updating live wiki...')
timeout = 60
page.text = newtext
while True:
try:
page.save(summary=mcomment,
asynchronous=self.conf.asynchronous,
nocreate=True)
except NoCreateError:
pywikibot.exception()
return False
except LockedPageError:
pywikibot.output('Page {} is locked. Skipping.'
.format(page))
raise SaveError('Locked')
except EditConflictError:
pywikibot.output(
'ERROR putting page: An edit conflict occurred. '
'Giving up.')
raise SaveError('Edit conflict')
except SpamblacklistError as error:
pywikibot.output(
'ERROR putting page: {} blacklisted by spamfilter. '
'Giving up.'.format(error.url))
raise SaveError('Spam filter')
except PageSaveRelatedError as error:
pywikibot.output('ERROR putting page: {}'
.format(error.args,))
raise SaveError('PageSaveRelatedError')
except (socket.error, IOError) as error:
if timeout > 3600:
raise
pywikibot.output('ERROR putting page: {}'
.format(error.args,))
pywikibot.output('Sleeping {} seconds before trying again.'
.format(timeout,))
timeout *= 2
pywikibot.sleep(timeout)
except ServerError:
if timeout > 3600:
raise
pywikibot.output('ERROR putting page: ServerError.')
pywikibot.output('Sleeping {} seconds before trying again.'
.format(timeout,))
timeout *= 2
pywikibot.sleep(timeout)
else:
break
return True
def reportBacklinks(self, new, updatedSites):
"""
Report missing back links. This will be called from finish() if needed.
updatedSites is a list that contains all sites we changed, to avoid
reporting of missing backlinks for pages we already fixed
"""
# use sets because searching an element is faster than in lists
expectedPages = set(new.values())
expectedSites = set(new)
try:
for site in expectedSites - set(updatedSites):
page = new[site]
if page.section():
continue
try:
linkedPages = {pywikibot.Page(link)
for link in page.iterlanglinks()}
except NoPageError:
pywikibot.warning(
                    'Page {} no longer exists?!'.format(page))
break
# To speed things up, create a dictionary which maps sites
# to pages. This assumes that there is only one interwiki
# link per language.
linkedPagesDict = {}
for linkedPage in linkedPages:
linkedPagesDict[linkedPage.site] = linkedPage
for expectedPage in expectedPages - linkedPages:
if expectedPage == page:
continue
try:
linkedPage = linkedPagesDict[expectedPage.site]
pywikibot.warning(
'{}: {} does not link to {} but to {}'
.format(page.site.family.name,
page, expectedPage, linkedPage))
except KeyError:
if not expectedPage.site.is_data_repository():
pywikibot.warning('{}: {} does not link to {}'
.format(page.site.family.name,
page, expectedPage))
# Check for superfluous links
for linkedPage in linkedPages:
if linkedPage in expectedPages:
continue
# Check whether there is an alternative page on
# that language.
# In this case, it was already reported above.
if linkedPage.site not in expectedSites:
pywikibot.warning('{}: {} links to incorrect {}'
.format(page.site.family.name,
page, linkedPage))
except (socket.error, IOError):
pywikibot.output('ERROR: could not report backlinks')
class InterwikiBot:
"""
A class keeping track of a list of subjects.
It controls which pages are queried from which languages when.
"""
def __init__(self, conf=None):
"""Initializer."""
self.subjects = []
# We count how many pages still need to be loaded per site.
# This allows us to find out from which site to retrieve pages next
# in a way that saves bandwidth.
# sites are keys, integers are values.
# Modify this only via plus() and minus()!
self.counts = Counter()
self.pageGenerator = None
self.generated = 0
self.conf = conf
self.site = pywikibot.Site()
def add(self, page, hints=None):
"""Add a single subject to the list."""
subj = Subject(page, hints=hints, conf=self.conf)
self.subjects.append(subj)
for site, count in subj.openSites():
# Keep correct counters
self.plus(site, count)
def setPageGenerator(self, pageGenerator, number=None, until=None):
"""
Add a generator of subjects.
Once the list of subjects gets too small,
this generator is called to produce more Pages.
"""
self.pageGenerator = pageGenerator
self.generateNumber = number
self.generateUntil = until
@property
def dump_titles(self):
"""Return generator of titles for dump file."""
return (s.origin.title(as_link=True) for s in self.subjects)
def generateMore(self, number):
"""Generate more subjects.
This is called internally when the
list of subjects becomes too small, but only if there is a
PageGenerator
"""
fs = self.firstSubject()
if fs:
self.conf.note('The first unfinished subject is {}'
.format(fs.origin))
pywikibot.output(
'NOTE: Number of pages queued is {}, trying to add {} more.'
.format(len(self.subjects), number))
for i in range(number):
for page in self.pageGenerator:
if page in self.conf.skip:
pywikibot.output('Skipping: {} is in the skip list'
.format(page))
continue
if self.conf.skipauto:
dictName, year = page.autoFormat()
if dictName is not None:
pywikibot.output(
'Skipping: {} is an auto entry {}({})'
.format(page, dictName, year))
continue
if self.conf.parenthesesonly:
# Only yield pages that have ( ) in titles
if '(' not in page.title():
continue
if page.isTalkPage():
pywikibot.output('Skipping: {} is a talk page'
.format(page))
continue
if page.namespace() == 10:
loc = None
with suppress(KeyError):
tmpl, loc = moved_links[page.site.code]
del tmpl
if loc is not None and loc in page.title():
pywikibot.output(
'Skipping: {} is a templates subpage'
.format(page.title()))
continue
break
else: # generator stopped
break
if self.generateUntil:
until = self.generateUntil
page_namespace = (
page.site.namespaces[int(page.namespace())])
if page_namespace.case == 'first-letter':
until = first_upper(until)
if page.title(with_ns=False) > until:
break
self.add(page, hints=self.conf.hints)
self.generated += 1
if self.generateNumber and self.generated >= self.generateNumber:
break
else:
return
# for loop was exited by break statement
self.pageGenerator = None
def firstSubject(self):
"""Return the first subject that is still being worked on."""
if self.subjects:
return self.subjects[0]
return None
def maxOpenSite(self):
"""
Return the site that has the most open queries plus the number.
If there is nothing left, return None.
Only languages that are TODO for the first Subject are returned.
"""
if not self.firstSubject():
return None
oc = dict(self.firstSubject().openSites())
if not oc:
# The first subject is done. This might be a recursive call made
# because we have to wait before submitting another modification to
# go live. Select any language from counts.
oc = self.counts
if self.site in oc:
return self.site
for site, _ in self.counts.most_common():
if site in oc:
return site
return None
def selectQuerySite(self):
"""Select the site the next query should go out for."""
# How many home-language queries we still have?
mycount = self.counts[self.site]
# Do we still have enough subjects to work on for which the
# home language has been retrieved? This is rough, because
# some subjects may need to retrieve a second home-language page!
if len(self.subjects) - mycount < self.conf.minsubjects:
# Can we make more home-language queries by adding subjects?
if self.pageGenerator and mycount < self.conf.maxquerysize:
timeout = 60
while timeout < 3600:
try:
self.generateMore(self.conf.maxquerysize - mycount)
except ServerError:
# Could not extract allpages special page?
pywikibot.output(
'ERROR: could not retrieve more pages. '
'Will try again in {} seconds'
.format(timeout))
pywikibot.sleep(timeout)
timeout *= 2
else:
break
# If we have a few, getting the home language is a good thing.
if not self.conf.restore_all and self.counts[self.site] > 4:
return self.site
# If getting the home language doesn't make sense, see how many
# foreign page queries we can find.
return self.maxOpenSite()
def oneQuery(self):
"""
Perform one step in the solution process.
Returns True if pages could be preloaded, or false
otherwise.
"""
# First find the best language to work on
site = self.selectQuerySite()
if site is None:
pywikibot.output('NOTE: Nothing left to do')
return False
# Now assemble a reasonable list of pages to get
subjectGroup = []
pageGroup = []
for subject in self.subjects:
# Promise the subject that we will work on the site.
# We will get a list of pages we can do.
pages = subject.whatsNextPageBatch(site)
if pages:
pageGroup.extend(pages)
subjectGroup.append(subject)
if len(pageGroup) >= self.conf.maxquerysize:
# We have found enough pages to fill the bandwidth.
break
if not pageGroup:
pywikibot.output('NOTE: Nothing left to do 2')
return False
# Get the content of the assembled list in one blow
gen = site.preloadpages(pageGroup, templates=True, langlinks=True,
pageprops=True)
for page in gen:
# we don't want to do anything with them now. The
# page contents will be read via the Subject class.
pass
# Tell all of the subjects that the promised work is done
for subject in subjectGroup:
subject.batchLoaded(self)
return True
def queryStep(self):
"""Delete the ones that are done now."""
self.oneQuery()
for i in range(len(self.subjects) - 1, -1, -1):
subj = self.subjects[i]
if subj.isDone():
subj.finish()
del self.subjects[i]
def isDone(self):
"""Check whether there is still more work to do."""
return not self and self.pageGenerator is None
def plus(self, site, count=1):
"""Helper routine that the Subject class expects in a counter."""
self.counts[site] += count
def minus(self, site, count=1):
"""Helper routine that the Subject class expects in a counter."""
self.counts[site] -= count
self.counts = +self.counts # remove zero and negative counts
def run(self):
"""Start the process until finished."""
while not self.isDone():
self.queryStep()
def __len__(self):
"""Return length of subjects."""
return len(self.subjects)
def compareLanguages(old, new, insite, summary):
"""Compare changes and setup i18n message."""
oldiw = set(old)
newiw = set(new)
# sort by language code
adding = sorted(newiw - oldiw)
removing = sorted(oldiw - newiw)
modifying = sorted(site for site in oldiw & newiw
if old[site] != new[site])
if not summary and len(adding) + len(removing) + len(modifying) <= 3:
# Use an extended format for the string linking to all added pages.
def fmt(d, site):
return str(d[site])
else:
# Use short format, just the language code
def fmt(d, site):
return site.code
mods = mcomment = ''
commentname = 'interwiki'
if adding:
commentname += '-adding'
if removing:
commentname += '-removing'
if modifying:
commentname += '-modifying'
if commentname == 'interwiki-modifying' and len(modifying) == 1:
useFrom = True
commentname += '-from'
else:
useFrom = False
if adding or removing or modifying:
mcomment += summary
comma = insite.mediawiki_message('comma-separator')
changes = {'adding': comma.join(fmt(new, x) for x in adding),
'removing': comma.join(fmt(old, x) for x in removing),
'modifying': comma.join(fmt(new, x) for x in modifying),
'from': '' if not useFrom else old[modifying[0]]}
en_changes = {'adding': ', '.join(fmt(new, x) for x in adding),
'removing': ', '.join(fmt(old, x) for x in removing),
'modifying': ', '.join(fmt(new, x) for x in modifying),
'from': '' if not useFrom else old[modifying[0]]}
mcomment += i18n.twtranslate(insite, commentname, changes)
mods = i18n.twtranslate('en', commentname, en_changes)
return mods, mcomment, adding, removing, modifying
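# Example with hypothetical sites: if `old` links to {de, fr} and `new` links
# to {de, nl}, compareLanguages() yields adding=[nl], removing=[fr] and
# modifying=[] (each sorted by language code) plus the localized edit summary.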
def botMayEdit(page):
"""Test for allowed edits."""
tmpl = []
with suppress(KeyError):
tmpl, _ = moved_links[page.site.code]
if not isinstance(tmpl, list):
tmpl = [tmpl]
with suppress(KeyError):
tmpl += ignoreTemplates[page.site.code]
tmpl += ignoreTemplates['_default']
if tmpl != []:
templates = page.templatesWithParams()
for template in templates:
if template[0].title(with_ns=False).lower() in tmpl:
return False
return True
def page_empty_check(page):
"""
Return True if page should be skipped as it is almost empty.
Pages in content namespaces are considered empty if they contain less than
50 characters, and other pages are considered empty if they are not
category pages and contain less than 4 characters excluding interlanguage
links and categories.
@rtype: bool
"""
txt = page.text
# Check if the page is in content namespace
if page.namespace().content:
# Check if the page contains at least 50 characters
return len(txt) < 50
if not page.is_categorypage():
txt = textlib.removeLanguageLinks(txt, site=page.site)
txt = textlib.removeCategoryLinks(txt, site=page.site)
return len(txt) < 4
return False
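# For instance, a 30-character article in a content namespace counts as empty,
# while a category page outside content namespaces is never reported as empty.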
class InterwikiDumps(OptionHandler):
"""Handle interwiki dumps."""
available_options = {
'do_continue': False,
'restore_all': False
}
FILE_PATTERN = '{site.family.name}-{site.code}.txt'
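    # e.g. 'wikipedia-en.txt' for the English Wikipedia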
def __init__(self, **kwargs):
"""Initializer.
@keyword do_continue: If true, continue alphabetically starting at the
last of the dumped pages.
"""
self.site = kwargs.pop('site', pywikibot.Site())
super().__init__(**kwargs)
self.restored_files = set()
self._next_page = '!'
self._next_namespace = 0
self.path = pywikibot.config.datafilepath('data', 'interwiki-dumps')
@property
def next_page(self):
"""Return next page title string for continue option."""
if self._next_page == '!':
pywikibot.output('Dump file is empty! Starting at the beginning.')
return self._next_page
@property
def next_namespace(self):
"""Return next page namespace for continue option."""
return self._next_namespace
def remove(self, filename: str):
"""Remove filename from restored files.
@param filename: A filename to be removed from restored set.
"""
with suppress(KeyError):
self.restored_files.remove(filename)
def get_files(self):
"""Get dump files from directory."""
pattern = r'(?P<file>(?P<fam>[a-z]+)-(?P<code>[a-z]+)\.txt)'
for filename in os.listdir(self.path):
found = re.fullmatch(pattern, filename)
if found:
yield (found['file'],
pywikibot.Site(found['code'], found['fam']))
@property
def files(self):
"""Return file generator depending on restore_all option.
        @rtype: generator
"""
if self.opt.restore_all:
return self.get_files()
return iter([(self.FILE_PATTERN.format(site=self.site), self.site)])
def read_dump(self):
"""Read the dump file.
@rtype: generator
"""
for tail, site in self.files:
filename = os.path.join(self.path, tail)
if not os.path.exists(filename):
pywikibot.output(tail + ' does not exist.')
continue
pywikibot.output('Retrieving pages from dump file ' + tail)
for page in pagegenerators.TextfilePageGenerator(filename, site):
if site == self.site:
self._next_page = page.title(with_ns=False) + '!'
self._next_namespace = page.namespace()
yield page
self.restored_files.add(filename)
if self.opt.do_continue:
yield from self.site.allpages(start=self.next_page,
namespace=self.next_namespace,
filterredir=False)
def write_dump(self, iterable, append: bool = True):
"""Write dump file.
@param iterable: an iterable of page titles to be dumped.
@type iterable: iterable
@param append: if a dump already exits, append the page titles to it
if True else overwrite it.
"""
filename = os.path.join(self.path,
self.FILE_PATTERN.format(site=self.site))
mode = 'appended' if append else 'written'
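        # mode[0] selects the file mode: 'a' from 'appended', 'w' from 'written'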
with codecs.open(filename, mode[0], 'utf-8') as f:
f.write('\r\n'.join(iterable))
f.write('\r\n')
pywikibot.output('Dump {site.code} ({site.family.name}) {mode}.'
.format(site=self.site, mode=mode))
self.remove(filename)
def delete_dumps(self):
"""Delete processed dumps."""
for filename in self.restored_files:
tail = os.path.split(filename)[-1]
try:
os.remove(filename)
except OSError as e:
pywikibot.error('Cannot delete {} due to\n{}\nDo it manually.'
.format(tail, e))
else:
pywikibot.output('Dumpfile {} deleted'.format(tail))
def main(*args):
"""Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
singlePageTitle = ''
opthintsonly = False
# Which namespaces should be processed?
# default to [] which means all namespaces will be processed
namespaces = []
number = None
until = None
# a normal PageGenerator (which doesn't give hints, only Pages)
hintlessPageGen = None
optContinue = False
optRestore = False
append = True
newPages = None
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
genFactory = pagegenerators.GeneratorFactory()
iwconf = InterwikiBotConfig()
for arg in local_args:
if iwconf.readOptions(arg):
continue
if arg.startswith('-years'):
# Look if user gave a specific year at which to start
# Must be a natural number or negative integer.
if len(arg) > 7 and (arg[7:].isdigit()
or (arg[7] == '-' and arg[8:].isdigit())):
startyear = int(arg[7:])
else:
startyear = 1
# avoid problems where year pages link to centuries etc.
iwconf.followredirect = False
hintlessPageGen = pagegenerators.YearPageGenerator(startyear)
elif arg.startswith('-days'):
if len(arg) > 6 and arg[5] == ':' and arg[6:].isdigit():
# Looks as if the user gave a specific month at which to start
# Must be a natural number.
startMonth = int(arg[6:])
else:
startMonth = 1
hintlessPageGen = pagegenerators.DayPageGenerator(startMonth)
elif arg.startswith('-new'):
if len(arg) > 5 and arg[4] == ':' and arg[5:].isdigit():
# Looks as if the user gave a specific number of pages
newPages = int(arg[5:])
else:
newPages = 100
elif arg.startswith('-restore'):
iwconf.restore_all = arg[9:].lower() == 'all'
optRestore = not iwconf.restore_all
elif arg == '-continue':
optContinue = True
elif arg == '-hintsonly':
opthintsonly = True
elif arg.startswith('-namespace:'):
try:
namespaces.append(int(arg[11:]))
except ValueError:
namespaces.append(arg[11:])
# deprecated for consistency with other scripts
elif arg.startswith('-number:'):
number = int(arg[8:])
elif arg.startswith('-until:'):
until = arg[7:]
else:
if not genFactory.handle_arg(arg):
if not singlePageTitle:
singlePageTitle = arg
# Do not use additional summary with autonomous mode
if iwconf.autonomous:
iwconf.summary = ''
elif iwconf.summary:
iwconf.summary += '; '
site = pywikibot.Site()
# ensure that we don't try to change main page
mainpagename = site.siteinfo['mainpage']
iwconf.skip.add(pywikibot.Page(site, mainpagename))
dump = InterwikiDumps(site=site, do_continue=optContinue,
restore_all=iwconf.restore_all)
if newPages is not None:
if not namespaces:
ns = 0
elif len(namespaces) == 1:
ns = namespaces[0]
if isinstance(ns, str) and ns != 'all':
index = site.namespaces.lookup_name(ns)
if index is None:
raise ValueError('Unknown namespace: ' + ns)
ns = index.id
namespaces = []
else:
ns = 'all'
hintlessPageGen = pagegenerators.NewpagesPageGenerator(total=newPages,
namespaces=ns)
elif optRestore or optContinue or iwconf.restore_all:
hintlessPageGen = dump.read_dump()
bot = InterwikiBot(iwconf)
if not hintlessPageGen:
hintlessPageGen = genFactory.getCombinedGenerator()
if hintlessPageGen:
if len(namespaces) > 0:
hintlessPageGen = pagegenerators.NamespaceFilterPageGenerator(
hintlessPageGen, namespaces, site)
        # we'll use iter() to make a next() function available.
bot.setPageGenerator(iter(hintlessPageGen), number=number, until=until)
else:
if not singlePageTitle and not opthintsonly:
singlePageTitle = pywikibot.input('Which page to check:')
if singlePageTitle:
singlePage = pywikibot.Page(pywikibot.Site(), singlePageTitle)
else:
singlePage = None
bot.add(singlePage, hints=iwconf.hints)
append = not (optRestore or optContinue or iwconf.restore_all)
try:
bot.run()
except KeyboardInterrupt:
dump.write_dump(bot.dump_titles, append)
except Exception:
pywikibot.exception(tb=bool(config.verbose_output))
dump.write_dump(bot.dump_titles, append)
else:
        pywikibot.output('Script terminated successfully.')
finally:
dump.delete_dumps()
if __name__ == '__main__':
main()
|
the-stack_0_22249 | """
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
from typing import List
class Solution:
def preorder(self, root: 'Node') -> List[int]:
def _helper(p, lst):
if not p:
return
            # preorder: visit the node first, then its children (guard against None children)
            lst.append(p.val)
            for child in p.children or []:
                _helper(child, lst)
res = []
_helper(root, res)
return res
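# Illustrative check (hypothetical tree, not part of the submission): for a root node
# with val 1 and children [3, 2, 4], where node 3 has children [5, 6], preorder(root)
# returns [1, 3, 5, 6, 2, 4].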
|
the-stack_0_22250 | import sys
sys.path.append("/home/abdullah/Asac/data-structures-and-algorithms/python/code_challenges/graph")
from graph.graph import *
ver=Vertex('a')
ver2=Vertex('b')
# ver3=Vertex('d')
# ver4=Vertex('p')
graph=Graph()
graph.add_vertex(ver)
graph.add_vertex(ver2)
# graph.add_vertex(ver3)
# graph.add_vertex(ver4)
graph.add_edges(ver,ver2)
# graph.add_edges(ver,ver3)
# graph.add_edges(ver,ver4)
# print(len(graph.get_neighbors(ver)))
# print(graph.size()
# )
# print(graph._adjacency_list[f'{ver}'])
# graph._depthFirst(test)
# print(graph)
def business_trip(graph,arr):
"""
Determine whether the trip is possible with direct flights, and how much it would cost.
Args:
graph (Graph): contains all the list of the cities and its cost
arr (list): List of the cities
Return
        tuple : (Bool, cost).
        Bool: True if a nonzero total cost was accumulated (a direct route was found), else False.
        cost: Total cost for the trip.
"""
cost=0
i=0
length=len(arr)-1
def check_cost(city1,city2):
''' Calculate the Cost value for the Trip between two City '''
neighbors=graph.get_neighbors(city1)
for city in neighbors:
if city.vertex.value == city2:
nonlocal cost , i
cost += city.weight
i+=1
if i == length:
return
check_cost(arr[length-i-1],arr[length-i])
try:
check_cost(arr[length-1],arr[length])
except:
return (bool(cost),cost)
return (bool(cost),cost)
|
the-stack_0_22253 |
import rlkit.misc.hyperparameter as hyp
from rlkit.images.camera import (
sawyer_init_camera_zoomed_in,
sawyer_init_camera,
)
from rlkit.launchers.launcher_util import run_experiment
from rlkit.misc.ml_util import PiecewiseLinearSchedule
from rlkit.torch.vae.conv_vae import ConvVAE
from rlkit.torch.vae.vae_trainer import ConvVAETrainer
from rlkit.torch.vae.sawyer2d_push_new_easy_data import generate_vae_dataset
def experiment(variant):
from rlkit.core import logger
import rlkit.torch.pytorch_util as ptu
beta = variant["beta"]
representation_size = variant["representation_size"]
train_data, test_data, info = generate_vae_dataset(
**variant['get_data_kwargs']
)
logger.save_extra_data(info)
logger.get_snapshot_dir()
if 'beta_schedule_kwargs' in variant:
beta_schedule = PiecewiseLinearSchedule(**variant['beta_schedule_kwargs'])
else:
beta_schedule = None
m = ConvVAE(representation_size, input_channels=3)
if ptu.gpu_enabled():
m.to(ptu.device)
t = ConvVAETrainer(train_data, test_data, m, beta=beta,
beta_schedule=beta_schedule, **variant['algo_kwargs'])
save_period = variant['save_period']
for epoch in range(variant['num_epochs']):
should_save_imgs = (epoch % save_period == 0)
t.train_epoch(epoch)
t.test_epoch(epoch, save_reconstruction=should_save_imgs,
save_scatterplot=should_save_imgs)
if should_save_imgs:
t.dump_samples(epoch)
if __name__ == "__main__":
n_seeds = 1
mode = 'local'
exp_prefix = 'dev-sawyer-push-new-vae'
use_gpu = True
# n_seeds = 1
# mode = 'ec2'
# exp_prefix = 'vae-sawyer-new-push-easy-zoomed-in-1000'
exp_prefix = 'vae-sawyer-reset-free-zoomed-in'
variant = dict(
beta=5.0,
num_epochs=100,
get_data_kwargs=dict(
# N=1000,
# init_camera=sawyer_init_camera_zoomed_in,
dataset_path='05-22-sawyer_push_dataset'
'/sawyer_reset_free_push1000_sawyer_init_camera_zoomed_in.npy'
),
algo_kwargs=dict(
do_scatterplot=False,
lr=1e-3,
),
beta_schedule_kwargs=dict(
x_values=[0, 30, 100],
# y_values=[0, 0, 0.1, 0.5],
y_values=[0, 5, 5],
),
save_period=5,
)
search_space = {
'representation_size': [16],
# 'beta_schedule_kwargs.y_values': [
# [0, 0, 0.1, 0.5],
# [0, 0, 0.1, 0.1],
# [0, 0, 5, 5],
# ],
# 'algo_kwargs.lr': [1e-3, 1e-2],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
for _ in range(n_seeds):
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
run_experiment(
experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=use_gpu,
trial_dir_suffix='r'+str(variant.get('representation_size', 0)),
)
|
the-stack_0_22255 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
project = "mcap"
html_show_copyright = False
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"m2r2",
]
|
the-stack_0_22256 | from moto import mock_s3_deprecated, mock_s3
import boto
import json
import os
import tempfile
from skills_utils.s3 import download, upload, upload_dict, list_files, S3BackedJsonDict
@mock_s3_deprecated
def test_download():
s3_conn = boto.connect_s3()
bucket_name = 'test-bucket'
bucket = s3_conn.create_bucket(bucket_name)
key = boto.s3.key.Key(
bucket=bucket,
name='apath/akey'
)
key.set_contents_from_string('test')
s3_path = 'test-bucket/apath/akey'
with tempfile.NamedTemporaryFile(mode='w+') as f:
download(s3_conn, f.name, s3_path)
f.seek(0)
assert f.read() == 'test'
@mock_s3_deprecated
def test_upload():
s3_conn = boto.connect_s3()
bucket_name = 'test-bucket'
bucket = s3_conn.create_bucket(bucket_name)
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('test')
f.seek(0)
s3_path = 'test-bucket/apath/akey'
upload(s3_conn, f.name, s3_path)
key = boto.s3.key.Key(
bucket=bucket,
name='apath/akey/{}'.format(os.path.basename(f.name))
)
s = key.get_contents_as_string()
assert s.decode('utf-8') == 'test'
@mock_s3_deprecated
def test_upload_dict():
s3_conn = boto.connect_s3()
bucket_name = 'test-bucket'
bucket = s3_conn.create_bucket(bucket_name)
path = 'apath/'
key = boto.s3.key.Key(
bucket=bucket,
name='{}/keyone.json'.format(path)
)
key.set_contents_from_string('old contents')
data_to_sync = {
'keyone': {'stuff': 'new contents'},
'keytwo': {'stuff2': 'new contents2'},
}
upload_dict(s3_conn, 'test-bucket/apath', data_to_sync)
assert key.get_contents_as_string().decode('utf-8')\
== '{"stuff": "new contents"}'
@mock_s3_deprecated
def test_list_files():
s3_conn = boto.connect_s3()
bucket_name = 'test-bucket'
bucket = s3_conn.create_bucket(bucket_name)
key = boto.s3.key.Key(
bucket=bucket,
name='apath/test.json'
)
key.set_contents_from_string('some contents')
    s3_path = 'test-bucket/apath/'
files = list_files(s3_conn, s3_path)
assert files == ['test.json']
@mock_s3_deprecated
@mock_s3
def test_S3BackedJSONDict_basic():
s3_conn = boto.connect_s3()
bucket_name = 'test-bucket'
bucket = s3_conn.create_bucket(bucket_name)
    # 1. Ensure that a new file is correctly created and saved to S3
storage_one = S3BackedJsonDict(path='test-bucket/apath')
storage_one['key1'] = 'value1'
storage_one['key2'] = {'nestedkey2': 'value2'}
storage_one.save()
key = boto.s3.key.Key(
bucket=bucket,
name='apath.json'
)
assert json.loads(key.get_contents_as_string().decode('utf-8'))\
== {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}}
    # 2. Ensure that an existing file is correctly read, updated, and saved to S3
storage_two = S3BackedJsonDict(path='test-bucket/apath')
assert 'key1' in storage_two
assert storage_two['key1'] == 'value1'
storage_two['key3'] = 'value3'
storage_two.save()
assert json.loads(key.get_contents_as_string().decode('utf-8'))\
== {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}, 'key3': 'value3'}
# 3. Ensure that, in the same thread, updating and saving an old one gets new changes too
storage_one['key4'] = 'value4'
storage_one.save()
assert json.loads(key.get_contents_as_string().decode('utf-8'))\
== {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}, 'key3': 'value3', 'key4': 'value4'}
# 4. test autosave - this will be the fourth update of this object
storage_one.SAVE_EVERY_N_UPDATES = 4
storage_one['key5'] = 'value5'
assert json.loads(key.get_contents_as_string().decode('utf-8'))\
== {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}, 'key3': 'value3', 'key4': 'value4', 'key5': 'value5'}
# 5. test length checking
assert len(storage_one) == 5
# 6. test iteration
assert sorted(
[(key, value) for key, value in storage_one.items()],
key=lambda x: x[0]
) == [
('key1', 'value1'),
('key2', {'nestedkey2': 'value2'}),
('key3', 'value3'),
('key4', 'value4'),
('key5', 'value5')
]
|
the-stack_0_22258 | while True:
print('-'*20)
    n = int(input('Which number do you want the times table for? [Negative value to quit]: '))
print('-'*20)
if n < 0:
break
mult = 1
while mult <= 10:
print(f'{n} x {mult} = {n * mult}')
mult += 1
print('Program terminated!')
|
the-stack_0_22259 | """Config flow for Tradfri."""
import asyncio
from uuid import uuid4
import async_timeout
from pytradfri import Gateway, RequestError
from pytradfri.api.aiocoap_api import APIFactory
import voluptuous as vol
from homeassistant import config_entries
from .const import (
CONF_GATEWAY_ID,
CONF_HOST,
CONF_IDENTITY,
CONF_IMPORT_GROUPS,
CONF_KEY,
DOMAIN,
KEY_SECURITY_CODE,
)
class AuthError(Exception):
"""Exception if authentication occurs."""
def __init__(self, code):
"""Initialize exception."""
super().__init__()
self.code = code
class FlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
def __init__(self):
"""Initialize flow."""
self._host = None
self._import_groups = False
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
return await self.async_step_auth()
async def async_step_auth(self, user_input=None):
"""Handle the authentication with a gateway."""
errors = {}
if user_input is not None:
host = user_input.get(CONF_HOST, self._host)
try:
auth = await authenticate(
self.hass, host, user_input[KEY_SECURITY_CODE]
)
# We don't ask for import group anymore as group state
# is not reliable, don't want to show that to the user.
# But we still allow specifying import group via config yaml.
auth[CONF_IMPORT_GROUPS] = self._import_groups
return await self._entry_from_data(auth)
except AuthError as err:
if err.code == "invalid_security_code":
errors[KEY_SECURITY_CODE] = err.code
else:
errors["base"] = err.code
else:
user_input = {}
fields = {}
if self._host is None:
fields[vol.Required(CONF_HOST, default=user_input.get(CONF_HOST))] = str
fields[
vol.Required(KEY_SECURITY_CODE, default=user_input.get(KEY_SECURITY_CODE))
] = str
return self.async_show_form(
step_id="auth", data_schema=vol.Schema(fields), errors=errors
)
async def async_step_homekit(self, discovery_info):
"""Handle homekit discovery."""
await self.async_set_unique_id(discovery_info["properties"]["id"])
self._abort_if_unique_id_configured({CONF_HOST: discovery_info["host"]})
host = discovery_info["host"]
for entry in self._async_current_entries():
if entry.data.get(CONF_HOST) != host:
continue
# Backwards compat, we update old entries
if not entry.unique_id:
self.hass.config_entries.async_update_entry(
entry, unique_id=discovery_info["properties"]["id"]
)
return self.async_abort(reason="already_configured")
self._host = host
return await self.async_step_auth()
async def async_step_import(self, user_input):
"""Import a config entry."""
self._async_abort_entries_match({CONF_HOST: user_input["host"]})
# Happens if user has host directly in configuration.yaml
if "key" not in user_input:
self._host = user_input["host"]
self._import_groups = user_input[CONF_IMPORT_GROUPS]
return await self.async_step_auth()
try:
data = await get_gateway_info(
self.hass,
user_input["host"],
# Old config format had a fixed identity
user_input.get("identity", "homeassistant"),
user_input["key"],
)
data[CONF_IMPORT_GROUPS] = user_input[CONF_IMPORT_GROUPS]
return await self._entry_from_data(data)
except AuthError:
# If we fail to connect, just pass it on to discovery
self._host = user_input["host"]
return await self.async_step_auth()
async def _entry_from_data(self, data):
"""Create an entry from data."""
host = data[CONF_HOST]
gateway_id = data[CONF_GATEWAY_ID]
same_hub_entries = [
entry.entry_id
for entry in self._async_current_entries()
if entry.data.get(CONF_GATEWAY_ID) == gateway_id
or entry.data.get(CONF_HOST) == host
]
if same_hub_entries:
await asyncio.wait(
[
self.hass.config_entries.async_remove(entry_id)
for entry_id in same_hub_entries
]
)
return self.async_create_entry(title=host, data=data)
async def authenticate(hass, host, security_code):
"""Authenticate with a Tradfri hub."""
identity = uuid4().hex
api_factory = await APIFactory.init(host, psk_id=identity)
try:
with async_timeout.timeout(5):
key = await api_factory.generate_psk(security_code)
except RequestError as err:
raise AuthError("invalid_security_code") from err
except asyncio.TimeoutError as err:
raise AuthError("timeout") from err
finally:
await api_factory.shutdown()
return await get_gateway_info(hass, host, identity, key)
async def get_gateway_info(hass, host, identity, key):
"""Return info for the gateway."""
try:
factory = await APIFactory.init(host, psk_id=identity, psk=key)
api = factory.request
gateway = Gateway()
gateway_info_result = await api(gateway.get_gateway_info())
await factory.shutdown()
except (OSError, RequestError) as err:
# We're also catching OSError as PyTradfri doesn't catch that one yet
# Upstream PR: https://github.com/ggravlingen/pytradfri/pull/189
raise AuthError("cannot_connect") from err
return {
CONF_HOST: host,
CONF_IDENTITY: identity,
CONF_KEY: key,
CONF_GATEWAY_ID: gateway_info_result.id,
}
|
the-stack_0_22261 | # pylint: disable=line-too-long,invalid-name
import warnings
from allennlp import predictors
from allennlp.predictors import Predictor
from allennlp.models.archival import load_archive
class PretrainedModel:
"""
A pretrained model is determined by both an archive file
(representing the trained model)
and a choice of predictor.
"""
def __init__(self, archive_file: str, predictor_name: str) -> None:
self.archive_file = archive_file
self.predictor_name = predictor_name
def predictor(self) -> Predictor:
archive = load_archive(self.archive_file)
return Predictor.from_archive(archive, self.predictor_name)
# TODO(Mark): Figure out a way to make PretrainedModel generic on Predictor, so we can remove these type ignores.
#### Models in the demo ####
def srl_with_elmo_luheng_2018() -> predictors.SemanticRoleLabelerPredictor:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=DeprecationWarning)
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz',
'semantic-role-labeling')
return model.predictor() # type: ignore
def bidirectional_attention_flow_seo_2017() -> predictors.BidafPredictor:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=DeprecationWarning)
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/bidaf-model-2017.09.15-charpad.tar.gz',
'machine-comprehension')
return model.predictor() # type: ignore
def naqanet_dua_2019() -> predictors.BidafPredictor:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=DeprecationWarning)
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/naqanet-2019.03.01.tar.gz',
'machine-comprehension')
return model.predictor() # type: ignore
def open_information_extraction_stanovsky_2018() -> predictors.OpenIePredictor:
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/openie-model.2018-08-20.tar.gz',
'open-information-extraction')
return model.predictor() # type: ignore
def decomposable_attention_with_elmo_parikh_2017() -> predictors.DecomposableAttentionPredictor:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=DeprecationWarning)
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/decomposable-attention-elmo-2018.02.19.tar.gz',
'textual-entailment')
return model.predictor() # type: ignore
def neural_coreference_resolution_lee_2017() -> predictors.CorefPredictor:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=DeprecationWarning)
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/coref-model-2018.02.05.tar.gz',
'coreference-resolution')
return model.predictor() # type: ignore
def named_entity_recognition_with_elmo_peters_2018() -> predictors.SentenceTaggerPredictor:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=DeprecationWarning)
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/ner-model-2018.12.18.tar.gz',
'sentence-tagger')
predictor = model.predictor()
# pylint: disable=protected-access
predictor._dataset_reader._token_indexers['token_characters']._min_padding_length = 3 # type: ignore
return predictor # type: ignore
def fine_grained_named_entity_recognition_with_elmo_peters_2018() -> predictors.SentenceTaggerPredictor:
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/fine-grained-ner-model-elmo-2018.08.31.tar.gz',
'sentence-tagger')
predictor = model.predictor()
# pylint: disable=protected-access
predictor._dataset_reader._token_indexers['token_characters']._min_padding_length = 3 # type: ignore
return predictor # type: ignore
def span_based_constituency_parsing_with_elmo_joshi_2018() -> predictors.ConstituencyParserPredictor:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=DeprecationWarning)
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/elmo-constituency-parser-2018.03.14.tar.gz',
'constituency-parser')
return model.predictor() # type: ignore
def biaffine_parser_stanford_dependencies_todzat_2017() -> predictors.BiaffineDependencyParserPredictor:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=DeprecationWarning)
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/biaffine-dependency-parser-ptb-2018.08.23.tar.gz',
'biaffine-dependency-parser')
return model.predictor() # type: ignore
#### Models not in the demo ####
def biaffine_parser_universal_dependencies_todzat_2017() -> predictors.BiaffineDependencyParserPredictor:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=DeprecationWarning)
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/biaffine-dependency-parser-ud-2018.08.23.tar.gz',
'biaffine-dependency-parser')
return model.predictor() # type: ignore
def esim_nli_with_elmo_chen_2017() -> predictors.DecomposableAttentionPredictor:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=DeprecationWarning)
model = PretrainedModel('https://s3-us-west-2.amazonaws.com/allennlp/models/esim-elmo-2018.05.17.tar.gz',
'textual-entailment')
return model.predictor() # type: ignore
|
the-stack_0_22262 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import re
from typing import List, Optional, Tuple
from sqlalchemy.engine.interfaces import Dialect
from sqlalchemy.types import String, TypeEngine, UnicodeText
from superset.db_engine_specs.base import BaseEngineSpec, LimitMethod
class MssqlEngineSpec(BaseEngineSpec):
engine = "mssql"
epoch_to_dttm = "dateadd(S, {col}, '1970-01-01')"
limit_method = LimitMethod.WRAP_SQL
max_column_name_length = 128
_time_grain_functions = {
None: "{col}",
"PT1S": "DATEADD(second, DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')",
"PT1M": "DATEADD(minute, DATEDIFF(minute, 0, {col}), 0)",
"PT5M": "DATEADD(minute, DATEDIFF(minute, 0, {col}) / 5 * 5, 0)",
"PT10M": "DATEADD(minute, DATEDIFF(minute, 0, {col}) / 10 * 10, 0)",
"PT15M": "DATEADD(minute, DATEDIFF(minute, 0, {col}) / 15 * 15, 0)",
"PT0.5H": "DATEADD(minute, DATEDIFF(minute, 0, {col}) / 30 * 30, 0)",
"PT1H": "DATEADD(hour, DATEDIFF(hour, 0, {col}), 0)",
"P1D": "DATEADD(day, DATEDIFF(day, 0, {col}), 0)",
"P1W": "DATEADD(week, DATEDIFF(week, 0, {col}), 0)",
"P1M": "DATEADD(month, DATEDIFF(month, 0, {col}), 0)",
"P0.25Y": "DATEADD(quarter, DATEDIFF(quarter, 0, {col}), 0)",
"P1Y": "DATEADD(year, DATEDIFF(year, 0, {col}), 0)",
}
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
return "CONVERT(DATETIME, '{}', 126)".format(dttm.isoformat())
@classmethod
def fetch_data(cls, cursor, limit: int) -> List[Tuple]:
data = super().fetch_data(cursor, limit)
if data and type(data[0]).__name__ == "Row":
data = [[elem for elem in r] for r in data]
return data
column_types = [
(String(), re.compile(r"^(?<!N)((VAR){0,1}CHAR|TEXT|STRING)", re.IGNORECASE)),
(UnicodeText(), re.compile(r"^N((VAR){0,1}CHAR|TEXT)", re.IGNORECASE)),
]
@classmethod
def get_sqla_column_type(cls, type_: str) -> Optional[TypeEngine]:
for sqla_type, regex in cls.column_types:
if regex.match(type_):
return sqla_type
return None
@classmethod
def column_datatype_to_string(
cls, sqla_column_type: TypeEngine, dialect: Dialect
) -> str:
datatype = super().column_datatype_to_string(sqla_column_type, dialect)
# MSSQL returns long overflowing datatype
# as in 'VARCHAR(255) COLLATE SQL_LATIN1_GENERAL_CP1_CI_AS'
# and we don't need the verbose collation type
str_cutoff = " COLLATE "
if str_cutoff in datatype:
datatype = datatype.split(str_cutoff)[0]
return datatype
|
the-stack_0_22263 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
try:
from IPython.core.magic import cell_magic, magics_class, Magics, needs_local_scope # type: ignore
@magics_class
class VizTracerMagics(Magics):
@needs_local_scope
@cell_magic
def viztracer(self, line, cell, local_ns):
from .viztracer import VizTracer
from .viewer import ServerThread
from IPython.display import display # type: ignore
from ipywidgets import Button # type: ignore
code = self.shell.transform_cell(cell)
file_path = "./viztracer_report.json"
with VizTracer(verbose=0, output_file=file_path):
exec(code, local_ns, local_ns)
server = ServerThread(file_path)
button = Button(description="VizTracer Report")
button.on_click(lambda b: server.view(file_path, once=True))
display(button)
except ImportError: # pragma: no cover
pass
def load_ipython_extension(ipython):
"""
Use `%load_ext viztracer`
"""
ipython.register_magics(VizTracerMagics)
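# Typical notebook usage (illustrative, based only on the code above): after
# `%load_ext viztracer`, prefix a cell with `%%viztracer`; when the cell finishes,
# the "VizTracer Report" button serves the trace written to ./viztracer_report.json.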
|
the-stack_0_22264 | from numpy import dtype
from ..spec.spec import DtypeHelper
from ..utils import docval, getargs
__all__ = [
"Error",
"DtypeError",
"MissingError",
"ExpectedArrayError",
"ShapeError",
"MissingDataType",
"IllegalLinkError",
"IncorrectDataType",
"IncorrectQuantityError"
]
class Error:
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'reason', 'type': str, 'doc': 'the reason for the error'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
self.__name = getargs('name', kwargs)
self.__reason = getargs('reason', kwargs)
self.__location = getargs('location', kwargs)
if self.__location is not None:
self.__str = "%s (%s): %s" % (self.__name, self.__location, self.__reason)
else:
self.__str = "%s: %s" % (self.name, self.reason)
@property
def name(self):
return self.__name
@property
def reason(self):
return self.__reason
@property
def location(self):
return self.__location
@location.setter
def location(self, loc):
self.__location = loc
self.__str = "%s (%s): %s" % (self.__name, self.__location, self.__reason)
def __str__(self):
return self.__str
def __repr__(self):
return self.__str__()
class DtypeError(Error):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'expected', 'type': (dtype, type, str, list), 'doc': 'the expected dtype'},
{'name': 'received', 'type': (dtype, type, str, list), 'doc': 'the received dtype'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
expected = getargs('expected', kwargs)
received = getargs('received', kwargs)
if isinstance(expected, list):
expected = DtypeHelper.simplify_cpd_type(expected)
reason = "incorrect type - expected '%s', got '%s'" % (expected, received)
loc = getargs('location', kwargs)
super().__init__(name, reason, location=loc)
class MissingError(Error):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
reason = "argument missing"
loc = getargs('location', kwargs)
super().__init__(name, reason, location=loc)
class MissingDataType(Error):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'data_type', 'type': str, 'doc': 'the missing data type'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None},
{'name': 'missing_dt_name', 'type': str, 'doc': 'the name of the missing data type', 'default': None})
def __init__(self, **kwargs):
name, data_type, missing_dt_name = getargs('name', 'data_type', 'missing_dt_name', kwargs)
self.__data_type = data_type
if missing_dt_name is not None:
reason = "missing data type %s (%s)" % (self.__data_type, missing_dt_name)
else:
reason = "missing data type %s" % self.__data_type
loc = getargs('location', kwargs)
super().__init__(name, reason, location=loc)
@property
def data_type(self):
return self.__data_type
class IncorrectQuantityError(Error):
"""A validation error indicating that a child group/dataset/link has the incorrect quantity of matching elements"""
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'data_type', 'type': str, 'doc': 'the data type which has the incorrect quantity'},
{'name': 'expected', 'type': (str, int), 'doc': 'the expected quantity'},
{'name': 'received', 'type': (str, int), 'doc': 'the received quantity'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name, data_type, expected, received = getargs('name', 'data_type', 'expected', 'received', kwargs)
reason = "expected a quantity of %s for data type %s, received %s" % (str(expected), data_type, str(received))
loc = getargs('location', kwargs)
super().__init__(name, reason, location=loc)
class ExpectedArrayError(Error):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'expected', 'type': (tuple, list), 'doc': 'the expected shape'},
{'name': 'received', 'type': str, 'doc': 'the received data'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
expected = getargs('expected', kwargs)
received = getargs('received', kwargs)
reason = "incorrect shape - expected an array of shape '%s', got non-array data '%s'" % (expected, received)
loc = getargs('location', kwargs)
super().__init__(name, reason, location=loc)
class ShapeError(Error):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'expected', 'type': (tuple, list), 'doc': 'the expected shape'},
{'name': 'received', 'type': (tuple, list), 'doc': 'the received shape'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
expected = getargs('expected', kwargs)
received = getargs('received', kwargs)
reason = "incorrect shape - expected '%s', got '%s'" % (expected, received)
loc = getargs('location', kwargs)
super().__init__(name, reason, location=loc)
class IllegalLinkError(Error):
"""
A validation error for indicating that a link was used where an actual object
(i.e. a dataset or a group) must be used
"""
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
reason = "illegal use of link (linked object will not be validated)"
loc = getargs('location', kwargs)
super().__init__(name, reason, location=loc)
class IncorrectDataType(Error):
"""
A validation error for indicating that the incorrect data_type (not dtype) was used.
"""
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'expected', 'type': str, 'doc': 'the expected data_type'},
{'name': 'received', 'type': str, 'doc': 'the received data_type'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
expected = getargs('expected', kwargs)
received = getargs('received', kwargs)
reason = "incorrect data_type - expected '%s', got '%s'" % (expected, received)
loc = getargs('location', kwargs)
super().__init__(name, reason, location=loc)
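# Illustrative usage (assumed example, not part of the library's tests):
#   DtypeError("MyType/data", expected="int", received="float", location="/acq/ts1")
# renders as "MyType/data (/acq/ts1): incorrect type - expected 'int', got 'float'",
# since Error.__init__ folds the location, when given, into the message.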
|
the-stack_0_22266 | from intermol.decorators import *
from abstract_nonbonded_type import *
class NonbondedLJCR1Type(AbstractNonbondedType):
@accepts_compatible_units(None, None, None,
units.kilojoules_per_mole * units.nanometers**(6),
units.kilojoules_per_mole * units.nanometers**(12))
def __init__(self, atom1, atom2, type, sigma, epsilon):
AbstractNonbondedType.__init__(self, atom1, atom2, type)
self.sigma = sigma
self.epsilon = epsilon
|
the-stack_0_22267 | # -*- coding: utf-8 -*-
import xlrd
import pdfkit
import copy
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
options = {
'page-size': 'A4',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
}
def readProfileTemplate():
file = open('profile.html',mode='r')
all_of_it = file.read()
file.close()
return all_of_it
loc = ("data.xlsx")
wb = xlrd.open_workbook(loc)  # optionally: xlrd.open_workbook(loc, encoding_override="utf-8")
sheet = wb.sheet_by_index(0)
master_profile = readProfileTemplate()
flag = int(0)
profile1 = str()
profile2 = str()
profile3 = str()
counter = 0
for i in range(sheet.nrows):
if flag == 0:
profile1 = copy.copy(master_profile)
profile2 = copy.copy(master_profile)
profile3 = copy.copy(master_profile)
flag=1
#print(sheet.cell_value(i, 0))
profile1 = profile1.replace('#'+str(counter)+'#', str(sheet.cell_value(i, 1)))
profile2 = profile2.replace('#'+str(counter)+'#', str(sheet.cell_value(i, 2)))
profile3 = profile3.replace('#'+str(counter)+'#', str(sheet.cell_value(i, 3)))
counter += 1
if counter == 18:
pdfkit.from_string(profile1, '1.pdf', options)
pdfkit.from_string(profile2, '2.pdf', options)
pdfkit.from_string(profile3, '3.pdf', options) |
the-stack_0_22268 | # The probability a patient has cancer
# Bayesian Network Homework
from probability import BayesNet
T, F = True, False
cancer_net = BayesNet([
('Pollution', '', 0.1),
('Smoker', '', 0.3),
('Cancer', 'Pollution Smoker',
{(T, T): 0.05,
(T, F): 0.02,
(F, T): 0.03,
(F, F): 0.001}),
('XRay', 'Cancer', {T: 0.90, F: 0.20}),
('Dyspnoea', 'Cancer', {T: 0.65, F: 0.30})
])
cancer_net.label = 'Lung Cancer Probability'
examples = {
    cancer_net: [
{'variable': 'Cancer',
'evidence': {'Dyspnoea':T,'Smoker':T},
},
{'variable': 'Smoker',
         'evidence': {'XRay':T, 'Cancer':F},
},
{'variable': 'Pollution',
'evidence': {'Dyspnoea':T, 'XRay':T},
},
{'variable': 'XRay',
'evidence': {'Dyspnoea':T, 'Smoker':T},
},
],
}
|
the-stack_0_22270 | from typing_extensions import Literal
from mad_gui.utils.model_base import BaseStateModel, Property
MODES = Literal["investigate", "sync", "add", "modify", "remove"]
class UiState(BaseStateModel):
menu_collapsed = Property(True)
class PlotState(BaseStateModel):
x_range = Property((0, 1))
y_range = Property((0, 1))
x_range_max = Property((0, 1))
mode: MODES = Property("investigate", dtype=str)
|
the-stack_0_22271 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 4 15:12:25 2017
@author: jiahuei
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ptb_tokenizer import PTBTokenizer
from tqdm import tqdm
from PIL import Image
import numpy as np
import h5py, json
import os, sys, time, re
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CURR_DIR, '..', '..', 'common'))
import utils
pjoin = os.path.join
def _convert_split(split, include_restval):
if split == 'val':
return 'valid'
if include_restval and split == 'restval':
return 'train'
return split
def tokenise(dataset,
image_id_key='cocoid',
retokenise=False):
"""
Tokenise captions (optional), remove non-alphanumerics.
Args:
dataset: Dictionary object loaded from Karpathy's dataset JSON file.
image_id_key: String. Used to access `image_id` field.
retokenise: Boolean. Whether to retokenise the raw captions using
Stanford-CoreNLP-3.4.1.
Returns:
        A list with the tokenised captions.
        Each element of the list is a length-5 dict:
filepath : Unicode string.
image_id : Int
raw : Length-N list of Unicode strings.
split : Unicode string.
tokens : Length-N list of lists. Each list has M Unicode tokens.
"""
if retokenise:
print("\nINFO: Tokenising captions using PTB Tokenizer.\n")
utils.maybe_download_from_url(
r'http://central.maven.org/maven2/edu/stanford/nlp/stanford-corenlp/3.4.1/stanford-corenlp-3.4.1.jar',
CURR_DIR)
tokenizer = PTBTokenizer()
raw_list = []
for d in dataset['images']:
for s in d['sentences']:
raw_list.append(s['raw'])
tokenised_cap, tokenised_cap_w_punc = tokenizer.tokenize(raw_list)
tokenised_data = []
cap_id = 0
for d in dataset['images']:
if 'filepath' in d.keys():
filepath = os.path.join(d['filepath'], d['filename'])
else:
filepath = d['filename']
temp_dict = dict(split=d['split'],
filepath=filepath,
image_id=d[image_id_key],
raw=[],
tokens=[])
for s in d['sentences']:
temp_dict['raw'].append(s['raw'])
temp_dict['tokens'].append(
[unicode(w) for w in tokenised_cap[cap_id].split(' ')
if w != ''])
cap_id += 1
tokenised_data.append(temp_dict)
else:
print("\nINFO: Using tokenised captions.\n")
#pattern = re.compile(r'([^\s\w]|_)+', re.UNICODE) # matches non-alphanumerics
pattern = re.compile(r'([^\w]|_)+', re.UNICODE) # matches non-alphanumerics and whitespaces
tokenised_data = []
for d in dataset['images']:
if 'filepath' in d.keys():
filepath = os.path.join(d['filepath'], d['filename'])
else:
filepath = d['filename']
temp_dict = dict(split=d['split'],
filepath=filepath,
image_id=d[image_id_key],
raw=[],
tokens=[])
for s in d['sentences']:
temp_dict['raw'].append(s['raw'])
temp_list = []
for w in s['tokens']:
w = re.sub(pattern, '', w.lower())
if w != '': temp_list.append(w)
temp_dict['tokens'].append(temp_list)
tokenised_data.append(temp_dict)
return tokenised_data
def get_truncate_length(tokenised_dataset,
truncate_percentage,
include_restval=True):
"""
    Calculates the maximum caption length such that truncated captions make
    up `truncate_percentage` of the training corpus.
Args:
tokenised_dataset: Dictionary from output of `tokenise()`.
truncate_percentage: The percentage of truncated captions.
include_restval: Boolean. Whether to include `restval` split.
Only applies to MS-COCO dataset.
Returns:
The maximum caption length.
"""
lengths = {}
num_captions = 0
for d in tokenised_dataset:
split = _convert_split(d['split'], include_restval)
if split == 'train':
for s in d['tokens']:
lengths[len(s)] = lengths.get(len(s), 0) + 1
num_captions += 1
truncate_length = 0
percentage = .0
for key, value in sorted(lengths.iteritems()):
if percentage > (100.0 - truncate_percentage):
truncate_length = key
break
percentage += lengths[key] / num_captions * 100
print("INFO: Captions longer than {} words will be truncated.\n".format(truncate_length))
return truncate_length
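# A standalone toy re-statement (not used by the pipeline) of the truncation rule
# implemented above: walk the caption-length histogram in increasing order and stop
# once the cumulative share of captions exceeds (100 - truncate_percentage) percent,
# so captions longer than the returned length make up at most ~truncate_percentage
# percent of the corpus. `length_counts` is a hypothetical {length: count} dict.
def _toy_truncate_length(length_counts, truncate_percentage):
    num_captions = float(sum(length_counts.values()))
    percentage = 0.0
    for length in sorted(length_counts):
        if percentage > (100.0 - truncate_percentage):
            return length
        percentage += length_counts[length] / num_captions * 100
    return max(length_counts)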
def build_vocab(tokenised_dataset,
word_count_thres,
caption_len_thres,
vocab_size=None,
include_restval=True,
pad_value=0,
include_GO_EOS_tokens=True):
"""
Builds the word-to-id and id-to-word dictionaries.
Args:
tokenised_dataset: Dictionary from output of `tokenise()`.
word_count_thres: Threshold for word occurrence. Words that appear
less than this threshold will be converted to <UNK> token.
caption_len_thres: Threshold for sentence length in words. Captions
with longer lengths are truncated.
include_restval: Boolean. Whether to include `restval` split.
Only applies to MS-COCO dataset.
pad_value: Value assigned to <PAD> token.
Returns:
Word-to-id and id-to-word dictionaries.
"""
print("INFO: Building vocabulary.\n")
assert pad_value >= -1
counts = {}
for d in tokenised_dataset:
#split = d['split']
#if _split_check(split, include_restval):
split = _convert_split(d['split'], include_restval)
if split == 'train':
for s in d['tokens']:
for w_count, w in enumerate(s):
if w_count < caption_len_thres:
counts[w] = counts.get(w, 0) + 1
cw = sorted([(count,w) for w,count in counts.iteritems()], reverse=True)
if vocab_size is None:
print("INFO: Vocab: Filtering out words with count less than {}.\n".format(
word_count_thres))
vocab = [w[1] for w in cw if counts[w[1]] >= word_count_thres]
else:
print("INFO: Vocab: Generating vocab with fixed size {}.\n".format(
vocab_size))
vocab = [w[1] for w in cw[:vocab_size]]
#vocab_count = [w for w in cw if counts[w[1]] >= WORD_COUNT_THRES]
#vocab_inv_freq = [1.0 - (w[0] / float(vocab_count[0][0])) for w in vocab_count]
#vocab_weights = [0.5 + (f * 1.5) for f in vocab_inv_freq]
wtoi = {}
itow = {}
idx = pad_value
wtoi['<PAD>'] = idx
itow[idx] = '<PAD>'
idx += 1
for w in vocab:
wtoi[w] = idx
itow[idx] = w
idx += 1
wtoi['<UNK>'] = idx
itow[idx] = '<UNK>'
idx += 1
if include_GO_EOS_tokens:
wtoi['<GO>'] = idx
itow[idx] = '<GO>'
idx += 1
wtoi['<EOS>'] = idx
itow[idx] = '<EOS>'
time.sleep(0.5)
return wtoi, itow
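# Illustrative index layout produced above (assumed toy inputs: pad_value=0, a kept
# vocab of ['a', 'b'], include_GO_EOS_tokens=True):
#   wtoi == {'<PAD>': 0, 'a': 1, 'b': 2, '<UNK>': 3, '<GO>': 4, '<EOS>': 5}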
def tokenised_word_to_txt_V1(tokenised_dataset,
caption_len_thres,
include_restval=True):
"""
Builds the train, validation and test lists of texts.
Args:
tokenised_dataset: Dictionary from output of `tokenise()`.
caption_len_thres: Threshold for sentence length in words. Captions
with longer lengths are truncated.
include_restval: Boolean. Whether to include `restval` split.
Only applies to MS-COCO dataset.
Returns:
`train`, `valid` and `test` dictionaries.
"""
dataset = dict(train=[], valid=[], test=[])
for i, d in enumerate(tqdm(tokenised_dataset,
ncols=100, desc='Word-to-txt-V1')):
split = _convert_split(d['split'], include_restval)
if split == 'restval': continue
fp = d['filepath']
for tokens in d['tokens']:
sent = ' '.join(tokens[:caption_len_thres])
sent = '<GO> {} <EOS>'.format(sent)
sent_out = '{},{}'.format(fp, sent)
dataset[split].append(sent_out)
return dataset
def tokenised_word_to_txt_V2(tokenised_dataset,
caption_len_thres,
include_restval=True):
"""
Builds the train, validation and test lists of texts.
Args:
tokenised_dataset: Dictionary from output of `tokenise()`.
caption_len_thres: Threshold for sentence length in words. Captions
with longer lengths are truncated.
include_restval: Boolean. Whether to include `restval` split.
Only applies to MS-COCO dataset.
Returns:
`train`, `valid` and `test` dictionaries.
"""
dataset = dict(train=[], valid=[], test=[])
for i, d in enumerate(tqdm(tokenised_dataset,
ncols=100, desc='Word-to-txt-V2')):
split = _convert_split(d['split'], include_restval)
if split == 'restval': continue
fp = d['filepath']
for tokens in d['tokens']:
tokens = ['<GO>'] + tokens + ['<EOS>']
sent = ' '.join(tokens[:caption_len_thres + 2])
sent_out = '{},{}'.format(fp, sent)
dataset[split].append(sent_out)
return dataset
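# Note on the two variants above (descriptive only): V1 truncates the raw tokens to
# `caption_len_thres` words and then wraps them with <GO>/<EOS>, so every line ends
# with <EOS>; V2 wraps first and truncates the wrapped list to `caption_len_thres + 2`,
# so captions longer than the threshold lose their trailing <EOS> token.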
def serialise_everything(output_filepath,
image_dir,
image_size,
image_chunk_num,
word_to_txt_dict,
wtoi,
itow):
assert len(image_size) == 2
# Assert no overlaps between sets
train_set = set([s.split(',')[0] for s in word_to_txt_dict['train']])
valid_set = set([s.split(',')[0] for s in word_to_txt_dict['valid']])
test_set = set([s.split(',')[0] for s in word_to_txt_dict['test']])
assert not bool(train_set.intersection(valid_set))
assert not bool(train_set.intersection(test_set))
assert not bool(valid_set.intersection(test_set))
train_set = list(train_set)
valid_set = list(valid_set)
test_set = list(test_set)
with h5py.File('{}.h5'.format(output_filepath), 'w') as f:
sdt = h5py.special_dtype(vlen=unicode)
# Store dictionaries
f.create_dataset('wtoi', data=json.dumps(wtoi))
f.create_dataset('itow', data=json.dumps(itow))
# Store inference filepaths
d = f.create_dataset('filenames_valid', (len(valid_set),), dtype=sdt)
d[:] = valid_set
d = f.create_dataset('filenames_test', (len(test_set),), dtype=sdt)
d[:] = test_set
# Create index lookup and add image index
all_set = train_set + valid_set + test_set
idx = {}
for i, p in enumerate(all_set):
idx[p] = i
final_dict = {}
for split in word_to_txt_dict.keys():
final_dict[split] = []
for s in word_to_txt_dict[split]:
fidx = idx[s.split(',')[0]]
final_dict[split].append('{},{}'.format(fidx, s))
# Store captions used during training
for split in final_dict.keys():
captions = final_dict[split]
d = f.create_dataset(split, (len(captions),), dtype=sdt)
d[:] = captions
# Store decoded images as NumPy array
dsize = tuple([len(all_set)] + list(image_size) + [3])
chunks = tuple([image_chunk_num] + list(image_size) + [3])
d = f.create_dataset('images', dsize, chunks=chunks, dtype='uint8')
desc = 'INFO: h5py: Writing images'
for i, fname in enumerate(tqdm(all_set, ncols=100, desc=desc)):
fpath = pjoin(image_dir, fname)
img = Image.open(fpath)
img = img.resize(image_size, Image.BILINEAR)
img_arr = np.array(img)
assert img_arr.dtype == 'uint8'
err_mssg = 'Corrupted or unsupported image file: `{}`.'
if len(img_arr.shape) == 3:
if img_arr.shape[-1] == 3:
pass
elif img_arr.shape[-1] == 1:
img_arr = np.concatenate([img_arr] * 3, axis=2)
else:
raise ValueError(err_mssg.format(fpath))
elif len(img_arr.shape) == 2:
img_arr = np.stack([img_arr] * 3, axis=2)
else:
raise ValueError(err_mssg.format(fpath))
d[i, :, :, :] = img_arr
print('INFO: h5py: Dataset serialisation complete.\n')
def test_h5_file(filepath):
data = {}
with h5py.File(filepath, 'r') as f:
data['wtoi'] = json.loads(f['wtoi'][()])
data['itow'] = json.loads(f['itow'][()])
data['filenames_valid'] = list(f['filenames_valid'][:])
data['filenames_test'] = list(f['filenames_test'][:])
data['train'] = list(f['train'][:])
data['valid'] = list(f['valid'][:])
data['test'] = list(f['test'][:])
data['images'] = f['images'][:20]
for i in range(10):
img = Image.fromarray(data['images'][i])
img.save(pjoin(os.path.split(filepath)[0], 'img_{}.jpg'.format(i)))
return data
|
the-stack_0_22272 | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import activations, initializers, constraints, regularizers
from tensorflow.keras.layers import Input, Layer, Lambda, Dropout, Reshape, Embedding
from ..mapper.knowledge_graph import KGTripleGenerator, KGTripleSequence
from ..core.experimental import experimental
from ..core.validation import require_integer_in_range
class ComplExScore(Layer):
"""
ComplEx scoring Keras layer.
Original Paper: Complex Embeddings for Simple Link Prediction, Théo Trouillon, Johannes Welbl,
Sebastian Riedel, Éric Gaussier and Guillaume Bouchard, ICML
2016. http://jmlr.org/proceedings/papers/v48/trouillon16.pdf
This combines subject, relation and object embeddings into a score of the likelihood of the
link.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build(self, input_shape):
self.built = True
def call(self, inputs):
"""
Applies the layer.
Args:
inputs: a list of 6 tensors (``shape = batch size × 1 × embedding dimension k``), where
the three consecutive pairs represent real and imaginary parts of the subject,
relation and object embeddings, respectively, that is, ``inputs == [Re(subject),
Im(subject), Re(relation), ...]``
"""
s_re, s_im, r_re, r_im, o_re, o_im = inputs
def inner(r, s, o):
return tf.reduce_sum(r * s * o, axis=2)
# expansion of Re(<w_r, e_s, conjugate(e_o)>)
score = (
inner(r_re, s_re, o_re)
+ inner(r_re, s_im, o_im)
+ inner(r_im, s_re, o_im)
- inner(r_im, s_im, o_re)
)
return score
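# A minimal standalone sketch (not part of StellarGraph) showing why the four
# real-valued terms above equal Re(<w_r, e_s, conj(e_o)>); the helper name and its
# inputs are hypothetical, and only numpy (already imported as np) is assumed.
def _complex_score_expansion_check(s, r, o):
    """Return True if the expanded real-arithmetic score matches the complex form,
    given 1-D complex numpy arrays `s`, `r`, `o` of equal length."""
    expanded = (
        np.sum(r.real * s.real * o.real)
        + np.sum(r.real * s.imag * o.imag)
        + np.sum(r.imag * s.real * o.imag)
        - np.sum(r.imag * s.imag * o.real)
    )
    direct = np.sum(r * s * o.conj()).real
    return np.isclose(expanded, direct)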
class ComplEx:
"""
Embedding layers and a ComplEx scoring layers that implement the ComplEx knowledge graph
embedding algorithm as in http://jmlr.org/proceedings/papers/v48/trouillon16.pdf
Args:
generator (KGTripleGenerator): A generator of triples to feed into the model.
embedding_dimension (int): the dimension of the embedding (that is, a vector in
``C^embedding_dimension`` is learnt for each node and each link type)
embeddings_initializer (str or func, optional): The initialiser to use for the embeddings
(the default of random normal values matches the paper's reference implementation).
embeddings_regularizer (str or func, optional): The regularizer to use for the embeddings.
"""
def __init__(
self,
generator,
embedding_dimension,
embeddings_initializer="normal",
embeddings_regularizer=None,
):
if not isinstance(generator, KGTripleGenerator):
raise TypeError(
f"generator: expected KGTripleGenerator, found {type(generator).__name__}"
)
graph = generator.G
self.num_nodes = graph.number_of_nodes()
self.num_edge_types = len(graph._edges.types)
self.embedding_dimension = embedding_dimension
def embed(count):
return Embedding(
count,
embedding_dimension,
embeddings_initializer=embeddings_initializer,
embeddings_regularizer=embeddings_regularizer,
)
# ComplEx generates embeddings in C, which we model as separate real and imaginary
# embeddings
self._node_embeddings_real = embed(self.num_nodes)
self._node_embeddings_imag = embed(self.num_nodes)
self._edge_type_embeddings_real = embed(self.num_edge_types)
self._edge_type_embeddings_imag = embed(self.num_edge_types)
def embeddings(self):
"""
Retrieve the embeddings for nodes/entities and edge types/relations in this ComplEx model.
Returns:
A tuple of numpy complex arrays: the first element is the embeddings for nodes/entities
(``shape = number of nodes × k``), the second element is the embeddings for edge
types/relations (``shape = number of edge types x k``).
"""
node = 1j * self._node_embeddings_imag.embeddings.numpy()
node += self._node_embeddings_real.embeddings.numpy()
rel = 1j * self._edge_type_embeddings_imag.embeddings.numpy()
rel += self._edge_type_embeddings_real.embeddings.numpy()
return node, rel
def rank_edges_against_all_nodes(self, test_data, known_edges_graph):
"""
Returns the ranks of the true edges in ``test_data``, when scored against all other similar
edges.
For each input edge ``E = (s, r, o)``, the score of the *modified-object* edge ``(s, r, n)``
is computed for every node ``n`` in the graph, and similarly the score of the
*modified-subject* edge ``(n, r, o)``.
This computes "raw" and "filtered" ranks:
raw
The score of each edge is ranked against all of the modified-object and modified-subject
ones, for instance, if ``E = ("a", "X", "b")`` has score 3.14, and only one
modified-object edge has a higher score (e.g. ``F = ("a", "X", "c")``), then the raw
modified-object rank for ``E`` will be 2; if all of the ``(n, "X", "b")`` edges have score
less than 3.14, then the raw modified-subject rank for ``E`` will be 1.
filtered
The score of each edge is ranked against only the unknown modified-object and
modified-subject edges. An edge is considered known if it is in ``known_edges_graph``
which should typically hold every edge in the dataset (that is everything from the train,
test and validation sets, if the data has been split). For instance, continuing the raw
example, if the higher-scoring edge ``F`` is in the graph, then it will be ignored, giving
a filtered modified-object rank for ``E`` of 1. (If ``F`` was not in the graph, the
filtered modified-object rank would be 2.)
Args:
test_data: the output of :meth:`KGTripleGenerator.flow` on some test triples
known_edges_graph (StellarGraph):
a graph instance containing all known edges/triples
Returns:
            A tuple of two numpy arrays of integer ranks, ``(raw, filtered)``. Each has shape
            ``N × 2``, where N is the number of test triples in ``test_data``; the first column
            (``array[:, 0]``) holds the modified-object ranks, and the second (``array[:, 1]``)
            holds the modified-subject ranks.
"""
if not isinstance(test_data, KGTripleSequence):
raise TypeError(
"test_data: expected KGTripleSequence; found {type(test_data).__name__}"
)
num_nodes = known_edges_graph.number_of_nodes()
all_node_embs, all_rel_embs = self.embeddings()
all_node_embs_conj = all_node_embs.conj()
raws = []
filtereds = []
# run through the batches and compute the ranks for each one
num_tested = 0
for ((subjects, rels, objects),) in test_data:
num_tested += len(subjects)
# batch_size x k
ss = all_node_embs[subjects, :]
rs = all_rel_embs[rels, :]
os = all_node_embs[objects, :]
# reproduce the scoring function for ranking the given subject and relation against all
# other nodes (objects), and similarly given relation and object against all
# subjects. The bulk operations give speeeeeeeeed.
# (num_nodes x k, batch_size x k) -> num_nodes x batch_size
mod_o_pred = np.inner(all_node_embs_conj, ss * rs).real
mod_s_pred = np.inner(all_node_embs, rs * os.conj()).real
mod_o_raw, mod_o_filt = _ranks_from_score_columns(
mod_o_pred,
true_modified_node_ilocs=objects,
unmodified_node_ilocs=subjects,
true_rel_ilocs=rels,
modified_object=True,
known_edges_graph=known_edges_graph,
)
mod_s_raw, mod_s_filt = _ranks_from_score_columns(
mod_s_pred,
true_modified_node_ilocs=subjects,
true_rel_ilocs=rels,
modified_object=False,
unmodified_node_ilocs=objects,
known_edges_graph=known_edges_graph,
)
raws.append(np.column_stack((mod_o_raw, mod_s_raw)))
filtereds.append(np.column_stack((mod_o_filt, mod_s_filt)))
# make one big array
raw = np.concatenate(raws)
filtered = np.concatenate(filtereds)
# for each edge, there should be an pair of raw ranks
assert raw.shape == filtered.shape == (num_tested, 2)
return raw, filtered
def __call__(self, x):
"""
Apply embedding layers to the source, relation and object input "ilocs" (sequential integer
labels for the nodes and edge types).
Args:
x (list): list of 3 tensors (each batch size x 1) storing the ilocs of the subject,
relation and object elements for each edge in the batch.
"""
s_iloc, r_iloc, o_iloc = x
s_re = self._node_embeddings_real(s_iloc)
s_im = self._node_embeddings_imag(s_iloc)
r_re = self._edge_type_embeddings_real(r_iloc)
r_im = self._edge_type_embeddings_imag(r_iloc)
o_re = self._node_embeddings_real(o_iloc)
o_im = self._node_embeddings_imag(o_iloc)
scoring = ComplExScore()
return scoring([s_re, s_im, r_re, r_im, o_re, o_im])
def build(self):
"""
Builds a ComplEx model.
Returns:
A tuple of (list of input tensors, tensor for ComplEx model score outputs)
"""
s_iloc = Input(shape=1)
r_iloc = Input(shape=1)
o_iloc = Input(shape=1)
x_inp = [s_iloc, r_iloc, o_iloc]
x_out = self(x_inp)
return x_inp, x_out
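# Toy illustration (not library code) of the "raw" vs "filtered" ranks described in
# rank_edges_against_all_nodes above. `scores` is one column of scores over all
# candidate nodes, `true_iloc` is the iloc of the true node, and `known_mask` marks
# candidates that already form known edges; names and signature are hypothetical and
# only numpy is assumed.
def _toy_raw_and_filtered_rank(scores, true_iloc, known_mask):
    true_score = scores[true_iloc]
    # raw: rank against every candidate
    raw = 1 + int(np.sum(scores > true_score))
    # filtered: drop known candidates (other than the true one) before ranking
    keep = ~known_mask
    keep[true_iloc] = True
    filtered = 1 + int(np.sum(scores[keep] > true_score))
    return raw, filtered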
class DistMultScore(Layer):
"""
DistMult scoring Keras layer.
Original Paper: Embedding Entities and Relations for Learning and Inference in Knowledge
Bases. Bishan Yang, Wen-tau Yih, Xiaodong He, Jianfeng Gao, Li Deng. ICLR 2015
This combines subject, relation and object embeddings into a score of the likelihood of the
link.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build(self, input_shape):
self.built = True
def call(self, inputs):
"""
Applies the layer.
Args:
inputs: a list of 3 tensors (``shape = batch size × 1 × embedding dimension``),
representing the subject, relation and object embeddings, respectively, that is,
``inputs == [subject, relation, object]``
"""
y_e1, m_r, y_e2 = inputs
# y_e1^T M_r y_e2, where M_r = diag(m_r) is a diagonal matrix
score = tf.reduce_sum(y_e1 * m_r * y_e2, axis=2)
return score
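# A standalone sketch (not part of StellarGraph) checking that the elementwise form
# above equals y_e1^T diag(m_r) y_e2 for 1-D vectors; the helper is hypothetical and
# only assumes numpy (already imported as np).
def _distmult_diag_check(y_e1, m_r, y_e2):
    elementwise = np.sum(y_e1 * m_r * y_e2)
    matrix_form = y_e1 @ np.diag(m_r) @ y_e2
    return np.isclose(elementwise, matrix_form)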
class DistMult:
"""
Embedding layers and a DistMult scoring layers that implement the DistMult knowledge graph
embedding algorithm as in https://arxiv.org/pdf/1412.6575.pdf
Args:
generator (KGTripleGenerator): A generator of triples to feed into the model.
embedding_dimension (int): the dimension of the embedding (that is, a vector in
``R^embedding_dimension`` is learnt for each node and each link type)
embeddings_initializer (str or func, optional): The initialiser to use for the embeddings.
embeddings_regularizer (str or func, optional): The regularizer to use for the embeddings.
"""
def __init__(
self,
generator,
embedding_dimension,
embeddings_initializer="uniform",
embeddings_regularizer=None,
):
if not isinstance(generator, KGTripleGenerator):
raise TypeError(
f"generator: expected KGTripleGenerator, found {type(generator).__name__}"
)
require_integer_in_range(embedding_dimension, "embedding_dimension", min_val=1)
graph = generator.G
self.num_nodes = graph.number_of_nodes()
self.num_edge_types = len(graph._edges.types)
self.embedding_dimension = embedding_dimension
def embed(count):
# FIXME(#980,https://github.com/tensorflow/tensorflow/issues/33755): embeddings can't use
# constraints to be normalized: per section 4 in the paper, the embeddings should be
# normalised to have unit norm.
return Embedding(
count,
embedding_dimension,
embeddings_initializer=embeddings_initializer,
embeddings_regularizer=embeddings_regularizer,
)
# DistMult generates embeddings in R
self._node_embeddings = embed(self.num_nodes)
self._edge_type_embeddings = embed(self.num_edge_types)
def embeddings(self):
"""
Retrieve the embeddings for nodes/entities and edge types/relations in this DistMult model.
Returns:
A tuple of numpy arrays: the first element is the embeddings for nodes/entities
(``shape = number of nodes × k``), the second element is the embeddings for edge
types/relations (``shape = number of edge types x k``).
"""
return (
self._node_embeddings.embeddings.numpy(),
self._edge_type_embeddings.embeddings.numpy(),
)
def rank_edges_against_all_nodes(self, test_data, known_edges_graph):
"""
Returns the ranks of the true edges in ``test_data``, when scored against all other similar
edges.
For each input edge ``E = (s, r, o)``, the score of the *modified-object* edge ``(s, r, n)``
is computed for every node ``n`` in the graph, and similarly the score of the
*modified-subject* edge ``(n, r, o)``.
This computes "raw" and "filtered" ranks:
raw
The score of each edge is ranked against all of the modified-object and modified-subject
ones, for instance, if ``E = ("a", "X", "b")`` has score 3.14, and only one
modified-object edge has a higher score (e.g. ``F = ("a", "X", "c")``), then the raw
modified-object rank for ``E`` will be 2; if all of the ``(n, "X", "b")`` edges have score
less than 3.14, then the raw modified-subject rank for ``E`` will be 1.
filtered
The score of each edge is ranked against only the unknown modified-object and
modified-subject edges. An edge is considered known if it is in ``known_edges_graph``
which should typically hold every edge in the dataset (that is everything from the train,
test and validation sets, if the data has been split). For instance, continuing the raw
example, if the higher-scoring edge ``F`` is in the graph, then it will be ignored, giving
a filtered modified-object rank for ``E`` of 1. (If ``F`` was not in the graph, the
filtered modified-object rank would be 2.)
Args:
test_data: the output of :meth:`KGTripleGenerator.flow` on some test triples
known_edges_graph (StellarGraph):
a graph instance containing all known edges/triples
        Returns:
            A tuple of two numpy arrays of integer ranks: the raw ranks and the filtered
            ranks. Each has shape ``N × 2``, where N is the number of test triples in
            ``test_data``; the first column (``array[:, 0]``) holds the modified-object
            ranks, and the second (``array[:, 1]``) holds the modified-subject ranks.
"""
if not isinstance(test_data, KGTripleSequence):
raise TypeError(
"test_data: expected KGTripleSequence; found {type(test_data).__name__}"
)
num_nodes = known_edges_graph.number_of_nodes()
all_node_embs, all_rel_embs = self.embeddings()
raws = []
filtereds = []
# run through the batches and compute the ranks for each one
num_tested = 0
for ((subjects, rels, objects),) in test_data:
num_tested += len(subjects)
# batch_size x k
ss = all_node_embs[subjects, :]
rs = all_rel_embs[rels, :]
os = all_node_embs[objects, :]
# reproduce the scoring function for ranking the given subject and relation against all
# other nodes (objects), and similarly given relation and object against all
        # subjects. The bulk (vectorised) operations keep this fast.
# (num_nodes x k, batch_size x k) -> num_nodes x batch_size
mod_o_pred = np.inner(all_node_embs, ss * rs)
mod_s_pred = np.inner(all_node_embs, rs * os)
mod_o_raw, mod_o_filt = _ranks_from_score_columns(
mod_o_pred,
true_modified_node_ilocs=objects,
unmodified_node_ilocs=subjects,
true_rel_ilocs=rels,
modified_object=True,
known_edges_graph=known_edges_graph,
)
mod_s_raw, mod_s_filt = _ranks_from_score_columns(
mod_s_pred,
true_modified_node_ilocs=subjects,
true_rel_ilocs=rels,
modified_object=False,
unmodified_node_ilocs=objects,
known_edges_graph=known_edges_graph,
)
raws.append(np.column_stack((mod_o_raw, mod_s_raw)))
filtereds.append(np.column_stack((mod_o_filt, mod_s_filt)))
# make one big array
raw = np.concatenate(raws)
filtered = np.concatenate(filtereds)
        # each edge yields a pair of ranks (modified-object, modified-subject), both raw and filtered
assert raw.shape == filtered.shape == (num_tested, 2)
return raw, filtered
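    # Hypothetical usage sketch (not part of the original class; `model`, `gen`, `graph`
    # and `test_triples` are assumed to exist): turning the returned ranks into the usual
    # link-prediction metrics.
    #
    #     raw, filtered = model.rank_edges_against_all_nodes(
    #         gen.flow(test_triples), known_edges_graph=graph)
    #     mrr = (1.0 / filtered).mean()          # mean reciprocal rank
    #     hits_at_10 = (filtered <= 10).mean()   # fraction of ranks at most 10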
def __call__(self, x):
"""
Apply embedding layers to the source, relation and object input "ilocs" (sequential integer
labels for the nodes and edge types).
Args:
x (list): list of 3 tensors (``shape = batch size × 1``) storing the ilocs of the
subject, relation and object elements for each edge in the batch.
"""
e1_iloc, r_iloc, e2_iloc = x
y_e1 = self._node_embeddings(e1_iloc)
m_r = self._edge_type_embeddings(r_iloc)
y_e2 = self._node_embeddings(e2_iloc)
scoring = DistMultScore()
return scoring([y_e1, m_r, y_e2])
def build(self):
"""
Builds a DistMult model.
Returns:
A tuple of (list of input tensors, tensor for DistMult model score outputs)
"""
e1_iloc = Input(shape=(None,))
r_iloc = Input(shape=(None,))
e2_iloc = Input(shape=(None,))
x_inp = [e1_iloc, r_iloc, e2_iloc]
x_out = self(x_inp)
return x_inp, x_out
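# Hypothetical end-to-end sketch (assumes `generator` is a KGTripleGenerator over training
# triples and that `tf.keras` is available as elsewhere in this module; the
# `negative_samples` argument of `flow` is an assumption about the generator's API):
#
#     distmult = DistMult(generator, embedding_dimension=100)
#     x_inp, x_out = distmult.build()
#     model = tf.keras.Model(inputs=x_inp, outputs=x_out)
#     model.compile(
#         optimizer="adam",
#         loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
#     )
#     model.fit(generator.flow(train_triples, negative_samples=10), epochs=5)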
def _ranks_from_score_columns(
pred,
*,
true_modified_node_ilocs,
unmodified_node_ilocs,
true_rel_ilocs,
modified_object,
known_edges_graph,
):
"""
Compute the raw and filtered ranks of a set of true edges ``E = (s, r, o)`` against all
mutations of one end of them, e.g. ``E' = (s, r, n)`` for "modified-object".
    The raw rank of a true edge ``E`` is one plus the number of edges scored strictly higher
    than it; the filtered rank is one plus the number of *unknown* higher-scoring edges
    (those not in ``known_edges_graph``).
Args:
pred: a 2D array: each column represents the scores for a single true edge and its
mutations, where the row indicates the ``n`` in ``E'`` (e.g. row 0 corresponds to ``n``
= node with iloc 0)
true_modified_node_ilocs: an array of ilocs of the actual node that was modified, that is,
            ``o`` for modified-object and ``s`` for modified-subject; index ``i`` corresponds to
the iloc for column ``pred[:, i]``.
unmodified_node_ilocs: similar to ``true_modified_node_ilocs``, except for the other end of
the edge: the node that was not modified.
true_rel_ilocs: similar to ``true_modified_node_ilocs``, except for the relationship type of
the edge (``r``).
modified_object (bool): whether the object was modified (``True``), or the subject
(``False``)
known_edges_graph (StellarGraph): a graph containing all the known edges that should be
ignored when computing filtered ranks
Returns:
a tuple of raw ranks and filtered ranks, each is an array of integers >= 1 where index ``i``
corresponds to the rank of the true edge among all of the scores in column ``pred[:, i]``.
"""
batch_size = len(true_modified_node_ilocs)
assert pred.shape == (known_edges_graph.number_of_nodes(), batch_size)
assert unmodified_node_ilocs.shape == true_rel_ilocs.shape == (batch_size,)
# the score of the true edge, for each edge in the batch (this indexes in lock-step,
# i.e. [pred[true_modified_node_ilocs[0], range(batch_size)[0]], ...])
true_scores = pred[true_modified_node_ilocs, range(batch_size)]
# for each column, compare all the scores against the score of the true edge
greater = pred > true_scores
# the raw rank is the number of elements scored higher than the true edge
raw_rank = 1 + greater.sum(axis=0)
# the filtered rank is the number of unknown elements scored higher, where an element is
# known if the edge (s, r, n) (for modified-object) or (n, r, o) (for modified-subject)
# exists in known_edges_graph.
# FIXME(#870): this would be better without external IDs <-> ilocs translation
unmodified_nodes = known_edges_graph._nodes.ids.from_iloc(unmodified_node_ilocs)
true_rels = known_edges_graph._edges.types.from_iloc(true_rel_ilocs)
if modified_object:
neigh_func = known_edges_graph.out_nodes
else:
neigh_func = known_edges_graph.in_nodes
# collect all the neighbours into a single array to do one _get_index_for_nodes call,
# which has relatively high constant cost
neighbours = []
columns = []
for batch_column, (unmodified, r) in enumerate(zip(unmodified_nodes, true_rels)):
this_neighs = neigh_func(unmodified, edge_types=[r])
neighbours.extend(this_neighs)
columns.extend(batch_column for _ in this_neighs)
neighbour_ilocs = known_edges_graph._get_index_for_nodes(neighbours)
greater[neighbour_ilocs, columns] = False
filtered_rank = 1 + greater.sum(axis=0)
assert raw_rank.shape == filtered_rank.shape == (batch_size,)
return raw_rank, filtered_rank
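# Hypothetical illustration of the rank computation above (illustrative values only):
# the raw rank of a true edge is one plus the number of strictly higher scores in its
# column of `pred`.
#
#     import numpy as np
#     pred = np.array([[0.1], [0.9], [0.5], [0.2]])   # num_nodes x batch_size (batch of 1)
#     true_score = pred[2, 0]                         # the true edge's object has iloc 2
#     raw_rank = 1 + (pred[:, 0] > true_score).sum()  # -> 2 (only node 1 scores higher)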
|
the-stack_0_22275 | # ======================================================================#
# Functions to handle sequences for people who hold a grudge against
# Biopython:
# - load fasta file
# - generate reverse complement
# ======================================================================#
import Bio
from Bio import SeqIO
def parse_fasta(fasta_file) -> dict:
    """
    Reads a FASTA file and returns a dict mapping each record's short name
    (the first whitespace-separated word of its header line, '>' included)
    to its full sequence string.
    """
    seq_record = {}
    short_name = None
    with open(fasta_file, "r") as fasta:
        for line in fasta:
            if line.startswith(">"):
                # new header line; remember this record's short name
                short_name = line.strip().split()[0]
                seq_record[short_name] = ""
            elif short_name is not None:
                # sequence line; append nucleotides to the current record
                seq_record[short_name] += line.strip()
    return seq_record
def rev_comp(seq: str) -> str:
"""
Generates the reverse complement of a sequence.
"""
comp = {
"A": "T",
"C": "G",
"G": "C",
"T": "A",
"B": "N",
"N": "N",
"R": "N",
"M": "N",
"Y": "N",
"S": "N",
"W": "N",
"K": "N",
"a": "t",
"c": "g",
"g": "c",
"t": "a",
"n": "n",
" ": "",
}
rev_seq = "".join(comp.get(base, base) for base in reversed(seq))
return rev_seq
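# Hypothetical usage sketch (kept as a comment; "example.fasta" is a placeholder path):
#
#     seqs = parse_fasta("example.fasta")
#     for name, seq in seqs.items():
#         print(name, rev_comp(seq))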
def make_seq_length_file(assembly, output_file, namelist_file="abc"):
    """
    Writes the length of every sequence in ``assembly`` (FASTA) to ``output_file``
    as tab-separated name/length lines. If ``namelist_file`` is set to anything
    other than the default sentinel "abc", the sequence names are also written
    there, one per line.
    """
    seq_data = SeqIO.parse(assembly, "fasta")
    namelist = None
    with open(output_file, "w") as outlength:
        if namelist_file != "abc":
            namelist = open(namelist_file, "w")
        for record in seq_data:
            outlength.write("{0}\t{1}\n".format(record.id, len(record.seq)))
            if namelist is not None:
                namelist.write("{0}\n".format(record.id))
    if namelist is not None:
        namelist.close()
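# Hypothetical usage sketch (file names are placeholders): write per-sequence lengths and,
# optionally, a plain list of sequence names.
#
#     make_seq_length_file("assembly.fasta", "lengths.tsv", namelist_file="names.txt")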
|
the-stack_0_22277 | from unittest import TestCase
from zmodulo.plot.text.font_size import FontSize
__author__ = 'aruff'
class TestFontSize(TestCase):
""" Tester for the plot text FontSize
"""
def test_default_declaration_generation(self):
"""
Tests the generation of a font size declaration with
no parameters
"""
font_size = FontSize()
self.assertEqual(font_size.to_str(), '\tfontsize = 20;\n')
def test_string_named_value_declaration_generation(self):
"""
Tests the generation of a font size declaration with
string parameters
"""
font_size = FontSize("Pooky")
self.assertEqual(font_size.to_str(), '\tfontsize = Pooky;\n')
if __name__ == '__main__':
    main()
|
the-stack_0_22279 | import warnings
import pytest
from vectorhub.auto_encoder import AutoEncoder, ENCODER_MAPPINGS, list_all_auto_models, BIENCODER_MAPPINGS, AutoBiEncoder
from .test_utils import *
@pytest.mark.audio
@pytest.mark.parametrize('name', list(ENCODER_MAPPINGS.keys())[0:3])
def test_encoders_instantiation_audio(name):
if 'audio' in name:
encoder = AutoEncoder.from_model(name)
assert_encoder_works(encoder, data_type='audio')
else:
# Default to test passing otherwise
assert True
@pytest.mark.text
@pytest.mark.parametrize('name', list(ENCODER_MAPPINGS.keys())[0:3])
def test_encoders_instantiation_text(name):
if name not in ['text/use-lite', 'text/elmo']:
if 'text' in name:
encoder = AutoEncoder.from_model(name)
assert_encoder_works(encoder, data_type='text')
else:
# Default to test passing otherwise
assert True
@pytest.mark.image
@pytest.mark.parametrize('name', list(ENCODER_MAPPINGS.keys())[0:3])
def test_encoders_instantiation_image(name):
if 'image' in name:
encoder = AutoEncoder.from_model(name)
assert_encoder_works(encoder, data_type='image')
if 'fastai' not in name:
sample = encoder.to_grayscale(encoder.read('https://getvectorai.com/_nuxt/img/dog-1.3cc5fe1.png'))
result = encoder.encode(sample)
assert not is_dummy_vector(result)
else:
# Default to test passing otherwise
assert True
@pytest.mark.text
@pytest.mark.parametrize('name', list(BIENCODER_MAPPINGS.keys())[0:3])
def test_auto_biencoders(name):
if 'qa' in name:
bi_encoder = AutoBiEncoder.from_model(name)
assert_encoder_works(bi_encoder, data_type='text', model_type='bi_encoder')
def test_listing_all_models():
"""
Simple test to ensure model listing works.
"""
assert len(list_all_auto_models()) > 1
|
the-stack_0_22282 | #! coding:utf-8
# python2 requires: pip install futures
import atexit
from concurrent.futures import (ProcessPoolExecutor, ThreadPoolExecutor,
as_completed)
from concurrent.futures._base import (CANCELLED, CANCELLED_AND_NOTIFIED,
FINISHED, PENDING, RUNNING,
CancelledError, Error, Executor, Future,
TimeoutError)
from concurrent.futures.thread import _threads_queues, _WorkItem
from functools import wraps
from logging import getLogger
from threading import Thread, Timer
from time import sleep
from time import time as time_time
from weakref import WeakSet
from requests import PreparedRequest, RequestException, Session
from requests.adapters import HTTPAdapter
from urllib3 import disable_warnings
from .configs import Config
from .exceptions import FailureException, ValidationError
from .frequency_controller.sync_tools import Frequency
from .versions import PY2, PY3
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
if PY3:
from concurrent.futures.process import BrokenProcessPool
__all__ = [
"Pool", "ProcessPool", "NewFuture", "Async", "threads",
"get_results_generator", "run_after_async", "tPool", "get", "post",
"options", "delete", "put", "head", "patch", "request", "disable_warnings",
"Workshop"
]
logger = getLogger("torequests")
def _abandon_all_tasks():
"""Only used for abandon_all_tasks and exit the main thread,
to prevent the main thread waiting for unclosed thread while exiting."""
_threads_queues.clear()
def ensure_waiting_for_threads():
if Config.wait_futures_before_exiting:
_abandon_all_tasks()
atexit.register(ensure_waiting_for_threads)
class NewExecutorPoolMixin(Executor):
"""Add async_func decorator for wrapping a function to return the NewFuture."""
def async_func(self, function):
"""Decorator for let a normal function return the NewFuture"""
@wraps(function)
def wrapped(*args, **kwargs):
return self.submit(function, *args, **kwargs)
return wrapped
def close(self, wait=True):
"""Same as self.shutdown"""
return self.shutdown(wait=wait)
def _get_cpu_count(self):
"""Get the cpu count."""
try:
from multiprocessing import cpu_count
return cpu_count()
except Exception as e:
logger.error("_get_cpu_count failed for %s" % e)
@property
def x(self):
"""Return self.wait_futures_done"""
return self.wait_futures_done(list(self._all_futures))
def wait_futures_done(self, tasks=None):
# ignore the order of tasks
tasks = tasks or self._all_futures
fs = []
try:
for f in as_completed(tasks, timeout=self._timeout):
fs.append(f.x)
except TimeoutError:
pass
return fs
class Pool(ThreadPoolExecutor, NewExecutorPoolMixin):
"""Let ThreadPoolExecutor use NewFuture instead of origin concurrent.futures.Future.
WARNING: NewFutures in Pool will not block main thread without NewFuture.x.
Basic Usage::
from torequests.main import Pool
import time
pool = Pool()
def use_submit(i):
time.sleep(i)
result = 'use_submit: %s' % i
print(result)
return result
@pool.async_func
def use_decorator(i):
time.sleep(i)
result = 'use_decorator: %s' % i
print(result)
return result
tasks = [pool.submit(use_submit, i) for i in (2, 1, 0)
] + [use_decorator(i) for i in (2, 1, 0)]
# pool.x can be ignore
pool.x
results = [i.x for i in tasks]
print(results)
# use_submit: 0
# use_decorator: 0
# use_submit: 1
# use_decorator: 1
# use_submit: 2
# use_decorator: 2
# ['use_submit: 2', 'use_submit: 1', 'use_submit: 0', 'use_decorator: 2', 'use_decorator: 1', 'use_decorator: 0']
"""
def __init__(self,
n=None,
timeout=None,
default_callback=None,
catch_exception=True,
*args,
**kwargs):
n = n or kwargs.pop("max_workers", None)
if PY2 and n is None:
# python2 n!=None
n = (self._get_cpu_count() or 1) * 5
super(Pool, self).__init__(n, *args, **kwargs)
#: set the default timeout
self._timeout = timeout
#: set the default_callback if not set single task's callback
self.default_callback = default_callback
#: WeakSet of _all_futures for self.x
self._all_futures = WeakSet()
#: catch_exception=True will not raise exceptions, return object FailureException(exception)
self.catch_exception = catch_exception
@property
def all_tasks(self):
"""Keep the same api for dummy, return self._all_futures actually"""
return self._all_futures
def submit(self, func, *args, **kwargs):
"""Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`"""
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError("cannot schedule new futures after shutdown")
callback = kwargs.pop("callback", self.default_callback)
future = NewFuture(
self._timeout,
args,
kwargs,
callback=callback,
catch_exception=self.catch_exception,
)
w = _WorkItem(future, func, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
self._all_futures.add(future)
return future
class ProcessPool(ProcessPoolExecutor, NewExecutorPoolMixin):
"""Simple ProcessPool covered ProcessPoolExecutor.
::
from torequests.main import ProcessPool
import time
pool = ProcessPool()
def use_submit(i):
time.sleep(i)
result = 'use_submit: %s' % i
print(result)
return result
def main():
tasks = [pool.submit(use_submit, i) for i in (2, 1, 0)]
# pool.x can be ignore
pool.x
results = [i.x for i in tasks]
print(results)
if __name__ == '__main__':
main()
# ['use_submit: 2', 'use_submit: 1', 'use_submit: 0']
# use_submit: 0
# use_submit: 1
# use_submit: 2
"""
def __init__(self,
n=None,
timeout=None,
default_callback=None,
catch_exception=True,
*args,
**kwargs):
n = n or kwargs.pop("max_workers", None)
if PY2 and n is None:
# python2 n!=None
n = self._get_cpu_count() or 1
super(ProcessPool, self).__init__(n, *args, **kwargs)
self._timeout = timeout
self.default_callback = default_callback
self._all_futures = WeakSet()
self.catch_exception = catch_exception
def submit(self, func, *args, **kwargs):
"""Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`"""
with self._shutdown_lock:
if PY3 and self._broken:
raise BrokenProcessPool(
"A child process terminated "
"abruptly, the process pool is not usable anymore")
if self._shutdown_thread:
raise RuntimeError("cannot schedule new futures after shutdown")
callback = kwargs.pop("callback", self.default_callback)
future = NewFuture(
self._timeout,
args,
kwargs,
callback=callback,
catch_exception=self.catch_exception,
)
w = _WorkItem(future, func, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
self._result_queue.put(None)
self._start_queue_management_thread()
if PY2:
self._adjust_process_count()
self._all_futures.add(future)
return future
def async_func(self, *args):
"""Decorator mode not support for ProcessPool for _pickle.PicklingError."""
raise NotImplementedError
class NewFuture(Future):
"""Add `.x` attribute and timeout args for original Future class
    WARNING: the Future's thread will keep running until the function finishes or the process is killed.
:attr cx: blocking until the task finish and return the callback_result.
:attr x: blocking until the task finish and return the value as `coro` returned.
:attr task_start_time: timestamp when the task start up.
:attr task_end_time: timestamp when the task end up.
:attr task_cost_time: seconds of task costs.
:param catch_exception: `True` will catch all exceptions and return as :class:`FailureException <FailureException>`
"""
if PY3:
from ._py3_patch import _new_future_await
__await__ = _new_future_await
def __init__(self,
timeout=None,
args=None,
kwargs=None,
callback=None,
catch_exception=True):
super(NewFuture, self).__init__()
self._timeout = timeout
self._args = args or ()
self._kwargs = kwargs or {}
self._callback_result = None
self.catch_exception = catch_exception
self.task_start_time = time_time()
self.task_end_time = 0
self.task_cost_time = 0
self._user_callbacks = set()
if callback:
if not isinstance(callback, (list, tuple)):
callback = [callback]
for fn in callback:
self.add_done_callback(fn)
self._user_callbacks.add(fn)
def __getattr__(self, name):
return getattr(self.x, name)
def _invoke_callbacks(self):
"""Record the task_end_time & task_cost_time, set result for self._callback_result."""
self.task_end_time = time_time()
self.task_cost_time = self.task_end_time - self.task_start_time
with self._condition:
for callback in self._done_callbacks:
try:
result = callback(self)
if callback in self._user_callbacks:
self._callback_result = result
except Exception as e:
logger.error("exception calling callback for %s" % e)
self._condition.notify_all()
@property
def _callbacks(self):
"""Keep same api for NewTask."""
return self._done_callbacks
@property
def cx(self):
"""Block the main thead until future finish, return the future.callback_result."""
return self.callback_result
@property
def callback_result(self):
"""Block the main thead until future finish, return the future.callback_result."""
if self._state in [PENDING, RUNNING]:
self.x
if self._user_callbacks:
return self._callback_result
else:
return self.x
@property
def x(self):
"""Block the main thead until future finish, return the future.result()."""
with self._condition:
result = None
if not self.done():
self._condition.wait(self._timeout)
if not self.done():
# timeout
self.set_exception(TimeoutError())
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
# cancelled
result = CancelledError()
elif self._state == FINISHED:
# finished
if self._exception:
result = self._exception
else:
result = self._result
if isinstance(result, Exception):
if self.catch_exception:
result = FailureException(result)
return result
else:
raise result
return result
def Async(f, n=None, timeout=None):
"""Concise usage for pool.submit.
Basic Usage Asnyc & threads ::
from torequests.main import Async, threads
import time
def use_submit(i):
time.sleep(i)
result = 'use_submit: %s' % i
print(result)
return result
@threads()
def use_decorator(i):
time.sleep(i)
result = 'use_decorator: %s' % i
print(result)
return result
new_use_submit = Async(use_submit)
tasks = [new_use_submit(i) for i in (2, 1, 0)
] + [use_decorator(i) for i in (2, 1, 0)]
print([type(i) for i in tasks])
results = [i.x for i in tasks]
print(results)
# use_submit: 0
# use_decorator: 0
# [<class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>]
# use_submit: 1
# use_decorator: 1
# use_submit: 2
# use_decorator: 2
# ['use_submit: 2', 'use_submit: 1', 'use_submit: 0', 'use_decorator: 2', 'use_decorator: 1', 'use_decorator: 0']
"""
return threads(n=n, timeout=timeout)(f)
def threads(n=None, timeout=None):
"""Decorator usage like Async."""
return Pool(n, timeout).async_func
def get_results_generator(future_list, timeout=None, sort_by_completed=False):
"""Return as a generator of tasks order by completed sequence."""
try:
# python2 not support yield from
if sort_by_completed:
for future in as_completed(future_list, timeout=timeout):
yield future.x
else:
for future in future_list:
yield future.x
except TimeoutError:
return
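# Hypothetical usage sketch (`pool`, `fetch` and `urls` are assumed to exist):
#
#     futures = [pool.submit(fetch, url) for url in urls]
#     for result in get_results_generator(futures, timeout=30, sort_by_completed=True):
#         print(result)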
def run_after_async(seconds, func, *args, **kwargs):
"""Run the function after seconds asynchronously."""
t = Timer(seconds, func, args, kwargs)
t.daemon = True
t.start()
return t
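# Hypothetical usage sketch: run a callable 5 seconds from now without blocking the caller.
#
#     timer = run_after_async(5, print, "fired after 5 seconds")
#     # timer.cancel() aborts it if it has not fired yet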
class FailedRequest(PreparedRequest):
allow_keys = {
"method",
"url",
"headers",
"files",
"data",
"params",
"auth",
"cookies",
"hooks",
"json",
}
def __init__(self, **kwargs):
# self.kwargs for retry tPool.request
self.kwargs = kwargs
filted_kwargs = {
key: value
for key, value in kwargs.items()
if key in self.allow_keys
}
super(FailedRequest, self).__init__()
self.prepare(**filted_kwargs)
class tPool(object):
"""Async wrapper for requests.
:param n: thread pool size for concurrent limit.
    :param interval: time.sleep(interval) after each task finishes.
    :param timeout: timeout for each task.result(timeout). It will not shut down the underlying function.
:param session: individually given a available requests.Session instance if necessary.
:param catch_exception: `True` will catch all exceptions and return as :class:`FailureException <FailureException>`
:param default_callback: default_callback for tasks which not set callback param.
Usage::
from torequests.main import tPool
from torequests.logs import print_info
trequests = tPool(2, 1)
test_url = 'http://p.3.cn'
ss = [
trequests.get(
test_url,
retry=2,
callback=lambda x: (len(x.content), print_info(len(x.content))))
for i in range(3)
]
# or [i.x for i in ss]
trequests.x
ss = [i.cx for i in ss]
print_info(ss)
# [2020-02-11 11:36:33] temp_code.py(10): 612
# [2020-02-11 11:36:33] temp_code.py(10): 612
# [2020-02-11 11:36:34] temp_code.py(10): 612
# [2020-02-11 11:36:34] temp_code.py(16): [(612, None), (612, None), (612, None)]
"""
def __init__(
self,
n=None,
interval=0,
timeout=None,
session=None,
catch_exception=True,
default_callback=None,
retry_exceptions=(RequestException, Error),
):
self.pool = Pool(n, timeout)
self.session = session if session else Session()
self.n = n or 10
# adapt the concurrent limit.
custom_adapter = HTTPAdapter(pool_connections=self.n,
pool_maxsize=self.n)
self.session.mount("http://", custom_adapter)
self.session.mount("https://", custom_adapter)
self.interval = interval
self.catch_exception = catch_exception
self.default_callback = default_callback
self.frequency = Frequency(self.n, self.interval)
self.retry_exceptions = retry_exceptions
@property
def all_tasks(self):
"""Return self.pool._all_futures"""
return self.pool._all_futures
@property
def x(self):
"""Return self.pool.x"""
return self.pool.x
def close(self, wait=False):
"""Close session, shutdown pool."""
self.session.close()
self.pool.shutdown(wait=wait)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __del__(self):
self.close()
def _request(self,
method,
url,
retry=0,
response_validator=None,
retry_interval=0,
**kwargs):
if not url:
raise ValueError("url should not be null, but given: %s" % url)
kwargs["url"] = url
kwargs["method"] = method
# non-official request args
referer_info = kwargs.pop("referer_info", None)
encoding = kwargs.pop("encoding", None)
error = Exception()
for _ in range(retry + 1):
with self.frequency:
try:
resp = self.session.request(**kwargs)
if encoding:
resp.encoding = encoding
logger.debug("%s done, %s" % (url, kwargs))
resp.referer_info = referer_info
if response_validator and not response_validator(resp):
raise ValidationError(response_validator.__name__)
return resp
except self.retry_exceptions as e:
error = e
logger.debug(
"Retry %s for the %s time, Exception: %r . kwargs= %s" %
(url, _ + 1, e, kwargs))
if retry_interval:
sleep(retry_interval)
continue
# for unofficial request args
kwargs["retry"] = retry
if referer_info:
kwargs["referer_info"] = referer_info
if encoding:
kwargs["encoding"] = encoding
logger.debug("Retry %s times failed again: %s." % (retry, error))
failure = FailureException(error)
failure.request = FailedRequest(**kwargs)
if self.catch_exception:
return failure
else:
raise failure
def request(self,
method,
url,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.request`, but return as NewFuture."""
return self.pool.submit(self._request,
method=method,
url=url,
retry=retry,
response_validator=response_validator,
callback=callback or self.default_callback,
**kwargs)
def get(self,
url,
params=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.get`, but return as NewFuture."""
kwargs.setdefault("allow_redirects", True)
return self.request("get",
url=url,
params=params,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def post(self,
url,
data=None,
json=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.post`, but return as NewFuture."""
return self.request("post",
url=url,
data=data,
json=json,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def delete(self,
url,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.delete`, but return as NewFuture."""
return self.request("delete",
url=url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def put(self,
url,
data=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.put`, but return as NewFuture."""
return self.request("put",
url=url,
data=data,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def head(self,
url,
callback=None,
retry=0,
response_validator=None,
allow_redirects=False,
**kwargs):
"""Similar to `requests.head`, but return as NewFuture."""
kwargs['allow_redirects'] = allow_redirects
return self.request("head",
url=url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def options(self,
url,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.options`, but return as NewFuture."""
kwargs.setdefault("allow_redirects", True)
return self.request("options",
url=url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def patch(self,
url,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.patch`, but return as NewFuture."""
return self.request("patch",
url=url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def get(url,
params=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
return tPool().get(url,
params=params,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def post(url,
data=None,
json=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
return tPool().post(url,
data=data,
json=json,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def delete(url, callback=None, retry=0, response_validator=None, **kwargs):
return tPool().delete(url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def put(url,
data=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
return tPool().put(url,
data=data,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def head(url, callback=None, retry=0, response_validator=None, **kwargs):
return tPool().head(url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def options(url, callback=None, retry=0, response_validator=None, **kwargs):
return tPool().options(url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def patch(url, callback=None, retry=0, response_validator=None, **kwargs):
return tPool().patch(url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def request(method,
url,
callback=None,
retry=0,
response_validator=None,
**kwargs):
return tPool().request(method,
url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
class Workshop:
"""Simple solution for producer-consumer problem.
    WARNING: the callback should have its own timeout to avoid blocking for too long.
Demo::
import time
from torequests.main import Workshop
def callback(todo, worker_arg):
time.sleep(todo)
if worker_arg == 'worker1':
return None
return [todo, worker_arg]
fc = Workshop(range(1, 5), ['worker1', 'worker2', 'worker3'], callback)
for i in fc.get_result_as_completed():
print(i)
# [2, 'worker2']
# [3, 'worker3']
# [1, 'worker2']
# [4, 'worker3']
for i in fc.get_result_as_sequence():
print(i)
# [1, 'worker3']
# [2, 'worker3']
# [3, 'worker3']
# [4, 'worker2']
"""
def __init__(self,
todo_args,
worker_args,
callback,
timeout=None,
wait_empty_secs=1,
handle_exceptions=(),
max_failure=None,
fail_returned=None):
"""
:param todo_args: args to be send to callback
:type todo_args: List[Any]
        :param worker_args: args used to launch worker threads; repeat a value (e.g. ``[worker1, worker1, worker1]``) to run several concurrent workers with it
:type worker_args: List[Any]
:param callback: callback to consume the todo_arg from queue, handle args like callback(todo_arg, worker_arg)
:type callback: Callable
:param timeout: timeout for worker running, defaults to None
:type timeout: [float, int], optional
:param wait_empty_secs: seconds to sleep while queue is Empty, defaults to 1
:type wait_empty_secs: float, optional
:param handle_exceptions: ignore Exceptions raise from callback, defaults to ()
:type handle_exceptions: Tuple[Exception], optional
        :param max_failure: stop a worker once its failure count exceeds this value, defaults to None (no limit)
        :type max_failure: int, optional
        :param fail_returned: a callback return value that is treated as a failure, defaults to None
        :type fail_returned: Any, optional
"""
self.q = Queue()
self.futures = self.init_futures(todo_args)
self.worker_args = worker_args
self.callback = callback
self.timeout = timeout or float('inf')
self.wait_empty_secs = wait_empty_secs
self.result = None
self.handle_exceptions = handle_exceptions
self.max_failure = float('inf') if max_failure is None else max_failure
self.fail_returned = fail_returned
self._done = False
self._done_signal = object()
def init_futures(self, todo_args):
futures = []
for arg in todo_args:
f = Future()
f.arg = arg
futures.append(f)
self.q.put(f)
return futures
def run(self, as_completed=False):
"""run until all tasks finished"""
if as_completed:
return list(self.get_result_as_completed())
return list(self.get_result_as_sequence())
def get_result_as_sequence(self):
"""return a generator of results with same sequence as self.todo_args"""
self.start_workers()
for f in self.futures:
yield f.result()
def get_result_as_completed(self):
"""return a generator of results as completed sequence"""
self.start_workers()
for f in as_completed(self.futures):
yield f.result()
@property
def done(self):
self._done = self._done or all((f.done() for f in self.futures))
return self._done
def worker(self, worker_arg):
fails = 0
start_time = time_time()
while time_time(
) - start_time < self.timeout and fails <= self.max_failure:
try:
f = self.q.get(timeout=self.wait_empty_secs)
if f is self._done_signal:
break
            except Empty:
if self.done:
break
fails += 1
continue
try:
result = self.callback(f.arg, worker_arg)
except self.handle_exceptions as err:
logger.error(
'Raised {err!r}, worker_arg: {worker_arg}, todo_arg: {arg}'.
format_map(
dict(err=err,
worker_arg=repr(worker_arg)[:100],
arg=repr(f.arg)[:100])))
result = self.fail_returned
if result == self.fail_returned:
self.q.put(f)
fails += 1
sleep(self.wait_empty_secs)
continue
else:
f.set_result(result)
if fails > 0:
fails -= 1
        self.q.put_nowait(self._done_signal)
def start_workers(self):
self._done = False
for worker_arg in self.worker_args:
t = Thread(target=self.worker, args=(worker_arg,))
t.daemon = True
t.start()
|
the-stack_0_22283 | """ApacheParser is a member object of the ApacheConfigurator class."""
import copy
import fnmatch
import logging
import re
from typing import Dict
from typing import List
from certbot import errors
from certbot.compat import os
from certbot_apache._internal import apache_util
from certbot_apache._internal import constants
logger = logging.getLogger(__name__)
class ApacheParser:
"""Class handles the fine details of parsing the Apache Configuration.
.. todo:: Make parsing general... remove sites-available etc...
:ivar str root: Normalized absolute path to the server root
directory. Without trailing slash.
:ivar set modules: All module names that are currently enabled.
:ivar dict loc: Location to place directives, root - configuration origin,
default - user config file, name - NameVirtualHost,
"""
arg_var_interpreter = re.compile(r"\$\{[^ \}]*}")
fnmatch_chars = {"*", "?", "\\", "[", "]"}
def __init__(self, root, vhostroot=None, version=(2, 4),
configurator=None):
# Note: Order is important here.
# Needed for calling save() with reverter functionality that resides in
# AugeasConfigurator superclass of ApacheConfigurator. This resolves
# issues with aug.load() after adding new files / defines to parse tree
self.configurator = configurator
# Initialize augeas
self.aug = None
self.init_augeas()
if not self.check_aug_version():
raise errors.NotSupportedError(
"Apache plugin support requires libaugeas0 and augeas-lenses "
"version 1.2.0 or higher, please make sure you have you have "
"those installed.")
self.modules: Dict[str, str] = {}
self.parser_paths: Dict[str, List[str]] = {}
self.variables: Dict[str, str] = {}
# Find configuration root and make sure augeas can parse it.
self.root = os.path.abspath(root)
self.loc = {"root": self._find_config_root()}
self.parse_file(self.loc["root"])
if version >= (2, 4):
# Look up variables from httpd and add to DOM if not already parsed
self.update_runtime_variables()
# This problem has been fixed in Augeas 1.0
self.standardize_excl()
# Parse LoadModule directives from configuration files
self.parse_modules()
# Set up rest of locations
self.loc.update(self._set_locations())
# list of the active include paths, before modifications
self.existing_paths = copy.deepcopy(self.parser_paths)
# Must also attempt to parse additional virtual host root
if vhostroot:
self.parse_file(os.path.abspath(vhostroot) + "/" +
self.configurator.option("vhost_files"))
# check to see if there were unparsed define statements
if version < (2, 4):
if self.find_dir("Define", exclude=False):
raise errors.PluginError("Error parsing runtime variables")
def init_augeas(self):
""" Initialize the actual Augeas instance """
try:
import augeas
except ImportError: # pragma: no cover
raise errors.NoInstallationError("Problem in Augeas installation")
self.aug = augeas.Augeas(
# specify a directory to load our preferred lens from
loadpath=constants.AUGEAS_LENS_DIR,
# Do not save backup (we do it ourselves), do not load
# anything by default
flags=(augeas.Augeas.NONE |
augeas.Augeas.NO_MODL_AUTOLOAD |
augeas.Augeas.ENABLE_SPAN))
def check_parsing_errors(self, lens):
"""Verify Augeas can parse all of the lens files.
:param str lens: lens to check for errors
:raises .errors.PluginError: If there has been an error in parsing with
the specified lens.
"""
error_files = self.aug.match("/augeas//error")
for path in error_files:
# Check to see if it was an error resulting from the use of
# the httpd lens
lens_path = self.aug.get(path + "/lens")
# As aug.get may return null
if lens_path and lens in lens_path:
msg = (
"There has been an error in parsing the file {0} on line {1}: "
"{2}".format(
# Strip off /augeas/files and /error
path[13:len(path) - 6],
self.aug.get(path + "/line"),
self.aug.get(path + "/message")))
raise errors.PluginError(msg)
def check_aug_version(self):
""" Checks that we have recent enough version of libaugeas.
If augeas version is recent enough, it will support case insensitive
regexp matching"""
self.aug.set("/test/path/testing/arg", "aRgUMeNT")
try:
matches = self.aug.match(
"/test//*[self::arg=~regexp('argument', 'i')]")
except RuntimeError:
self.aug.remove("/test/path")
return False
self.aug.remove("/test/path")
return matches
def unsaved_files(self):
"""Lists files that have modified Augeas DOM but the changes have not
been written to the filesystem yet, used by `self.save()` and
ApacheConfigurator to check the file state.
:raises .errors.PluginError: If there was an error in Augeas, in
an attempt to save the configuration, or an error creating a
checkpoint
:returns: `set` of unsaved files
"""
save_state = self.aug.get("/augeas/save")
self.aug.set("/augeas/save", "noop")
# Existing Errors
ex_errs = self.aug.match("/augeas//error")
try:
# This is a noop save
self.aug.save()
except (RuntimeError, IOError):
self._log_save_errors(ex_errs)
# Erase Save Notes
self.configurator.save_notes = ""
raise errors.PluginError(
"Error saving files, check logs for more info.")
# Return the original save method
self.aug.set("/augeas/save", save_state)
# Retrieve list of modified files
# Note: Noop saves can cause the file to be listed twice, I used a
# set to remove this possibility. This is a known augeas 0.10 error.
save_paths = self.aug.match("/augeas/events/saved")
save_files = set()
if save_paths:
for path in save_paths:
save_files.add(self.aug.get(path)[6:])
return save_files
def ensure_augeas_state(self):
"""Makes sure that all Augeas dom changes are written to files to avoid
loss of configuration directives when doing additional augeas parsing,
causing a possible augeas.load() resulting dom reset
"""
if self.unsaved_files():
self.configurator.save_notes += "(autosave)"
self.configurator.save()
def save(self, save_files):
"""Saves all changes to the configuration files.
save() is called from ApacheConfigurator to handle the parser specific
tasks of saving.
:param list save_files: list of strings of file paths that we need to save.
"""
self.configurator.save_notes = ""
self.aug.save()
# Force reload if files were modified
# This is needed to recalculate augeas directive span
if save_files:
for sf in save_files:
self.aug.remove("/files/"+sf)
self.aug.load()
def _log_save_errors(self, ex_errs):
"""Log errors due to bad Augeas save.
:param list ex_errs: Existing errors before save
"""
# Check for the root of save problems
new_errs = self.aug.match("/augeas//error")
# logger.error("During Save - %s", mod_conf)
logger.error("Unable to save files: %s. Attempted Save Notes: %s",
", ".join(err[13:len(err) - 6] for err in new_errs
# Only new errors caused by recent save
if err not in ex_errs), self.configurator.save_notes)
def add_include(self, main_config, inc_path):
"""Add Include for a new configuration file if one does not exist
:param str main_config: file path to main Apache config file
:param str inc_path: path of file to include
"""
if not self.find_dir(case_i("Include"), inc_path):
logger.debug("Adding Include %s to %s",
inc_path, get_aug_path(main_config))
self.add_dir(
get_aug_path(main_config),
"Include", inc_path)
# Add new path to parser paths
new_dir = os.path.dirname(inc_path)
new_file = os.path.basename(inc_path)
self.existing_paths.setdefault(new_dir, []).append(new_file)
def add_mod(self, mod_name):
"""Shortcut for updating parser modules."""
if mod_name + "_module" not in self.modules:
self.modules[mod_name + "_module"] = None
if "mod_" + mod_name + ".c" not in self.modules:
self.modules["mod_" + mod_name + ".c"] = None
def reset_modules(self):
"""Reset the loaded modules list. This is called from cleanup to clear
temporarily loaded modules."""
self.modules = {}
self.update_modules()
self.parse_modules()
def parse_modules(self):
"""Iterates on the configuration until no new modules are loaded.
..todo:: This should be attempted to be done with a binary to avoid
the iteration issue. Else... parse and enable mods at same time.
"""
mods: Dict[str, str] = {}
matches = self.find_dir("LoadModule")
iterator = iter(matches)
# Make sure prev_size != cur_size for do: while: iteration
prev_size = -1
while len(mods) != prev_size:
prev_size = len(mods)
for match_name, match_filename in zip(
iterator, iterator):
mod_name = self.get_arg(match_name)
mod_filename = self.get_arg(match_filename)
if mod_name and mod_filename:
mods[mod_name] = mod_filename
mods[os.path.basename(mod_filename)[:-2] + "c"] = mod_filename
else:
logger.debug("Could not read LoadModule directive from Augeas path: %s",
match_name[6:])
self.modules.update(mods)
def update_runtime_variables(self):
"""Update Includes, Defines and Includes from httpd config dump data"""
self.update_defines()
self.update_includes()
self.update_modules()
def update_defines(self):
"""Updates the dictionary of known variables in the configuration"""
self.variables = apache_util.parse_defines(self.configurator.option("ctl"))
def update_includes(self):
"""Get includes from httpd process, and add them to DOM if needed"""
# Find_dir iterates over configuration for Include and IncludeOptional
# directives to make sure we see the full include tree present in the
# configuration files
_ = self.find_dir("Include")
matches = apache_util.parse_includes(self.configurator.option("ctl"))
if matches:
for i in matches:
if not self.parsed_in_current(i):
self.parse_file(i)
def update_modules(self):
"""Get loaded modules from httpd process, and add them to DOM"""
matches = apache_util.parse_modules(self.configurator.option("ctl"))
for mod in matches:
self.add_mod(mod.strip())
def filter_args_num(self, matches, args):
"""Filter out directives with specific number of arguments.
This function makes the assumption that all related arguments are given
in order. Thus /files/apache/directive[5]/arg[2] must come immediately
after /files/apache/directive[5]/arg[1]. Runs in 1 linear pass.
:param string matches: Matches of all directives with arg nodes
:param int args: Number of args you would like to filter
:returns: List of directives that contain # of arguments.
(arg is stripped off)
"""
filtered = []
if args == 1:
for i, match in enumerate(matches):
if match.endswith("/arg"):
filtered.append(matches[i][:-4])
else:
for i, match in enumerate(matches):
if match.endswith("/arg[%d]" % args):
# Make sure we don't cause an IndexError (end of list)
# Check to make sure arg + 1 doesn't exist
if (i == (len(matches) - 1) or
not matches[i + 1].endswith("/arg[%d]" %
(args + 1))):
filtered.append(matches[i][:-len("/arg[%d]" % args)])
return filtered
def add_dir_to_ifmodssl(self, aug_conf_path, directive, args):
"""Adds directive and value to IfMod ssl block.
Adds given directive and value along configuration path within
an IfMod mod_ssl.c block. If the IfMod block does not exist in
the file, it is created.
:param str aug_conf_path: Desired Augeas config path to add directive
:param str directive: Directive you would like to add, e.g. Listen
:param args: Values of the directive; str "443" or list of str
:type args: list
"""
# TODO: Add error checking code... does the path given even exist?
# Does it throw exceptions?
if_mod_path = self.get_ifmod(aug_conf_path, "mod_ssl.c")
# IfModule can have only one valid argument, so append after
self.aug.insert(if_mod_path + "arg", "directive", False)
nvh_path = if_mod_path + "directive[1]"
self.aug.set(nvh_path, directive)
if len(args) == 1:
self.aug.set(nvh_path + "/arg", args[0])
else:
for i, arg in enumerate(args):
self.aug.set("%s/arg[%d]" % (nvh_path, i + 1), arg)
def get_ifmod(self, aug_conf_path, mod, beginning=False):
"""Returns the path to <IfMod mod> and creates one if it doesn't exist.
:param str aug_conf_path: Augeas configuration path
:param str mod: module ie. mod_ssl.c
:param bool beginning: If the IfModule should be created to the beginning
of augeas path DOM tree.
:returns: Augeas path of the requested IfModule directive that pre-existed
or was created during the process. The path may be dynamic,
i.e. .../IfModule[last()]
:rtype: str
"""
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
if not if_mods:
return self.create_ifmod(aug_conf_path, mod, beginning)
# Strip off "arg" at end of first ifmod path
return if_mods[0].rpartition("arg")[0]
def create_ifmod(self, aug_conf_path, mod, beginning=False):
"""Creates a new <IfMod mod> and returns its path.
:param str aug_conf_path: Augeas configuration path
:param str mod: module ie. mod_ssl.c
:param bool beginning: If the IfModule should be created to the beginning
of augeas path DOM tree.
:returns: Augeas path of the newly created IfModule directive.
The path may be dynamic, i.e. .../IfModule[last()]
:rtype: str
"""
if beginning:
c_path_arg = "{}/IfModule[1]/arg".format(aug_conf_path)
# Insert IfModule before the first directive
self.aug.insert("{}/directive[1]".format(aug_conf_path),
"IfModule", True)
retpath = "{}/IfModule[1]/".format(aug_conf_path)
else:
c_path = "{}/IfModule[last() + 1]".format(aug_conf_path)
c_path_arg = "{}/IfModule[last()]/arg".format(aug_conf_path)
self.aug.set(c_path, "")
retpath = "{}/IfModule[last()]/".format(aug_conf_path)
self.aug.set(c_path_arg, mod)
return retpath
def add_dir(self, aug_conf_path, directive, args):
"""Appends directive to the end fo the file given by aug_conf_path.
.. note:: Not added to AugeasConfigurator because it may depend
on the lens
:param str aug_conf_path: Augeas configuration path to add directive
:param str directive: Directive to add
:param args: Value of the directive. ie. Listen 443, 443 is arg
:type args: list or str
"""
self.aug.set(aug_conf_path + "/directive[last() + 1]", directive)
if isinstance(args, list):
for i, value in enumerate(args, 1):
self.aug.set(
"%s/directive[last()]/arg[%d]" % (aug_conf_path, i), value)
else:
self.aug.set(aug_conf_path + "/directive[last()]/arg", args)
def add_dir_beginning(self, aug_conf_path, dirname, args):
"""Adds the directive to the beginning of defined aug_conf_path.
:param str aug_conf_path: Augeas configuration path to add directive
:param str dirname: Directive to add
:param args: Value of the directive. ie. Listen 443, 443 is arg
:type args: list or str
"""
first_dir = aug_conf_path + "/directive[1]"
self.aug.insert(first_dir, "directive", True)
self.aug.set(first_dir, dirname)
if isinstance(args, list):
for i, value in enumerate(args, 1):
self.aug.set(first_dir + "/arg[%d]" % (i), value)
else:
self.aug.set(first_dir + "/arg", args)
def add_comment(self, aug_conf_path, comment):
"""Adds the comment to the augeas path
:param str aug_conf_path: Augeas configuration path to add directive
:param str comment: Comment content
"""
self.aug.set(aug_conf_path + "/#comment[last() + 1]", comment)
def find_comments(self, arg, start=None):
"""Finds a comment with specified content from the provided DOM path
:param str arg: Comment content to search
:param str start: Beginning Augeas path to begin looking
:returns: List of augeas paths containing the comment content
:rtype: list
"""
if not start:
start = get_aug_path(self.root)
comments = self.aug.match("%s//*[label() = '#comment']" % start)
results = []
for comment in comments:
c_content = self.aug.get(comment)
if c_content and arg in c_content:
results.append(comment)
return results
def find_dir(self, directive, arg=None, start=None, exclude=True):
"""Finds directive in the configuration.
Recursively searches through config files to find directives
Directives should be in the form of a case insensitive regex currently
.. todo:: arg should probably be a list
.. todo:: arg search currently only supports direct matching. It does
not handle the case of variables or quoted arguments. This should
be adapted to use a generic search for the directive and then do a
case-insensitive self.get_arg filter
Note: Augeas is inherently case sensitive while Apache is case
insensitive. Augeas 1.0 allows case insensitive regexes like
regexp(/Listen/, "i"), however the version currently supported
by Ubuntu 0.10 does not. Thus I have included my own case insensitive
transformation by calling case_i() on everything to maintain
compatibility.
:param str directive: Directive to look for
:param arg: Specific value directive must have, None if all should
be considered
:type arg: str or None
:param str start: Beginning Augeas path to begin looking
:param bool exclude: Whether or not to exclude directives based on
variables and enabled modules
"""
# Cannot place member variable in the definition of the function so...
if not start:
start = get_aug_path(self.loc["root"])
# No regexp code
# if arg is None:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive + "']/arg")
# else:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive +
# "']/* [self::arg='" + arg + "']")
# includes = self.aug.match(start +
# "//* [self::directive='Include']/* [label()='arg']")
regex = "(%s)|(%s)|(%s)" % (case_i(directive),
case_i("Include"),
case_i("IncludeOptional"))
matches = self.aug.match(
"%s//*[self::directive=~regexp('%s')]" % (start, regex))
if exclude:
matches = self.exclude_dirs(matches)
if arg is None:
arg_suffix = "/arg"
else:
arg_suffix = "/*[self::arg=~regexp('%s')]" % case_i(arg)
ordered_matches: List[str] = []
# TODO: Wildcards should be included in alphabetical order
# https://httpd.apache.org/docs/2.4/mod/core.html#include
for match in matches:
dir_ = self.aug.get(match).lower()
if dir_ in ("include", "includeoptional"):
ordered_matches.extend(self.find_dir(
directive, arg,
self._get_include_path(self.get_arg(match + "/arg")),
exclude))
# This additionally allows Include
if dir_ == directive.lower():
ordered_matches.extend(self.aug.match(match + arg_suffix))
return ordered_matches
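    # Hypothetical usage sketch (`parser` is an ApacheParser instance): list every Listen
    # port seen in the parsed configuration.
    #
    #     listen_args = parser.find_dir("Listen")
    #     ports = [parser.get_arg(path) for path in listen_args]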
def get_all_args(self, match):
"""
Tries to fetch all arguments for a directive. See get_arg.
Note that if match is an ancestor node, it returns all names of
child directives as well as the list of arguments.
"""
if match[-1] != "/":
match = match+"/"
allargs = self.aug.match(match + '*')
return [self.get_arg(arg) for arg in allargs]
def get_arg(self, match):
"""Uses augeas.get to get argument value and interprets result.
This also converts all variables and parameters appropriately.
"""
value = self.aug.get(match)
# No need to strip quotes for variables, as apache2ctl already does
# this, but we do need to strip quotes for all normal arguments.
# Note: normal argument may be a quoted variable
# e.g. strip now, not later
if not value:
return None
value = value.strip("'\"")
variables = ApacheParser.arg_var_interpreter.findall(value)
for var in variables:
# Strip off ${ and }
try:
value = value.replace(var, self.variables[var[2:-1]])
except KeyError:
raise errors.PluginError("Error Parsing variable: %s" % var)
return value
def get_root_augpath(self):
"""
Returns the Augeas path of root configuration.
"""
return get_aug_path(self.loc["root"])
def exclude_dirs(self, matches):
"""Exclude directives that are not loaded into the configuration."""
filters = [("ifmodule", self.modules.keys()), ("ifdefine", self.variables)]
valid_matches = []
for match in matches:
for filter_ in filters:
if not self._pass_filter(match, filter_):
break
else:
valid_matches.append(match)
return valid_matches
def _pass_filter(self, match, filter_):
"""Determine if directive passes a filter.
:param str match: Augeas path
:param list filter: list of tuples of form
[("lowercase if directive", set of relevant parameters)]
"""
match_l = match.lower()
last_match_idx = match_l.find(filter_[0])
while last_match_idx != -1:
# Check args
end_of_if = match_l.find("/", last_match_idx)
# This should be aug.get (vars are not used e.g. parser.aug_get)
expression = self.aug.get(match[:end_of_if] + "/arg")
if expression.startswith("!"):
# Strip off "!"
if expression[1:] in filter_[1]:
return False
else:
if expression not in filter_[1]:
return False
last_match_idx = match_l.find(filter_[0], end_of_if)
return True
def standard_path_from_server_root(self, arg):
"""Ensure paths are consistent and absolute
:param str arg: Argument of directive
:returns: Standardized argument path
:rtype: str
"""
# Remove beginning and ending quotes
arg = arg.strip("'\"")
# Standardize the include argument based on server root
if not arg.startswith("/"):
# Normpath will condense ../
arg = os.path.normpath(os.path.join(self.root, arg))
else:
arg = os.path.normpath(arg)
return arg
def _get_include_path(self, arg):
"""Converts an Apache Include directive into Augeas path.
Converts an Apache Include directive argument into an Augeas
searchable path
.. todo:: convert to use os.path.join()
:param str arg: Argument of Include directive
:returns: Augeas path string
:rtype: str
"""
# Check to make sure only expected characters are used <- maybe remove
# validChars = re.compile("[a-zA-Z0-9.*?_-/]*")
# matchObj = validChars.match(arg)
# if matchObj.group() != arg:
# logger.error("Error: Invalid regexp characters in %s", arg)
# return []
arg = self.standard_path_from_server_root(arg)
# Attempts to add a transform to the file if one does not already exist
if os.path.isdir(arg):
self.parse_file(os.path.join(arg, "*"))
else:
self.parse_file(arg)
# Argument represents an fnmatch regular expression, convert it
# Split up the path and convert each into an Augeas accepted regex
# then reassemble
split_arg = arg.split("/")
for idx, split in enumerate(split_arg):
if any(char in ApacheParser.fnmatch_chars for char in split):
# Turn it into an augeas regex
# TODO: Can this instead be an augeas glob instead of regex
split_arg[idx] = ("* [label()=~regexp('%s')]" %
self.fnmatch_to_re(split))
# Reassemble the argument
# Note: This also normalizes the argument /serverroot/ -> /serverroot
arg = "/".join(split_arg)
return get_aug_path(arg)
def fnmatch_to_re(self, clean_fn_match):
"""Method converts Apache's basic fnmatch to regular expression.
Assumption - Configs are assumed to be well-formed and only writable by
privileged users.
https://apr.apache.org/docs/apr/2.0/apr__fnmatch_8h_source.html
:param str clean_fn_match: Apache style filename match, like globs
:returns: regex suitable for augeas
:rtype: str
"""
# Since Python 3.6, it returns a different pattern like (?s:.*\.load)\Z
return fnmatch.translate(clean_fn_match)[4:-3] # pragma: no cover
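    # Hypothetical illustration (observed on CPython 3.8; exact output may vary by version):
    #
    #     fnmatch.translate("*.conf")        # -> r"(?s:.*\.conf)\Z"
    #     fnmatch.translate("*.conf")[4:-3]  # -> r".*\.conf", usable inside Augeas regexp()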
def parse_file(self, filepath):
"""Parse file with Augeas
Checks to see if file_path is parsed by Augeas
If filepath isn't parsed, the file is added and Augeas is reloaded
:param str filepath: Apache config file path
"""
use_new, remove_old = self._check_path_actions(filepath)
# Ensure that we have the latest Augeas DOM state on disk before
# calling aug.load() which reloads the state from disk
self.ensure_augeas_state()
# Test if augeas included file for Httpd.lens
# Note: This works for augeas globs, ie. *.conf
if use_new:
inc_test = self.aug.match(
"/augeas/load/Httpd['%s' =~ glob(incl)]" % filepath)
if not inc_test:
# Load up files
# This doesn't seem to work on TravisCI
# self.aug.add_transform("Httpd.lns", [filepath])
if remove_old:
self._remove_httpd_transform(filepath)
self._add_httpd_transform(filepath)
self.aug.load()
def parsed_in_current(self, filep):
"""Checks if the file path is parsed by current Augeas parser config
ie. returns True if the file is found on a path that's found in live
Augeas configuration.
:param str filep: Path to match
:returns: True if file is parsed in existing configuration tree
:rtype: bool
"""
return self._parsed_by_parser_paths(filep, self.parser_paths)
def parsed_in_original(self, filep):
"""Checks if the file path is parsed by existing Apache config.
ie. returns True if the file is found on a path that matches Include or
IncludeOptional statement in the Apache configuration.
:param str filep: Path to match
:returns: True if file is parsed in existing configuration tree
:rtype: bool
"""
return self._parsed_by_parser_paths(filep, self.existing_paths)
def _parsed_by_parser_paths(self, filep, paths):
"""Helper function that searches through provided paths and returns
True if file path is found in the set"""
for directory in paths:
for filename in paths[directory]:
if fnmatch.fnmatch(filep, os.path.join(directory, filename)):
return True
return False
def _check_path_actions(self, filepath):
"""Determine actions to take with a new augeas path
This helper function will return a tuple that defines
if we should try to append the new filepath to augeas
parser paths, and / or remove the old one with more
narrow matching.
:param str filepath: filepath to check the actions for
"""
try:
new_file_match = os.path.basename(filepath)
existing_matches = self.parser_paths[os.path.dirname(filepath)]
if "*" in existing_matches:
use_new = False
else:
use_new = True
remove_old = new_file_match == "*"
except KeyError:
use_new = True
remove_old = False
return use_new, remove_old
def _remove_httpd_transform(self, filepath):
"""Remove path from Augeas transform
:param str filepath: filepath to remove
"""
remove_basenames = self.parser_paths[os.path.dirname(filepath)]
remove_dirname = os.path.dirname(filepath)
for name in remove_basenames:
remove_path = remove_dirname + "/" + name
remove_inc = self.aug.match(
"/augeas/load/Httpd/incl [. ='%s']" % remove_path)
self.aug.remove(remove_inc[0])
self.parser_paths.pop(remove_dirname)
def _add_httpd_transform(self, incl):
"""Add a transform to Augeas.
This function will correctly add a transform to augeas
The existing augeas.add_transform in python doesn't seem to work for
Travis CI as it loads in libaugeas.so.0.10.0
:param str incl: filepath to include for transform
"""
last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
if last_include:
# Insert a new node immediately after the last incl
self.aug.insert(last_include[0], "incl", False)
self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
# On first use... must load lens and add file to incl
else:
# Augeas uses base 1 indexing... insert at beginning...
self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
self.aug.set("/augeas/load/Httpd/incl", incl)
# Add included path to paths dictionary
try:
self.parser_paths[os.path.dirname(incl)].append(
os.path.basename(incl))
except KeyError:
self.parser_paths[os.path.dirname(incl)] = [
os.path.basename(incl)]
def standardize_excl(self):
"""Standardize the excl arguments for the Httpd lens in Augeas.
Note: Hack!
Standardize the excl arguments for the Httpd lens in Augeas
Servers sometimes give incorrect defaults
Note: This problem should be fixed in Augeas 1.0. Unfortunately,
Augeas 0.10 appears to be the most popular version currently.
"""
# attempt to protect against augeas error in 0.10.0 - ubuntu
# *.augsave -> /*.augsave upon augeas.load()
# Try to avoid bad httpd files
# There has to be a better way... but after a day and a half of testing
# I had no luck
# This is a hack... work around... submit to augeas if still not fixed
excl = ["*.augnew", "*.augsave", "*.dpkg-dist", "*.dpkg-bak",
"*.dpkg-new", "*.dpkg-old", "*.rpmsave", "*.rpmnew",
"*~",
self.root + "/*.augsave",
self.root + "/*~",
self.root + "/*/*augsave",
self.root + "/*/*~",
self.root + "/*/*/*.augsave",
self.root + "/*/*/*~"]
for i, excluded in enumerate(excl, 1):
self.aug.set("/augeas/load/Httpd/excl[%d]" % i, excluded)
self.aug.load()
def _set_locations(self):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
default = self.loc["root"]
temp = os.path.join(self.root, "ports.conf")
if os.path.isfile(temp):
listen = temp
name = temp
else:
listen = default
name = default
return {"default": default, "listen": listen, "name": name}
def _find_config_root(self):
"""Find the Apache Configuration Root file."""
location = ["apache2.conf", "httpd.conf", "conf/httpd.conf"]
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError("Could not find configuration root")
def case_i(string):
"""Returns case insensitive regex.
Returns a sloppy, but necessary version of a case insensitive regex.
Any string should be able to be submitted and the string is
escaped and then made case insensitive.
May be replaced by a more proper /i once augeas 1.0 is widely
supported.
:param str string: string to make case i regex
"""
return "".join("[" + c.upper() + c.lower() + "]"
if c.isalpha() else c for c in re.escape(string))
def get_aug_path(file_path):
"""Return augeas path for full filepath.
:param str file_path: Full filepath
"""
return "/files%s" % file_path
|
the-stack_0_22284 | '''
Copyright (C) 2018 CG Cookie
https://github.com/CGCookie/retopoflow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import bpy
import bgl
from .cookiecutter import CookieCutter
from ..common.maths import Point2D
from ..common import ui
from ..common.drawing import Drawing
class CookieCutter_Test(CookieCutter):
bl_idname = "view3d.cookiecutter_test"
bl_label = "CookieCutter Test (Example)"
default_keymap = {
'commit': 'RET',
'cancel': 'ESC',
'grab': 'G',
}
def start(self):
opts = {
'pos': 9,
'movable': True,
'bgcolor': (0.2, 0.2, 0.2, 0.8),
'padding': 0,
}
win = self.wm.create_window('test', opts)
self.lbl = win.add(ui.UI_Label('main'))
self.ui_action = win.add(ui.UI_Label('nothing'))
exitbuttons = win.add(ui.UI_Container(margin=0,vertical=False))
exitbuttons.add(ui.UI_Button('commit', self.done))
exitbuttons.add(ui.UI_Button('cancel', lambda:self.done(cancel=True)))
#self.window_manager.set_focus(win, darken=False, close_on_leave=True)
def update(self):
self.ui_action.set_label('Press: %s' % (','.join(self.actions.now_pressed.keys()),))
@CookieCutter.FSM_State('main')
def modal_main(self):
Drawing.set_cursor('DEFAULT')
if self.actions.pressed('grab'):
self.lbl.set_label('grab!')
return 'grab'
@CookieCutter.FSM_State('grab')
def modal_grab(self):
Drawing.set_cursor('HAND')
if self.actions.pressed('commit'):
self.lbl.set_label('commit grab')
return 'main'
if self.actions.pressed('cancel'):
self.lbl.set_label('cancel grab')
return 'main'
@CookieCutter.Draw('pre3d')
def draw_preview(self):
bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS)
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glPushMatrix()
bgl.glLoadIdentity()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPushMatrix()
bgl.glLoadIdentity()
bgl.glEnable(bgl.GL_BLEND)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glBegin(bgl.GL_QUADS) # TODO: not use immediate mode
bgl.glColor4f(0,0,0.2,0.5)
bgl.glVertex2f(-1, -1)
bgl.glVertex2f( 1, -1)
bgl.glColor4f(0,0,0.2,0)
bgl.glVertex2f( 1, 1)
bgl.glVertex2f(-1, 1)
bgl.glEnd()
bgl.glPopMatrix()
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glPopMatrix()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPopAttrib()
@CookieCutter.Draw('post3d')
def draw_postview(self):
bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS)
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glPushMatrix()
bgl.glLoadIdentity()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPushMatrix()
bgl.glLoadIdentity()
bgl.glEnable(bgl.GL_BLEND)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glBegin(bgl.GL_QUADS) # TODO: not use immediate mode
bgl.glColor4f(0,0,0,0)
bgl.glVertex2f(-1, -1)
bgl.glVertex2f( 1, -1)
bgl.glColor4f(0,0.2,0,0.5)
bgl.glVertex2f( 1, 1)
bgl.glVertex2f(-1, 1)
bgl.glEnd()
bgl.glPopMatrix()
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glPopMatrix()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPopAttrib()
@CookieCutter.Draw('post2d')
def draw_postpixel(self):
bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS)
bgl.glEnable(bgl.GL_BLEND)
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glPushMatrix()
bgl.glLoadIdentity()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPushMatrix()
bgl.glLoadIdentity()
bgl.glColor4f(1,0,0,0.2) # TODO: use window background color??
bgl.glEnable(bgl.GL_BLEND)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glBegin(bgl.GL_QUADS) # TODO: not use immediate mode
bgl.glVertex2f(-1, -1)
bgl.glVertex2f( 1, -1)
bgl.glVertex2f( 1, 1)
bgl.glVertex2f(-1, 1)
bgl.glEnd()
bgl.glPopMatrix()
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glPopMatrix()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPopAttrib()
|
the-stack_0_22285 | from aleph.tests.util import TestCase
from aleph.logic.extractors.aggregate import EntityAggregator
from aleph.logic.extractors.extract import extract_entities
class TestNER(TestCase):
def test_ner_service(self):
ctx = EntityAggregator()
text = """This is a document about the United States. But also about
Syria and Germany.
"""
text = text + text + text + text
entities = extract_entities(ctx, text, 'en')
entities = [str(r) for r in entities]
assert 'United States' in entities, entities
assert 'Germany' in entities, entities
assert 'Syria' in entities, entities
|
the-stack_0_22286 |
import enaml
from enaml.qt.qt_application import QtApplication
from enaml.qt.qt_factories import QT_FACTORIES
from enaml.application import ProxyResolver
# stub for now
CUSTOM_FACTORIES = {}
class App(QtApplication):
def __init__(self):
super(App, self).__init__()
factories = dict(QT_FACTORIES)
factories.update(CUSTOM_FACTORIES)
self.resolver = ProxyResolver(factories=factories)
def main():
app = App()
with enaml.imports():
from pystudio import Main
view = Main()
view.show()
app.start()
if __name__ == "__main__":
main()
|
the-stack_0_22287 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import EncoCoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
Decimal,
disconnect_nodes,
sync_blocks,
sync_mempools
)
class AbandonConflictTest(EncoCoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [["-minrelaytxfee=0.00001"],[]]
def run_test(self):
self.nodes[0].generate(5)
sync_blocks(self.nodes)
self.nodes[1].generate(110)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 10)
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 10)
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 10)
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
# Disconnect nodes so node0's transactions don't get into node1's mempool
disconnect_nodes(self.nodes[0], 1)
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == 10)
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == 10)
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == 10)
inputs =[]
# spend 10btc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = 14.99998
outputs[self.nodes[1].getnewaddress()] = 5
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = 24.9996
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# Create a child tx spending ABC2
inputs = []
inputs.append({"txid":txABC2, "vout":0})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = 24.999
signed3 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
# note tx is never directly referenced, only abandoned as a child of the above
self.nodes[0].sendrawtransaction(signed3["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, Decimal(round(balance - Decimal("30") + Decimal(24.999), 8)))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
# Note had to make sure tx did not have AllowFree priority
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
# Verify txs no longer in mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.999"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
# But if its received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so its unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = 9.9999
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
print("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
print("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
print(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
|
the-stack_0_22288 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pyasn1/license.html
#
from pyasn1 import error
from pyasn1.codec.cer import encoder
from pyasn1.type import univ
__all__ = ['Encoder', 'encode']
class SetEncoder(encoder.SetEncoder):
@staticmethod
def _componentSortKey(componentAndType):
"""Sort SET components by tag
Sort depending on the actual Choice value (dynamic sort)
"""
component, asn1Spec = componentAndType
if asn1Spec is None:
compType = component
else:
compType = asn1Spec
if compType.typeId == univ.Choice.typeId and not compType.tagSet:
if asn1Spec is None:
return component.getComponent().tagSet
else:
# TODO: move out of sorting key function
names = [namedType.name for namedType in asn1Spec.componentType.namedTypes
if namedType.name in component]
if len(names) != 1:
raise error.PyAsn1Error(
'%s components for Choice at %r' % (len(names) and 'Multiple ' or 'None ', component))
# TODO: support nested CHOICE ordering
return asn1Spec[names[0]].tagSet
else:
return compType.tagSet
TAG_MAP = encoder.TAG_MAP.copy()
TAG_MAP.update({
# Set & SetOf have same tags
univ.Set.tagSet: SetEncoder()
})
TYPE_MAP = encoder.TYPE_MAP.copy()
TYPE_MAP.update({
# Set & SetOf have same tags
univ.Set.typeId: SetEncoder()
})
class SingleItemEncoder(encoder.SingleItemEncoder):
fixedDefLengthMode = True
fixedChunkSize = 0
TAG_MAP = TAG_MAP
TYPE_MAP = TYPE_MAP
class Encoder(encoder.Encoder):
SINGLE_ITEM_ENCODER = SingleItemEncoder
#: Turns ASN.1 object into DER octet stream.
#:
#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: walks all its components recursively and produces a DER octet stream.
#:
#: Parameters
#: ----------
#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: A Python or pyasn1 object to encode. If Python object is given, `asnSpec`
#: parameter is required to guide the encoding process.
#:
#: Keyword Args
#: ------------
#: asn1Spec:
#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#:
#: Returns
#: -------
#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
#: Given ASN.1 object encoded into BER octet-stream
#:
#: Raises
#: ------
#: ~pyasn1.error.PyAsn1Error
#: On encoding errors
#:
#: Examples
#: --------
#: Encode Python value into DER with ASN.1 schema
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> encode([1, 2, 3], asn1Spec=seq)
#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
#:
#: Encode ASN.1 value object into DER
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> seq.extend([1, 2, 3])
#: >>> encode(seq)
#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
#:
encode = Encoder()
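# Small usage sketch (an addition, not part of pyasn1): DER additionally
# requires SET components to be emitted in ascending tag order, which is what
# SetEncoder._componentSortKey above provides. The Sample type is example-only.
if __name__ == '__main__':  # pragma: no cover
    from pyasn1.type import namedtype

    class Sample(univ.Set):
        componentType = namedtype.NamedTypes(
            namedtype.NamedType('num', univ.Integer()),
            namedtype.NamedType('flag', univ.Boolean()),
        )

    sample = Sample()
    sample['num'] = 1
    sample['flag'] = True
    # 'flag' (BOOLEAN, tag 1) is serialized before 'num' (INTEGER, tag 2),
    # even though it is declared second.
    print(encode(sample).hex())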
|
the-stack_0_22294 | # -*- coding: utf-8 -*-
from unittest import TestCase
import QUANTAXIS as QA
from QUANTAXIS.QAFetch import QATdx
from QUANTAXIS.QAFetch.QATdx import QA_fetch_get_stock_day, select_best_ip, ping
from QUANTAXIS.QAUtil.QASetting import QA_Setting
import datetime
class TestSelect_best_ip(TestCase):
def test_select_best_ip(self):
best_ip = select_best_ip()
ip = best_ip['stock']['ip']
port = best_ip['stock']['port']
        self.assertTrue(isinstance(ip, str), 'Failed to get ip')
        self.assertTrue(isinstance(port, int), 'Failed to get port number')
        self.assertTrue(ping(ip, port, 'stock') < datetime.timedelta(0, 1, 0), 'Address did not respond to ping: {} {} {}'.format(ip, port, ping(ip, port, 'stock')))
# ip = best_ip['future']['ip']
# port = best_ip['future']['port']
        # self.assertTrue(ping(ip, port, 'stock') < datetime.timedelta(0, 1, 0), 'Address did not respond to ping: {} {} {}'.format(ip, port, ping(ip, port, 'stock')))
code = '000001'
days = 300
start = datetime.datetime.now().date() - datetime.timedelta(days)
end = datetime.datetime.now().date() - datetime.timedelta(10)
data = QA_fetch_get_stock_day(code, start_date=start, end_date=end)
print(data)
        self.assertTrue(len(data) > (end - start).days / 2,
                        'Returned data count does not match; data length: {}, days (including holidays): {}'.format(len(data), (end - start).days / 2))
default_ip = {'stock': {'ip': None, 'port': None},
'future': {'ip': None, 'port': None}}
qasetting = QA_Setting()
qasetting.set_config(
section='IPLIST', option='default', default_value=default_ip)
best_ip = select_best_ip()
ip = best_ip['stock']['ip']
port = best_ip['stock']['port']
        self.assertTrue(isinstance(ip, str) or ip is None, 'Failed to get ip')
        self.assertTrue(isinstance(port, int) or port is None, 'Failed to get port number')
ip = best_ip['future']['ip']
port = best_ip['future']['port']
        self.assertTrue(isinstance(ip, str) or ip is None, 'Failed to get ip')
        self.assertTrue(isinstance(port, int) or port is None, 'Failed to get port number')
data = QA_fetch_get_stock_day(code, start, end)
        self.assertTrue(len(data) > (end - start).days / 2,
                        'Returned data count does not match; data length: {}, days (including holidays): {}'.format(len(data), (end - start).days / 2))
|
the-stack_0_22296 | import random
import math
from environment import Agent, Environment
from planner import RoutePlanner
from simulator import Simulator
class LearningAgent(Agent):
""" An agent that learns to drive in the Smartcab world.
This is the object you will be modifying. """
def __init__(self, env, learning=False, epsilon=1.0, alpha=0.5):
super(LearningAgent, self).__init__(env) # Set the agent in the environment
self.planner = RoutePlanner(self.env, self) # Create a route planner
self.valid_actions = self.env.valid_actions # The set of valid actions
# Set parameters of the learning agent
self.learning = learning # Whether the agent is expected to learn
self.Q = dict() # Create a Q-table which will be a dictionary of tuples
self.epsilon = epsilon # Random exploration factor
self.alpha = alpha # Learning factor
###########
## TO DO ##
###########
# Set any additional class parameters as needed
self.t = 1
self.a = 0.01
def reset(self, destination=None, testing=False):
""" The reset function is called at the beginning of each trial.
'testing' is set to True if testing trials are being used
once training trials have completed. """
# Select the destination as the new location to route to
self.planner.route_to(destination)
###########
## TO DO ##
###########
# Update epsilon using a decay function of your choice
# Update additional class parameters as needed
# If 'testing' is True, set epsilon and alpha to 0
if testing:
self.epsilon = 0
self.alpha = 0
else:
# self.epsilon = self.epsilon - 0.05 #Linear decay
self.epsilon = math.exp(-self.a*self.t) #Exponential decay
self.t += 1
# self.epsilon = self.a**self.t
# self.t += 1
# self.epsilon = 1 / self.t**2
# self.t += 1
# self.epsilon = math.cos(self.a*self.t)
# self.t += 1
return None
def build_state(self):
""" The build_state function is called when the agent requests data from the
environment. The next waypoint, the intersection inputs, and the deadline
are all features available to the agent. """
# Collect data about the environment
waypoint = self.planner.next_waypoint() # The next waypoint
inputs = self.env.sense(self) # Visual input - intersection light and traffic
deadline = self.env.get_deadline(self) # Remaining deadline
del inputs['right']
###########
## TO DO ##
###########
# NOTE : you are not allowed to engineer features outside of the inputs available.
# Because the aim of this project is to teach Reinforcement Learning, we have placed
# constraints in order for you to learn how to adjust epsilon and alpha, and thus learn about the balance between exploration and exploitation.
# With the hand-engineered features, this learning process gets entirely negated.
# Set 'state' as a tuple of relevant data for the agent
# state = (waypoint, inputs)
return (waypoint,) + tuple(inputs.values())
def get_maxQ(self, state):
""" The get_max_Q function is called when the agent is asked to find the
maximum Q-value of all actions based on the 'state' the smartcab is in. """
###########
## TO DO ##
###########
# Calculate the maximum Q-value of all actions for a given state
maxQ = None
for action, q_value in self.Q[state].items():
if (maxQ == None) or (maxQ < q_value):
maxQ = q_value
return maxQ
def createQ(self, state):
""" The createQ function is called when a state is generated by the agent. """
###########
## TO DO ##
###########
if self.learning: # When learning, check if the 'state' is not in the Q-table
if not (state in self.Q.keys()): # If it is not, create a new dictionary for that state
self.Q[state] = {None: 0.0, 'forward': 0.0, 'left': 0.0 , 'right': 0.0} # Then, for each action available, set the initial Q-value to 0.0
return
def choose_action(self, state):
""" The choose_action function is called when the agent is asked to choose
which action to take, based on the 'state' the smartcab is in. """
# Set the agent state and default action
self.state = state
self.next_waypoint = self.planner.next_waypoint()
###########
## TO DO ##
###########
if self.learning: # When learning, choose a random action
if (random.random() < self.epsilon):
action = random.choice(self.valid_actions)
else: # Otherwise, choose an action with the highest Q-value for the current state
maxQ = self.get_maxQ(state)
best_actions = []
for action, q_value in self.Q[state].items(): # Be sure that when choosing an action with highest Q-value that you randomly select between actions that "tie".
if q_value == maxQ:
best_actions.append(action)
action = random.choice(best_actions)
else:# When not learning, choose a random action
action = random.choice(self.valid_actions)
return action
def learn(self, state, action, reward):
""" The learn function is called after the agent completes an action and
receives a reward. This function does not consider future rewards
when conducting learning. """
###########
## TO DO ##
###########
# When learning, implement the value iteration update rule
# Use only the learning rate 'alpha' (do not use the discount factor 'gamma')
if self.learning:
self.Q[state][action] = (1 - self.alpha) * self.Q[state][action] + self.alpha * reward
return
def update(self):
""" The update function is called when a time step is completed in the
environment for a given trial. This function will build the agent
state, choose an action, receive a reward, and learn if enabled. """
state = self.build_state() # Get current state
self.createQ(state) # Create 'state' in Q-table
action = self.choose_action(state) # Choose an action
reward = self.env.act(self, action) # Receive a reward
self.learn(state, action, reward) # Q-learn
return
def run():
""" Driving function for running the simulation.
Press ESC to close the simulation, or [SPACE] to pause the simulation. """
##############
# Create the environment
# Flags:
# verbose - set to True to display additional output from the simulation
# num_dummies - discrete number of dummy agents in the environment, default is 100
# grid_size - discrete number of intersections (columns, rows), default is (8, 6)
env = Environment(verbose=True)
##############
# Create the driving agent
# Flags:
# learning - set to True to force the driving agent to use Q-learning
# * epsilon - continuous value for the exploration factor, default is 1
# * alpha - continuous value for the learning rate, default is 0.5
agent = env.create_agent(LearningAgent, learning = True, alpha=0.5, epsilon=1)
##############
# Follow the driving agent
# Flags:
# enforce_deadline - set to True to enforce a deadline metric
env.set_primary_agent(agent, enforce_deadline = True)
##############
# Create the simulation
# Flags:
# update_delay - continuous time (in seconds) between actions, default is 2.0 seconds
# display - set to False to disable the GUI if PyGame is enabled
# log_metrics - set to True to log trial and simulation results to /logs
# optimized - set to True to change the default log file name
sim = Simulator(env, update_delay = 0.01, log_metrics = True, display=True, optimized=True)
##############
# Run the simulator
# Flags:
# tolerance - epsilon tolerance before beginning testing, default is 0.05
# n_test - discrete number of testing trials to perform, default is 0
sim.run(n_test = 10, tolerance=0.05)
if __name__ == '__main__':
run()
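# Worked check of the update rule used in LearningAgent.learn() above (added
# for clarity; the numbers are made up). With no discount factor the rule is
#     Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * reward
# so with alpha = 0.5, Q = 0.0 and reward = 2.0:
#     new Q = 0.5 * 0.0 + 0.5 * 2.0 = 1.0
# and applying the same reward once more:
#     new Q = 0.5 * 1.0 + 0.5 * 2.0 = 1.5
# i.e. Q moves toward the observed reward without ever overshooting it.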
|
the-stack_0_22298 | import discord
from redbot.core import commands
from redbot.core import Config
from redbot.core import checks
from redbot.core.i18n import Translator, cog_i18n
import aiohttp
import asyncio
from datetime import date
from io import BytesIO
import logging
import time
_ = Translator('GiftAway', __file__)
class GuildConvert(commands.Converter):
"""Attempts to convert a value into a guild object."""
async def convert(self, ctx, value):
try:
guild = ctx.bot.get_guild(int(value))
if guild is not None:
return guild
raise commands.BadArgument(_('Could not find guild `{value}`.').format(value=value))
except ValueError:
for guild in ctx.bot.guilds:
if guild.name == value:
return guild
raise commands.BadArgument(_('Could not find guild `{value}`.').format(value=value))
class GiftError(RuntimeError):
"""Generic error for the Gift class."""
pass
class Gift:
"""Object representing a specific gift."""
def __repr__(self):
return f'<Gift game_name={self.game_name!r}, invoke_id={self.invoke_id!r}>'
@classmethod
async def create(cls, cog, ctx, channels: list, game_name: str, keys: list):
obj = cls()
obj.cog = cog
obj.author = ctx.author
obj.invoke_id = str(ctx.message.id) #keys auto cast to str, avoid confusion
obj.game_name = game_name
obj.keys = keys
obj.claimed = []
obj.claimed_by_text = []
obj.claimed_by_id = []
obj.link_url = None
obj.cover_url = None
obj.fields = []
if len(keys) == 0:
raise GiftError(_('At least one key must be provided.'))
if not channels:
raise GiftError(_('No channels provided.'))
await obj.get_game_data()
embed = obj.gen_embed()
messages = await asyncio.gather(*(channel.send(embed=embed) for channel in channels), return_exceptions=True)
#filter exceptions
obj.messages = [x for x in messages if isinstance(x, discord.Message)]
asyncio.gather(*(message.add_reaction('\N{WHITE HEAVY CHECK MARK}') for message in obj.messages), return_exceptions=True)
return obj
@classmethod
async def from_dict(cls, cog, invoke_id, dict):
obj = cls()
obj.cog = cog
author = cog.bot.get_user(dict['author'])
if not author:
raise GiftError(_('Could not find the author.'))
obj.author = author
obj.invoke_id = invoke_id
obj.game_name = dict['game_name']
obj.keys = dict['keys']
obj.claimed = dict['claimed']
obj.claimed_by_id = dict['claimed_by_id']
obj.claimed_by_text = dict['claimed_by_text']
obj.link_url = dict['link_url']
obj.cover_url = dict['cover_url']
obj.fields = dict['fields']
messages = []
for message_data in dict['messages']:
g = cog.bot.get_guild(message_data[0])
if not g:
continue
c = g.get_channel(message_data[1])
if not c:
continue
try:
m = await c.fetch_message(message_data[2])
except discord.NotFound:
continue
messages.append(m)
if not messages:
raise GiftError(_('No messages could be found.'))
obj.messages = messages
return obj
def to_dict(self):
return self.invoke_id, {
'author': self.author.id,
'game_name': self.game_name,
'keys': self.keys.copy(),
'claimed': self.claimed.copy(),
'claimed_by_id': self.claimed_by_id.copy(),
'claimed_by_text': self.claimed_by_text.copy(),
'link_url': self.link_url,
'cover_url': self.cover_url,
'fields': self.fields.copy(),
'messages': [[message.guild.id, message.channel.id, message.id] for message in self.messages]
}
def gen_embed(self):
total = len(self.keys) + len(self.claimed)
if self.keys:
desc = _(
'Click the reaction below to grab a key.\n\n'
'Currently available: **{top}/{bottom}**'
).format(top=len(self.keys), bottom=total)
else:
desc = _('All keys have been claimed!')
if self.claimed_by_text:
desc += _('\n\nGrabbed by:')
for text in self.claimed_by_text:
desc += text
embed = discord.Embed(
title=_(
'{author} is gifting {num} keys for **{game}**.'
).format(author=self.author.display_name, num=total, game=self.game_name),
description=desc,
url = self.link_url or discord.Embed.Empty
)
for field in self.fields:
embed.add_field(name=field[0], value=field[1], inline=False)
if self.cover_url:
embed.set_image(url=self.cover_url)
return embed
async def get_game_data(self):
"""Get some data for a game from IGDB"""
client_id, access_token = await self.cog._get_api_keys()
if not (client_id and access_token):
return
URL_BASE = 'https://api.igdb.com/v4'
headers = {'Accept': 'application/json', 'Client-ID': client_id, 'Authorization': f'Bearer {access_token}'}
async with aiohttp.ClientSession() as session:
async with session.post(
URL_BASE + '/games',
headers=headers,
data=f'search "{self.game_name}"; fields cover,first_release_date,genres,rating,summary,url,websites; limit 1;'
) as response:
resp = await response.json(content_type=None)
#The game could not be found
if not resp:
return
game = resp[0]
released = game.get('first_release_date', None)
rating = game.get('rating', None)
summary = game.get('summary', None)
game_url = game.get('url', None)
cover_id = game.get('cover', None)
if game.get('genres', None):
genre_ids = '(' + ','.join(str(g) for g in game['genres']) + ')'
else:
genre_ids = None
if game.get('websites', None):
website_ids = '(' + ','.join(str(w) for w in game['websites']) + ')'
else:
website_ids = None
if cover_id:
async with session.post(
URL_BASE + '/covers',
headers=headers,
data=f'where id = {cover_id}; fields url; limit 1;'
) as response:
resp = await response.json(content_type=None)
if resp:
cover_url = resp[0]['url'][2:].replace('t_thumb', 't_cover_big_2x')
self.cover_url = 'https://' + cover_url
if genre_ids:
async with session.post(
URL_BASE + '/genres',
headers=headers,
data=f'where id = {genre_ids}; fields name;'
) as response:
resp = await response.json(content_type=None)
genres = [g['name'] for g in resp]
else:
genres = None
if website_ids:
async with session.post(
URL_BASE + '/websites',
headers=headers,
data=f'where id = {website_ids} & category = 1; fields url; limit 1;'
) as response:
resp = await response.json(content_type=None)
if not resp:
website = None
else:
website = resp[0]['url']
else:
website = None
game_info = ''
if released:
game_info += _('**Released:** {released}\n').format(released=date.fromtimestamp(released))
if genres:
game_info += _('**Genres:** {genres}\n').format(genres=", ".join(genres))
if rating:
game_info += _('**Rating:** {rating:.1f}').format(rating=rating)
self.link_url = website or game_url
if game_info:
self.fields.append([_('Game info'), game_info])
if summary:
self.fields.append([_('Summary'), summary[:1000]])
async def give_key(self, member):
"""Give one of the keys to a particular user."""
key = self.keys.pop(0)
self.claimed_by_id.append(member.id)
self.claimed_by_text.append(_(
'\n**{name}** in **{guild}**'
).format(name=member.display_name, guild=member.guild.name))
self.claimed.append(key)
await member.send(_('Here is your key for `{game}`: `{key}`').format(game=self.game_name, key=key))
await self.refresh_messages()
if len(self.keys) == 0:
async with self.cog.config.gifts() as gifts:
del gifts[self.invoke_id]
self.cog.gifts.remove(self)
else:
async with self.cog.config.gifts() as gifts:
gifts[self.invoke_id] = self.to_dict()[1]
async def refresh_messages(self):
"""Edits all existing messages to match the current state of the gift."""
embed = self.gen_embed()
await asyncio.gather(*(message.edit(embed=embed) for message in self.messages))
@cog_i18n(_)
class GiftAway(commands.Cog):
"""Create grabbable key giveaways."""
def __init__(self, bot):
self.bot = bot
self.log = logging.getLogger('red.flamecogs.giftaway')
self.config = Config.get_conf(self, identifier=145519400223506432)
self.config.register_global(
gifts = {}
)
self.config.register_guild(
giftChannel = None
)
self.gifts = []
self.access_token = None
self.token_expire = 0
asyncio.create_task(self.setup())
async def setup(self):
await self.bot.wait_until_red_ready()
to_del = []
async with self.config.gifts() as data:
for invoke_id in data:
try:
gift = await Gift.from_dict(self, invoke_id, data[invoke_id])
except GiftError as e:
self.log.warning(
f'Game {data[invoke_id]["game_name"]} by {data[invoke_id]["author"]} '
'could not be created and has been deleted.'
)
to_del.append(invoke_id)
continue
self.gifts.append(gift)
for invoke_id in to_del:
del data[invoke_id]
@commands.command(aliases=['ga'])
async def giftaway(self, ctx, guild: GuildConvert, game_name, *keys):
"""
Giftaway a key to a specific server.
Wrap any parameters that require spaces in quotes.
"""
try:
await ctx.message.delete()
except:
pass
cid = await self.config.guild(guild).giftChannel()
if not cid:
return await ctx.send(_('That guild has not set up a giftaway channel.'))
channel = guild.get_channel(cid)
if not channel:
return await ctx.send(_('That giftaway channel for that guild does not exist.'))
if not guild.me.permissions_in(channel).embed_links:
return await ctx.send(_('I do not have permission to send embeds in the giftaway channel.'))
try:
gift = await Gift.create(self, ctx, [channel], game_name, list(keys))
except GiftError as e:
return await ctx.send(e)
self.gifts.append(gift)
key, value = gift.to_dict()
async with self.config.gifts() as gifts:
gifts[key] = value
await ctx.tick()
@commands.command(aliases=['gg'])
async def globalgift(self, ctx, game_name, *keys):
"""
Giftaway a key to all servers.
Wrap any parameters that require spaces in quotes.
"""
try:
await ctx.message.delete()
except:
pass
guilds = []
for guild in self.bot.guilds:
cid = await self.config.guild(guild).giftChannel()
if not cid:
continue
channel = guild.get_channel(cid)
if not channel:
continue
if not guild.me.permissions_in(channel).embed_links:
continue
guilds.append(channel)
try:
gift = await Gift.create(self, ctx, guilds, game_name, list(keys))
except GiftError as e:
return await ctx.send(e)
self.gifts.append(gift)
key, value = gift.to_dict()
async with self.config.gifts() as gifts:
gifts[key] = value
await ctx.tick()
@commands.guild_only()
@commands.command()
async def giftat(self, ctx, channel: discord.TextChannel, game_name, *keys):
"""
Giftaway a key to a specific channel.
You probably should run this command from a location people can't see to protect the keys.
Wrap any parameters that require spaces in quotes.
"""
try:
await ctx.message.delete()
except:
pass
if not ctx.guild.me.permissions_in(channel).embed_links:
return await ctx.send(_('I do not have permission to send embeds in the giftaway channel.'))
try:
gift = await Gift.create(self, ctx, [channel], game_name, list(keys))
except GiftError as e:
return await ctx.send(e)
self.gifts.append(gift)
key, value = gift.to_dict()
async with self.config.gifts() as gifts:
gifts[key] = value
await ctx.tick()
@commands.guild_only()
@commands.group()
async def giftawayset(self, ctx):
"""Group command for giftaway."""
pass
@giftawayset.group(invoke_without_command=True)
async def channel(self, ctx, channel: discord.TextChannel=None):
"""Set the channel that giftaway messages will be sent to in this server."""
if channel is None:
cid = await self.config.guild(ctx.guild).giftChannel()
if cid is None:
return await ctx.send(_('The giftaway channel has not been set up.'))
channel = ctx.guild.get_channel(cid)
if channel is None:
await self.config.guild(ctx.guild).giftChannel.set(None)
return await ctx.send(_('The giftaway channel has been deleted or could not be found.'))
await ctx.send(_('The current giftaway channel is {channel}.').format(channel=channel.mention))
else:
await self.config.guild(ctx.guild).giftChannel.set(channel.id)
await ctx.send(_('The giftaway channel is now {channel}.').format(channel=channel.mention))
@channel.command()
async def remove(self, ctx):
"""Remove the giftaway channel from this server and stop receiving giftaway messages."""
await self.config.guild(ctx.guild).giftChannel.set(None)
await ctx.send(_('Removed.'))
async def _get_api_keys(self):
"""Gets an up to date client id and access token."""
api = await self.bot.get_shared_api_tokens('igdb')
old_key = api.get('key')
if old_key:
await self.bot.remove_shared_api_tokens('igdb', 'key')
await self.bot.send_to_owners(
'Hi! You had previously set an api key for `GiftAway` to allow the cog to display info about the game being given away. '
'However, that API has recently changed and you need to create a new API key to continue using this functionality. '
'Please follow the instructions at <https://github.com/Flame442/FlameCogs/blob/master/giftaway/setup.md> '
'if you want to re-enable that functionality, and have a good day! (This message will NOT be sent again)'
)
return (None, None)
client_id = api.get('id')
secret = api.get('secret')
if not (client_id and secret):
return (None, None)
if self.access_token and time.time() < self.token_expire:
return (client_id, self.access_token)
async with aiohttp.ClientSession() as session:
async with session.post(
f'https://id.twitch.tv/oauth2/token?client_id={client_id}&client_secret={secret}&grant_type=client_credentials'
) as response:
resp = await response.json(content_type=None)
if msg := resp.get('message'):
self.log.warning(f'Got an error response getting the access token: {msg}')
return (None, None)
self.access_token = resp['access_token']
self.token_expire = time.time() + resp['expires_in'] - 5
return (client_id, self.access_token)
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
if payload.member is None:
return
member = payload.member
if member.bot:
return
if await self.bot.cog_disabled_in_guild(self, member.guild):
return
if str(payload.emoji) != '\N{WHITE HEAVY CHECK MARK}':
return
gift = None
for g in self.gifts:
if payload.message_id in [x.id for x in g.messages]:
gift = g
break
if not gift:
return
if not gift.keys:
return
if member.id in gift.claimed_by_id:
return
await gift.give_key(member)
|
the-stack_0_22304 | from muti_creep import *
from champion_genetic_algorithm import GA
from neural_network import MLP
import pygame
import numpy as np
from sys import exit
from pygame.locals import *
import time
pygame.init()
width = 1280
height = 720
screen = pygame.display.set_mode((width ,height), 0, 32)
Layers = (9, 15,8, 5)
Parameter = 323
CROSS_RATE = 0.3
MUTATE_RATE = 0.3
POP_SIZE = 500
N_GENERATIONS = 300
clock = pygame.time.Clock()
show_sensors = True
draw_screen = True
start=(99,621)
font = pygame.font.SysFont("arial", 32)
font_2 = pygame.font.SysFont("arial", 16)
creep_ga=[]
generation=0
distance_limit=100000
# pop=np.array(False)
# train_historys = np.zeros(2)
# np.save("data/train_historys_map7_2(9, 15,8, 3).npy", train_historys)
pop = np.load("data/parameter_EXB_train_2.npy")
# pop=np.array(False)
ga = GA(DNA_size=Parameter, cross_rate=CROSS_RATE, mutation_rate=MUTATE_RATE, pop_size=POP_SIZE,pop=pop)
world = World()
for creep_no in range(POP_SIZE):
creep = CREEP(world, creep_image, [start[0], start[1]], speed=1, direction=90+np.random.rand())
world.add_entity(creep)
creep_ga.append([])
while True:
clock.tick()
for idx, individual in enumerate(ga.pop):
creep_ga[idx] = MLP(individual, Layers)
while world.all_not_crashed :
for event in pygame.event.get():
if event.type == QUIT:
exit()
for idx, reading in enumerate(world.get_reading()):
creep_ga[idx].forward(reading)
action=np.vstack([np.argmax(creep_ga_one.p) for creep_ga_one in creep_ga])
# action = np.random.randint(0, 3, (POP_SIZE))
# print(world.get_distance().max())
text="max distance:"+str(world.get_distance().max())
text_2="Number of survivors:"+str(POP_SIZE-world.crash_num)
world.process(action)
world.render(screen)
screen.blit(font.render(text, True, (255, 0, 0)), (0, 0))
screen.blit(font_2.render(text_2, True, (255, 0, 0)), (0, 32))
# if world.get_distance().max()>distance_limit:
# distance_limit=world.get_distance().max*1.5
# break
pygame.display.update()
if world.all_not_crashed!=True:
generation += 1
print("generation",generation,text,"mean distance:",world.get_distance().mean())
distances=world.get_distance()
ga.evolve(distances)
world = World()
# train_historys = np.load("data/train_historys_map7_2(9, 15,8, 3).npy")
# train_history = np.hstack((distances.mean(),distances.max()))
# train_historys = np.vstack((train_historys, train_history))
# np.save("data/train_historys_map7_2(9, 15,8, 3).npy", train_historys)
# np.save("data/parameter_EXB_train_2.npy", ga.pop)
for creep_no in range(POP_SIZE):
creep = CREEP(world, creep_image, [start[0], start[1]], speed=2, direction=90+np.random.rand())
world.add_entity(creep)
|
the-stack_0_22306 | import os
from fire._version import __version__
from fire.utils import setRandomSeed, printDash
def initFire(cfg):
if cfg["cfg_verbose"]:
printDash()
print(cfg)
printDash()
print("[INFO] Fire verison: "+__version__)
os.environ["CUDA_VISIBLE_DEVICES"] = cfg['GPU_ID']
setRandomSeed(cfg['random_seed'])
if not os.path.exists(cfg['save_dir']):
os.makedirs(cfg['save_dir']) |
the-stack_0_22307 | __version__ = "0.1.0"
__version_info__ = tuple(
[
int(num) if num.isdigit() else num
for num in __version__.replace("-", ".", 1).split(".")
]
)
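# Added note: for __version__ = "0.1.0" the expression above yields
# __version_info__ == (0, 1, 0); a pre-release string such as "0.1.0-rc.1"
# would give (0, 1, 0, 'rc', 1), keeping the non-numeric part as a string.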
from miseq_portal.taskapp.celery import app as celery_app
__all__ = ['celery_app']
|
the-stack_0_22310 | # pyright: strict
from typing import Tuple, List, Union
import re
from .strip import strip_comments, strip_macros
def get_unscoped_blobs(filetext: str):
filetext = strip_comments(filetext)
filetext = strip_macros(filetext)
blobs: List[str] = []
bracket_count: int = 0
blob: str = ''
for c in filetext:
blob += c
if c == '{':
bracket_count += 1
elif c == '}':
bracket_count -= 1
if not bracket_count and c in ['}', ';']:
if re.search(r'^\s*(struct|typedef) ', blob, re.MULTILINE) and c != ';':
continue
blob = blob.strip()
blobs.append(blob)
blob = ''
return blobs
def get_func_name(funcdef: str) -> str:
match = re.search(r'(\S)*(?=\()', funcdef)
return '' if match is None else match.group()
def get_funcdefs(filetext: str) -> List[str]:
return [blob for blob in get_unscoped_blobs(filetext) if blob[-1] == '}']
def get_funcdecs(filetext: str) -> List[str]:
return [blob for blob in get_unscoped_blobs(filetext) if blob[-2:] == ');']
def sub_forwarded_func(tem_func: str) -> Union[Tuple[str, str], None]:
def get_modifier_regex(x: str) -> str:
return r'\b{}(?=[^\w])'.format(re.escape(x))
if re.search(get_modifier_regex('P'), tem_func):
val_func: str = tem_func
val_func = re.sub(get_modifier_regex('A'), '', val_func)
val_func = re.sub(get_modifier_regex('P'), '', val_func)
val_func = re.sub('_R', '', val_func)
ref_func: str = tem_func
ref_func = re.sub(get_modifier_regex('A'), '&', ref_func)
ref_func = re.sub(get_modifier_regex('P'), '*', ref_func)
ref_func = re.sub('_R', '_r', ref_func)
return (val_func, ref_func)
return None
def sub_forwarded_funcdefs(filetext: str) -> str:
for tem_funcdef in get_funcdefs(filetext):
forwarded_funcdefs = sub_forwarded_func(tem_funcdef)
if forwarded_funcdefs:
filetext = re.sub(re.escape(tem_funcdef), '\n\n'.join(forwarded_funcdefs), filetext)
return filetext
def sub_forwarded_funcdecs(filetext: str) -> str:
for tem_func_dec in get_funcdecs(filetext):
forwarded_funcdecs = sub_forwarded_func(tem_func_dec)
if forwarded_funcdecs:
filetext = re.sub(re.escape(tem_func_dec), '\n\n'.join(forwarded_funcdecs), filetext)
return filetext
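# Illustrative demo (an addition; the declaration below is invented) of what
# sub_forwarded_func() produces for a template that uses the P / A / _R markers
# handled above: the value variant drops the markers, the reference variant
# rewrites A -> &, P -> * and _R -> _r.
if __name__ == '__main__':  # pragma: no cover
    demo = 'void vec_push_R(Vec A self, int P value);'
    val_variant, ref_variant = sub_forwarded_func(demo)
    print(val_variant)  # e.g. 'void vec_push(Vec  self, int  value);'
    print(ref_variant)  # e.g. 'void vec_push_r(Vec & self, int * value);'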
|
the-stack_0_22311 | from autodesk.application.autodeskservice import AutoDeskService
from autodesk.application.deskservice import DeskService
from autodesk.application.sessionservice import SessionService
from autodesk.application.timeservice import TimeService
from autodesk.deskcontroller import DeskController
from autodesk.lightcontroller import LightController
from autodesk.model import Model
from autodesk.operation import Operation
from autodesk.scheduler import Scheduler
from autodesk.sqlitedatastore import SqliteDataStore
from autodesk.timer import Timer
class AutoDeskServiceFactory:
def __init__(self, database_path, pin_factory, limits, delay, motor_pins,
light_pins):
self.database_path = database_path
self.pin_factory = pin_factory
self.limits = limits
self.delay = delay
self.motor_pins = motor_pins
self.light_pins = light_pins
def create(self, loop):
operation = Operation()
timer = Timer(loop)
model = Model(SqliteDataStore(self.database_path))
scheduler = Scheduler(self.limits)
desk_controller = DeskController(
self.delay,
self.pin_factory.create_output(self.motor_pins[0]),
self.pin_factory.create_output(self.motor_pins[1]),
self.pin_factory.create_output(self.light_pins[0]))
light_controller = LightController(
self.pin_factory.create_output(self.light_pins[1]))
timer_service = TimeService()
session_service = SessionService(
model, light_controller, timer_service)
desk_service = DeskService(
operation, model, desk_controller, timer_service)
return AutoDeskService(
operation, scheduler, timer, timer_service, session_service,
desk_service)
|
the-stack_0_22312 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from multiprocessing import Pool
import os
import torch
from fairseq.binarizer import safe_readline
from fairseq.tokenizer import tokenize_line
from fairseq.data import data_utils
class Dictionary(object):
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
pad='<pad>',
eos='</s>',
unk='<unk>',
bos='<s>',
extra_special_symbols=None,
):
self.unk_word, self.pad_word, self.eos_word = unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def __eq__(self, other):
return self.indices == other.indices
def __getitem__(self, idx):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__(self):
"""Returns the number of symbols in the dictionary"""
return len(self.symbols)
def __contains__(self, sym):
return sym in self.indices
def index(self, sym):
"""Returns the index of the specified symbol"""
assert isinstance(sym, str)
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def string(self, tensor, bpe_symbol=None, escape_unk=False):
"""Helper for converting a tensor of token indices to a string.
Can optionally remove BPE symbols or escape <unk> words.
"""
if torch.is_tensor(tensor) and tensor.dim() == 2:
return '\n'.join(self.string(t, bpe_symbol, escape_unk) for t in tensor)
def token_string(i):
if i == self.unk():
return self.unk_string(escape_unk)
else:
return self[i]
if hasattr(self, 'bos_index'):
sent = ' '.join(token_string(i) for i in tensor if (i != self.eos()) and (i != self.bos()))
else:
sent = ' '.join(token_string(i) for i in tensor if i != self.eos())
return data_utils.process_bpe_symbol(sent, bpe_symbol)
def unk_string(self, escape=False):
"""Return unknown string, optionally escaped as: <<unk>>"""
if escape:
return '<{}>'.format(self.unk_word)
else:
return self.unk_word
def add_symbol(self, word, n=1):
"""Adds a word to the dictionary"""
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def update(self, new_dict):
"""Updates counts from new dictionary."""
for word in new_dict.symbols:
idx2 = new_dict.indices[word]
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + new_dict.count[idx2]
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(new_dict.count[idx2])
def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
"""Sort symbols by frequency in descending order, ignoring special ones.
Args:
- threshold defines the minimum word count
- nwords defines the total number of words in the final dictionary,
including special symbols
- padding_factor can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
if nwords <= 0:
nwords = len(self)
new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))
new_symbols = self.symbols[:self.nspecial]
new_count = self.count[:self.nspecial]
c = Counter(dict(sorted(zip(self.symbols[self.nspecial:], self.count[self.nspecial:]))))
for symbol, count in c.most_common(nwords - self.nspecial):
if count >= threshold:
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(count)
else:
break
threshold_nwords = len(new_symbols)
if padding_factor > 1:
i = 0
while threshold_nwords % padding_factor != 0:
symbol = 'madeupword{:04d}'.format(i)
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(0)
i += 1
threshold_nwords += 1
assert len(new_symbols) % padding_factor == 0
assert len(new_symbols) == len(new_indices)
self.count = list(new_count)
self.symbols = list(new_symbols)
self.indices = new_indices
def bos(self):
"""Helper to get index of beginning-of-sentence symbol"""
return self.bos_index
def pad(self):
"""Helper to get index of pad symbol"""
return self.pad_index
def eos(self):
"""Helper to get index of end-of-sentence symbol"""
return self.eos_index
def unk(self):
"""Helper to get index of unk symbol"""
return self.unk_index
@classmethod
def load(cls, f, ignore_utf_errors=False):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
d = cls()
d.add_from_file(f, ignore_utf_errors)
return d
def add_from_file(self, f, ignore_utf_errors=False):
"""
Loads a pre-existing dictionary from a text file and adds its symbols
to this instance.
"""
if isinstance(f, str):
try:
if not ignore_utf_errors:
with open(f, 'r', encoding='utf-8') as fd:
self.add_from_file(fd)
else:
with open(f, 'r', encoding='utf-8', errors='ignore') as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f))
return
lines = f.readlines()
indices_start_line = self._load_meta(lines)
for line in lines[indices_start_line:]:
idx = line.rfind(' ')
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
word = line[:idx]
count = int(line[idx + 1:])
self.indices[word] = len(self.symbols)
self.symbols.append(word)
self.count.append(count)
def _save(self, f, kv_iterator):
if isinstance(f, str):
os.makedirs(os.path.dirname(f), exist_ok=True)
with open(f, 'w', encoding='utf-8') as fd:
return self.save(fd)
for k, v in kv_iterator:
print('{} {}'.format(k, v), file=f)
def _get_meta(self):
return [], []
def _load_meta(self, lines):
return 0
def save(self, f):
"""Stores dictionary into a text file"""
ex_keys, ex_vals = self._get_meta()
self._save(f, zip(ex_keys + self.symbols[self.nspecial:], ex_vals + self.count[self.nspecial:]))
def dummy_sentence(self, length):
t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
t[-1] = self.eos()
return t
def encode_line(self, line, line_tokenizer=tokenize_line, add_if_not_exist=True,
consumer=None, append_eos=True, reverse_order=False):
words = line_tokenizer(line)
if reverse_order:
words = list(reversed(words))
nwords = len(words)
ids = torch.IntTensor(nwords + 1 if append_eos else nwords)
for i, word in enumerate(words):
if add_if_not_exist:
idx = self.add_symbol(word)
else:
idx = self.index(word)
if consumer is not None:
consumer(word, idx)
ids[i] = idx
if append_eos:
ids[nwords] = self.eos_index
return ids
@staticmethod
def _add_file_to_dictionary_single_worker(filename, tokenize, eos_word, worker_id=0, num_workers=1):
counter = Counter()
with open(filename, 'r', encoding='utf-8') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
line = f.readline()
while line:
for word in tokenize(line):
counter.update([word])
counter.update([eos_word])
if f.tell() > end:
break
line = f.readline()
return counter
@staticmethod
def add_file_to_dictionary(filename, dict, tokenize, num_workers):
def merge_result(counter):
for w, c in sorted(counter.items()):
dict.add_symbol(w, c)
if num_workers > 1:
pool = Pool(processes=num_workers)
results = []
for worker_id in range(num_workers):
results.append(pool.apply_async(
Dictionary._add_file_to_dictionary_single_worker,
(filename, tokenize, dict.eos_word, worker_id, num_workers)
))
pool.close()
pool.join()
for r in results:
merge_result(r.get())
else:
merge_result(Dictionary._add_file_to_dictionary_single_worker(filename, tokenize, dict.eos_word))
class TruncatedDictionary(object):
def __init__(self, wrapped_dict, length):
self.__class__ = type(
wrapped_dict.__class__.__name__,
(self.__class__, wrapped_dict.__class__),
{}
)
self.__dict__ = wrapped_dict.__dict__
self.wrapped_dict = wrapped_dict
self.length = min(len(self.wrapped_dict), length)
def __len__(self):
return self.length
def __getitem__(self, i):
if i < self.length:
return self.wrapped_dict[i]
return self.wrapped_dict.unk()
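# Illustrative usage sketch (not part of the original module). The path
# 'dict.txt' and the sample sentence are hypothetical; the file is expected to
# follow the "<symbol> <count>" layout documented in Dictionary.load above.
if __name__ == '__main__':
    d = Dictionary.load('dict.txt')
    # Map a sentence to token ids; unknown words fall back to the unk index.
    ids = d.encode_line('hello world', add_if_not_exist=False)
    print(len(d), d.pad(), d.eos(), d.unk())
    print(ids)
    # Cap the visible vocabulary (e.g. for a smaller output layer) without
    # copying the underlying dictionary.
    small = TruncatedDictionary(d, length=1000)
    print(len(small))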
|
the-stack_0_22313 | import copy
import logging
import os
import sys
from panda3d.bullet import BulletBodyNode
from metadrive.constants import TerminationState
def import_pygame():
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
return pygame
def setup_logger(debug=False):
logging.basicConfig(
level=logging.DEBUG if debug else logging.WARNING,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'
)
def recursive_equal(data1, data2, need_assert=False):
from metadrive.utils.config import Config
if isinstance(data1, Config):
data1 = data1.get_dict()
if isinstance(data2, Config):
data2 = data2.get_dict()
if isinstance(data1, dict):
is_ins = isinstance(data2, dict)
key_right = set(data1.keys()) == set(data2.keys())
if need_assert:
assert is_ins and key_right, (data1.keys(), data2.keys())
if not (is_ins and key_right):
return False
ret = []
for k in data1:
ret.append(recursive_equal(data1[k], data2[k]))
return all(ret)
elif isinstance(data1, list):
len_right = len(data1) == len(data2)
is_ins = isinstance(data2, list)
if need_assert:
assert len_right and is_ins, (len(data1), len(data2), data1, data2)
if not (is_ins and len_right):
return False
ret = []
for i in range(len(data1)):
ret.append(recursive_equal(data1[i], data2[i]))
return all(ret)
else:
ret = data1 == data2
if need_assert:
assert ret, (type(data1), type(data2), data1, data2)
return ret
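def _recursive_equal_example():
    """Illustrative only (not part of the original module): nested dicts and
    lists are compared element-wise; need_assert=True raises on mismatch
    instead of returning False."""
    assert recursive_equal({'a': [1, 2]}, {'a': [1, 2]})
    assert not recursive_equal({'a': [1, 2]}, {'a': [1, 3]})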
def is_mac():
return sys.platform == "darwin"
def is_win():
return sys.platform == "win32"
def concat_step_infos(step_info_list):
"""We only conduct simply shallow update here!"""
old_dict = dict()
for new_dict in step_info_list:
old_dict = merge_dicts(old_dict, new_dict, allow_new_keys=True, without_copy=True)
return old_dict
# The following two functions are copied from ray/tune/utils/util.py; raise_error and pgconfig support was added by us!
def merge_dicts(old_dict, new_dict, allow_new_keys=False, without_copy=False):
"""
Args:
old_dict (dict, Config): Dict 1.
new_dict (dict, Config): Dict 2.
raise_error (bool): Whether to raise error if new key is found.
Returns:
dict: A new dict that is d1 and d2 deep merged.
"""
old_dict = old_dict or dict()
new_dict = new_dict or dict()
if without_copy:
merged = old_dict
else:
merged = copy.deepcopy(old_dict)
_deep_update(
merged, new_dict, new_keys_allowed=allow_new_keys, allow_new_subkey_list=[], raise_error=not allow_new_keys
)
return merged
def _deep_update(
original,
new_dict,
new_keys_allowed=False,
allow_new_subkey_list=None,
override_all_if_type_changes=None,
raise_error=True
):
allow_new_subkey_list = allow_new_subkey_list or []
override_all_if_type_changes = override_all_if_type_changes or []
for k, value in new_dict.items():
if k not in original and not new_keys_allowed:
if raise_error:
raise Exception("Unknown config parameter `{}` ".format(k))
else:
continue
        # Both the original value and the new one are dicts.
if isinstance(original.get(k), dict) and isinstance(value, dict):
            # Check old type vs new one. If different, override entire value.
if k in override_all_if_type_changes and \
"type" in value and "type" in original[k] and \
value["type"] != original[k]["type"]:
original[k] = value
# Allowed key -> ok to add new subkeys.
elif k in allow_new_subkey_list:
_deep_update(original[k], value, True, raise_error=raise_error)
# Non-allowed key.
else:
_deep_update(original[k], value, new_keys_allowed, raise_error=raise_error)
# Original value not a dict OR new value not a dict:
# Override entire value.
else:
original[k] = value
return original
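# Illustrative sketch (not part of the original module); the config keys below
# are arbitrary placeholders. merge_dicts deep-merges nested dicts and, with
# the default allow_new_keys=False, rejects keys missing from old_dict.
def _merge_dicts_example():
    base = {'env': {'map': 'S', 'horizon': 1000}}
    merged = merge_dicts(base, {'env': {'map': 'C'}})
    assert merged == {'env': {'map': 'C', 'horizon': 1000}}
    assert base['env']['map'] == 'S'  # old_dict is deep-copied, not mutated
    try:
        merge_dicts(base, {'unknown_key': 1})
    except Exception:
        pass  # unknown top-level keys raise unless allow_new_keys=True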
def deprecation_warning(old, new, error=False) -> None:
"""Warns (via the `logger` object) or throws a deprecation warning/error.
Args:
old (str): A description of the "thing" that is to be deprecated.
new (Optional[str]): A description of the new "thing" that replaces it.
error (Optional[Union[bool, Exception]]): Whether or which exception to
throw. If True, throw ValueError. If False, just warn.
If Exception, throw that Exception.
"""
msg = "`{}` has been deprecated.{}".format(old, (" Use `{}` instead.".format(new) if new else ""))
if error is True:
raise ValueError(msg)
elif error and issubclass(error, Exception):
raise error(msg)
else:
logger = logging.getLogger(__name__)
logger.warning("DeprecationWarning: " + msg + " This will raise an error in the future!")
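# Illustrative only (hypothetical API names): deprecation_warning("foo()", "bar()")
# logs "`foo()` has been deprecated. Use `bar()` instead." via the module logger,
# while deprecation_warning("foo()", None, error=True) raises ValueError instead.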
def get_object_from_node(node: BulletBodyNode):
"""
    Use this API to get the Python object from a Bullet RayCast/SweepTest/CollisionCallback result
"""
if node.getPythonTag(node.getName()) is None:
return None
from metadrive.engine.engine_utils import get_object
ret = node.getPythonTag(node.getName()).base_object_name
if isinstance(ret, str):
return get_object(ret)[ret]
else:
return ret
def auto_termination(vehicle, should_done):
return {TerminationState.MAX_STEP: True if should_done else False}
|
the-stack_0_22315 | from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.six.moves.urllib.parse import quote_plus
def custom_method(name, http_verb, http_path=None):
if http_verb not in ["get", "post", "delete"]:
raise ValueError(
"Invalid http_verb: %s. Must be one of 'get', 'post' or 'delete'"
% http_verb
)
if http_path is None:
http_path = name
def wrapper(cls):
def custom_method_request(cls, sid, **params):
url = "%s/%s/%s" % (
cls.class_url(),
quote_plus(util.utf8(sid)),
http_path,
)
return cls._static_request(http_verb, url, **params)
existing_method = getattr(cls, name, None)
if existing_method is None:
setattr(cls, name, classmethod(custom_method_request))
else:
# If a method with the same name we want to use already exists on
# the class, we assume it's an instance method. In this case, the
# new class method is prefixed with `_cls_`, and the original
# instance method is decorated with `util.class_method_variant` so
# that the new class method is called when the original method is
# called as a class method.
setattr(cls, "_cls_" + name, classmethod(custom_method_request))
instance_method = util.class_method_variant("_cls_" + name)(
existing_method
)
setattr(cls, name, instance_method)
return cls
return wrapper
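# Illustrative usage sketch (not part of this module). The resource class and
# its OBJECT_NAME below are hypothetical and only demonstrate how the decorator
# attaches a custom endpoint as a class method on an API resource.
if __name__ == "__main__":
    from stripe.api_resources.abstract import APIResource

    @custom_method("capture", http_verb="post")
    class DemoResource(APIResource):
        OBJECT_NAME = "demo"

    # DemoResource.capture("obj_123", amount=100) would issue
    # POST <class_url>/obj_123/capture through cls._static_request(...).
    print(DemoResource.class_url())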
|
the-stack_0_22317 | """Treadmill AWS image CLI
Create, delete and manage configurations of AWS images.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import gzip
import io
import logging
import click
from treadmill import cli
from treadmill import dnsutils
from treadmill import restclient
from treadmill_aws import cli as aws_cli
_LOGGER = logging.getLogger(__name__)
_EXCEPTIONS = []
_EXCEPTIONS.extend(restclient.CLI_REST_EXCEPTIONS)
_ON_EXCEPTIONS = cli.handle_exceptions(_EXCEPTIONS)
_REST_PATH = '/aws-image/'
def init(): # pylint: disable=R0912
"""Configures application monitor"""
formatter = cli.make_formatter('aws_image')
ctx = {}
@click.group()
@click.option('--api', help='API url to use.',
metavar='URL',
type=cli.LIST,
envvar='TREADMILL_AWSIMAGE_API')
@click.option('--srvrec', help='API srv record.',
envvar='TREADMILL_AWSIMAGE_API_SRVREC')
def image_group(api, srvrec):
"""Manage Treadmill app monitor configuration"""
ctx['api'] = api
if not ctx['api']:
ctx['api'] = []
if srvrec:
result = dnsutils.srv(srvrec, None)
for host, port, _p, _w in result:
ctx['api'].append('http://{}:{}'.format(host, port))
@image_group.command()
@click.argument('name')
@_ON_EXCEPTIONS
def configure(name):
"""Configure AWS image."""
restapi = ctx['api']
url = _REST_PATH + name
image_entry = restclient.get(restapi, url)
cli.out(formatter(image_entry.json()))
@image_group.command(name='list')
@_ON_EXCEPTIONS
def _list():
"""List AWS images."""
restapi = ctx['api']
url = _REST_PATH
response = restclient.get(ctx['api'], url)
cli.out(formatter(response.json()))
@image_group.command()
@click.argument('name', nargs=1, required=True)
@_ON_EXCEPTIONS
def delete(name):
"""Delete AWS image"""
restapi = ctx['api']
url = _REST_PATH + name
restclient.delete(restapi, url)
@image_group.command(name='create')
@click.option(
'--base-image',
required=True,
type=aws_cli.IMAGE,
help='Base image.'
)
@click.option(
'--base-image-account',
required=False,
help='Base image account.'
)
@click.option(
'--userdata',
required=True,
type=click.Path(exists=True),
multiple=True,
help='Cloud-init user data.'
)
@click.option(
'--instance-profile',
required=False,
help='Instance profile with create image privs.'
)
@click.option(
'--secgroup',
required=False,
type=aws_cli.SECGROUP,
help='Security group'
)
@click.option(
'--subnet',
required=False,
type=aws_cli.SUBNET,
help='Subnet'
)
@click.option(
'--key',
help='SSH key'
)
@click.argument('name', required=True, type=str)
@_ON_EXCEPTIONS
def create(base_image, base_image_account, userdata, instance_profile,
secgroup, subnet, key, name):
"""Create image"""
restapi = ctx['api']
payload = {
'base_image': base_image,
'base_image_account': base_image_account,
'instance_profile': instance_profile,
'userdata': [],
'secgroup': secgroup,
'subnet': subnet,
'key': key,
}
for filename in userdata:
with io.open(filename, 'rb') as f:
content = f.read()
if filename.endswith('.gz'):
content = gzip.decompress(content)
payload['userdata'].append(content.decode())
url = _REST_PATH + name
response = restclient.post(restapi, url, payload=payload)
cli.out(response.json().get('instance', '-'))
del delete
del _list
del configure
del create
return image_group
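# Illustrative invocations (hypothetical mount point, image and file names),
# assuming the group returned by init() is wired in as `aws-image` under the
# Treadmill admin CLI:
#
#   treadmill admin aws-image --api http://api.example.com:8080 list
#   treadmill admin aws-image configure my-image-20190101
#   treadmill admin aws-image create my-new-image \
#       --base-image rhel-7-base \
#       --userdata bootstrap.yml \
#       --key my-ssh-key
#   treadmill admin aws-image delete my-image-20190101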
|